aboutsummaryrefslogtreecommitdiffstats
path: root/drivers
diff options
context:
space:
mode:
Diffstat (limited to 'drivers')
-rw-r--r--drivers/acpi/Kconfig12
-rw-r--r--drivers/acpi/Makefile2
-rw-r--r--drivers/acpi/acpi_pad.c514
-rw-r--r--drivers/acpi/bus.c49
-rw-r--r--drivers/acpi/dock.c16
-rw-r--r--drivers/acpi/ec.c56
-rw-r--r--drivers/acpi/osl.c8
-rw-r--r--drivers/acpi/proc.c2
-rw-r--r--drivers/acpi/processor_core.c7
-rw-r--r--drivers/acpi/processor_idle.c8
-rw-r--r--drivers/acpi/scan.c704
-rw-r--r--drivers/acpi/video.c6
-rw-r--r--drivers/atm/ambassador.c1
-rw-r--r--drivers/atm/fore200e.c4
-rw-r--r--drivers/atm/he.c2
-rw-r--r--drivers/atm/solos-pci.c3
-rw-r--r--drivers/block/DAC960.c156
-rw-r--r--drivers/block/cciss.c755
-rw-r--r--drivers/block/cciss.h12
-rw-r--r--drivers/block/cpqarray.c63
-rw-r--r--drivers/char/agp/agp.h2
-rw-r--r--drivers/char/agp/alpha-agp.c2
-rw-r--r--drivers/char/apm-emulation.c2
-rw-r--r--drivers/char/bfin-otp.c2
-rw-r--r--drivers/char/cyclades.c2
-rw-r--r--drivers/char/dtlk.c1
-rw-r--r--drivers/char/hvc_console.c6
-rw-r--r--drivers/char/hvc_console.h12
-rw-r--r--drivers/char/hvc_iucv.c4
-rw-r--r--drivers/char/hw_random/omap-rng.c4
-rw-r--r--drivers/char/ipmi/ipmi_devintf.c1
-rw-r--r--drivers/char/ipmi/ipmi_msghandler.c1
-rw-r--r--drivers/char/mem.c2
-rw-r--r--drivers/char/mspec.c2
-rw-r--r--drivers/char/pty.c47
-rw-r--r--drivers/char/serial167.c7
-rw-r--r--drivers/char/tty_io.c15
-rw-r--r--drivers/char/tty_ldisc.c7
-rw-r--r--drivers/char/vt_ioctl.c6
-rw-r--r--drivers/char/xilinx_hwicap/xilinx_hwicap.c2
-rw-r--r--drivers/firewire/core-cdev.c1
-rw-r--r--drivers/gpio/gpiolib.c2
-rw-r--r--drivers/gpu/drm/drm_crtc.c1
-rw-r--r--drivers/gpu/drm/drm_crtc_helper.c88
-rw-r--r--drivers/gpu/drm/drm_edid.c46
-rw-r--r--drivers/gpu/drm/drm_fb_helper.c235
-rw-r--r--drivers/gpu/drm/drm_modes.c3
-rw-r--r--drivers/gpu/drm/drm_vm.c8
-rw-r--r--drivers/gpu/drm/i915/intel_fb.c5
-rw-r--r--drivers/gpu/drm/radeon/.gitignore3
-rw-r--r--drivers/gpu/drm/radeon/avivod.h9
-rw-r--r--drivers/gpu/drm/radeon/r100.c197
-rw-r--r--drivers/gpu/drm/radeon/r100_track.h69
-rw-r--r--drivers/gpu/drm/radeon/r200.c79
-rw-r--r--drivers/gpu/drm/radeon/r300.c137
-rw-r--r--drivers/gpu/drm/radeon/r500_reg.h3
-rw-r--r--drivers/gpu/drm/radeon/r520.c276
-rw-r--r--drivers/gpu/drm/radeon/r520d.h187
-rw-r--r--drivers/gpu/drm/radeon/r600.c11
-rw-r--r--drivers/gpu/drm/radeon/r600_cs.c186
-rw-r--r--drivers/gpu/drm/radeon/radeon.h76
-rw-r--r--drivers/gpu/drm/radeon/radeon_asic.h80
-rw-r--r--drivers/gpu/drm/radeon/radeon_atombios.c9
-rw-r--r--drivers/gpu/drm/radeon/radeon_connectors.c79
-rw-r--r--drivers/gpu/drm/radeon/radeon_cs.c103
-rw-r--r--drivers/gpu/drm/radeon/radeon_device.c11
-rw-r--r--drivers/gpu/drm/radeon/radeon_drv.c5
-rw-r--r--drivers/gpu/drm/radeon/radeon_fb.c26
-rw-r--r--drivers/gpu/drm/radeon/radeon_kms.c49
-rw-r--r--drivers/gpu/drm/radeon/radeon_reg.h1
-rw-r--r--drivers/gpu/drm/radeon/radeon_ttm.c9
-rw-r--r--drivers/gpu/drm/radeon/rs600.c20
-rw-r--r--drivers/gpu/drm/radeon/rs690.c3
-rw-r--r--drivers/gpu/drm/radeon/rv515.c364
-rw-r--r--drivers/gpu/drm/radeon/rv515d.h385
-rw-r--r--drivers/gpu/drm/radeon/rv770.c11
-rw-r--r--drivers/gpu/drm/ttm/ttm_bo_vm.c2
-rw-r--r--drivers/hid/hidraw.c1
-rw-r--r--drivers/hwmon/fschmd.c2
-rw-r--r--drivers/hwmon/ltc4215.c47
-rw-r--r--drivers/hwmon/ltc4245.c131
-rw-r--r--drivers/i2c/busses/Kconfig4
-rw-r--r--drivers/i2c/busses/i2c-amd756.c2
-rw-r--r--drivers/i2c/busses/i2c-amd8111.c4
-rw-r--r--drivers/i2c/busses/i2c-i801.c4
-rw-r--r--drivers/i2c/busses/i2c-isch.c2
-rw-r--r--drivers/i2c/busses/i2c-piix4.c4
-rw-r--r--drivers/i2c/busses/i2c-scmi.c5
-rw-r--r--drivers/i2c/busses/i2c-sis96x.c2
-rw-r--r--drivers/i2c/busses/i2c-viapro.c2
-rw-r--r--drivers/ieee1394/dma.c2
-rw-r--r--drivers/ieee802154/fakehard.c59
-rw-r--r--drivers/infiniband/core/mad_rmpp.c17
-rw-r--r--drivers/infiniband/core/ucm.c1
-rw-r--r--drivers/infiniband/core/user_mad.c1
-rw-r--r--drivers/infiniband/core/uverbs_main.c1
-rw-r--r--drivers/infiniband/hw/ehca/ehca_uverbs.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_file_ops.c2
-rw-r--r--drivers/infiniband/hw/ipath/ipath_mmap.c2
-rw-r--r--drivers/infiniband/hw/mthca/mthca_catas.c11
-rw-r--r--drivers/infiniband/hw/nes/nes_nic.c13
-rw-r--r--drivers/infiniband/ulp/ipoib/ipoib_multicast.c7
-rw-r--r--drivers/input/evdev.c1
-rw-r--r--drivers/input/input.c3
-rw-r--r--drivers/input/joydev.c1
-rw-r--r--drivers/input/misc/uinput.c1
-rw-r--r--drivers/input/mousedev.c1
-rw-r--r--drivers/isdn/divert/divert_procfs.c1
-rw-r--r--drivers/isdn/gigaset/Kconfig25
-rw-r--r--drivers/isdn/gigaset/Makefile5
-rw-r--r--drivers/isdn/gigaset/asyncdata.c662
-rw-r--r--drivers/isdn/gigaset/bas-gigaset.c72
-rw-r--r--drivers/isdn/gigaset/capi.c2292
-rw-r--r--drivers/isdn/gigaset/common.c137
-rw-r--r--drivers/isdn/gigaset/dummyll.c68
-rw-r--r--drivers/isdn/gigaset/ev-layer.c578
-rw-r--r--drivers/isdn/gigaset/gigaset.h176
-rw-r--r--drivers/isdn/gigaset/i4l.c563
-rw-r--r--drivers/isdn/gigaset/interface.c39
-rw-r--r--drivers/isdn/gigaset/isocdata.c186
-rw-r--r--drivers/isdn/gigaset/proc.c2
-rw-r--r--drivers/isdn/gigaset/ser-gigaset.c56
-rw-r--r--drivers/isdn/gigaset/usb-gigaset.c69
-rw-r--r--drivers/isdn/hardware/mISDN/speedfax.c1
-rw-r--r--drivers/isdn/mISDN/socket.c5
-rw-r--r--drivers/leds/Kconfig13
-rw-r--r--drivers/leds/Makefile1
-rw-r--r--drivers/leds/leds-clevo-mail.c2
-rw-r--r--drivers/leds/leds-cobalt-qube.c2
-rw-r--r--drivers/leds/leds-cobalt-raq.c4
-rw-r--r--drivers/leds/leds-gpio.c2
-rw-r--r--drivers/leds/leds-pca9532.c15
-rw-r--r--drivers/leds/leds-wm831x-status.c341
-rw-r--r--drivers/leds/ledtrig-gpio.c32
-rw-r--r--drivers/lguest/lguest_user.c2
-rw-r--r--drivers/macintosh/therm_adt746x.c4
-rw-r--r--drivers/macintosh/therm_pm72.c4
-rw-r--r--drivers/macintosh/via-pmu-led.c2
-rw-r--r--drivers/macintosh/windfarm_lm75_sensor.c4
-rw-r--r--drivers/macintosh/windfarm_max6690_sensor.c4
-rw-r--r--drivers/macintosh/windfarm_smu_sat.c4
-rw-r--r--drivers/md/dm.c16
-rw-r--r--drivers/media/dvb/dvb-core/dmxdev.c3
-rw-r--r--drivers/media/dvb/dvb-core/dvb_demux.c1
-rw-r--r--drivers/media/dvb/firewire/firedtv-ci.c2
-rw-r--r--drivers/media/radio/radio-cadet.c1
-rw-r--r--drivers/media/video/cafe_ccic.c2
-rw-r--r--drivers/media/video/cpia.c1
-rw-r--r--drivers/media/video/et61x251/et61x251_core.c2
-rw-r--r--drivers/media/video/gspca/gspca.c2
-rw-r--r--drivers/media/video/meye.c2
-rw-r--r--drivers/media/video/sh_mobile_ceu_camera.c4
-rw-r--r--drivers/media/video/sn9c102/sn9c102_core.c2
-rw-r--r--drivers/media/video/stk-webcam.c2
-rw-r--r--drivers/media/video/uvc/uvc_v4l2.c2
-rw-r--r--drivers/media/video/videobuf-dma-contig.c2
-rw-r--r--drivers/media/video/videobuf-dma-sg.c2
-rw-r--r--drivers/media/video/videobuf-vmalloc.c2
-rw-r--r--drivers/media/video/vino.c2
-rw-r--r--drivers/media/video/zc0301/zc0301_core.c2
-rw-r--r--drivers/media/video/zoran/zoran_driver.c2
-rw-r--r--drivers/mfd/ab3100-core.c4
-rw-r--r--drivers/mfd/ucb1400_core.c1
-rw-r--r--drivers/misc/Kconfig1
-rw-r--r--drivers/misc/Makefile1
-rw-r--r--drivers/misc/eeprom/max6875.c29
-rw-r--r--drivers/misc/iwmc3200top/Kconfig20
-rw-r--r--drivers/misc/iwmc3200top/Makefile29
-rw-r--r--drivers/misc/iwmc3200top/debugfs.c133
-rw-r--r--drivers/misc/iwmc3200top/debugfs.h58
-rw-r--r--drivers/misc/iwmc3200top/fw-download.c359
-rw-r--r--drivers/misc/iwmc3200top/fw-msg.h113
-rw-r--r--drivers/misc/iwmc3200top/iwmc3200top.h206
-rw-r--r--drivers/misc/iwmc3200top/log.c347
-rw-r--r--drivers/misc/iwmc3200top/log.h158
-rw-r--r--drivers/misc/iwmc3200top/main.c677
-rw-r--r--drivers/misc/phantom.c2
-rw-r--r--drivers/misc/sgi-gru/grufile.c5
-rw-r--r--drivers/misc/sgi-gru/grutables.h2
-rw-r--r--drivers/mmc/core/debugfs.c2
-rw-r--r--drivers/mmc/core/sdio_cis.c65
-rw-r--r--drivers/mmc/host/Kconfig41
-rw-r--r--drivers/mmc/host/mmci.c111
-rw-r--r--drivers/mmc/host/mmci.h3
-rw-r--r--drivers/mmc/host/pxamci.c104
-rw-r--r--drivers/mmc/host/s3cmci.c608
-rw-r--r--drivers/mmc/host/s3cmci.h14
-rw-r--r--drivers/mtd/mtd_blkdevs.c19
-rw-r--r--drivers/net/3c59x.c3
-rw-r--r--drivers/net/8139cp.c8
-rw-r--r--drivers/net/8139too.c3
-rw-r--r--drivers/net/Kconfig2
-rw-r--r--drivers/net/arm/ks8695net.c131
-rw-r--r--drivers/net/arm/w90p910_ether.c4
-rw-r--r--drivers/net/atl1c/atl1c.h22
-rw-r--r--drivers/net/atl1c/atl1c_main.c86
-rw-r--r--drivers/net/atl1e/atl1e_main.c4
-rw-r--r--drivers/net/atlx/atl1.c11
-rw-r--r--drivers/net/atlx/atl2.c3
-rw-r--r--drivers/net/bcm63xx_enet.c5
-rw-r--r--drivers/net/benet/be.h16
-rw-r--r--drivers/net/benet/be_cmds.c96
-rw-r--r--drivers/net/benet/be_cmds.h82
-rw-r--r--drivers/net/benet/be_ethtool.c79
-rw-r--r--drivers/net/benet/be_main.c17
-rw-r--r--drivers/net/bnx2.c4
-rw-r--r--drivers/net/bnx2x.h74
-rw-r--r--drivers/net/bnx2x_hsi.h21
-rw-r--r--drivers/net/bnx2x_link.c317
-rw-r--r--drivers/net/bnx2x_link.h3
-rw-r--r--drivers/net/bnx2x_main.c1077
-rw-r--r--drivers/net/bnx2x_reg.h23
-rw-r--r--drivers/net/bonding/bond_3ad.c99
-rw-r--r--drivers/net/bonding/bond_alb.c3
-rw-r--r--drivers/net/bonding/bond_ipv6.c7
-rw-r--r--drivers/net/bonding/bond_main.c326
-rw-r--r--drivers/net/bonding/bond_sysfs.c92
-rw-r--r--drivers/net/bonding/bonding.h31
-rw-r--r--drivers/net/can/Kconfig32
-rw-r--r--drivers/net/can/Makefile3
-rw-r--r--drivers/net/can/at91_can.c34
-rw-r--r--drivers/net/can/dev.c76
-rw-r--r--drivers/net/can/mcp251x.c1164
-rw-r--r--drivers/net/can/mscan/Makefile5
-rw-r--r--drivers/net/can/mscan/mpc52xx_can.c279
-rw-r--r--drivers/net/can/mscan/mscan.c699
-rw-r--r--drivers/net/can/mscan/mscan.h262
-rw-r--r--drivers/net/can/sja1000/sja1000.c17
-rw-r--r--drivers/net/can/sja1000/sja1000.h2
-rw-r--r--drivers/net/can/ti_hecc.c993
-rw-r--r--drivers/net/can/usb/ems_usb.c20
-rw-r--r--drivers/net/cnic.c1820
-rw-r--r--drivers/net/cnic.h64
-rw-r--r--drivers/net/cnic_defs.h1917
-rw-r--r--drivers/net/cnic_if.h14
-rw-r--r--drivers/net/cpmac.c6
-rw-r--r--drivers/net/cris/eth_v10.c20
-rw-r--r--drivers/net/cxgb3/adapter.h16
-rw-r--r--drivers/net/cxgb3/common.h8
-rw-r--r--drivers/net/cxgb3/cxgb3_main.c47
-rw-r--r--drivers/net/cxgb3/sge.c31
-rw-r--r--drivers/net/davinci_emac.c56
-rw-r--r--drivers/net/dl2k.c18
-rw-r--r--drivers/net/dm9000.c143
-rw-r--r--drivers/net/dm9000.h7
-rw-r--r--drivers/net/e100.c25
-rw-r--r--drivers/net/e1000/e1000.h1
-rw-r--r--drivers/net/e1000/e1000_ethtool.c35
-rw-r--r--drivers/net/e1000/e1000_main.c70
-rw-r--r--drivers/net/e1000e/e1000.h1
-rw-r--r--drivers/net/e1000e/ethtool.c53
-rw-r--r--drivers/net/e1000e/netdev.c80
-rw-r--r--drivers/net/ehea/ehea_main.c9
-rw-r--r--drivers/net/enic/enic_main.c15
-rw-r--r--drivers/net/ethoc.c4
-rw-r--r--drivers/net/fsl_pq_mdio.c67
-rw-r--r--drivers/net/fsl_pq_mdio.h11
-rw-r--r--drivers/net/gianfar.c1826
-rw-r--r--drivers/net/gianfar.h412
-rw-r--r--drivers/net/gianfar_ethtool.c376
-rw-r--r--drivers/net/gianfar_sysfs.c77
-rw-r--r--drivers/net/hamachi.c12
-rw-r--r--drivers/net/hamradio/6pack.c21
-rw-r--r--drivers/net/hamradio/baycom_epp.c6
-rw-r--r--drivers/net/hamradio/mkiss.c21
-rw-r--r--drivers/net/ibm_newemac/core.c10
-rw-r--r--drivers/net/ifb.c6
-rw-r--r--drivers/net/igb/e1000_82575.c306
-rw-r--r--drivers/net/igb/e1000_82575.h26
-rw-r--r--drivers/net/igb/e1000_defines.h33
-rw-r--r--drivers/net/igb/e1000_hw.h8
-rw-r--r--drivers/net/igb/e1000_mac.c100
-rw-r--r--drivers/net/igb/e1000_mbx.c82
-rw-r--r--drivers/net/igb/e1000_mbx.h10
-rw-r--r--drivers/net/igb/e1000_nvm.c36
-rw-r--r--drivers/net/igb/e1000_phy.c207
-rw-r--r--drivers/net/igb/e1000_phy.h4
-rw-r--r--drivers/net/igb/e1000_regs.h75
-rw-r--r--drivers/net/igb/igb.h143
-rw-r--r--drivers/net/igb/igb_ethtool.c690
-rw-r--r--drivers/net/igb/igb_main.c3273
-rw-r--r--drivers/net/igbvf/ethtool.c25
-rw-r--r--drivers/net/igbvf/netdev.c8
-rw-r--r--drivers/net/ipg.c7
-rw-r--r--drivers/net/irda/kingsun-sir.c1
-rw-r--r--drivers/net/irda/ks959-sir.c1
-rw-r--r--drivers/net/irda/ksdazzle-sir.c1
-rw-r--r--drivers/net/irda/mcs7780.c1
-rw-r--r--drivers/net/irda/pxaficp_ir.c47
-rw-r--r--drivers/net/ixgb/ixgb.h1
-rw-r--r--drivers/net/ixgb/ixgb_ethtool.c69
-rw-r--r--drivers/net/ixgb/ixgb_main.c56
-rw-r--r--drivers/net/ixgbe/ixgbe.h4
-rw-r--r--drivers/net/ixgbe/ixgbe_82599.c178
-rw-r--r--drivers/net/ixgbe/ixgbe_dcb_nl.c2
-rw-r--r--drivers/net/ixgbe/ixgbe_ethtool.c63
-rw-r--r--drivers/net/ixgbe/ixgbe_fcoe.c46
-rw-r--r--drivers/net/ixgbe/ixgbe_main.c48
-rw-r--r--drivers/net/ixgbe/ixgbe_type.h26
-rw-r--r--drivers/net/ixp2000/ixpdev.c3
-rw-r--r--drivers/net/korina.c5
-rw-r--r--drivers/net/ks8842.c5
-rw-r--r--drivers/net/lib82596.c11
-rw-r--r--drivers/net/macvlan.c12
-rw-r--r--drivers/net/mdio.c12
-rw-r--r--drivers/net/mlx4/fw.c5
-rw-r--r--drivers/net/myri10ge/myri10ge.c9
-rw-r--r--drivers/net/netx-eth.c3
-rw-r--r--drivers/net/netxen/netxen_nic.h78
-rw-r--r--drivers/net/netxen/netxen_nic_ethtool.c6
-rw-r--r--drivers/net/netxen/netxen_nic_hdr.h76
-rw-r--r--drivers/net/netxen/netxen_nic_hw.c916
-rw-r--r--drivers/net/netxen/netxen_nic_init.c361
-rw-r--r--drivers/net/netxen/netxen_nic_main.c314
-rw-r--r--drivers/net/niu.c15
-rw-r--r--drivers/net/pasemi_mac_ethtool.c14
-rw-r--r--drivers/net/pcmcia/pcnet_cs.c7
-rw-r--r--drivers/net/phy/broadcom.c208
-rw-r--r--drivers/net/ppp_async.c5
-rw-r--r--drivers/net/ppp_synctty.c5
-rw-r--r--drivers/net/pppoe.c19
-rw-r--r--drivers/net/pppol2tp.c22
-rw-r--r--drivers/net/pppox.c5
-rw-r--r--drivers/net/qlge/qlge.h229
-rw-r--r--drivers/net/qlge/qlge_dbg.c180
-rw-r--r--drivers/net/qlge/qlge_ethtool.c290
-rw-r--r--drivers/net/qlge/qlge_main.c483
-rw-r--r--drivers/net/qlge/qlge_mpi.c210
-rw-r--r--drivers/net/r8169.c3
-rw-r--r--drivers/net/sb1250-mac.c1
-rw-r--r--drivers/net/sc92031.c4
-rw-r--r--drivers/net/sfc/Makefile4
-rw-r--r--drivers/net/sfc/bitfield.h13
-rw-r--r--drivers/net/sfc/boards.c328
-rw-r--r--drivers/net/sfc/boards.h28
-rw-r--r--drivers/net/sfc/efx.c89
-rw-r--r--drivers/net/sfc/efx.h11
-rw-r--r--drivers/net/sfc/ethtool.c8
-rw-r--r--drivers/net/sfc/falcon.c1123
-rw-r--r--drivers/net/sfc/falcon.h4
-rw-r--r--drivers/net/sfc/falcon_boards.c (renamed from drivers/net/sfc/sfe4001.c)367
-rw-r--r--drivers/net/sfc/falcon_gmac.c95
-rw-r--r--drivers/net/sfc/falcon_hwdefs.h1333
-rw-r--r--drivers/net/sfc/falcon_io.h258
-rw-r--r--drivers/net/sfc/falcon_xmac.c178
-rw-r--r--drivers/net/sfc/gmii.h60
-rw-r--r--drivers/net/sfc/io.h256
-rw-r--r--drivers/net/sfc/mdio_10g.c121
-rw-r--r--drivers/net/sfc/mdio_10g.h1
-rw-r--r--drivers/net/sfc/net_driver.h56
-rw-r--r--drivers/net/sfc/phy.h6
-rw-r--r--drivers/net/sfc/qt202x_phy.c (renamed from drivers/net/sfc/xfp_phy.c)80
-rw-r--r--drivers/net/sfc/regs.h3180
-rw-r--r--drivers/net/sfc/rx.c31
-rw-r--r--drivers/net/sfc/selftest.c5
-rw-r--r--drivers/net/sfc/tenxpress.c14
-rw-r--r--drivers/net/sfc/tx.c83
-rw-r--r--drivers/net/sfc/workarounds.h2
-rw-r--r--drivers/net/sgiseeq.c7
-rw-r--r--drivers/net/sh_eth.c54
-rw-r--r--drivers/net/sh_eth.h1
-rw-r--r--drivers/net/sis190.c3
-rw-r--r--drivers/net/skge.c7
-rw-r--r--drivers/net/sky2.c129
-rw-r--r--drivers/net/sky2.h185
-rw-r--r--drivers/net/slip.c25
-rw-r--r--drivers/net/smsc911x.c3
-rw-r--r--drivers/net/spider_net.c1
-rw-r--r--drivers/net/sungem.c4
-rw-r--r--drivers/net/tc35815.c290
-rw-r--r--drivers/net/tehuti.c29
-rw-r--r--drivers/net/tehuti.h2
-rw-r--r--drivers/net/tg3.c887
-rw-r--r--drivers/net/tg3.h73
-rw-r--r--drivers/net/tlan.c7
-rw-r--r--drivers/net/tokenring/tms380tr.c2
-rw-r--r--drivers/net/tsi108_eth.c10
-rw-r--r--drivers/net/tun.c58
-rw-r--r--drivers/net/usb/asix.c12
-rw-r--r--drivers/net/usb/cdc_ether.c38
-rw-r--r--drivers/net/usb/dm9601.c2
-rw-r--r--drivers/net/usb/kaweth.c11
-rw-r--r--drivers/net/usb/usbnet.c21
-rw-r--r--drivers/net/veth.c21
-rw-r--r--drivers/net/via-rhine.c8
-rw-r--r--drivers/net/via-velocity.c42
-rw-r--r--drivers/net/via-velocity.h2
-rw-r--r--drivers/net/virtio_net.c7
-rw-r--r--drivers/net/vmxnet3/vmxnet3_int.h5
-rw-r--r--drivers/net/vxge/vxge-config.c300
-rw-r--r--drivers/net/vxge/vxge-config.h2
-rw-r--r--drivers/net/vxge/vxge-main.c109
-rw-r--r--drivers/net/vxge/vxge-main.h1
-rw-r--r--drivers/net/vxge/vxge-reg.h4
-rw-r--r--drivers/net/vxge/vxge-traffic.c4
-rw-r--r--drivers/net/vxge/vxge-traffic.h2
-rw-r--r--drivers/net/vxge/vxge-version.h4
-rw-r--r--drivers/net/wan/cosa.c20
-rw-r--r--drivers/net/wan/x25_asy.c19
-rw-r--r--drivers/net/wimax/i2400m/Kconfig8
-rw-r--r--drivers/net/wimax/i2400m/control.c16
-rw-r--r--drivers/net/wimax/i2400m/debugfs.c2
-rw-r--r--drivers/net/wimax/i2400m/driver.c500
-rw-r--r--drivers/net/wimax/i2400m/fw.c886
-rw-r--r--drivers/net/wimax/i2400m/i2400m-sdio.h16
-rw-r--r--drivers/net/wimax/i2400m/i2400m-usb.h16
-rw-r--r--drivers/net/wimax/i2400m/i2400m.h209
-rw-r--r--drivers/net/wimax/i2400m/netdev.c127
-rw-r--r--drivers/net/wimax/i2400m/rx.c170
-rw-r--r--drivers/net/wimax/i2400m/sdio-fw.c11
-rw-r--r--drivers/net/wimax/i2400m/sdio-rx.c42
-rw-r--r--drivers/net/wimax/i2400m/sdio-tx.c5
-rw-r--r--drivers/net/wimax/i2400m/sdio.c205
-rw-r--r--drivers/net/wimax/i2400m/tx.c20
-rw-r--r--drivers/net/wimax/i2400m/usb-fw.c37
-rw-r--r--drivers/net/wimax/i2400m/usb-notif.c35
-rw-r--r--drivers/net/wimax/i2400m/usb-rx.c60
-rw-r--r--drivers/net/wimax/i2400m/usb-tx.c61
-rw-r--r--drivers/net/wimax/i2400m/usb.c189
-rw-r--r--drivers/net/wireless/Kconfig212
-rw-r--r--drivers/net/wireless/Makefile10
-rw-r--r--drivers/net/wireless/airo.c3
-rw-r--r--drivers/net/wireless/at76c50x-usb.c10
-rw-r--r--drivers/net/wireless/ath/Kconfig9
-rw-r--r--drivers/net/wireless/ath/Makefile9
-rw-r--r--drivers/net/wireless/ath/ar9170/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ar9170/ar9170.h6
-rw-r--r--drivers/net/wireless/ath/ar9170/cmd.c3
-rw-r--r--drivers/net/wireless/ath/ar9170/cmd.h1
-rw-r--r--drivers/net/wireless/ath/ar9170/hw.h6
-rw-r--r--drivers/net/wireless/ath/ar9170/mac.c15
-rw-r--r--drivers/net/wireless/ath/ar9170/main.c41
-rw-r--r--drivers/net/wireless/ath/ar9170/phy.c99
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.c12
-rw-r--r--drivers/net/wireless/ath/ar9170/usb.h2
-rw-r--r--drivers/net/wireless/ath/ath.h41
-rw-r--r--drivers/net/wireless/ath/ath5k/Kconfig2
-rw-r--r--drivers/net/wireless/ath/ath5k/ath5k.h53
-rw-r--r--drivers/net/wireless/ath/ath5k/attach.c33
-rw-r--r--drivers/net/wireless/ath/ath5k/base.c116
-rw-r--r--drivers/net/wireless/ath/ath5k/base.h12
-rw-r--r--drivers/net/wireless/ath/ath5k/initvals.c4
-rw-r--r--drivers/net/wireless/ath/ath5k/led.c2
-rw-r--r--drivers/net/wireless/ath/ath5k/pcu.c193
-rw-r--r--drivers/net/wireless/ath/ath5k/phy.c185
-rw-r--r--drivers/net/wireless/ath/ath5k/reg.h19
-rw-r--r--drivers/net/wireless/ath/ath5k/reset.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/Kconfig10
-rw-r--r--drivers/net/wireless/ath/ath9k/Makefile27
-rw-r--r--drivers/net/wireless/ath/ath9k/ahb.c29
-rw-r--r--drivers/net/wireless/ath/ath9k/ani.c141
-rw-r--r--drivers/net/wireless/ath/ath9k/ath9k.h73
-rw-r--r--drivers/net/wireless/ath/ath9k/beacon.c112
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.c383
-rw-r--r--drivers/net/wireless/ath/ath9k/btcoex.h64
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.c421
-rw-r--r--drivers/net/wireless/ath/ath9k/calib.h2
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.c55
-rw-r--r--drivers/net/wireless/ath/ath9k/debug.h36
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.c8
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom.h9
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_4k.c94
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_9287.c97
-rw-r--r--drivers/net/wireless/ath/ath9k/eeprom_def.c183
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.c1244
-rw-r--r--drivers/net/wireless/ath/ath9k/hw.h85
-rw-r--r--drivers/net/wireless/ath/ath9k/initvals.h101
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.c162
-rw-r--r--drivers/net/wireless/ath/ath9k/mac.h11
-rw-r--r--drivers/net/wireless/ath/ath9k/main.c899
-rw-r--r--drivers/net/wireless/ath/ath9k/pci.c47
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.c1124
-rw-r--r--drivers/net/wireless/ath/ath9k/phy.h41
-rw-r--r--drivers/net/wireless/ath/ath9k/rc.c33
-rw-r--r--drivers/net/wireless/ath/ath9k/recv.c65
-rw-r--r--drivers/net/wireless/ath/ath9k/reg.h8
-rw-r--r--drivers/net/wireless/ath/ath9k/virtual.c22
-rw-r--r--drivers/net/wireless/ath/ath9k/xmit.c118
-rw-r--r--drivers/net/wireless/ath/debug.c32
-rw-r--r--drivers/net/wireless/ath/debug.h77
-rw-r--r--drivers/net/wireless/ath/hw.c126
-rw-r--r--drivers/net/wireless/ath/reg.h27
-rw-r--r--drivers/net/wireless/ath/regd.h8
-rw-r--r--drivers/net/wireless/ath/regd_common.h32
-rw-r--r--drivers/net/wireless/b43/Kconfig2
-rw-r--r--drivers/net/wireless/b43/b43.h18
-rw-r--r--drivers/net/wireless/b43/main.c6
-rw-r--r--drivers/net/wireless/b43/phy_lp.c777
-rw-r--r--drivers/net/wireless/b43/phy_lp.h11
-rw-r--r--drivers/net/wireless/b43/pio.c79
-rw-r--r--drivers/net/wireless/b43/xmit.c8
-rw-r--r--drivers/net/wireless/b43legacy/Kconfig2
-rw-r--r--drivers/net/wireless/b43legacy/b43legacy.h2
-rw-r--r--drivers/net/wireless/b43legacy/dma.c17
-rw-r--r--drivers/net/wireless/b43legacy/main.c6
-rw-r--r--drivers/net/wireless/b43legacy/xmit.c1
-rw-r--r--drivers/net/wireless/hostap/Kconfig3
-rw-r--r--drivers/net/wireless/ipw2x00/Kconfig11
-rw-r--r--drivers/net/wireless/ipw2x00/ipw2200.c1
-rw-r--r--drivers/net/wireless/iwlwifi/Kconfig30
-rw-r--r--drivers/net/wireless/iwlwifi/Makefile12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-1000.c50
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-hw.h12
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.c371
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-led.h22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945-rs.c102
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.c286
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-3945.h45
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965-hw.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-4965.c233
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-5000.c348
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-6000.c369
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.c85
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-led.h32
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.c574
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn-rs.h101
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-agn.c411
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-calib.c76
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-commands.h252
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.c651
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-core.h60
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-csr.h18
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debug.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-debugfs.c693
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-dev.h112
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.c13
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-devtrace.h178
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.c64
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-eeprom.h20
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-hcmd.c25
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-io.h16
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.c323
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-led.h46
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.c232
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-power.h3
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-rx.c177
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-scan.c22
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-spectrum.c2
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.c131
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-sta.h1
-rw-r--r--drivers/net/wireless/iwlwifi/iwl-tx.c82
-rw-r--r--drivers/net/wireless/iwlwifi/iwl3945-base.c309
-rw-r--r--drivers/net/wireless/iwmc3200wifi/Kconfig3
-rw-r--r--drivers/net/wireless/iwmc3200wifi/cfg80211.c47
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.c31
-rw-r--r--drivers/net/wireless/iwmc3200wifi/commands.h70
-rw-r--r--drivers/net/wireless/iwmc3200wifi/fw.c9
-rw-r--r--drivers/net/wireless/iwmc3200wifi/iwm.h6
-rw-r--r--drivers/net/wireless/iwmc3200wifi/lmac.h8
-rw-r--r--drivers/net/wireless/iwmc3200wifi/main.c48
-rw-r--r--drivers/net/wireless/iwmc3200wifi/netdev.c1
-rw-r--r--drivers/net/wireless/iwmc3200wifi/rx.c84
-rw-r--r--drivers/net/wireless/iwmc3200wifi/sdio.c10
-rw-r--r--drivers/net/wireless/iwmc3200wifi/umac.h5
-rw-r--r--drivers/net/wireless/libertas/11d.c696
-rw-r--r--drivers/net/wireless/libertas/11d.h105
-rw-r--r--drivers/net/wireless/libertas/Kconfig39
-rw-r--r--drivers/net/wireless/libertas/Makefile14
-rw-r--r--drivers/net/wireless/libertas/README26
-rw-r--r--drivers/net/wireless/libertas/assoc.c445
-rw-r--r--drivers/net/wireless/libertas/assoc.h141
-rw-r--r--drivers/net/wireless/libertas/cfg.c198
-rw-r--r--drivers/net/wireless/libertas/cfg.h16
-rw-r--r--drivers/net/wireless/libertas/cmd.c538
-rw-r--r--drivers/net/wireless/libertas/cmd.h127
-rw-r--r--drivers/net/wireless/libertas/cmdresp.c116
-rw-r--r--drivers/net/wireless/libertas/debugfs.c27
-rw-r--r--drivers/net/wireless/libertas/decl.h59
-rw-r--r--drivers/net/wireless/libertas/defs.h3
-rw-r--r--drivers/net/wireless/libertas/dev.h419
-rw-r--r--drivers/net/wireless/libertas/host.h959
-rw-r--r--drivers/net/wireless/libertas/hostcmd.h800
-rw-r--r--drivers/net/wireless/libertas/if_cs.c3
-rw-r--r--drivers/net/wireless/libertas/if_sdio.c56
-rw-r--r--drivers/net/wireless/libertas/if_sdio.h3
-rw-r--r--drivers/net/wireless/libertas/if_spi.c139
-rw-r--r--drivers/net/wireless/libertas/if_usb.c3
-rw-r--r--drivers/net/wireless/libertas/main.c373
-rw-r--r--drivers/net/wireless/libertas/persistcfg.c8
-rw-r--r--drivers/net/wireless/libertas/rx.c2
-rw-r--r--drivers/net/wireless/libertas/scan.c250
-rw-r--r--drivers/net/wireless/libertas/scan.h30
-rw-r--r--drivers/net/wireless/libertas/tx.c2
-rw-r--r--drivers/net/wireless/libertas/types.h4
-rw-r--r--drivers/net/wireless/libertas/wext.c196
-rw-r--r--drivers/net/wireless/libertas/wext.h8
-rw-r--r--drivers/net/wireless/mac80211_hwsim.c67
-rw-r--r--drivers/net/wireless/mwl8k.c1258
-rw-r--r--drivers/net/wireless/orinoco/Kconfig6
-rw-r--r--drivers/net/wireless/orinoco/hw.c33
-rw-r--r--drivers/net/wireless/orinoco/hw.h3
-rw-r--r--drivers/net/wireless/orinoco/main.c34
-rw-r--r--drivers/net/wireless/orinoco/orinoco.h1
-rw-r--r--drivers/net/wireless/p54/Kconfig2
-rw-r--r--drivers/net/wireless/p54/eeprom.c31
-rw-r--r--drivers/net/wireless/rt2x00/Kconfig46
-rw-r--r--drivers/net/wireless/rt2x00/Makefile3
-rw-r--r--drivers/net/wireless/rt2x00/rt2800.h1816
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.c1817
-rw-r--r--drivers/net/wireless/rt2x00/rt2800lib.h134
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.c1685
-rw-r--r--drivers/net/wireless/rt2x00/rt2800pci.h180
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.c1828
-rw-r--r--drivers/net/wireless/rt2x00/rt2800usb.h1818
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00.h50
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00leds.h4
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00pci.h24
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.c159
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00soc.h52
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.c2
-rw-r--r--drivers/net/wireless/rt2x00/rt2x00usb.h17
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187.h1
-rw-r--r--drivers/net/wireless/rtl818x/rtl8187_dev.c13
-rw-r--r--drivers/net/wireless/wl12xx/Kconfig3
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_main.c10
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_netlink.h30
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_rx.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1251_spi.c2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271.h95
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.c369
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_acx.h586
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.c218
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_boot.h22
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.c396
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_cmd.h118
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_conf.h919
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.c117
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_event.h37
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.c162
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_init.h53
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_main.c976
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.c68
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_ps.h2
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_reg.h47
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.c88
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_rx.h4
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.c311
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_spi.h65
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.c76
-rw-r--r--drivers/net/wireless/wl12xx/wl1271_tx.h18
-rw-r--r--drivers/net/wireless/wl12xx/wl12xx_80211.h4
-rw-r--r--drivers/net/wireless/zd1211rw/Kconfig2
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.c4
-rw-r--r--drivers/net/wireless/zd1211rw/zd_chip.h18
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.c202
-rw-r--r--drivers/net/wireless/zd1211rw/zd_mac.h25
-rw-r--r--drivers/net/wireless/zd1211rw/zd_usb.c4
-rw-r--r--drivers/parisc/led.c7
-rw-r--r--drivers/pci/hotplug/acpiphp_ibm.c1
-rw-r--r--drivers/pcmcia/Kconfig4
-rw-r--r--drivers/pcmcia/Makefile2
-rw-r--r--drivers/pcmcia/at91_cf.c2
-rw-r--r--drivers/pcmcia/au1000_generic.c2
-rw-r--r--drivers/pcmcia/bcm63xx_pcmcia.c536
-rw-r--r--drivers/pcmcia/bcm63xx_pcmcia.h60
-rw-r--r--drivers/pcmcia/bfin_cf_pcmcia.c2
-rw-r--r--drivers/pcmcia/cs.c2
-rw-r--r--drivers/pcmcia/i82092.c2
-rw-r--r--drivers/pcmcia/i82365.c2
-rw-r--r--drivers/pcmcia/m32r_cfc.c2
-rw-r--r--drivers/pcmcia/m32r_pcc.c2
-rw-r--r--drivers/pcmcia/m8xx_pcmcia.c2
-rw-r--r--drivers/pcmcia/omap_cf.c2
-rw-r--r--drivers/pcmcia/pd6729.c2
-rw-r--r--drivers/pcmcia/pxa2xx_base.c18
-rw-r--r--drivers/pcmcia/pxa2xx_palmtc.c230
-rw-r--r--drivers/pcmcia/sa1100_assabet.c2
-rw-r--r--drivers/pcmcia/sa1100_generic.c2
-rw-r--r--drivers/pcmcia/sa1100_neponset.c2
-rw-r--r--drivers/pcmcia/sa1111_generic.c2
-rw-r--r--drivers/pcmcia/tcic.c2
-rw-r--r--drivers/pcmcia/vrc4171_card.c2
-rw-r--r--drivers/pcmcia/yenta_socket.c88
-rw-r--r--drivers/platform/x86/eeepc-laptop.c2
-rw-r--r--drivers/platform/x86/sony-laptop.c127
-rw-r--r--drivers/platform/x86/thinkpad_acpi.c634
-rw-r--r--drivers/pnp/pnpacpi/core.c21
-rw-r--r--drivers/rtc/rtc-pxa.c27
-rw-r--r--drivers/rtc/rtc-sa1100.c23
-rw-r--r--drivers/s390/cio/qdio_debug.c2
-rw-r--r--drivers/s390/cio/qdio_perf.c2
-rw-r--r--drivers/s390/net/Makefile6
-rw-r--r--drivers/s390/net/claw.c82
-rw-r--r--drivers/s390/net/claw.h12
-rw-r--r--drivers/s390/net/ctcm_fsms.c1
-rw-r--r--drivers/s390/net/ctcm_fsms.h1
-rw-r--r--drivers/s390/net/ctcm_main.c168
-rw-r--r--drivers/s390/net/ctcm_main.h20
-rw-r--r--drivers/s390/net/ctcm_mpc.c1
-rw-r--r--drivers/s390/net/ctcm_sysfs.c11
-rw-r--r--drivers/s390/net/cu3088.c148
-rw-r--r--drivers/s390/net/cu3088.h41
-rw-r--r--drivers/s390/net/fsm.c1
-rw-r--r--drivers/s390/net/fsm.h2
-rw-r--r--drivers/s390/net/lcs.c115
-rw-r--r--drivers/s390/net/lcs.h18
-rw-r--r--drivers/s390/net/netiucv.c4
-rw-r--r--drivers/s390/net/qeth_core.h8
-rw-r--r--drivers/s390/net/qeth_core_main.c225
-rw-r--r--drivers/s390/net/qeth_core_mpc.h45
-rw-r--r--drivers/s390/net/qeth_core_sys.c83
-rw-r--r--drivers/s390/net/qeth_l2_main.c33
-rw-r--r--drivers/s390/net/qeth_l3.h2
-rw-r--r--drivers/s390/net/qeth_l3_main.c144
-rw-r--r--drivers/s390/net/qeth_l3_sys.c67
-rw-r--r--drivers/scsi/pmcraid.h1
-rw-r--r--drivers/scsi/sg.c45
-rw-r--r--drivers/serial/8250.c7
-rw-r--r--drivers/serial/Kconfig21
-rw-r--r--drivers/serial/Makefile1
-rw-r--r--drivers/serial/bcm63xx_uart.c890
-rw-r--r--drivers/serial/cpm_uart/cpm_uart_core.c2
-rw-r--r--drivers/serial/crisv10.c1
-rw-r--r--drivers/serial/icom.c54
-rw-r--r--drivers/serial/pxa.c20
-rw-r--r--drivers/serial/sa1100.c2
-rw-r--r--drivers/serial/serial_txx9.c39
-rw-r--r--drivers/sfi/sfi_core.c17
-rw-r--r--drivers/spi/Makefile2
-rw-r--r--drivers/spi/amba-pl022.c8
-rw-r--r--drivers/spi/pxa2xx_spi.c30
-rw-r--r--drivers/spi/spi_imx.c (renamed from drivers/spi/mxc_spi.c)383
-rw-r--r--drivers/spi/spidev.c2
-rw-r--r--drivers/ssb/driver_pcicore.c4
-rw-r--r--drivers/staging/Kconfig8
-rw-r--r--drivers/staging/Makefile5
-rw-r--r--drivers/staging/arlan/Kconfig15
-rw-r--r--drivers/staging/arlan/Makefile3
-rw-r--r--drivers/staging/arlan/TODO7
-rw-r--r--drivers/staging/arlan/arlan-main.c (renamed from drivers/net/wireless/arlan-main.c)0
-rw-r--r--drivers/staging/arlan/arlan-proc.c (renamed from drivers/net/wireless/arlan-proc.c)0
-rw-r--r--drivers/staging/arlan/arlan.h (renamed from drivers/net/wireless/arlan.h)0
-rw-r--r--drivers/staging/dst/dcore.c2
-rw-r--r--drivers/staging/iio/light/tsl2561.c4
-rw-r--r--drivers/staging/netwave/Kconfig11
-rw-r--r--drivers/staging/netwave/Makefile1
-rw-r--r--drivers/staging/netwave/TODO7
-rw-r--r--drivers/staging/netwave/netwave_cs.c (renamed from drivers/net/wireless/netwave_cs.c)0
-rw-r--r--drivers/staging/rtl8187se/Kconfig3
-rw-r--r--drivers/staging/rtl8192e/Kconfig3
-rw-r--r--drivers/staging/strip/Kconfig22
-rw-r--r--drivers/staging/strip/Makefile1
-rw-r--r--drivers/staging/strip/TODO7
-rw-r--r--drivers/staging/strip/strip.c (renamed from drivers/net/wireless/strip.c)17
-rw-r--r--drivers/staging/vt6655/Kconfig4
-rw-r--r--drivers/staging/vt6656/Kconfig4
-rw-r--r--drivers/staging/wavelan/Kconfig38
-rw-r--r--drivers/staging/wavelan/Makefile2
-rw-r--r--drivers/staging/wavelan/TODO7
-rw-r--r--drivers/staging/wavelan/i82586.h (renamed from drivers/net/wireless/i82586.h)0
-rw-r--r--drivers/staging/wavelan/i82593.h (renamed from drivers/net/wireless/i82593.h)0
-rw-r--r--drivers/staging/wavelan/wavelan.c (renamed from drivers/net/wireless/wavelan.c)0
-rw-r--r--drivers/staging/wavelan/wavelan.h (renamed from drivers/net/wireless/wavelan.h)0
-rw-r--r--drivers/staging/wavelan/wavelan.p.h (renamed from drivers/net/wireless/wavelan.p.h)0
-rw-r--r--drivers/staging/wavelan/wavelan_cs.c (renamed from drivers/net/wireless/wavelan_cs.c)0
-rw-r--r--drivers/staging/wavelan/wavelan_cs.h (renamed from drivers/net/wireless/wavelan_cs.h)0
-rw-r--r--drivers/staging/wavelan/wavelan_cs.p.h (renamed from drivers/net/wireless/wavelan_cs.p.h)0
-rw-r--r--drivers/uio/uio.c2
-rw-r--r--drivers/usb/class/usbtmc.c2
-rw-r--r--drivers/usb/gadget/inode.c1
-rw-r--r--drivers/usb/gadget/printer.c2
-rw-r--r--drivers/usb/host/ohci-pxa27x.c30
-rw-r--r--drivers/usb/host/whci/debug.c6
-rw-r--r--drivers/usb/misc/rio500.c3
-rw-r--r--drivers/usb/misc/sisusbvga/sisusb_init.c1
-rw-r--r--drivers/usb/mon/mon_bin.c2
-rw-r--r--drivers/usb/serial/usb-serial.c14
-rw-r--r--drivers/uwb/uwb-debug.c6
-rw-r--r--drivers/video/backlight/Kconfig33
-rw-r--r--drivers/video/backlight/Makefile4
-rw-r--r--drivers/video/backlight/adp5520_bl.c377
-rw-r--r--drivers/video/backlight/adx_bl.c178
-rw-r--r--drivers/video/backlight/backlight.c42
-rw-r--r--drivers/video/backlight/da903x_bl.c20
-rw-r--r--drivers/video/backlight/hp680_bl.c2
-rw-r--r--drivers/video/backlight/lms283gf05.c242
-rw-r--r--drivers/video/backlight/mbp_nvidia_bl.c36
-rw-r--r--drivers/video/backlight/wm831x_bl.c250
-rw-r--r--drivers/video/fb_defio.c2
-rw-r--r--drivers/video/fbmem.c2
-rw-r--r--drivers/video/omap/dispc.c2
-rw-r--r--drivers/video/pxafb.c32
-rw-r--r--drivers/w1/masters/ds2482.c35
-rw-r--r--drivers/watchdog/Kconfig7
-rw-r--r--drivers/watchdog/Makefile1
-rw-r--r--drivers/watchdog/adx_wdt.c354
-rw-r--r--drivers/xen/xenfs/xenbus.c1
787 files changed, 63868 insertions, 29072 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index dd8729d674e5..0ed42d8870c7 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -211,6 +211,18 @@ config ACPI_HOTPLUG_CPU
211 select ACPI_CONTAINER 211 select ACPI_CONTAINER
212 default y 212 default y
213 213
214config ACPI_PROCESSOR_AGGREGATOR
215 tristate "Processor Aggregator"
216 depends on ACPI_PROCESSOR
217 depends on EXPERIMENTAL
218 depends on X86
219 help
220 ACPI 4.0 defines processor Aggregator, which enables OS to perform
221 specfic processor configuration and control that applies to all
222 processors in the platform. Currently only logical processor idling
223 is defined, which is to reduce power consumption. This driver
224 support the new device.
225
214config ACPI_THERMAL 226config ACPI_THERMAL
215 tristate "Thermal Zone" 227 tristate "Thermal Zone"
216 depends on ACPI_PROCESSOR 228 depends on ACPI_PROCESSOR
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index 82cd49dc603b..7702118509a0 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -62,3 +62,5 @@ obj-$(CONFIG_ACPI_POWER_METER) += power_meter.o
62processor-y := processor_core.o processor_throttling.o 62processor-y := processor_core.o processor_throttling.o
63processor-y += processor_idle.o processor_thermal.o 63processor-y += processor_idle.o processor_thermal.o
64processor-$(CONFIG_CPU_FREQ) += processor_perflib.o 64processor-$(CONFIG_CPU_FREQ) += processor_perflib.o
65
66obj-$(CONFIG_ACPI_PROCESSOR_AGGREGATOR) += acpi_pad.o
diff --git a/drivers/acpi/acpi_pad.c b/drivers/acpi/acpi_pad.c
new file mode 100644
index 000000000000..0d2cdb86158b
--- /dev/null
+++ b/drivers/acpi/acpi_pad.c
@@ -0,0 +1,514 @@
1/*
2 * acpi_pad.c ACPI Processor Aggregator Driver
3 *
4 * Copyright (c) 2009, Intel Corporation.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program; if not, write to the Free Software Foundation, Inc.,
17 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
18 *
19 */
20
21#include <linux/kernel.h>
22#include <linux/cpumask.h>
23#include <linux/module.h>
24#include <linux/init.h>
25#include <linux/types.h>
26#include <linux/kthread.h>
27#include <linux/freezer.h>
28#include <linux/cpu.h>
29#include <linux/clockchips.h>
30#include <acpi/acpi_bus.h>
31#include <acpi/acpi_drivers.h>
32
33#define ACPI_PROCESSOR_AGGREGATOR_CLASS "processor_aggregator"
34#define ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME "Processor Aggregator"
35#define ACPI_PROCESSOR_AGGREGATOR_NOTIFY 0x80
36static DEFINE_MUTEX(isolated_cpus_lock);
37
38#define MWAIT_SUBSTATE_MASK (0xf)
39#define MWAIT_CSTATE_MASK (0xf)
40#define MWAIT_SUBSTATE_SIZE (4)
41#define CPUID_MWAIT_LEAF (5)
42#define CPUID5_ECX_EXTENSIONS_SUPPORTED (0x1)
43#define CPUID5_ECX_INTERRUPT_BREAK (0x2)
44static unsigned long power_saving_mwait_eax;
45static void power_saving_mwait_init(void)
46{
47 unsigned int eax, ebx, ecx, edx;
48 unsigned int highest_cstate = 0;
49 unsigned int highest_subcstate = 0;
50 int i;
51
52 if (!boot_cpu_has(X86_FEATURE_MWAIT))
53 return;
54 if (boot_cpu_data.cpuid_level < CPUID_MWAIT_LEAF)
55 return;
56
57 cpuid(CPUID_MWAIT_LEAF, &eax, &ebx, &ecx, &edx);
58
59 if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED) ||
60 !(ecx & CPUID5_ECX_INTERRUPT_BREAK))
61 return;
62
63 edx >>= MWAIT_SUBSTATE_SIZE;
64 for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
65 if (edx & MWAIT_SUBSTATE_MASK) {
66 highest_cstate = i;
67 highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
68 }
69 }
70 power_saving_mwait_eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
71 (highest_subcstate - 1);
72
73 for_each_online_cpu(i)
74 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ON, &i);
75
76#if defined(CONFIG_GENERIC_TIME) && defined(CONFIG_X86)
77 switch (boot_cpu_data.x86_vendor) {
78 case X86_VENDOR_AMD:
79 case X86_VENDOR_INTEL:
80 /*
81 * AMD Fam10h TSC will tick in all
82 * C/P/S0/S1 states when this bit is set.
83 */
84 if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC))
85 return;
86
87 /*FALL THROUGH*/
88 default:
89 /* TSC could halt in idle, so notify users */
90 mark_tsc_unstable("TSC halts in idle");
91 }
92#endif
93}
94
95static unsigned long cpu_weight[NR_CPUS];
96static int tsk_in_cpu[NR_CPUS] = {[0 ... NR_CPUS-1] = -1};
97static DECLARE_BITMAP(pad_busy_cpus_bits, NR_CPUS);
98static void round_robin_cpu(unsigned int tsk_index)
99{
100 struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
101 cpumask_var_t tmp;
102 int cpu;
103 unsigned long min_weight = -1, preferred_cpu;
104
105 if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
106 return;
107
108 mutex_lock(&isolated_cpus_lock);
109 cpumask_clear(tmp);
110 for_each_cpu(cpu, pad_busy_cpus)
111 cpumask_or(tmp, tmp, topology_thread_cpumask(cpu));
112 cpumask_andnot(tmp, cpu_online_mask, tmp);
113 /* avoid HT sibilings if possible */
114 if (cpumask_empty(tmp))
115 cpumask_andnot(tmp, cpu_online_mask, pad_busy_cpus);
116 if (cpumask_empty(tmp)) {
117 mutex_unlock(&isolated_cpus_lock);
118 return;
119 }
120 for_each_cpu(cpu, tmp) {
121 if (cpu_weight[cpu] < min_weight) {
122 min_weight = cpu_weight[cpu];
123 preferred_cpu = cpu;
124 }
125 }
126
127 if (tsk_in_cpu[tsk_index] != -1)
128 cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
129 tsk_in_cpu[tsk_index] = preferred_cpu;
130 cpumask_set_cpu(preferred_cpu, pad_busy_cpus);
131 cpu_weight[preferred_cpu]++;
132 mutex_unlock(&isolated_cpus_lock);
133
134 set_cpus_allowed_ptr(current, cpumask_of(preferred_cpu));
135}
136
137static void exit_round_robin(unsigned int tsk_index)
138{
139 struct cpumask *pad_busy_cpus = to_cpumask(pad_busy_cpus_bits);
140 cpumask_clear_cpu(tsk_in_cpu[tsk_index], pad_busy_cpus);
141 tsk_in_cpu[tsk_index] = -1;
142}
143
144static unsigned int idle_pct = 5; /* percentage */
145static unsigned int round_robin_time = 10; /* second */
146static int power_saving_thread(void *data)
147{
148 struct sched_param param = {.sched_priority = 1};
149 int do_sleep;
150 unsigned int tsk_index = (unsigned long)data;
151 u64 last_jiffies = 0;
152
153 sched_setscheduler(current, SCHED_RR, &param);
154
155 while (!kthread_should_stop()) {
156 int cpu;
157 u64 expire_time;
158
159 try_to_freeze();
160
161 /* round robin to cpus */
162 if (last_jiffies + round_robin_time * HZ < jiffies) {
163 last_jiffies = jiffies;
164 round_robin_cpu(tsk_index);
165 }
166
167 do_sleep = 0;
168
169 current_thread_info()->status &= ~TS_POLLING;
170 /*
171 * TS_POLLING-cleared state must be visible before we test
172 * NEED_RESCHED:
173 */
174 smp_mb();
175
176 expire_time = jiffies + HZ * (100 - idle_pct) / 100;
177
178 while (!need_resched()) {
179 local_irq_disable();
180 cpu = smp_processor_id();
181 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_ENTER,
182 &cpu);
183 stop_critical_timings();
184
185 __monitor((void *)&current_thread_info()->flags, 0, 0);
186 smp_mb();
187 if (!need_resched())
188 __mwait(power_saving_mwait_eax, 1);
189
190 start_critical_timings();
191 clockevents_notify(CLOCK_EVT_NOTIFY_BROADCAST_EXIT,
192 &cpu);
193 local_irq_enable();
194
195 if (jiffies > expire_time) {
196 do_sleep = 1;
197 break;
198 }
199 }
200
201 current_thread_info()->status |= TS_POLLING;
202
203 /*
204 * current sched_rt has threshold for rt task running time.
205 * When a rt task uses 95% CPU time, the rt thread will be
206 * scheduled out for 5% CPU time to not starve other tasks. But
207 * the mechanism only works when all CPUs have RT task running,
208 * as if one CPU hasn't RT task, RT task from other CPUs will
209 * borrow CPU time from this CPU and cause RT task use > 95%
210 * CPU time. To make 'avoid staration' work, takes a nap here.
211 */
212 if (do_sleep)
213 schedule_timeout_killable(HZ * idle_pct / 100);
214 }
215
216 exit_round_robin(tsk_index);
217 return 0;
218}
219
220static struct task_struct *ps_tsks[NR_CPUS];
221static unsigned int ps_tsk_num;
222static int create_power_saving_task(void)
223{
224 ps_tsks[ps_tsk_num] = kthread_run(power_saving_thread,
225 (void *)(unsigned long)ps_tsk_num,
226 "power_saving/%d", ps_tsk_num);
227 if (ps_tsks[ps_tsk_num]) {
228 ps_tsk_num++;
229 return 0;
230 }
231 return -EINVAL;
232}
233
234static void destroy_power_saving_task(void)
235{
236 if (ps_tsk_num > 0) {
237 ps_tsk_num--;
238 kthread_stop(ps_tsks[ps_tsk_num]);
239 }
240}
241
242static void set_power_saving_task_num(unsigned int num)
243{
244 if (num > ps_tsk_num) {
245 while (ps_tsk_num < num) {
246 if (create_power_saving_task())
247 return;
248 }
249 } else if (num < ps_tsk_num) {
250 while (ps_tsk_num > num)
251 destroy_power_saving_task();
252 }
253}
254
255static int acpi_pad_idle_cpus(unsigned int num_cpus)
256{
257 get_online_cpus();
258
259 num_cpus = min_t(unsigned int, num_cpus, num_online_cpus());
260 set_power_saving_task_num(num_cpus);
261
262 put_online_cpus();
263 return 0;
264}
265
266static uint32_t acpi_pad_idle_cpus_num(void)
267{
268 return ps_tsk_num;
269}
270
271static ssize_t acpi_pad_rrtime_store(struct device *dev,
272 struct device_attribute *attr, const char *buf, size_t count)
273{
274 unsigned long num;
275 if (strict_strtoul(buf, 0, &num))
276 return -EINVAL;
277 if (num < 1 || num >= 100)
278 return -EINVAL;
279 mutex_lock(&isolated_cpus_lock);
280 round_robin_time = num;
281 mutex_unlock(&isolated_cpus_lock);
282 return count;
283}
284
285static ssize_t acpi_pad_rrtime_show(struct device *dev,
286 struct device_attribute *attr, char *buf)
287{
288 return scnprintf(buf, PAGE_SIZE, "%d", round_robin_time);
289}
290static DEVICE_ATTR(rrtime, S_IRUGO|S_IWUSR,
291 acpi_pad_rrtime_show,
292 acpi_pad_rrtime_store);
293
294static ssize_t acpi_pad_idlepct_store(struct device *dev,
295 struct device_attribute *attr, const char *buf, size_t count)
296{
297 unsigned long num;
298 if (strict_strtoul(buf, 0, &num))
299 return -EINVAL;
300 if (num < 1 || num >= 100)
301 return -EINVAL;
302 mutex_lock(&isolated_cpus_lock);
303 idle_pct = num;
304 mutex_unlock(&isolated_cpus_lock);
305 return count;
306}
307
308static ssize_t acpi_pad_idlepct_show(struct device *dev,
309 struct device_attribute *attr, char *buf)
310{
311 return scnprintf(buf, PAGE_SIZE, "%d", idle_pct);
312}
313static DEVICE_ATTR(idlepct, S_IRUGO|S_IWUSR,
314 acpi_pad_idlepct_show,
315 acpi_pad_idlepct_store);
316
317static ssize_t acpi_pad_idlecpus_store(struct device *dev,
318 struct device_attribute *attr, const char *buf, size_t count)
319{
320 unsigned long num;
321 if (strict_strtoul(buf, 0, &num))
322 return -EINVAL;
323 mutex_lock(&isolated_cpus_lock);
324 acpi_pad_idle_cpus(num);
325 mutex_unlock(&isolated_cpus_lock);
326 return count;
327}
328
329static ssize_t acpi_pad_idlecpus_show(struct device *dev,
330 struct device_attribute *attr, char *buf)
331{
332 return cpumask_scnprintf(buf, PAGE_SIZE,
333 to_cpumask(pad_busy_cpus_bits));
334}
335static DEVICE_ATTR(idlecpus, S_IRUGO|S_IWUSR,
336 acpi_pad_idlecpus_show,
337 acpi_pad_idlecpus_store);
338
339static int acpi_pad_add_sysfs(struct acpi_device *device)
340{
341 int result;
342
343 result = device_create_file(&device->dev, &dev_attr_idlecpus);
344 if (result)
345 return -ENODEV;
346 result = device_create_file(&device->dev, &dev_attr_idlepct);
347 if (result) {
348 device_remove_file(&device->dev, &dev_attr_idlecpus);
349 return -ENODEV;
350 }
351 result = device_create_file(&device->dev, &dev_attr_rrtime);
352 if (result) {
353 device_remove_file(&device->dev, &dev_attr_idlecpus);
354 device_remove_file(&device->dev, &dev_attr_idlepct);
355 return -ENODEV;
356 }
357 return 0;
358}
359
360static void acpi_pad_remove_sysfs(struct acpi_device *device)
361{
362 device_remove_file(&device->dev, &dev_attr_idlecpus);
363 device_remove_file(&device->dev, &dev_attr_idlepct);
364 device_remove_file(&device->dev, &dev_attr_rrtime);
365}
366
367/* Query firmware how many CPUs should be idle */
368static int acpi_pad_pur(acpi_handle handle, int *num_cpus)
369{
370 struct acpi_buffer buffer = {ACPI_ALLOCATE_BUFFER, NULL};
371 acpi_status status;
372 union acpi_object *package;
373 int rev, num, ret = -EINVAL;
374
375 status = acpi_evaluate_object(handle, "_PUR", NULL, &buffer);
376 if (ACPI_FAILURE(status))
377 return -EINVAL;
378 package = buffer.pointer;
379 if (package->type != ACPI_TYPE_PACKAGE || package->package.count != 2)
380 goto out;
381 rev = package->package.elements[0].integer.value;
382 num = package->package.elements[1].integer.value;
383 if (rev != 1)
384 goto out;
385 *num_cpus = num;
386 ret = 0;
387out:
388 kfree(buffer.pointer);
389 return ret;
390}
391
392/* Notify firmware how many CPUs are idle */
393static void acpi_pad_ost(acpi_handle handle, int stat,
394 uint32_t idle_cpus)
395{
396 union acpi_object params[3] = {
397 {.type = ACPI_TYPE_INTEGER,},
398 {.type = ACPI_TYPE_INTEGER,},
399 {.type = ACPI_TYPE_BUFFER,},
400 };
401 struct acpi_object_list arg_list = {3, params};
402
403 params[0].integer.value = ACPI_PROCESSOR_AGGREGATOR_NOTIFY;
404 params[1].integer.value = stat;
405 params[2].buffer.length = 4;
406 params[2].buffer.pointer = (void *)&idle_cpus;
407 acpi_evaluate_object(handle, "_OST", &arg_list, NULL);
408}
409
410static void acpi_pad_handle_notify(acpi_handle handle)
411{
412 int num_cpus, ret;
413 uint32_t idle_cpus;
414
415 mutex_lock(&isolated_cpus_lock);
416 if (acpi_pad_pur(handle, &num_cpus)) {
417 mutex_unlock(&isolated_cpus_lock);
418 return;
419 }
420 ret = acpi_pad_idle_cpus(num_cpus);
421 idle_cpus = acpi_pad_idle_cpus_num();
422 if (!ret)
423 acpi_pad_ost(handle, 0, idle_cpus);
424 else
425 acpi_pad_ost(handle, 1, 0);
426 mutex_unlock(&isolated_cpus_lock);
427}
428
429static void acpi_pad_notify(acpi_handle handle, u32 event,
430 void *data)
431{
432 struct acpi_device *device = data;
433
434 switch (event) {
435 case ACPI_PROCESSOR_AGGREGATOR_NOTIFY:
436 acpi_pad_handle_notify(handle);
437 acpi_bus_generate_proc_event(device, event, 0);
438 acpi_bus_generate_netlink_event(device->pnp.device_class,
439 dev_name(&device->dev), event, 0);
440 break;
441 default:
442 printk(KERN_WARNING"Unsupported event [0x%x]\n", event);
443 break;
444 }
445}
446
447static int acpi_pad_add(struct acpi_device *device)
448{
449 acpi_status status;
450
451 strcpy(acpi_device_name(device), ACPI_PROCESSOR_AGGREGATOR_DEVICE_NAME);
452 strcpy(acpi_device_class(device), ACPI_PROCESSOR_AGGREGATOR_CLASS);
453
454 if (acpi_pad_add_sysfs(device))
455 return -ENODEV;
456
457 status = acpi_install_notify_handler(device->handle,
458 ACPI_DEVICE_NOTIFY, acpi_pad_notify, device);
459 if (ACPI_FAILURE(status)) {
460 acpi_pad_remove_sysfs(device);
461 return -ENODEV;
462 }
463
464 return 0;
465}
466
467static int acpi_pad_remove(struct acpi_device *device,
468 int type)
469{
470 mutex_lock(&isolated_cpus_lock);
471 acpi_pad_idle_cpus(0);
472 mutex_unlock(&isolated_cpus_lock);
473
474 acpi_remove_notify_handler(device->handle,
475 ACPI_DEVICE_NOTIFY, acpi_pad_notify);
476 acpi_pad_remove_sysfs(device);
477 return 0;
478}
479
480static const struct acpi_device_id pad_device_ids[] = {
481 {"ACPI000C", 0},
482 {"", 0},
483};
484MODULE_DEVICE_TABLE(acpi, pad_device_ids);
485
486static struct acpi_driver acpi_pad_driver = {
487 .name = "processor_aggregator",
488 .class = ACPI_PROCESSOR_AGGREGATOR_CLASS,
489 .ids = pad_device_ids,
490 .ops = {
491 .add = acpi_pad_add,
492 .remove = acpi_pad_remove,
493 },
494};
495
496static int __init acpi_pad_init(void)
497{
498 power_saving_mwait_init();
499 if (power_saving_mwait_eax == 0)
500 return -EINVAL;
501
502 return acpi_bus_register_driver(&acpi_pad_driver);
503}
504
505static void __exit acpi_pad_exit(void)
506{
507 acpi_bus_unregister_driver(&acpi_pad_driver);
508}
509
510module_init(acpi_pad_init);
511module_exit(acpi_pad_exit);
512MODULE_AUTHOR("Shaohua Li<shaohua.li@intel.com>");
513MODULE_DESCRIPTION("ACPI Processor Aggregator Driver");
514MODULE_LICENSE("GPL");
diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c
index 135fbfe1825c..741191524353 100644
--- a/drivers/acpi/bus.c
+++ b/drivers/acpi/bus.c
@@ -94,36 +94,33 @@ int acpi_bus_get_device(acpi_handle handle, struct acpi_device **device)
94 94
95EXPORT_SYMBOL(acpi_bus_get_device); 95EXPORT_SYMBOL(acpi_bus_get_device);
96 96
97int acpi_bus_get_status(struct acpi_device *device) 97acpi_status acpi_bus_get_status_handle(acpi_handle handle,
98 unsigned long long *sta)
98{ 99{
99 acpi_status status = AE_OK; 100 acpi_status status;
100 unsigned long long sta = 0;
101
102 101
103 if (!device) 102 status = acpi_evaluate_integer(handle, "_STA", NULL, sta);
104 return -EINVAL; 103 if (ACPI_SUCCESS(status))
104 return AE_OK;
105 105
106 /* 106 if (status == AE_NOT_FOUND) {
107 * Evaluate _STA if present. 107 *sta = ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED |
108 */ 108 ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING;
109 if (device->flags.dynamic_status) { 109 return AE_OK;
110 status =
111 acpi_evaluate_integer(device->handle, "_STA", NULL, &sta);
112 if (ACPI_FAILURE(status))
113 return -ENODEV;
114 STRUCT_TO_INT(device->status) = (int)sta;
115 } 110 }
111 return status;
112}
116 113
117 /* 114int acpi_bus_get_status(struct acpi_device *device)
118 * According to ACPI spec some device can be present and functional 115{
119 * even if the parent is not present but functional. 116 acpi_status status;
120 * In such conditions the child device should not inherit the status 117 unsigned long long sta;
121 * from the parent. 118
122 */ 119 status = acpi_bus_get_status_handle(device->handle, &sta);
123 else 120 if (ACPI_FAILURE(status))
124 STRUCT_TO_INT(device->status) = 121 return -ENODEV;
125 ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | 122
126 ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING; 123 STRUCT_TO_INT(device->status) = (int) sta;
127 124
128 if (device->status.functional && !device->status.present) { 125 if (device->status.functional && !device->status.present) {
129 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]: " 126 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]: "
@@ -135,10 +132,8 @@ int acpi_bus_get_status(struct acpi_device *device)
135 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]\n", 132 ACPI_DEBUG_PRINT((ACPI_DB_INFO, "Device [%s] status [%08x]\n",
136 device->pnp.bus_id, 133 device->pnp.bus_id,
137 (u32) STRUCT_TO_INT(device->status))); 134 (u32) STRUCT_TO_INT(device->status)));
138
139 return 0; 135 return 0;
140} 136}
141
142EXPORT_SYMBOL(acpi_bus_get_status); 137EXPORT_SYMBOL(acpi_bus_get_status);
143 138
144void acpi_bus_private_data_handler(acpi_handle handle, 139void acpi_bus_private_data_handler(acpi_handle handle,
diff --git a/drivers/acpi/dock.c b/drivers/acpi/dock.c
index 3a2cfefc71ab..7338b6a3e049 100644
--- a/drivers/acpi/dock.c
+++ b/drivers/acpi/dock.c
@@ -67,7 +67,7 @@ struct dock_station {
67 struct list_head dependent_devices; 67 struct list_head dependent_devices;
68 struct list_head hotplug_devices; 68 struct list_head hotplug_devices;
69 69
70 struct list_head sibiling; 70 struct list_head sibling;
71 struct platform_device *dock_device; 71 struct platform_device *dock_device;
72}; 72};
73static LIST_HEAD(dock_stations); 73static LIST_HEAD(dock_stations);
@@ -275,7 +275,7 @@ int is_dock_device(acpi_handle handle)
275 275
276 if (is_dock(handle)) 276 if (is_dock(handle))
277 return 1; 277 return 1;
278 list_for_each_entry(dock_station, &dock_stations, sibiling) { 278 list_for_each_entry(dock_station, &dock_stations, sibling) {
279 if (find_dock_dependent_device(dock_station, handle)) 279 if (find_dock_dependent_device(dock_station, handle))
280 return 1; 280 return 1;
281 } 281 }
@@ -619,7 +619,7 @@ register_hotplug_dock_device(acpi_handle handle, struct acpi_dock_ops *ops,
619 * make sure this handle is for a device dependent on the dock, 619 * make sure this handle is for a device dependent on the dock,
620 * this would include the dock station itself 620 * this would include the dock station itself
621 */ 621 */
622 list_for_each_entry(dock_station, &dock_stations, sibiling) { 622 list_for_each_entry(dock_station, &dock_stations, sibling) {
623 /* 623 /*
624 * An ATA bay can be in a dock and itself can be ejected 624 * An ATA bay can be in a dock and itself can be ejected
625 * seperately, so there are two 'dock stations' which need the 625 * seperately, so there are two 'dock stations' which need the
@@ -651,7 +651,7 @@ void unregister_hotplug_dock_device(acpi_handle handle)
651 if (!dock_station_count) 651 if (!dock_station_count)
652 return; 652 return;
653 653
654 list_for_each_entry(dock_station, &dock_stations, sibiling) { 654 list_for_each_entry(dock_station, &dock_stations, sibling) {
655 dd = find_dock_dependent_device(dock_station, handle); 655 dd = find_dock_dependent_device(dock_station, handle);
656 if (dd) 656 if (dd)
657 dock_del_hotplug_device(dock_station, dd); 657 dock_del_hotplug_device(dock_station, dd);
@@ -787,7 +787,7 @@ static int acpi_dock_notifier_call(struct notifier_block *this,
787 if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK 787 if (event != ACPI_NOTIFY_BUS_CHECK && event != ACPI_NOTIFY_DEVICE_CHECK
788 && event != ACPI_NOTIFY_EJECT_REQUEST) 788 && event != ACPI_NOTIFY_EJECT_REQUEST)
789 return 0; 789 return 0;
790 list_for_each_entry(dock_station, &dock_stations, sibiling) { 790 list_for_each_entry(dock_station, &dock_stations, sibling) {
791 if (dock_station->handle == handle) { 791 if (dock_station->handle == handle) {
792 struct dock_data *dock_data; 792 struct dock_data *dock_data;
793 793
@@ -958,7 +958,7 @@ static int dock_add(acpi_handle handle)
958 dock_station->last_dock_time = jiffies - HZ; 958 dock_station->last_dock_time = jiffies - HZ;
959 INIT_LIST_HEAD(&dock_station->dependent_devices); 959 INIT_LIST_HEAD(&dock_station->dependent_devices);
960 INIT_LIST_HEAD(&dock_station->hotplug_devices); 960 INIT_LIST_HEAD(&dock_station->hotplug_devices);
961 INIT_LIST_HEAD(&dock_station->sibiling); 961 INIT_LIST_HEAD(&dock_station->sibling);
962 spin_lock_init(&dock_station->dd_lock); 962 spin_lock_init(&dock_station->dd_lock);
963 mutex_init(&dock_station->hp_lock); 963 mutex_init(&dock_station->hp_lock);
964 ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list); 964 ATOMIC_INIT_NOTIFIER_HEAD(&dock_notifier_list);
@@ -1044,7 +1044,7 @@ static int dock_add(acpi_handle handle)
1044 add_dock_dependent_device(dock_station, dd); 1044 add_dock_dependent_device(dock_station, dd);
1045 1045
1046 dock_station_count++; 1046 dock_station_count++;
1047 list_add(&dock_station->sibiling, &dock_stations); 1047 list_add(&dock_station->sibling, &dock_stations);
1048 return 0; 1048 return 0;
1049 1049
1050dock_add_err_unregister: 1050dock_add_err_unregister:
@@ -1149,7 +1149,7 @@ static void __exit dock_exit(void)
1149 struct dock_station *tmp; 1149 struct dock_station *tmp;
1150 1150
1151 unregister_acpi_bus_notifier(&dock_acpi_notifier); 1151 unregister_acpi_bus_notifier(&dock_acpi_notifier);
1152 list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibiling) 1152 list_for_each_entry_safe(dock_station, tmp, &dock_stations, sibling)
1153 dock_remove(dock_station); 1153 dock_remove(dock_station);
1154} 1154}
1155 1155
diff --git a/drivers/acpi/ec.c b/drivers/acpi/ec.c
index f70796081c4c..baef28c1e630 100644
--- a/drivers/acpi/ec.c
+++ b/drivers/acpi/ec.c
@@ -119,6 +119,8 @@ static struct acpi_ec {
119} *boot_ec, *first_ec; 119} *boot_ec, *first_ec;
120 120
121static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */ 121static int EC_FLAGS_MSI; /* Out-of-spec MSI controller */
122static int EC_FLAGS_VALIDATE_ECDT; /* ASUStec ECDTs need to be validated */
123static int EC_FLAGS_SKIP_DSDT_SCAN; /* Not all BIOS survive early DSDT scan */
122 124
123/* -------------------------------------------------------------------------- 125/* --------------------------------------------------------------------------
124 Transaction Management 126 Transaction Management
@@ -232,10 +234,8 @@ static int ec_poll(struct acpi_ec *ec)
232 } 234 }
233 advance_transaction(ec, acpi_ec_read_status(ec)); 235 advance_transaction(ec, acpi_ec_read_status(ec));
234 } while (time_before(jiffies, delay)); 236 } while (time_before(jiffies, delay));
235 if (!ec->curr->irq_count || 237 if (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF)
236 (acpi_ec_read_status(ec) & ACPI_EC_FLAG_IBF))
237 break; 238 break;
238 /* try restart command if we get any false interrupts */
239 pr_debug(PREFIX "controller reset, restart transaction\n"); 239 pr_debug(PREFIX "controller reset, restart transaction\n");
240 spin_lock_irqsave(&ec->curr_lock, flags); 240 spin_lock_irqsave(&ec->curr_lock, flags);
241 start_transaction(ec); 241 start_transaction(ec);
@@ -899,6 +899,44 @@ static const struct acpi_device_id ec_device_ids[] = {
899 {"", 0}, 899 {"", 0},
900}; 900};
901 901
902/* Some BIOS do not survive early DSDT scan, skip it */
903static int ec_skip_dsdt_scan(const struct dmi_system_id *id)
904{
905 EC_FLAGS_SKIP_DSDT_SCAN = 1;
906 return 0;
907}
908
909/* ASUStek often supplies us with broken ECDT, validate it */
910static int ec_validate_ecdt(const struct dmi_system_id *id)
911{
912 EC_FLAGS_VALIDATE_ECDT = 1;
913 return 0;
914}
915
916/* MSI EC needs special treatment, enable it */
917static int ec_flag_msi(const struct dmi_system_id *id)
918{
919 EC_FLAGS_MSI = 1;
920 EC_FLAGS_VALIDATE_ECDT = 1;
921 return 0;
922}
923
924static struct dmi_system_id __initdata ec_dmi_table[] = {
925 {
926 ec_skip_dsdt_scan, "Compal JFL92", {
927 DMI_MATCH(DMI_BIOS_VENDOR, "COMPAL"),
928 DMI_MATCH(DMI_BOARD_NAME, "JFL92") }, NULL},
929 {
930 ec_flag_msi, "MSI hardware", {
931 DMI_MATCH(DMI_BIOS_VENDOR, "Micro-Star"),
932 DMI_MATCH(DMI_CHASSIS_VENDOR, "MICRO-Star") }, NULL},
933 {
934 ec_validate_ecdt, "ASUS hardware", {
935 DMI_MATCH(DMI_BIOS_VENDOR, "ASUS") }, NULL},
936 {},
937};
938
939
902int __init acpi_ec_ecdt_probe(void) 940int __init acpi_ec_ecdt_probe(void)
903{ 941{
904 acpi_status status; 942 acpi_status status;
@@ -911,11 +949,7 @@ int __init acpi_ec_ecdt_probe(void)
911 /* 949 /*
912 * Generate a boot ec context 950 * Generate a boot ec context
913 */ 951 */
914 if (dmi_name_in_vendors("Micro-Star") || 952 dmi_check_system(ec_dmi_table);
915 dmi_name_in_vendors("Notebook")) {
916 pr_info(PREFIX "Enabling special treatment for EC from MSI.\n");
917 EC_FLAGS_MSI = 1;
918 }
919 status = acpi_get_table(ACPI_SIG_ECDT, 1, 953 status = acpi_get_table(ACPI_SIG_ECDT, 1,
920 (struct acpi_table_header **)&ecdt_ptr); 954 (struct acpi_table_header **)&ecdt_ptr);
921 if (ACPI_SUCCESS(status)) { 955 if (ACPI_SUCCESS(status)) {
@@ -926,7 +960,7 @@ int __init acpi_ec_ecdt_probe(void)
926 boot_ec->handle = ACPI_ROOT_OBJECT; 960 boot_ec->handle = ACPI_ROOT_OBJECT;
927 acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle); 961 acpi_get_handle(ACPI_ROOT_OBJECT, ecdt_ptr->id, &boot_ec->handle);
928 /* Don't trust ECDT, which comes from ASUSTek */ 962 /* Don't trust ECDT, which comes from ASUSTek */
929 if (!dmi_name_in_vendors("ASUS") && EC_FLAGS_MSI == 0) 963 if (!EC_FLAGS_VALIDATE_ECDT)
930 goto install; 964 goto install;
931 saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL); 965 saved_ec = kmalloc(sizeof(struct acpi_ec), GFP_KERNEL);
932 if (!saved_ec) 966 if (!saved_ec)
@@ -934,6 +968,10 @@ int __init acpi_ec_ecdt_probe(void)
934 memcpy(saved_ec, boot_ec, sizeof(struct acpi_ec)); 968 memcpy(saved_ec, boot_ec, sizeof(struct acpi_ec));
935 /* fall through */ 969 /* fall through */
936 } 970 }
971
972 if (EC_FLAGS_SKIP_DSDT_SCAN)
973 return -ENODEV;
974
937 /* This workaround is needed only on some broken machines, 975 /* This workaround is needed only on some broken machines,
938 * which require early EC, but fail to provide ECDT */ 976 * which require early EC, but fail to provide ECDT */
939 printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n"); 977 printk(KERN_DEBUG PREFIX "Look up EC in DSDT\n");
diff --git a/drivers/acpi/osl.c b/drivers/acpi/osl.c
index 5633b86e3ed1..7c1c59ea9ec6 100644
--- a/drivers/acpi/osl.c
+++ b/drivers/acpi/osl.c
@@ -1161,7 +1161,13 @@ int acpi_check_resource_conflict(struct resource *res)
1161 res_list_elem->name, 1161 res_list_elem->name,
1162 (long long) res_list_elem->start, 1162 (long long) res_list_elem->start,
1163 (long long) res_list_elem->end); 1163 (long long) res_list_elem->end);
1164 printk(KERN_INFO "ACPI: Device needs an ACPI driver\n"); 1164 if (acpi_enforce_resources == ENFORCE_RESOURCES_LAX)
1165 printk(KERN_NOTICE "ACPI: This conflict may"
1166 " cause random problems and system"
1167 " instability\n");
1168 printk(KERN_INFO "ACPI: If an ACPI driver is available"
1169 " for this device, you should use it instead of"
1170 " the native driver\n");
1165 } 1171 }
1166 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT) 1172 if (acpi_enforce_resources == ENFORCE_RESOURCES_STRICT)
1167 return -EBUSY; 1173 return -EBUSY;
diff --git a/drivers/acpi/proc.c b/drivers/acpi/proc.c
index d0d550d22a6d..f8b6f555ba52 100644
--- a/drivers/acpi/proc.c
+++ b/drivers/acpi/proc.c
@@ -398,6 +398,8 @@ acpi_system_write_wakeup_device(struct file *file,
398 398
399 if (len > 4) 399 if (len > 4)
400 len = 4; 400 len = 4;
401 if (len < 0)
402 return -EFAULT;
401 403
402 if (copy_from_user(strbuf, buffer, len)) 404 if (copy_from_user(strbuf, buffer, len))
403 return -EFAULT; 405 return -EFAULT;
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index c2d4d6e09364..c567b46dfa0f 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -863,13 +863,6 @@ static int acpi_processor_add(struct acpi_device *device)
863 goto err_remove_sysfs; 863 goto err_remove_sysfs;
864 } 864 }
865 865
866 if (pr->flags.throttling) {
867 printk(KERN_INFO PREFIX "%s [%s] (supports",
868 acpi_device_name(device), acpi_device_bid(device));
869 printk(" %d throttling states", pr->throttling.state_count);
870 printk(")\n");
871 }
872
873 return 0; 866 return 0;
874 867
875err_remove_sysfs: 868err_remove_sysfs:
diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c
index cc61a6220102..bbd066e7f854 100644
--- a/drivers/acpi/processor_idle.c
+++ b/drivers/acpi/processor_idle.c
@@ -1166,7 +1166,6 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1166#ifdef CONFIG_ACPI_PROCFS 1166#ifdef CONFIG_ACPI_PROCFS
1167 struct proc_dir_entry *entry = NULL; 1167 struct proc_dir_entry *entry = NULL;
1168#endif 1168#endif
1169 unsigned int i;
1170 1169
1171 if (boot_option_idle_override) 1170 if (boot_option_idle_override)
1172 return 0; 1171 return 0;
@@ -1214,13 +1213,6 @@ int __cpuinit acpi_processor_power_init(struct acpi_processor *pr,
1214 acpi_processor_setup_cpuidle(pr); 1213 acpi_processor_setup_cpuidle(pr);
1215 if (cpuidle_register_device(&pr->power.dev)) 1214 if (cpuidle_register_device(&pr->power.dev))
1216 return -EIO; 1215 return -EIO;
1217
1218 printk(KERN_INFO PREFIX "CPU%d (power states:", pr->id);
1219 for (i = 1; i <= pr->power.count; i++)
1220 if (pr->power.states[i].valid)
1221 printk(" C%d[C%d]", i,
1222 pr->power.states[i].type);
1223 printk(")\n");
1224 } 1216 }
1225#ifdef CONFIG_ACPI_PROCFS 1217#ifdef CONFIG_ACPI_PROCFS
1226 /* 'power' [R] */ 1218 /* 'power' [R] */
diff --git a/drivers/acpi/scan.c b/drivers/acpi/scan.c
index 408ebde18986..14a7481c97d7 100644
--- a/drivers/acpi/scan.c
+++ b/drivers/acpi/scan.c
@@ -22,6 +22,8 @@ extern struct acpi_device *acpi_root;
22#define ACPI_BUS_HID "LNXSYBUS" 22#define ACPI_BUS_HID "LNXSYBUS"
23#define ACPI_BUS_DEVICE_NAME "System Bus" 23#define ACPI_BUS_DEVICE_NAME "System Bus"
24 24
25#define ACPI_IS_ROOT_DEVICE(device) (!(device)->parent)
26
25static LIST_HEAD(acpi_device_list); 27static LIST_HEAD(acpi_device_list);
26static LIST_HEAD(acpi_bus_id_list); 28static LIST_HEAD(acpi_bus_id_list);
27DEFINE_MUTEX(acpi_device_lock); 29DEFINE_MUTEX(acpi_device_lock);
@@ -43,40 +45,19 @@ static int create_modalias(struct acpi_device *acpi_dev, char *modalias,
43{ 45{
44 int len; 46 int len;
45 int count; 47 int count;
46 48 struct acpi_hardware_id *id;
47 if (!acpi_dev->flags.hardware_id && !acpi_dev->flags.compatible_ids)
48 return -ENODEV;
49 49
50 len = snprintf(modalias, size, "acpi:"); 50 len = snprintf(modalias, size, "acpi:");
51 size -= len; 51 size -= len;
52 52
53 if (acpi_dev->flags.hardware_id) { 53 list_for_each_entry(id, &acpi_dev->pnp.ids, list) {
54 count = snprintf(&modalias[len], size, "%s:", 54 count = snprintf(&modalias[len], size, "%s:", id->id);
55 acpi_dev->pnp.hardware_id);
56 if (count < 0 || count >= size) 55 if (count < 0 || count >= size)
57 return -EINVAL; 56 return -EINVAL;
58 len += count; 57 len += count;
59 size -= count; 58 size -= count;
60 } 59 }
61 60
62 if (acpi_dev->flags.compatible_ids) {
63 struct acpica_device_id_list *cid_list;
64 int i;
65
66 cid_list = acpi_dev->pnp.cid_list;
67 for (i = 0; i < cid_list->count; i++) {
68 count = snprintf(&modalias[len], size, "%s:",
69 cid_list->ids[i].string);
70 if (count < 0 || count >= size) {
71 printk(KERN_ERR PREFIX "%s cid[%i] exceeds event buffer size",
72 acpi_dev->pnp.device_name, i);
73 break;
74 }
75 len += count;
76 size -= count;
77 }
78 }
79
80 modalias[len] = '\0'; 61 modalias[len] = '\0';
81 return len; 62 return len;
82} 63}
@@ -183,7 +164,7 @@ static ssize_t
183acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) { 164acpi_device_hid_show(struct device *dev, struct device_attribute *attr, char *buf) {
184 struct acpi_device *acpi_dev = to_acpi_device(dev); 165 struct acpi_device *acpi_dev = to_acpi_device(dev);
185 166
186 return sprintf(buf, "%s\n", acpi_dev->pnp.hardware_id); 167 return sprintf(buf, "%s\n", acpi_device_hid(acpi_dev));
187} 168}
188static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL); 169static DEVICE_ATTR(hid, 0444, acpi_device_hid_show, NULL);
189 170
@@ -219,17 +200,13 @@ static int acpi_device_setup_files(struct acpi_device *dev)
219 goto end; 200 goto end;
220 } 201 }
221 202
222 if (dev->flags.hardware_id) { 203 result = device_create_file(&dev->dev, &dev_attr_hid);
223 result = device_create_file(&dev->dev, &dev_attr_hid); 204 if (result)
224 if (result) 205 goto end;
225 goto end;
226 }
227 206
228 if (dev->flags.hardware_id || dev->flags.compatible_ids) { 207 result = device_create_file(&dev->dev, &dev_attr_modalias);
229 result = device_create_file(&dev->dev, &dev_attr_modalias); 208 if (result)
230 if (result) 209 goto end;
231 goto end;
232 }
233 210
234 /* 211 /*
235 * If device has _EJ0, 'eject' file is created that is used to trigger 212 * If device has _EJ0, 'eject' file is created that is used to trigger
@@ -255,11 +232,8 @@ static void acpi_device_remove_files(struct acpi_device *dev)
255 if (ACPI_SUCCESS(status)) 232 if (ACPI_SUCCESS(status))
256 device_remove_file(&dev->dev, &dev_attr_eject); 233 device_remove_file(&dev->dev, &dev_attr_eject);
257 234
258 if (dev->flags.hardware_id || dev->flags.compatible_ids) 235 device_remove_file(&dev->dev, &dev_attr_modalias);
259 device_remove_file(&dev->dev, &dev_attr_modalias); 236 device_remove_file(&dev->dev, &dev_attr_hid);
260
261 if (dev->flags.hardware_id)
262 device_remove_file(&dev->dev, &dev_attr_hid);
263 if (dev->handle) 237 if (dev->handle)
264 device_remove_file(&dev->dev, &dev_attr_path); 238 device_remove_file(&dev->dev, &dev_attr_path);
265} 239}
@@ -271,6 +245,7 @@ int acpi_match_device_ids(struct acpi_device *device,
271 const struct acpi_device_id *ids) 245 const struct acpi_device_id *ids)
272{ 246{
273 const struct acpi_device_id *id; 247 const struct acpi_device_id *id;
248 struct acpi_hardware_id *hwid;
274 249
275 /* 250 /*
276 * If the device is not present, it is unnecessary to load device 251 * If the device is not present, it is unnecessary to load device
@@ -279,40 +254,30 @@ int acpi_match_device_ids(struct acpi_device *device,
279 if (!device->status.present) 254 if (!device->status.present)
280 return -ENODEV; 255 return -ENODEV;
281 256
282 if (device->flags.hardware_id) { 257 for (id = ids; id->id[0]; id++)
283 for (id = ids; id->id[0]; id++) { 258 list_for_each_entry(hwid, &device->pnp.ids, list)
284 if (!strcmp((char*)id->id, device->pnp.hardware_id)) 259 if (!strcmp((char *) id->id, hwid->id))
285 return 0; 260 return 0;
286 }
287 }
288
289 if (device->flags.compatible_ids) {
290 struct acpica_device_id_list *cid_list = device->pnp.cid_list;
291 int i;
292
293 for (id = ids; id->id[0]; id++) {
294 /* compare multiple _CID entries against driver ids */
295 for (i = 0; i < cid_list->count; i++) {
296 if (!strcmp((char*)id->id,
297 cid_list->ids[i].string))
298 return 0;
299 }
300 }
301 }
302 261
303 return -ENOENT; 262 return -ENOENT;
304} 263}
305EXPORT_SYMBOL(acpi_match_device_ids); 264EXPORT_SYMBOL(acpi_match_device_ids);
306 265
266static void acpi_free_ids(struct acpi_device *device)
267{
268 struct acpi_hardware_id *id, *tmp;
269
270 list_for_each_entry_safe(id, tmp, &device->pnp.ids, list) {
271 kfree(id->id);
272 kfree(id);
273 }
274}
275
307static void acpi_device_release(struct device *dev) 276static void acpi_device_release(struct device *dev)
308{ 277{
309 struct acpi_device *acpi_dev = to_acpi_device(dev); 278 struct acpi_device *acpi_dev = to_acpi_device(dev);
310 279
311 kfree(acpi_dev->pnp.cid_list); 280 acpi_free_ids(acpi_dev);
312 if (acpi_dev->flags.hardware_id)
313 kfree(acpi_dev->pnp.hardware_id);
314 if (acpi_dev->flags.unique_id)
315 kfree(acpi_dev->pnp.unique_id);
316 kfree(acpi_dev); 281 kfree(acpi_dev);
317} 282}
318 283
@@ -378,15 +343,13 @@ static acpi_status acpi_device_notify_fixed(void *data)
378static int acpi_device_install_notify_handler(struct acpi_device *device) 343static int acpi_device_install_notify_handler(struct acpi_device *device)
379{ 344{
380 acpi_status status; 345 acpi_status status;
381 char *hid;
382 346
383 hid = acpi_device_hid(device); 347 if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
384 if (!strcmp(hid, ACPI_BUTTON_HID_POWERF))
385 status = 348 status =
386 acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, 349 acpi_install_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
387 acpi_device_notify_fixed, 350 acpi_device_notify_fixed,
388 device); 351 device);
389 else if (!strcmp(hid, ACPI_BUTTON_HID_SLEEPF)) 352 else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
390 status = 353 status =
391 acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, 354 acpi_install_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
392 acpi_device_notify_fixed, 355 acpi_device_notify_fixed,
@@ -404,10 +367,10 @@ static int acpi_device_install_notify_handler(struct acpi_device *device)
404 367
405static void acpi_device_remove_notify_handler(struct acpi_device *device) 368static void acpi_device_remove_notify_handler(struct acpi_device *device)
406{ 369{
407 if (!strcmp(acpi_device_hid(device), ACPI_BUTTON_HID_POWERF)) 370 if (device->device_type == ACPI_BUS_TYPE_POWER_BUTTON)
408 acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON, 371 acpi_remove_fixed_event_handler(ACPI_EVENT_POWER_BUTTON,
409 acpi_device_notify_fixed); 372 acpi_device_notify_fixed);
410 else if (!strcmp(acpi_device_hid(device), ACPI_BUTTON_HID_SLEEPF)) 373 else if (device->device_type == ACPI_BUS_TYPE_SLEEP_BUTTON)
411 acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON, 374 acpi_remove_fixed_event_handler(ACPI_EVENT_SLEEP_BUTTON,
412 acpi_device_notify_fixed); 375 acpi_device_notify_fixed);
413 else 376 else
@@ -474,12 +437,12 @@ struct bus_type acpi_bus_type = {
474 .uevent = acpi_device_uevent, 437 .uevent = acpi_device_uevent,
475}; 438};
476 439
477static int acpi_device_register(struct acpi_device *device, 440static int acpi_device_register(struct acpi_device *device)
478 struct acpi_device *parent)
479{ 441{
480 int result; 442 int result;
481 struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id; 443 struct acpi_device_bus_id *acpi_device_bus_id, *new_bus_id;
482 int found = 0; 444 int found = 0;
445
483 /* 446 /*
484 * Linkage 447 * Linkage
485 * ------- 448 * -------
@@ -501,8 +464,9 @@ static int acpi_device_register(struct acpi_device *device,
501 * If failed, create one and link it into acpi_bus_id_list 464 * If failed, create one and link it into acpi_bus_id_list
502 */ 465 */
503 list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) { 466 list_for_each_entry(acpi_device_bus_id, &acpi_bus_id_list, node) {
504 if(!strcmp(acpi_device_bus_id->bus_id, device->flags.hardware_id? device->pnp.hardware_id : "device")) { 467 if (!strcmp(acpi_device_bus_id->bus_id,
505 acpi_device_bus_id->instance_no ++; 468 acpi_device_hid(device))) {
469 acpi_device_bus_id->instance_no++;
506 found = 1; 470 found = 1;
507 kfree(new_bus_id); 471 kfree(new_bus_id);
508 break; 472 break;
@@ -510,7 +474,7 @@ static int acpi_device_register(struct acpi_device *device,
510 } 474 }
511 if (!found) { 475 if (!found) {
512 acpi_device_bus_id = new_bus_id; 476 acpi_device_bus_id = new_bus_id;
513 strcpy(acpi_device_bus_id->bus_id, device->flags.hardware_id ? device->pnp.hardware_id : "device"); 477 strcpy(acpi_device_bus_id->bus_id, acpi_device_hid(device));
514 acpi_device_bus_id->instance_no = 0; 478 acpi_device_bus_id->instance_no = 0;
515 list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list); 479 list_add_tail(&acpi_device_bus_id->node, &acpi_bus_id_list);
516 } 480 }
@@ -524,7 +488,7 @@ static int acpi_device_register(struct acpi_device *device,
524 mutex_unlock(&acpi_device_lock); 488 mutex_unlock(&acpi_device_lock);
525 489
526 if (device->parent) 490 if (device->parent)
527 device->dev.parent = &parent->dev; 491 device->dev.parent = &device->parent->dev;
528 device->dev.bus = &acpi_bus_type; 492 device->dev.bus = &acpi_bus_type;
529 device->dev.release = &acpi_device_release; 493 device->dev.release = &acpi_device_release;
530 result = device_register(&device->dev); 494 result = device_register(&device->dev);
@@ -664,6 +628,33 @@ EXPORT_SYMBOL(acpi_bus_unregister_driver);
664/* -------------------------------------------------------------------------- 628/* --------------------------------------------------------------------------
665 Device Enumeration 629 Device Enumeration
666 -------------------------------------------------------------------------- */ 630 -------------------------------------------------------------------------- */
631static struct acpi_device *acpi_bus_get_parent(acpi_handle handle)
632{
633 acpi_status status;
634 int ret;
635 struct acpi_device *device;
636
637 /*
638 * Fixed hardware devices do not appear in the namespace and do not
639 * have handles, but we fabricate acpi_devices for them, so we have
640 * to deal with them specially.
641 */
642 if (handle == NULL)
643 return acpi_root;
644
645 do {
646 status = acpi_get_parent(handle, &handle);
647 if (status == AE_NULL_ENTRY)
648 return NULL;
649 if (ACPI_FAILURE(status))
650 return acpi_root;
651
652 ret = acpi_bus_get_device(handle, &device);
653 if (ret == 0)
654 return device;
655 } while (1);
656}
657
667acpi_status 658acpi_status
668acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd) 659acpi_bus_get_ejd(acpi_handle handle, acpi_handle *ejd)
669{ 660{
@@ -876,11 +867,6 @@ static int acpi_bus_get_flags(struct acpi_device *device)
876 if (ACPI_SUCCESS(status)) 867 if (ACPI_SUCCESS(status))
877 device->flags.dynamic_status = 1; 868 device->flags.dynamic_status = 1;
878 869
879 /* Presence of _CID indicates 'compatible_ids' */
880 status = acpi_get_handle(device->handle, "_CID", &temp);
881 if (ACPI_SUCCESS(status))
882 device->flags.compatible_ids = 1;
883
884 /* Presence of _RMV indicates 'removable' */ 870 /* Presence of _RMV indicates 'removable' */
885 status = acpi_get_handle(device->handle, "_RMV", &temp); 871 status = acpi_get_handle(device->handle, "_RMV", &temp);
886 if (ACPI_SUCCESS(status)) 872 if (ACPI_SUCCESS(status))
@@ -918,8 +904,7 @@ static int acpi_bus_get_flags(struct acpi_device *device)
918 return 0; 904 return 0;
919} 905}
920 906
921static void acpi_device_get_busid(struct acpi_device *device, 907static void acpi_device_get_busid(struct acpi_device *device)
922 acpi_handle handle, int type)
923{ 908{
924 char bus_id[5] = { '?', 0 }; 909 char bus_id[5] = { '?', 0 };
925 struct acpi_buffer buffer = { sizeof(bus_id), bus_id }; 910 struct acpi_buffer buffer = { sizeof(bus_id), bus_id };
@@ -931,10 +916,12 @@ static void acpi_device_get_busid(struct acpi_device *device,
931 * The device's Bus ID is simply the object name. 916 * The device's Bus ID is simply the object name.
932 * TBD: Shouldn't this value be unique (within the ACPI namespace)? 917 * TBD: Shouldn't this value be unique (within the ACPI namespace)?
933 */ 918 */
934 switch (type) { 919 if (ACPI_IS_ROOT_DEVICE(device)) {
935 case ACPI_BUS_TYPE_SYSTEM:
936 strcpy(device->pnp.bus_id, "ACPI"); 920 strcpy(device->pnp.bus_id, "ACPI");
937 break; 921 return;
922 }
923
924 switch (device->device_type) {
938 case ACPI_BUS_TYPE_POWER_BUTTON: 925 case ACPI_BUS_TYPE_POWER_BUTTON:
939 strcpy(device->pnp.bus_id, "PWRF"); 926 strcpy(device->pnp.bus_id, "PWRF");
940 break; 927 break;
@@ -942,7 +929,7 @@ static void acpi_device_get_busid(struct acpi_device *device,
942 strcpy(device->pnp.bus_id, "SLPF"); 929 strcpy(device->pnp.bus_id, "SLPF");
943 break; 930 break;
944 default: 931 default:
945 acpi_get_name(handle, ACPI_SINGLE_NAME, &buffer); 932 acpi_get_name(device->handle, ACPI_SINGLE_NAME, &buffer);
946 /* Clean up trailing underscores (if any) */ 933 /* Clean up trailing underscores (if any) */
947 for (i = 3; i > 1; i--) { 934 for (i = 3; i > 1; i--) {
948 if (bus_id[i] == '_') 935 if (bus_id[i] == '_')
@@ -1000,204 +987,134 @@ static int acpi_dock_match(struct acpi_device *device)
1000 return acpi_get_handle(device->handle, "_DCK", &tmp); 987 return acpi_get_handle(device->handle, "_DCK", &tmp);
1001} 988}
1002 989
1003static struct acpica_device_id_list* 990char *acpi_device_hid(struct acpi_device *device)
1004acpi_add_cid(
1005 struct acpi_device_info *info,
1006 struct acpica_device_id *new_cid)
1007{ 991{
1008 struct acpica_device_id_list *cid; 992 struct acpi_hardware_id *hid;
1009 char *next_id_string;
1010 acpi_size cid_length;
1011 acpi_size new_cid_length;
1012 u32 i;
1013
1014
1015 /* Allocate new CID list with room for the new CID */
1016
1017 if (!new_cid)
1018 new_cid_length = info->compatible_id_list.list_size;
1019 else if (info->compatible_id_list.list_size)
1020 new_cid_length = info->compatible_id_list.list_size +
1021 new_cid->length + sizeof(struct acpica_device_id);
1022 else
1023 new_cid_length = sizeof(struct acpica_device_id_list) + new_cid->length;
1024
1025 cid = ACPI_ALLOCATE_ZEROED(new_cid_length);
1026 if (!cid) {
1027 return NULL;
1028 }
1029
1030 cid->list_size = new_cid_length;
1031 cid->count = info->compatible_id_list.count;
1032 if (new_cid)
1033 cid->count++;
1034 next_id_string = (char *) cid->ids + (cid->count * sizeof(struct acpica_device_id));
1035
1036 /* Copy all existing CIDs */
1037
1038 for (i = 0; i < info->compatible_id_list.count; i++) {
1039 cid_length = info->compatible_id_list.ids[i].length;
1040 cid->ids[i].string = next_id_string;
1041 cid->ids[i].length = cid_length;
1042 993
1043 ACPI_MEMCPY(next_id_string, info->compatible_id_list.ids[i].string, 994 hid = list_first_entry(&device->pnp.ids, struct acpi_hardware_id, list);
1044 cid_length); 995 return hid->id;
1045 996}
1046 next_id_string += cid_length; 997EXPORT_SYMBOL(acpi_device_hid);
1047 }
1048 998
1049 /* Append the new CID */ 999static void acpi_add_id(struct acpi_device *device, const char *dev_id)
1000{
1001 struct acpi_hardware_id *id;
1050 1002
1051 if (new_cid) { 1003 id = kmalloc(sizeof(*id), GFP_KERNEL);
1052 cid->ids[i].string = next_id_string; 1004 if (!id)
1053 cid->ids[i].length = new_cid->length; 1005 return;
1054 1006
1055 ACPI_MEMCPY(next_id_string, new_cid->string, new_cid->length); 1007 id->id = kmalloc(strlen(dev_id) + 1, GFP_KERNEL);
1008 if (!id->id) {
1009 kfree(id);
1010 return;
1056 } 1011 }
1057 1012
1058 return cid; 1013 strcpy(id->id, dev_id);
1014 list_add_tail(&id->list, &device->pnp.ids);
1059} 1015}
1060 1016
1061static void acpi_device_set_id(struct acpi_device *device, 1017static void acpi_device_set_id(struct acpi_device *device)
1062 struct acpi_device *parent, acpi_handle handle,
1063 int type)
1064{ 1018{
1065 struct acpi_device_info *info = NULL;
1066 char *hid = NULL;
1067 char *uid = NULL;
1068 struct acpica_device_id_list *cid_list = NULL;
1069 char *cid_add = NULL;
1070 acpi_status status; 1019 acpi_status status;
1020 struct acpi_device_info *info;
1021 struct acpica_device_id_list *cid_list;
1022 int i;
1071 1023
1072 switch (type) { 1024 switch (device->device_type) {
1073 case ACPI_BUS_TYPE_DEVICE: 1025 case ACPI_BUS_TYPE_DEVICE:
1074 status = acpi_get_object_info(handle, &info); 1026 if (ACPI_IS_ROOT_DEVICE(device)) {
1027 acpi_add_id(device, ACPI_SYSTEM_HID);
1028 break;
1029 } else if (ACPI_IS_ROOT_DEVICE(device->parent)) {
1030 /* \_SB_, the only root-level namespace device */
1031 acpi_add_id(device, ACPI_BUS_HID);
1032 strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
1033 strcpy(device->pnp.device_class, ACPI_BUS_CLASS);
1034 break;
1035 }
1036
1037 status = acpi_get_object_info(device->handle, &info);
1075 if (ACPI_FAILURE(status)) { 1038 if (ACPI_FAILURE(status)) {
1076 printk(KERN_ERR PREFIX "%s: Error reading device info\n", __func__); 1039 printk(KERN_ERR PREFIX "%s: Error reading device info\n", __func__);
1077 return; 1040 return;
1078 } 1041 }
1079 1042
1080 if (info->valid & ACPI_VALID_HID) 1043 if (info->valid & ACPI_VALID_HID)
1081 hid = info->hardware_id.string; 1044 acpi_add_id(device, info->hardware_id.string);
1082 if (info->valid & ACPI_VALID_UID) 1045 if (info->valid & ACPI_VALID_CID) {
1083 uid = info->unique_id.string;
1084 if (info->valid & ACPI_VALID_CID)
1085 cid_list = &info->compatible_id_list; 1046 cid_list = &info->compatible_id_list;
1047 for (i = 0; i < cid_list->count; i++)
1048 acpi_add_id(device, cid_list->ids[i].string);
1049 }
1086 if (info->valid & ACPI_VALID_ADR) { 1050 if (info->valid & ACPI_VALID_ADR) {
1087 device->pnp.bus_address = info->address; 1051 device->pnp.bus_address = info->address;
1088 device->flags.bus_address = 1; 1052 device->flags.bus_address = 1;
1089 } 1053 }
1090 1054
1091 /* If we have a video/bay/dock device, add our selfdefined 1055 kfree(info);
1092 HID to the CID list. Like that the video/bay/dock drivers 1056
1093 will get autoloaded and the device might still match 1057 /*
1094 against another driver. 1058 * Some devices don't reliably have _HIDs & _CIDs, so add
1095 */ 1059 * synthetic HIDs to make sure drivers can find them.
1060 */
1096 if (acpi_is_video_device(device)) 1061 if (acpi_is_video_device(device))
1097 cid_add = ACPI_VIDEO_HID; 1062 acpi_add_id(device, ACPI_VIDEO_HID);
1098 else if (ACPI_SUCCESS(acpi_bay_match(device))) 1063 else if (ACPI_SUCCESS(acpi_bay_match(device)))
1099 cid_add = ACPI_BAY_HID; 1064 acpi_add_id(device, ACPI_BAY_HID);
1100 else if (ACPI_SUCCESS(acpi_dock_match(device))) 1065 else if (ACPI_SUCCESS(acpi_dock_match(device)))
1101 cid_add = ACPI_DOCK_HID; 1066 acpi_add_id(device, ACPI_DOCK_HID);
1102 1067
1103 break; 1068 break;
1104 case ACPI_BUS_TYPE_POWER: 1069 case ACPI_BUS_TYPE_POWER:
1105 hid = ACPI_POWER_HID; 1070 acpi_add_id(device, ACPI_POWER_HID);
1106 break; 1071 break;
1107 case ACPI_BUS_TYPE_PROCESSOR: 1072 case ACPI_BUS_TYPE_PROCESSOR:
1108 hid = ACPI_PROCESSOR_OBJECT_HID; 1073 acpi_add_id(device, ACPI_PROCESSOR_OBJECT_HID);
1109 break;
1110 case ACPI_BUS_TYPE_SYSTEM:
1111 hid = ACPI_SYSTEM_HID;
1112 break; 1074 break;
1113 case ACPI_BUS_TYPE_THERMAL: 1075 case ACPI_BUS_TYPE_THERMAL:
1114 hid = ACPI_THERMAL_HID; 1076 acpi_add_id(device, ACPI_THERMAL_HID);
1115 break; 1077 break;
1116 case ACPI_BUS_TYPE_POWER_BUTTON: 1078 case ACPI_BUS_TYPE_POWER_BUTTON:
1117 hid = ACPI_BUTTON_HID_POWERF; 1079 acpi_add_id(device, ACPI_BUTTON_HID_POWERF);
1118 break; 1080 break;
1119 case ACPI_BUS_TYPE_SLEEP_BUTTON: 1081 case ACPI_BUS_TYPE_SLEEP_BUTTON:
1120 hid = ACPI_BUTTON_HID_SLEEPF; 1082 acpi_add_id(device, ACPI_BUTTON_HID_SLEEPF);
1121 break; 1083 break;
1122 } 1084 }
1123 1085
1124 /* 1086 /*
1125 * \_SB 1087 * We build acpi_devices for some objects that don't have _HID or _CID,
1126 * ---- 1088 * e.g., PCI bridges and slots. Drivers can't bind to these objects,
1127 * Fix for the system root bus device -- the only root-level device. 1089 * but we do use them indirectly by traversing the acpi_device tree.
1090 * This generic ID isn't useful for driver binding, but it provides
1091 * the useful property that "every acpi_device has an ID."
1128 */ 1092 */
1129 if (((acpi_handle)parent == ACPI_ROOT_OBJECT) && (type == ACPI_BUS_TYPE_DEVICE)) { 1093 if (list_empty(&device->pnp.ids))
1130 hid = ACPI_BUS_HID; 1094 acpi_add_id(device, "device");
1131 strcpy(device->pnp.device_name, ACPI_BUS_DEVICE_NAME);
1132 strcpy(device->pnp.device_class, ACPI_BUS_CLASS);
1133 }
1134
1135 if (hid) {
1136 device->pnp.hardware_id = ACPI_ALLOCATE_ZEROED(strlen (hid) + 1);
1137 if (device->pnp.hardware_id) {
1138 strcpy(device->pnp.hardware_id, hid);
1139 device->flags.hardware_id = 1;
1140 }
1141 }
1142 if (!device->flags.hardware_id)
1143 device->pnp.hardware_id = "";
1144
1145 if (uid) {
1146 device->pnp.unique_id = ACPI_ALLOCATE_ZEROED(strlen (uid) + 1);
1147 if (device->pnp.unique_id) {
1148 strcpy(device->pnp.unique_id, uid);
1149 device->flags.unique_id = 1;
1150 }
1151 }
1152 if (!device->flags.unique_id)
1153 device->pnp.unique_id = "";
1154
1155 if (cid_list || cid_add) {
1156 struct acpica_device_id_list *list;
1157
1158 if (cid_add) {
1159 struct acpica_device_id cid;
1160 cid.length = strlen (cid_add) + 1;
1161 cid.string = cid_add;
1162
1163 list = acpi_add_cid(info, &cid);
1164 } else {
1165 list = acpi_add_cid(info, NULL);
1166 }
1167
1168 if (list) {
1169 device->pnp.cid_list = list;
1170 if (cid_add)
1171 device->flags.compatible_ids = 1;
1172 }
1173 }
1174
1175 kfree(info);
1176} 1095}
1177 1096
1178static int acpi_device_set_context(struct acpi_device *device, int type) 1097static int acpi_device_set_context(struct acpi_device *device)
1179{ 1098{
1180 acpi_status status = AE_OK; 1099 acpi_status status;
1181 int result = 0; 1100
1182 /* 1101 /*
1183 * Context 1102 * Context
1184 * ------- 1103 * -------
1185 * Attach this 'struct acpi_device' to the ACPI object. This makes 1104 * Attach this 'struct acpi_device' to the ACPI object. This makes
1186 * resolutions from handle->device very efficient. Note that we need 1105 * resolutions from handle->device very efficient. Fixed hardware
1187 * to be careful with fixed-feature devices as they all attach to the 1106 * devices have no handles, so we skip them.
1188 * root object.
1189 */ 1107 */
1190 if (type != ACPI_BUS_TYPE_POWER_BUTTON && 1108 if (!device->handle)
1191 type != ACPI_BUS_TYPE_SLEEP_BUTTON) { 1109 return 0;
1192 status = acpi_attach_data(device->handle,
1193 acpi_bus_data_handler, device);
1194 1110
1195 if (ACPI_FAILURE(status)) { 1111 status = acpi_attach_data(device->handle,
1196 printk(KERN_ERR PREFIX "Error attaching device data\n"); 1112 acpi_bus_data_handler, device);
1197 result = -ENODEV; 1113 if (ACPI_SUCCESS(status))
1198 } 1114 return 0;
1199 } 1115
1200 return result; 1116 printk(KERN_ERR PREFIX "Error attaching device data\n");
1117 return -ENODEV;
1201} 1118}
1202 1119
1203static int acpi_bus_remove(struct acpi_device *dev, int rmdevice) 1120static int acpi_bus_remove(struct acpi_device *dev, int rmdevice)
@@ -1223,17 +1140,14 @@ static int acpi_bus_remove(struct acpi_device *dev, int rmdevice)
1223 return 0; 1140 return 0;
1224} 1141}
1225 1142
1226static int 1143static int acpi_add_single_object(struct acpi_device **child,
1227acpi_add_single_object(struct acpi_device **child, 1144 acpi_handle handle, int type,
1228 struct acpi_device *parent, acpi_handle handle, int type, 1145 unsigned long long sta,
1229 struct acpi_bus_ops *ops) 1146 struct acpi_bus_ops *ops)
1230{ 1147{
1231 int result = 0; 1148 int result;
1232 struct acpi_device *device = NULL; 1149 struct acpi_device *device;
1233 1150 struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
1234
1235 if (!child)
1236 return -EINVAL;
1237 1151
1238 device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL); 1152 device = kzalloc(sizeof(struct acpi_device), GFP_KERNEL);
1239 if (!device) { 1153 if (!device) {
@@ -1241,75 +1155,31 @@ acpi_add_single_object(struct acpi_device **child,
1241 return -ENOMEM; 1155 return -ENOMEM;
1242 } 1156 }
1243 1157
1158 INIT_LIST_HEAD(&device->pnp.ids);
1159 device->device_type = type;
1244 device->handle = handle; 1160 device->handle = handle;
1245 device->parent = parent; 1161 device->parent = acpi_bus_get_parent(handle);
1246 device->bus_ops = *ops; /* workround for not call .start */ 1162 device->bus_ops = *ops; /* workround for not call .start */
1163 STRUCT_TO_INT(device->status) = sta;
1247 1164
1248 1165 acpi_device_get_busid(device);
1249 acpi_device_get_busid(device, handle, type);
1250 1166
1251 /* 1167 /*
1252 * Flags 1168 * Flags
1253 * ----- 1169 * -----
1254 * Get prior to calling acpi_bus_get_status() so we know whether 1170 * Note that we only look for object handles -- cannot evaluate objects
1255 * or not _STA is present. Note that we only look for object 1171 * until we know the device is present and properly initialized.
1256 * handles -- cannot evaluate objects until we know the device is
1257 * present and properly initialized.
1258 */ 1172 */
1259 result = acpi_bus_get_flags(device); 1173 result = acpi_bus_get_flags(device);
1260 if (result) 1174 if (result)
1261 goto end; 1175 goto end;
1262 1176
1263 /* 1177 /*
1264 * Status
1265 * ------
1266 * See if the device is present. We always assume that non-Device
1267 * and non-Processor objects (e.g. thermal zones, power resources,
1268 * etc.) are present, functioning, etc. (at least when parent object
1269 * is present). Note that _STA has a different meaning for some
1270 * objects (e.g. power resources) so we need to be careful how we use
1271 * it.
1272 */
1273 switch (type) {
1274 case ACPI_BUS_TYPE_PROCESSOR:
1275 case ACPI_BUS_TYPE_DEVICE:
1276 result = acpi_bus_get_status(device);
1277 if (ACPI_FAILURE(result)) {
1278 result = -ENODEV;
1279 goto end;
1280 }
1281 /*
1282 * When the device is neither present nor functional, the
1283 * device should not be added to Linux ACPI device tree.
1284 * When the status of the device is not present but functinal,
1285 * it should be added to Linux ACPI tree. For example : bay
1286 * device , dock device.
1287 * In such conditions it is unncessary to check whether it is
1288 * bay device or dock device.
1289 */
1290 if (!device->status.present && !device->status.functional) {
1291 result = -ENODEV;
1292 goto end;
1293 }
1294 break;
1295 default:
1296 STRUCT_TO_INT(device->status) =
1297 ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED |
1298 ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING;
1299 break;
1300 }
1301
1302 /*
1303 * Initialize Device 1178 * Initialize Device
1304 * ----------------- 1179 * -----------------
1305 * TBD: Synch with Core's enumeration/initialization process. 1180 * TBD: Synch with Core's enumeration/initialization process.
1306 */ 1181 */
1307 1182 acpi_device_set_id(device);
1308 /*
1309 * Hardware ID, Unique ID, & Bus Address
1310 * -------------------------------------
1311 */
1312 acpi_device_set_id(device, parent, handle, type);
1313 1183
1314 /* 1184 /*
1315 * Power Management 1185 * Power Management
@@ -1341,10 +1211,10 @@ acpi_add_single_object(struct acpi_device **child,
1341 goto end; 1211 goto end;
1342 } 1212 }
1343 1213
1344 if ((result = acpi_device_set_context(device, type))) 1214 if ((result = acpi_device_set_context(device)))
1345 goto end; 1215 goto end;
1346 1216
1347 result = acpi_device_register(device, parent); 1217 result = acpi_device_register(device);
1348 1218
1349 /* 1219 /*
1350 * Bind _ADR-Based Devices when hot add 1220 * Bind _ADR-Based Devices when hot add
@@ -1355,128 +1225,117 @@ acpi_add_single_object(struct acpi_device **child,
1355 } 1225 }
1356 1226
1357end: 1227end:
1358 if (!result) 1228 if (!result) {
1229 acpi_get_name(handle, ACPI_FULL_PATHNAME, &buffer);
1230 ACPI_DEBUG_PRINT((ACPI_DB_INFO,
1231 "Adding %s [%s] parent %s\n", dev_name(&device->dev),
1232 (char *) buffer.pointer,
1233 device->parent ? dev_name(&device->parent->dev) :
1234 "(null)"));
1235 kfree(buffer.pointer);
1359 *child = device; 1236 *child = device;
1360 else 1237 } else
1361 acpi_device_release(&device->dev); 1238 acpi_device_release(&device->dev);
1362 1239
1363 return result; 1240 return result;
1364} 1241}
1365 1242
1366static int acpi_bus_scan(struct acpi_device *start, struct acpi_bus_ops *ops) 1243#define ACPI_STA_DEFAULT (ACPI_STA_DEVICE_PRESENT | ACPI_STA_DEVICE_ENABLED | \
1367{ 1244 ACPI_STA_DEVICE_UI | ACPI_STA_DEVICE_FUNCTIONING)
1368 acpi_status status = AE_OK;
1369 struct acpi_device *parent = NULL;
1370 struct acpi_device *child = NULL;
1371 acpi_handle phandle = NULL;
1372 acpi_handle chandle = NULL;
1373 acpi_object_type type = 0;
1374 u32 level = 1;
1375
1376 1245
1377 if (!start) 1246static int acpi_bus_type_and_status(acpi_handle handle, int *type,
1378 return -EINVAL; 1247 unsigned long long *sta)
1248{
1249 acpi_status status;
1250 acpi_object_type acpi_type;
1379 1251
1380 parent = start; 1252 status = acpi_get_type(handle, &acpi_type);
1381 phandle = start->handle; 1253 if (ACPI_FAILURE(status))
1254 return -ENODEV;
1382 1255
1383 /* 1256 switch (acpi_type) {
1384 * Parse through the ACPI namespace, identify all 'devices', and 1257 case ACPI_TYPE_ANY: /* for ACPI_ROOT_OBJECT */
1385 * create a new 'struct acpi_device' for each. 1258 case ACPI_TYPE_DEVICE:
1386 */ 1259 *type = ACPI_BUS_TYPE_DEVICE;
1387 while ((level > 0) && parent) { 1260 status = acpi_bus_get_status_handle(handle, sta);
1261 if (ACPI_FAILURE(status))
1262 return -ENODEV;
1263 break;
1264 case ACPI_TYPE_PROCESSOR:
1265 *type = ACPI_BUS_TYPE_PROCESSOR;
1266 status = acpi_bus_get_status_handle(handle, sta);
1267 if (ACPI_FAILURE(status))
1268 return -ENODEV;
1269 break;
1270 case ACPI_TYPE_THERMAL:
1271 *type = ACPI_BUS_TYPE_THERMAL;
1272 *sta = ACPI_STA_DEFAULT;
1273 break;
1274 case ACPI_TYPE_POWER:
1275 *type = ACPI_BUS_TYPE_POWER;
1276 *sta = ACPI_STA_DEFAULT;
1277 break;
1278 default:
1279 return -ENODEV;
1280 }
1388 1281
1389 status = acpi_get_next_object(ACPI_TYPE_ANY, phandle, 1282 return 0;
1390 chandle, &chandle); 1283}
1391 1284
1392 /* 1285static acpi_status acpi_bus_check_add(acpi_handle handle, u32 lvl,
1393 * If this scope is exhausted then move our way back up. 1286 void *context, void **return_value)
1394 */ 1287{
1395 if (ACPI_FAILURE(status)) { 1288 struct acpi_bus_ops *ops = context;
1396 level--; 1289 int type;
1397 chandle = phandle; 1290 unsigned long long sta;
1398 acpi_get_parent(phandle, &phandle); 1291 struct acpi_device *device;
1399 if (parent->parent) 1292 acpi_status status;
1400 parent = parent->parent; 1293 int result;
1401 continue;
1402 }
1403 1294
1404 status = acpi_get_type(chandle, &type); 1295 result = acpi_bus_type_and_status(handle, &type, &sta);
1405 if (ACPI_FAILURE(status)) 1296 if (result)
1406 continue; 1297 return AE_OK;
1407 1298
1408 /* 1299 if (!(sta & ACPI_STA_DEVICE_PRESENT) &&
1409 * If this is a scope object then parse it (depth-first). 1300 !(sta & ACPI_STA_DEVICE_FUNCTIONING))
1410 */ 1301 return AE_CTRL_DEPTH;
1411 if (type == ACPI_TYPE_LOCAL_SCOPE) {
1412 level++;
1413 phandle = chandle;
1414 chandle = NULL;
1415 continue;
1416 }
1417 1302
1418 /* 1303 /*
1419 * We're only interested in objects that we consider 'devices'. 1304 * We may already have an acpi_device from a previous enumeration. If
1420 */ 1305 * so, we needn't add it again, but we may still have to start it.
1421 switch (type) { 1306 */
1422 case ACPI_TYPE_DEVICE: 1307 device = NULL;
1423 type = ACPI_BUS_TYPE_DEVICE; 1308 acpi_bus_get_device(handle, &device);
1424 break; 1309 if (ops->acpi_op_add && !device)
1425 case ACPI_TYPE_PROCESSOR: 1310 acpi_add_single_object(&device, handle, type, sta, ops);
1426 type = ACPI_BUS_TYPE_PROCESSOR;
1427 break;
1428 case ACPI_TYPE_THERMAL:
1429 type = ACPI_BUS_TYPE_THERMAL;
1430 break;
1431 case ACPI_TYPE_POWER:
1432 type = ACPI_BUS_TYPE_POWER;
1433 break;
1434 default:
1435 continue;
1436 }
1437 1311
1438 if (ops->acpi_op_add) 1312 if (!device)
1439 status = acpi_add_single_object(&child, parent, 1313 return AE_CTRL_DEPTH;
1440 chandle, type, ops);
1441 else
1442 status = acpi_bus_get_device(chandle, &child);
1443 1314
1315 if (ops->acpi_op_start && !(ops->acpi_op_add)) {
1316 status = acpi_start_single_object(device);
1444 if (ACPI_FAILURE(status)) 1317 if (ACPI_FAILURE(status))
1445 continue; 1318 return AE_CTRL_DEPTH;
1319 }
1446 1320
1447 if (ops->acpi_op_start && !(ops->acpi_op_add)) { 1321 if (!*return_value)
1448 status = acpi_start_single_object(child); 1322 *return_value = device;
1449 if (ACPI_FAILURE(status)) 1323 return AE_OK;
1450 continue; 1324}
1451 }
1452 1325
1453 /* 1326static int acpi_bus_scan(acpi_handle handle, struct acpi_bus_ops *ops,
1454 * If the device is present, enabled, and functioning then 1327 struct acpi_device **child)
1455 * parse its scope (depth-first). Note that we need to 1328{
1456 * represent absent devices to facilitate PnP notifications 1329 acpi_status status;
1457 * -- but only the subtree head (not all of its children, 1330 void *device = NULL;
1458 * which will be enumerated when the parent is inserted). 1331
1459 * 1332 status = acpi_bus_check_add(handle, 0, ops, &device);
1460 * TBD: Need notifications and other detection mechanisms 1333 if (ACPI_SUCCESS(status))
1461 * in place before we can fully implement this. 1334 acpi_walk_namespace(ACPI_TYPE_ANY, handle, ACPI_UINT32_MAX,
1462 */ 1335 acpi_bus_check_add, ops, &device);
1463 /*
1464 * When the device is not present but functional, it is also
1465 * necessary to scan the children of this device.
1466 */
1467 if (child->status.present || (!child->status.present &&
1468 child->status.functional)) {
1469 status = acpi_get_next_object(ACPI_TYPE_ANY, chandle,
1470 NULL, NULL);
1471 if (ACPI_SUCCESS(status)) {
1472 level++;
1473 phandle = chandle;
1474 chandle = NULL;
1475 parent = child;
1476 }
1477 }
1478 }
1479 1336
1337 if (child)
1338 *child = device;
1480 return 0; 1339 return 0;
1481} 1340}
1482 1341
@@ -1484,36 +1343,25 @@ int
1484acpi_bus_add(struct acpi_device **child, 1343acpi_bus_add(struct acpi_device **child,
1485 struct acpi_device *parent, acpi_handle handle, int type) 1344 struct acpi_device *parent, acpi_handle handle, int type)
1486{ 1345{
1487 int result;
1488 struct acpi_bus_ops ops; 1346 struct acpi_bus_ops ops;
1489 1347
1490 memset(&ops, 0, sizeof(ops)); 1348 memset(&ops, 0, sizeof(ops));
1491 ops.acpi_op_add = 1; 1349 ops.acpi_op_add = 1;
1492 1350
1493 result = acpi_add_single_object(child, parent, handle, type, &ops); 1351 acpi_bus_scan(handle, &ops, child);
1494 if (!result) 1352 return 0;
1495 result = acpi_bus_scan(*child, &ops);
1496
1497 return result;
1498} 1353}
1499EXPORT_SYMBOL(acpi_bus_add); 1354EXPORT_SYMBOL(acpi_bus_add);
1500 1355
1501int acpi_bus_start(struct acpi_device *device) 1356int acpi_bus_start(struct acpi_device *device)
1502{ 1357{
1503 int result;
1504 struct acpi_bus_ops ops; 1358 struct acpi_bus_ops ops;
1505 1359
1360 memset(&ops, 0, sizeof(ops));
1361 ops.acpi_op_start = 1;
1506 1362
1507 if (!device) 1363 acpi_bus_scan(device->handle, &ops, NULL);
1508 return -EINVAL; 1364 return 0;
1509
1510 result = acpi_start_single_object(device);
1511 if (!result) {
1512 memset(&ops, 0, sizeof(ops));
1513 ops.acpi_op_start = 1;
1514 result = acpi_bus_scan(device, &ops);
1515 }
1516 return result;
1517} 1365}
1518EXPORT_SYMBOL(acpi_bus_start); 1366EXPORT_SYMBOL(acpi_bus_start);
1519 1367
@@ -1572,15 +1420,12 @@ int acpi_bus_trim(struct acpi_device *start, int rmdevice)
1572} 1420}
1573EXPORT_SYMBOL_GPL(acpi_bus_trim); 1421EXPORT_SYMBOL_GPL(acpi_bus_trim);
1574 1422
1575static int acpi_bus_scan_fixed(struct acpi_device *root) 1423static int acpi_bus_scan_fixed(void)
1576{ 1424{
1577 int result = 0; 1425 int result = 0;
1578 struct acpi_device *device = NULL; 1426 struct acpi_device *device = NULL;
1579 struct acpi_bus_ops ops; 1427 struct acpi_bus_ops ops;
1580 1428
1581 if (!root)
1582 return -ENODEV;
1583
1584 memset(&ops, 0, sizeof(ops)); 1429 memset(&ops, 0, sizeof(ops));
1585 ops.acpi_op_add = 1; 1430 ops.acpi_op_add = 1;
1586 ops.acpi_op_start = 1; 1431 ops.acpi_op_start = 1;
@@ -1589,16 +1434,16 @@ static int acpi_bus_scan_fixed(struct acpi_device *root)
1589 * Enumerate all fixed-feature devices. 1434 * Enumerate all fixed-feature devices.
1590 */ 1435 */
1591 if ((acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON) == 0) { 1436 if ((acpi_gbl_FADT.flags & ACPI_FADT_POWER_BUTTON) == 0) {
1592 result = acpi_add_single_object(&device, acpi_root, 1437 result = acpi_add_single_object(&device, NULL,
1593 NULL,
1594 ACPI_BUS_TYPE_POWER_BUTTON, 1438 ACPI_BUS_TYPE_POWER_BUTTON,
1439 ACPI_STA_DEFAULT,
1595 &ops); 1440 &ops);
1596 } 1441 }
1597 1442
1598 if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) { 1443 if ((acpi_gbl_FADT.flags & ACPI_FADT_SLEEP_BUTTON) == 0) {
1599 result = acpi_add_single_object(&device, acpi_root, 1444 result = acpi_add_single_object(&device, NULL,
1600 NULL,
1601 ACPI_BUS_TYPE_SLEEP_BUTTON, 1445 ACPI_BUS_TYPE_SLEEP_BUTTON,
1446 ACPI_STA_DEFAULT,
1602 &ops); 1447 &ops);
1603 } 1448 }
1604 1449
@@ -1621,24 +1466,15 @@ int __init acpi_scan_init(void)
1621 } 1466 }
1622 1467
1623 /* 1468 /*
1624 * Create the root device in the bus's device tree
1625 */
1626 result = acpi_add_single_object(&acpi_root, NULL, ACPI_ROOT_OBJECT,
1627 ACPI_BUS_TYPE_SYSTEM, &ops);
1628 if (result)
1629 goto Done;
1630
1631 /*
1632 * Enumerate devices in the ACPI namespace. 1469 * Enumerate devices in the ACPI namespace.
1633 */ 1470 */
1634 result = acpi_bus_scan_fixed(acpi_root); 1471 result = acpi_bus_scan(ACPI_ROOT_OBJECT, &ops, &acpi_root);
1635 1472
1636 if (!result) 1473 if (!result)
1637 result = acpi_bus_scan(acpi_root, &ops); 1474 result = acpi_bus_scan_fixed();
1638 1475
1639 if (result) 1476 if (result)
1640 acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL); 1477 acpi_device_unregister(acpi_root, ACPI_BUS_REMOVAL_NORMAL);
1641 1478
1642Done:
1643 return result; 1479 return result;
1644} 1480}
diff --git a/drivers/acpi/video.c b/drivers/acpi/video.c
index 94b1a4c5abab..f6e54bf8dd96 100644
--- a/drivers/acpi/video.c
+++ b/drivers/acpi/video.c
@@ -285,7 +285,7 @@ static int acpi_video_device_brightness_open_fs(struct inode *inode,
285 struct file *file); 285 struct file *file);
286static ssize_t acpi_video_device_write_brightness(struct file *file, 286static ssize_t acpi_video_device_write_brightness(struct file *file,
287 const char __user *buffer, size_t count, loff_t *data); 287 const char __user *buffer, size_t count, loff_t *data);
288static struct file_operations acpi_video_device_brightness_fops = { 288static const struct file_operations acpi_video_device_brightness_fops = {
289 .owner = THIS_MODULE, 289 .owner = THIS_MODULE,
290 .open = acpi_video_device_brightness_open_fs, 290 .open = acpi_video_device_brightness_open_fs,
291 .read = seq_read, 291 .read = seq_read,
@@ -1986,6 +1986,10 @@ acpi_video_switch_brightness(struct acpi_video_device *device, int event)
1986 1986
1987 result = acpi_video_device_lcd_set_level(device, level_next); 1987 result = acpi_video_device_lcd_set_level(device, level_next);
1988 1988
1989 if (!result)
1990 backlight_force_update(device->backlight,
1991 BACKLIGHT_UPDATE_HOTKEY);
1992
1989out: 1993out:
1990 if (result) 1994 if (result)
1991 printk(KERN_ERR PREFIX "Failed to switch the brightness\n"); 1995 printk(KERN_ERR PREFIX "Failed to switch the brightness\n");
diff --git a/drivers/atm/ambassador.c b/drivers/atm/ambassador.c
index 66e181345b3a..8af23411743c 100644
--- a/drivers/atm/ambassador.c
+++ b/drivers/atm/ambassador.c
@@ -2351,6 +2351,7 @@ static void __init amb_check_args (void) {
2351MODULE_AUTHOR(maintainer_string); 2351MODULE_AUTHOR(maintainer_string);
2352MODULE_DESCRIPTION(description_string); 2352MODULE_DESCRIPTION(description_string);
2353MODULE_LICENSE("GPL"); 2353MODULE_LICENSE("GPL");
2354MODULE_FIRMWARE("atmsar11.fw");
2354module_param(debug, ushort, 0644); 2355module_param(debug, ushort, 0644);
2355module_param(cmds, uint, 0); 2356module_param(cmds, uint, 0);
2356module_param(txs, uint, 0); 2357module_param(txs, uint, 0);
diff --git a/drivers/atm/fore200e.c b/drivers/atm/fore200e.c
index f766cc46b4c4..bc53fed89b1e 100644
--- a/drivers/atm/fore200e.c
+++ b/drivers/atm/fore200e.c
@@ -2906,8 +2906,8 @@ fore200e_proc_read(struct atm_dev *dev, loff_t* pos, char* page)
2906 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type)); 2906 u32 media_index = FORE200E_MEDIA_INDEX(fore200e->bus->read(&fore200e->cp_queues->media_type));
2907 u32 oc3_index; 2907 u32 oc3_index;
2908 2908
2909 if ((media_index < 0) || (media_index > 4)) 2909 if (media_index > 4)
2910 media_index = 5; 2910 media_index = 5;
2911 2911
2912 switch (fore200e->loop_mode) { 2912 switch (fore200e->loop_mode) {
2913 case ATM_LM_NONE: oc3_index = 0; 2913 case ATM_LM_NONE: oc3_index = 0;
diff --git a/drivers/atm/he.c b/drivers/atm/he.c
index 70667033a568..e90665876c47 100644
--- a/drivers/atm/he.c
+++ b/drivers/atm/he.c
@@ -2739,7 +2739,7 @@ he_ioctl(struct atm_dev *atm_dev, unsigned int cmd, void __user *arg)
2739 spin_lock_irqsave(&he_dev->global_lock, flags); 2739 spin_lock_irqsave(&he_dev->global_lock, flags);
2740 switch (reg.type) { 2740 switch (reg.type) {
2741 case HE_REGTYPE_PCI: 2741 case HE_REGTYPE_PCI:
2742 if (reg.addr < 0 || reg.addr >= HE_REGMAP_SIZE) { 2742 if (reg.addr >= HE_REGMAP_SIZE) {
2743 err = -EINVAL; 2743 err = -EINVAL;
2744 break; 2744 break;
2745 } 2745 }
diff --git a/drivers/atm/solos-pci.c b/drivers/atm/solos-pci.c
index c5f5186d62a3..d7ad19d2603a 100644
--- a/drivers/atm/solos-pci.c
+++ b/drivers/atm/solos-pci.c
@@ -142,6 +142,9 @@ MODULE_AUTHOR("Traverse Technologies <support@traverse.com.au>");
142MODULE_DESCRIPTION("Solos PCI driver"); 142MODULE_DESCRIPTION("Solos PCI driver");
143MODULE_VERSION(VERSION); 143MODULE_VERSION(VERSION);
144MODULE_LICENSE("GPL"); 144MODULE_LICENSE("GPL");
145MODULE_FIRMWARE("solos-FPGA.bin");
146MODULE_FIRMWARE("solos-Firmware.bin");
147MODULE_FIRMWARE("solos-db-FPGA.bin");
145MODULE_PARM_DESC(reset, "Reset Solos chips on startup"); 148MODULE_PARM_DESC(reset, "Reset Solos chips on startup");
146MODULE_PARM_DESC(atmdebug, "Print ATM data"); 149MODULE_PARM_DESC(atmdebug, "Print ATM data");
147MODULE_PARM_DESC(firmware_upgrade, "Initiate Solos firmware upgrade"); 150MODULE_PARM_DESC(firmware_upgrade, "Initiate Solos firmware upgrade");
diff --git a/drivers/block/DAC960.c b/drivers/block/DAC960.c
index 6fa7b0fdbdfd..eb4fa1943944 100644
--- a/drivers/block/DAC960.c
+++ b/drivers/block/DAC960.c
@@ -38,6 +38,7 @@
38#include <linux/slab.h> 38#include <linux/slab.h>
39#include <linux/smp_lock.h> 39#include <linux/smp_lock.h>
40#include <linux/proc_fs.h> 40#include <linux/proc_fs.h>
41#include <linux/seq_file.h>
41#include <linux/reboot.h> 42#include <linux/reboot.h>
42#include <linux/spinlock.h> 43#include <linux/spinlock.h>
43#include <linux/timer.h> 44#include <linux/timer.h>
@@ -6422,16 +6423,10 @@ static bool DAC960_V2_ExecuteUserCommand(DAC960_Controller_T *Controller,
6422 return true; 6423 return true;
6423} 6424}
6424 6425
6425 6426static int dac960_proc_show(struct seq_file *m, void *v)
6426/*
6427 DAC960_ProcReadStatus implements reading /proc/rd/status.
6428*/
6429
6430static int DAC960_ProcReadStatus(char *Page, char **Start, off_t Offset,
6431 int Count, int *EOF, void *Data)
6432{ 6427{
6433 unsigned char *StatusMessage = "OK\n"; 6428 unsigned char *StatusMessage = "OK\n";
6434 int ControllerNumber, BytesAvailable; 6429 int ControllerNumber;
6435 for (ControllerNumber = 0; 6430 for (ControllerNumber = 0;
6436 ControllerNumber < DAC960_ControllerCount; 6431 ControllerNumber < DAC960_ControllerCount;
6437 ControllerNumber++) 6432 ControllerNumber++)
@@ -6444,52 +6439,49 @@ static int DAC960_ProcReadStatus(char *Page, char **Start, off_t Offset,
6444 break; 6439 break;
6445 } 6440 }
6446 } 6441 }
6447 BytesAvailable = strlen(StatusMessage) - Offset; 6442 seq_puts(m, StatusMessage);
6448 if (Count >= BytesAvailable) 6443 return 0;
6449 {
6450 Count = BytesAvailable;
6451 *EOF = true;
6452 }
6453 if (Count <= 0) return 0;
6454 *Start = Page;
6455 memcpy(Page, &StatusMessage[Offset], Count);
6456 return Count;
6457} 6444}
6458 6445
6446static int dac960_proc_open(struct inode *inode, struct file *file)
6447{
6448 return single_open(file, dac960_proc_show, NULL);
6449}
6459 6450
6460/* 6451static const struct file_operations dac960_proc_fops = {
6461 DAC960_ProcReadInitialStatus implements reading /proc/rd/cN/initial_status. 6452 .owner = THIS_MODULE,
6462*/ 6453 .open = dac960_proc_open,
6454 .read = seq_read,
6455 .llseek = seq_lseek,
6456 .release = single_release,
6457};
6463 6458
6464static int DAC960_ProcReadInitialStatus(char *Page, char **Start, off_t Offset, 6459static int dac960_initial_status_proc_show(struct seq_file *m, void *v)
6465 int Count, int *EOF, void *Data)
6466{ 6460{
6467 DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; 6461 DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private;
6468 int BytesAvailable = Controller->InitialStatusLength - Offset; 6462 seq_printf(m, "%.*s", Controller->InitialStatusLength, Controller->CombinedStatusBuffer);
6469 if (Count >= BytesAvailable) 6463 return 0;
6470 {
6471 Count = BytesAvailable;
6472 *EOF = true;
6473 }
6474 if (Count <= 0) return 0;
6475 *Start = Page;
6476 memcpy(Page, &Controller->CombinedStatusBuffer[Offset], Count);
6477 return Count;
6478} 6464}
6479 6465
6466static int dac960_initial_status_proc_open(struct inode *inode, struct file *file)
6467{
6468 return single_open(file, dac960_initial_status_proc_show, PDE(inode)->data);
6469}
6480 6470
6481/* 6471static const struct file_operations dac960_initial_status_proc_fops = {
6482 DAC960_ProcReadCurrentStatus implements reading /proc/rd/cN/current_status. 6472 .owner = THIS_MODULE,
6483*/ 6473 .open = dac960_initial_status_proc_open,
6474 .read = seq_read,
6475 .llseek = seq_lseek,
6476 .release = single_release,
6477};
6484 6478
6485static int DAC960_ProcReadCurrentStatus(char *Page, char **Start, off_t Offset, 6479static int dac960_current_status_proc_show(struct seq_file *m, void *v)
6486 int Count, int *EOF, void *Data)
6487{ 6480{
6488 DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; 6481 DAC960_Controller_T *Controller = (DAC960_Controller_T *) m->private;
6489 unsigned char *StatusMessage = 6482 unsigned char *StatusMessage =
6490 "No Rebuild or Consistency Check in Progress\n"; 6483 "No Rebuild or Consistency Check in Progress\n";
6491 int ProgressMessageLength = strlen(StatusMessage); 6484 int ProgressMessageLength = strlen(StatusMessage);
6492 int BytesAvailable;
6493 if (jiffies != Controller->LastCurrentStatusTime) 6485 if (jiffies != Controller->LastCurrentStatusTime)
6494 { 6486 {
6495 Controller->CurrentStatusLength = 0; 6487 Controller->CurrentStatusLength = 0;
@@ -6513,49 +6505,41 @@ static int DAC960_ProcReadCurrentStatus(char *Page, char **Start, off_t Offset,
6513 } 6505 }
6514 Controller->LastCurrentStatusTime = jiffies; 6506 Controller->LastCurrentStatusTime = jiffies;
6515 } 6507 }
6516 BytesAvailable = Controller->CurrentStatusLength - Offset; 6508 seq_printf(m, "%.*s", Controller->CurrentStatusLength, Controller->CurrentStatusBuffer);
6517 if (Count >= BytesAvailable) 6509 return 0;
6518 {
6519 Count = BytesAvailable;
6520 *EOF = true;
6521 }
6522 if (Count <= 0) return 0;
6523 *Start = Page;
6524 memcpy(Page, &Controller->CurrentStatusBuffer[Offset], Count);
6525 return Count;
6526} 6510}
6527 6511
6512static int dac960_current_status_proc_open(struct inode *inode, struct file *file)
6513{
6514 return single_open(file, dac960_current_status_proc_show, PDE(inode)->data);
6515}
6528 6516
6529/* 6517static const struct file_operations dac960_current_status_proc_fops = {
6530 DAC960_ProcReadUserCommand implements reading /proc/rd/cN/user_command. 6518 .owner = THIS_MODULE,
6531*/ 6519 .open = dac960_current_status_proc_open,
6520 .read = seq_read,
6521 .llseek = seq_lseek,
6522 .release = single_release,
6523};
6532 6524
6533static int DAC960_ProcReadUserCommand(char *Page, char **Start, off_t Offset, 6525static int dac960_user_command_proc_show(struct seq_file *m, void *v)
6534 int Count, int *EOF, void *Data)
6535{ 6526{
6536 DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; 6527 DAC960_Controller_T *Controller = (DAC960_Controller_T *)m->private;
6537 int BytesAvailable = Controller->UserStatusLength - Offset;
6538 if (Count >= BytesAvailable)
6539 {
6540 Count = BytesAvailable;
6541 *EOF = true;
6542 }
6543 if (Count <= 0) return 0;
6544 *Start = Page;
6545 memcpy(Page, &Controller->UserStatusBuffer[Offset], Count);
6546 return Count;
6547}
6548 6528
6529 seq_printf(m, "%.*s", Controller->UserStatusLength, Controller->UserStatusBuffer);
6530 return 0;
6531}
6549 6532
6550/* 6533static int dac960_user_command_proc_open(struct inode *inode, struct file *file)
6551 DAC960_ProcWriteUserCommand implements writing /proc/rd/cN/user_command. 6534{
6552*/ 6535 return single_open(file, dac960_user_command_proc_show, PDE(inode)->data);
6536}
6553 6537
6554static int DAC960_ProcWriteUserCommand(struct file *file, 6538static ssize_t dac960_user_command_proc_write(struct file *file,
6555 const char __user *Buffer, 6539 const char __user *Buffer,
6556 unsigned long Count, void *Data) 6540 size_t Count, loff_t *pos)
6557{ 6541{
6558 DAC960_Controller_T *Controller = (DAC960_Controller_T *) Data; 6542 DAC960_Controller_T *Controller = (DAC960_Controller_T *) PDE(file->f_path.dentry->d_inode)->data;
6559 unsigned char CommandBuffer[80]; 6543 unsigned char CommandBuffer[80];
6560 int Length; 6544 int Length;
6561 if (Count > sizeof(CommandBuffer)-1) return -EINVAL; 6545 if (Count > sizeof(CommandBuffer)-1) return -EINVAL;
@@ -6572,6 +6556,14 @@ static int DAC960_ProcWriteUserCommand(struct file *file,
6572 ? Count : -EBUSY); 6556 ? Count : -EBUSY);
6573} 6557}
6574 6558
6559static const struct file_operations dac960_user_command_proc_fops = {
6560 .owner = THIS_MODULE,
6561 .open = dac960_user_command_proc_open,
6562 .read = seq_read,
6563 .llseek = seq_lseek,
6564 .release = single_release,
6565 .write = dac960_user_command_proc_write,
6566};
6575 6567
6576/* 6568/*
6577 DAC960_CreateProcEntries creates the /proc/rd/... entries for the 6569 DAC960_CreateProcEntries creates the /proc/rd/... entries for the
@@ -6586,23 +6578,17 @@ static void DAC960_CreateProcEntries(DAC960_Controller_T *Controller)
6586 6578
6587 if (DAC960_ProcDirectoryEntry == NULL) { 6579 if (DAC960_ProcDirectoryEntry == NULL) {
6588 DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL); 6580 DAC960_ProcDirectoryEntry = proc_mkdir("rd", NULL);
6589 StatusProcEntry = create_proc_read_entry("status", 0, 6581 StatusProcEntry = proc_create("status", 0,
6590 DAC960_ProcDirectoryEntry, 6582 DAC960_ProcDirectoryEntry,
6591 DAC960_ProcReadStatus, NULL); 6583 &dac960_proc_fops);
6592 } 6584 }
6593 6585
6594 sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber); 6586 sprintf(Controller->ControllerName, "c%d", Controller->ControllerNumber);
6595 ControllerProcEntry = proc_mkdir(Controller->ControllerName, 6587 ControllerProcEntry = proc_mkdir(Controller->ControllerName,
6596 DAC960_ProcDirectoryEntry); 6588 DAC960_ProcDirectoryEntry);
6597 create_proc_read_entry("initial_status", 0, ControllerProcEntry, 6589 proc_create_data("initial_status", 0, ControllerProcEntry, &dac960_initial_status_proc_fops, Controller);
6598 DAC960_ProcReadInitialStatus, Controller); 6590 proc_create_data("current_status", 0, ControllerProcEntry, &dac960_current_status_proc_fops, Controller);
6599 create_proc_read_entry("current_status", 0, ControllerProcEntry, 6591 UserCommandProcEntry = proc_create_data("user_command", S_IWUSR | S_IRUSR, ControllerProcEntry, &dac960_user_command_proc_fops, Controller);
6600 DAC960_ProcReadCurrentStatus, Controller);
6601 UserCommandProcEntry =
6602 create_proc_read_entry("user_command", S_IWUSR | S_IRUSR,
6603 ControllerProcEntry, DAC960_ProcReadUserCommand,
6604 Controller);
6605 UserCommandProcEntry->write_proc = DAC960_ProcWriteUserCommand;
6606 Controller->ControllerProcEntry = ControllerProcEntry; 6592 Controller->ControllerProcEntry = ControllerProcEntry;
6607} 6593}
6608 6594
diff --git a/drivers/block/cciss.c b/drivers/block/cciss.c
index 24c3e21ab263..fb5be2d95d52 100644
--- a/drivers/block/cciss.c
+++ b/drivers/block/cciss.c
@@ -36,9 +36,11 @@
36#include <linux/proc_fs.h> 36#include <linux/proc_fs.h>
37#include <linux/seq_file.h> 37#include <linux/seq_file.h>
38#include <linux/init.h> 38#include <linux/init.h>
39#include <linux/jiffies.h>
39#include <linux/hdreg.h> 40#include <linux/hdreg.h>
40#include <linux/spinlock.h> 41#include <linux/spinlock.h>
41#include <linux/compat.h> 42#include <linux/compat.h>
43#include <linux/mutex.h>
42#include <asm/uaccess.h> 44#include <asm/uaccess.h>
43#include <asm/io.h> 45#include <asm/io.h>
44 46
@@ -155,6 +157,10 @@ static struct board_type products[] = {
155 157
156static ctlr_info_t *hba[MAX_CTLR]; 158static ctlr_info_t *hba[MAX_CTLR];
157 159
160static struct task_struct *cciss_scan_thread;
161static DEFINE_MUTEX(scan_mutex);
162static LIST_HEAD(scan_q);
163
158static void do_cciss_request(struct request_queue *q); 164static void do_cciss_request(struct request_queue *q);
159static irqreturn_t do_cciss_intr(int irq, void *dev_id); 165static irqreturn_t do_cciss_intr(int irq, void *dev_id);
160static int cciss_open(struct block_device *bdev, fmode_t mode); 166static int cciss_open(struct block_device *bdev, fmode_t mode);
@@ -164,9 +170,9 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
164static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo); 170static int cciss_getgeo(struct block_device *bdev, struct hd_geometry *geo);
165 171
166static int cciss_revalidate(struct gendisk *disk); 172static int cciss_revalidate(struct gendisk *disk);
167static int rebuild_lun_table(ctlr_info_t *h, int first_time); 173static int rebuild_lun_table(ctlr_info_t *h, int first_time, int via_ioctl);
168static int deregister_disk(ctlr_info_t *h, int drv_index, 174static int deregister_disk(ctlr_info_t *h, int drv_index,
169 int clear_all); 175 int clear_all, int via_ioctl);
170 176
171static void cciss_read_capacity(int ctlr, int logvol, int withirq, 177static void cciss_read_capacity(int ctlr, int logvol, int withirq,
172 sector_t *total_size, unsigned int *block_size); 178 sector_t *total_size, unsigned int *block_size);
@@ -189,8 +195,13 @@ static int sendcmd_withirq_core(ctlr_info_t *h, CommandList_struct *c,
189static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c); 195static int process_sendcmd_error(ctlr_info_t *h, CommandList_struct *c);
190 196
191static void fail_all_cmds(unsigned long ctlr); 197static void fail_all_cmds(unsigned long ctlr);
198static int add_to_scan_list(struct ctlr_info *h);
192static int scan_thread(void *data); 199static int scan_thread(void *data);
193static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c); 200static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c);
201static void cciss_hba_release(struct device *dev);
202static void cciss_device_release(struct device *dev);
203static void cciss_free_gendisk(ctlr_info_t *h, int drv_index);
204static void cciss_free_drive_info(ctlr_info_t *h, int drv_index);
194 205
195#ifdef CONFIG_PROC_FS 206#ifdef CONFIG_PROC_FS
196static void cciss_procinit(int i); 207static void cciss_procinit(int i);
@@ -245,7 +256,10 @@ static inline void removeQ(CommandList_struct *c)
245 256
246#include "cciss_scsi.c" /* For SCSI tape support */ 257#include "cciss_scsi.c" /* For SCSI tape support */
247 258
248#define RAID_UNKNOWN 6 259static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
260 "UNKNOWN"
261};
262#define RAID_UNKNOWN (sizeof(raid_label) / sizeof(raid_label[0])-1)
249 263
250#ifdef CONFIG_PROC_FS 264#ifdef CONFIG_PROC_FS
251 265
@@ -255,9 +269,6 @@ static inline void removeQ(CommandList_struct *c)
255#define ENG_GIG 1000000000 269#define ENG_GIG 1000000000
256#define ENG_GIG_FACTOR (ENG_GIG/512) 270#define ENG_GIG_FACTOR (ENG_GIG/512)
257#define ENGAGE_SCSI "engage scsi" 271#define ENGAGE_SCSI "engage scsi"
258static const char *raid_label[] = { "0", "4", "1(1+0)", "5", "5+1", "ADG",
259 "UNKNOWN"
260};
261 272
262static struct proc_dir_entry *proc_cciss; 273static struct proc_dir_entry *proc_cciss;
263 274
@@ -318,7 +329,7 @@ static int cciss_seq_show(struct seq_file *seq, void *v)
318 ctlr_info_t *h = seq->private; 329 ctlr_info_t *h = seq->private;
319 unsigned ctlr = h->ctlr; 330 unsigned ctlr = h->ctlr;
320 loff_t *pos = v; 331 loff_t *pos = v;
321 drive_info_struct *drv = &h->drv[*pos]; 332 drive_info_struct *drv = h->drv[*pos];
322 333
323 if (*pos > h->highest_lun) 334 if (*pos > h->highest_lun)
324 return 0; 335 return 0;
@@ -331,7 +342,7 @@ static int cciss_seq_show(struct seq_file *seq, void *v)
331 vol_sz_frac *= 100; 342 vol_sz_frac *= 100;
332 sector_div(vol_sz_frac, ENG_GIG_FACTOR); 343 sector_div(vol_sz_frac, ENG_GIG_FACTOR);
333 344
334 if (drv->raid_level > 5) 345 if (drv->raid_level < 0 || drv->raid_level > RAID_UNKNOWN)
335 drv->raid_level = RAID_UNKNOWN; 346 drv->raid_level = RAID_UNKNOWN;
336 seq_printf(seq, "cciss/c%dd%d:" 347 seq_printf(seq, "cciss/c%dd%d:"
337 "\t%4u.%02uGB\tRAID %s\n", 348 "\t%4u.%02uGB\tRAID %s\n",
@@ -426,7 +437,7 @@ out:
426 return err; 437 return err;
427} 438}
428 439
429static struct file_operations cciss_proc_fops = { 440static const struct file_operations cciss_proc_fops = {
430 .owner = THIS_MODULE, 441 .owner = THIS_MODULE,
431 .open = cciss_seq_open, 442 .open = cciss_seq_open,
432 .read = seq_read, 443 .read = seq_read,
@@ -454,9 +465,19 @@ static void __devinit cciss_procinit(int i)
454#define to_hba(n) container_of(n, struct ctlr_info, dev) 465#define to_hba(n) container_of(n, struct ctlr_info, dev)
455#define to_drv(n) container_of(n, drive_info_struct, dev) 466#define to_drv(n) container_of(n, drive_info_struct, dev)
456 467
457static struct device_type cciss_host_type = { 468static ssize_t host_store_rescan(struct device *dev,
458 .name = "cciss_host", 469 struct device_attribute *attr,
459}; 470 const char *buf, size_t count)
471{
472 struct ctlr_info *h = to_hba(dev);
473
474 add_to_scan_list(h);
475 wake_up_process(cciss_scan_thread);
476 wait_for_completion_interruptible(&h->scan_wait);
477
478 return count;
479}
480DEVICE_ATTR(rescan, S_IWUSR, NULL, host_store_rescan);
460 481
461static ssize_t dev_show_unique_id(struct device *dev, 482static ssize_t dev_show_unique_id(struct device *dev,
462 struct device_attribute *attr, 483 struct device_attribute *attr,
@@ -560,11 +581,101 @@ static ssize_t dev_show_rev(struct device *dev,
560} 581}
561DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL); 582DEVICE_ATTR(rev, S_IRUGO, dev_show_rev, NULL);
562 583
584static ssize_t cciss_show_lunid(struct device *dev,
585 struct device_attribute *attr, char *buf)
586{
587 drive_info_struct *drv = to_drv(dev);
588 struct ctlr_info *h = to_hba(drv->dev.parent);
589 unsigned long flags;
590 unsigned char lunid[8];
591
592 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
593 if (h->busy_configuring) {
594 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
595 return -EBUSY;
596 }
597 if (!drv->heads) {
598 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
599 return -ENOTTY;
600 }
601 memcpy(lunid, drv->LunID, sizeof(lunid));
602 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
603 return snprintf(buf, 20, "0x%02x%02x%02x%02x%02x%02x%02x%02x\n",
604 lunid[0], lunid[1], lunid[2], lunid[3],
605 lunid[4], lunid[5], lunid[6], lunid[7]);
606}
607DEVICE_ATTR(lunid, S_IRUGO, cciss_show_lunid, NULL);
608
609static ssize_t cciss_show_raid_level(struct device *dev,
610 struct device_attribute *attr, char *buf)
611{
612 drive_info_struct *drv = to_drv(dev);
613 struct ctlr_info *h = to_hba(drv->dev.parent);
614 int raid;
615 unsigned long flags;
616
617 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
618 if (h->busy_configuring) {
619 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
620 return -EBUSY;
621 }
622 raid = drv->raid_level;
623 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
624 if (raid < 0 || raid > RAID_UNKNOWN)
625 raid = RAID_UNKNOWN;
626
627 return snprintf(buf, strlen(raid_label[raid]) + 7, "RAID %s\n",
628 raid_label[raid]);
629}
630DEVICE_ATTR(raid_level, S_IRUGO, cciss_show_raid_level, NULL);
631
632static ssize_t cciss_show_usage_count(struct device *dev,
633 struct device_attribute *attr, char *buf)
634{
635 drive_info_struct *drv = to_drv(dev);
636 struct ctlr_info *h = to_hba(drv->dev.parent);
637 unsigned long flags;
638 int count;
639
640 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
641 if (h->busy_configuring) {
642 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
643 return -EBUSY;
644 }
645 count = drv->usage_count;
646 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
647 return snprintf(buf, 20, "%d\n", count);
648}
649DEVICE_ATTR(usage_count, S_IRUGO, cciss_show_usage_count, NULL);
650
651static struct attribute *cciss_host_attrs[] = {
652 &dev_attr_rescan.attr,
653 NULL
654};
655
656static struct attribute_group cciss_host_attr_group = {
657 .attrs = cciss_host_attrs,
658};
659
660static const struct attribute_group *cciss_host_attr_groups[] = {
661 &cciss_host_attr_group,
662 NULL
663};
664
665static struct device_type cciss_host_type = {
666 .name = "cciss_host",
667 .groups = cciss_host_attr_groups,
668 .release = cciss_hba_release,
669};
670
563static struct attribute *cciss_dev_attrs[] = { 671static struct attribute *cciss_dev_attrs[] = {
564 &dev_attr_unique_id.attr, 672 &dev_attr_unique_id.attr,
565 &dev_attr_model.attr, 673 &dev_attr_model.attr,
566 &dev_attr_vendor.attr, 674 &dev_attr_vendor.attr,
567 &dev_attr_rev.attr, 675 &dev_attr_rev.attr,
676 &dev_attr_lunid.attr,
677 &dev_attr_raid_level.attr,
678 &dev_attr_usage_count.attr,
568 NULL 679 NULL
569}; 680};
570 681
@@ -580,12 +691,24 @@ static const struct attribute_group *cciss_dev_attr_groups[] = {
580static struct device_type cciss_dev_type = { 691static struct device_type cciss_dev_type = {
581 .name = "cciss_device", 692 .name = "cciss_device",
582 .groups = cciss_dev_attr_groups, 693 .groups = cciss_dev_attr_groups,
694 .release = cciss_device_release,
583}; 695};
584 696
585static struct bus_type cciss_bus_type = { 697static struct bus_type cciss_bus_type = {
586 .name = "cciss", 698 .name = "cciss",
587}; 699};
588 700
701/*
702 * cciss_hba_release is called when the reference count
703 * of h->dev goes to zero.
704 */
705static void cciss_hba_release(struct device *dev)
706{
707 /*
708 * nothing to do, but need this to avoid a warning
709 * about not having a release handler from lib/kref.c.
710 */
711}
589 712
590/* 713/*
591 * Initialize sysfs entry for each controller. This sets up and registers 714 * Initialize sysfs entry for each controller. This sets up and registers
@@ -609,6 +732,16 @@ static int cciss_create_hba_sysfs_entry(struct ctlr_info *h)
609static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h) 732static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h)
610{ 733{
611 device_del(&h->dev); 734 device_del(&h->dev);
735 put_device(&h->dev); /* final put. */
736}
737
738/* cciss_device_release is called when the reference count
739 * of h->drv[x]dev goes to zero.
740 */
741static void cciss_device_release(struct device *dev)
742{
743 drive_info_struct *drv = to_drv(dev);
744 kfree(drv);
612} 745}
613 746
614/* 747/*
@@ -617,24 +750,39 @@ static void cciss_destroy_hba_sysfs_entry(struct ctlr_info *h)
617 * /sys/bus/pci/devices/<dev/ccis#/. We also create a link from 750 * /sys/bus/pci/devices/<dev/ccis#/. We also create a link from
618 * /sys/block/cciss!c#d# to this entry. 751 * /sys/block/cciss!c#d# to this entry.
619 */ 752 */
620static int cciss_create_ld_sysfs_entry(struct ctlr_info *h, 753static long cciss_create_ld_sysfs_entry(struct ctlr_info *h,
621 drive_info_struct *drv,
622 int drv_index) 754 int drv_index)
623{ 755{
624 device_initialize(&drv->dev); 756 struct device *dev;
625 drv->dev.type = &cciss_dev_type; 757
626 drv->dev.bus = &cciss_bus_type; 758 if (h->drv[drv_index]->device_initialized)
627 dev_set_name(&drv->dev, "c%dd%d", h->ctlr, drv_index); 759 return 0;
628 drv->dev.parent = &h->dev; 760
629 return device_add(&drv->dev); 761 dev = &h->drv[drv_index]->dev;
762 device_initialize(dev);
763 dev->type = &cciss_dev_type;
764 dev->bus = &cciss_bus_type;
765 dev_set_name(dev, "c%dd%d", h->ctlr, drv_index);
766 dev->parent = &h->dev;
767 h->drv[drv_index]->device_initialized = 1;
768 return device_add(dev);
630} 769}
631 770
632/* 771/*
633 * Remove sysfs entries for a logical drive. 772 * Remove sysfs entries for a logical drive.
634 */ 773 */
635static void cciss_destroy_ld_sysfs_entry(drive_info_struct *drv) 774static void cciss_destroy_ld_sysfs_entry(struct ctlr_info *h, int drv_index,
775 int ctlr_exiting)
636{ 776{
637 device_del(&drv->dev); 777 struct device *dev = &h->drv[drv_index]->dev;
778
779 /* special case for c*d0, we only destroy it on controller exit */
780 if (drv_index == 0 && !ctlr_exiting)
781 return;
782
783 device_del(dev);
784 put_device(dev); /* the "final" put. */
785 h->drv[drv_index] = NULL;
638} 786}
639 787
640/* 788/*
@@ -751,7 +899,7 @@ static int cciss_open(struct block_device *bdev, fmode_t mode)
751 printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name); 899 printk(KERN_DEBUG "cciss_open %s\n", bdev->bd_disk->disk_name);
752#endif /* CCISS_DEBUG */ 900#endif /* CCISS_DEBUG */
753 901
754 if (host->busy_initializing || drv->busy_configuring) 902 if (drv->busy_configuring)
755 return -EBUSY; 903 return -EBUSY;
756 /* 904 /*
757 * Root is allowed to open raw volume zero even if it's not configured 905 * Root is allowed to open raw volume zero even if it's not configured
@@ -767,7 +915,8 @@ static int cciss_open(struct block_device *bdev, fmode_t mode)
767 if (MINOR(bdev->bd_dev) & 0x0f) { 915 if (MINOR(bdev->bd_dev) & 0x0f) {
768 return -ENXIO; 916 return -ENXIO;
769 /* if it is, make sure we have a LUN ID */ 917 /* if it is, make sure we have a LUN ID */
770 } else if (drv->LunID == 0) { 918 } else if (memcmp(drv->LunID, CTLR_LUNID,
919 sizeof(drv->LunID))) {
771 return -ENXIO; 920 return -ENXIO;
772 } 921 }
773 } 922 }
@@ -1132,12 +1281,13 @@ static int cciss_ioctl(struct block_device *bdev, fmode_t mode,
1132 case CCISS_DEREGDISK: 1281 case CCISS_DEREGDISK:
1133 case CCISS_REGNEWD: 1282 case CCISS_REGNEWD:
1134 case CCISS_REVALIDVOLS: 1283 case CCISS_REVALIDVOLS:
1135 return rebuild_lun_table(host, 0); 1284 return rebuild_lun_table(host, 0, 1);
1136 1285
1137 case CCISS_GETLUNINFO:{ 1286 case CCISS_GETLUNINFO:{
1138 LogvolInfo_struct luninfo; 1287 LogvolInfo_struct luninfo;
1139 1288
1140 luninfo.LunID = drv->LunID; 1289 memcpy(&luninfo.LunID, drv->LunID,
1290 sizeof(luninfo.LunID));
1141 luninfo.num_opens = drv->usage_count; 1291 luninfo.num_opens = drv->usage_count;
1142 luninfo.num_parts = 0; 1292 luninfo.num_parts = 0;
1143 if (copy_to_user(argp, &luninfo, 1293 if (copy_to_user(argp, &luninfo,
@@ -1475,7 +1625,10 @@ static void cciss_check_queues(ctlr_info_t *h)
1475 /* make sure the disk has been added and the drive is real 1625 /* make sure the disk has been added and the drive is real
1476 * because this can be called from the middle of init_one. 1626 * because this can be called from the middle of init_one.
1477 */ 1627 */
1478 if (!(h->drv[curr_queue].queue) || !(h->drv[curr_queue].heads)) 1628 if (!h->drv[curr_queue])
1629 continue;
1630 if (!(h->drv[curr_queue]->queue) ||
1631 !(h->drv[curr_queue]->heads))
1479 continue; 1632 continue;
1480 blk_start_queue(h->gendisk[curr_queue]->queue); 1633 blk_start_queue(h->gendisk[curr_queue]->queue);
1481 1634
@@ -1532,13 +1685,11 @@ static void cciss_softirq_done(struct request *rq)
1532 spin_unlock_irqrestore(&h->lock, flags); 1685 spin_unlock_irqrestore(&h->lock, flags);
1533} 1686}
1534 1687
1535static void log_unit_to_scsi3addr(ctlr_info_t *h, unsigned char scsi3addr[], 1688static inline void log_unit_to_scsi3addr(ctlr_info_t *h,
1536 uint32_t log_unit) 1689 unsigned char scsi3addr[], uint32_t log_unit)
1537{ 1690{
1538 log_unit = h->drv[log_unit].LunID & 0x03fff; 1691 memcpy(scsi3addr, h->drv[log_unit]->LunID,
1539 memset(&scsi3addr[4], 0, 4); 1692 sizeof(h->drv[log_unit]->LunID));
1540 memcpy(&scsi3addr[0], &log_unit, 4);
1541 scsi3addr[3] |= 0x40;
1542} 1693}
1543 1694
1544/* This function gets the SCSI vendor, model, and revision of a logical drive 1695/* This function gets the SCSI vendor, model, and revision of a logical drive
@@ -1615,16 +1766,23 @@ static void cciss_get_serial_no(int ctlr, int logvol, int withirq,
1615 return; 1766 return;
1616} 1767}
1617 1768
1618static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk, 1769/*
1770 * cciss_add_disk sets up the block device queue for a logical drive
1771 */
1772static int cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
1619 int drv_index) 1773 int drv_index)
1620{ 1774{
1621 disk->queue = blk_init_queue(do_cciss_request, &h->lock); 1775 disk->queue = blk_init_queue(do_cciss_request, &h->lock);
1776 if (!disk->queue)
1777 goto init_queue_failure;
1622 sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index); 1778 sprintf(disk->disk_name, "cciss/c%dd%d", h->ctlr, drv_index);
1623 disk->major = h->major; 1779 disk->major = h->major;
1624 disk->first_minor = drv_index << NWD_SHIFT; 1780 disk->first_minor = drv_index << NWD_SHIFT;
1625 disk->fops = &cciss_fops; 1781 disk->fops = &cciss_fops;
1626 disk->private_data = &h->drv[drv_index]; 1782 if (cciss_create_ld_sysfs_entry(h, drv_index))
1627 disk->driverfs_dev = &h->drv[drv_index].dev; 1783 goto cleanup_queue;
1784 disk->private_data = h->drv[drv_index];
1785 disk->driverfs_dev = &h->drv[drv_index]->dev;
1628 1786
1629 /* Set up queue information */ 1787 /* Set up queue information */
1630 blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask); 1788 blk_queue_bounce_limit(disk->queue, h->pdev->dma_mask);
@@ -1642,14 +1800,21 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
1642 disk->queue->queuedata = h; 1800 disk->queue->queuedata = h;
1643 1801
1644 blk_queue_logical_block_size(disk->queue, 1802 blk_queue_logical_block_size(disk->queue,
1645 h->drv[drv_index].block_size); 1803 h->drv[drv_index]->block_size);
1646 1804
1647 /* Make sure all queue data is written out before */ 1805 /* Make sure all queue data is written out before */
1648 /* setting h->drv[drv_index].queue, as setting this */ 1806 /* setting h->drv[drv_index]->queue, as setting this */
1649 /* allows the interrupt handler to start the queue */ 1807 /* allows the interrupt handler to start the queue */
1650 wmb(); 1808 wmb();
1651 h->drv[drv_index].queue = disk->queue; 1809 h->drv[drv_index]->queue = disk->queue;
1652 add_disk(disk); 1810 add_disk(disk);
1811 return 0;
1812
1813cleanup_queue:
1814 blk_cleanup_queue(disk->queue);
1815 disk->queue = NULL;
1816init_queue_failure:
1817 return -1;
1653} 1818}
1654 1819
1655/* This function will check the usage_count of the drive to be updated/added. 1820/* This function will check the usage_count of the drive to be updated/added.
@@ -1662,7 +1827,8 @@ static void cciss_add_disk(ctlr_info_t *h, struct gendisk *disk,
1662 * is also the controller node. Any changes to disk 0 will show up on 1827 * is also the controller node. Any changes to disk 0 will show up on
1663 * the next reboot. 1828 * the next reboot.
1664 */ 1829 */
1665static void cciss_update_drive_info(int ctlr, int drv_index, int first_time) 1830static void cciss_update_drive_info(int ctlr, int drv_index, int first_time,
1831 int via_ioctl)
1666{ 1832{
1667 ctlr_info_t *h = hba[ctlr]; 1833 ctlr_info_t *h = hba[ctlr];
1668 struct gendisk *disk; 1834 struct gendisk *disk;
@@ -1672,21 +1838,13 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1672 unsigned long flags = 0; 1838 unsigned long flags = 0;
1673 int ret = 0; 1839 int ret = 0;
1674 drive_info_struct *drvinfo; 1840 drive_info_struct *drvinfo;
1675 int was_only_controller_node;
1676 1841
1677 /* Get information about the disk and modify the driver structure */ 1842 /* Get information about the disk and modify the driver structure */
1678 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL); 1843 inq_buff = kmalloc(sizeof(InquiryData_struct), GFP_KERNEL);
1679 drvinfo = kmalloc(sizeof(*drvinfo), GFP_KERNEL); 1844 drvinfo = kzalloc(sizeof(*drvinfo), GFP_KERNEL);
1680 if (inq_buff == NULL || drvinfo == NULL) 1845 if (inq_buff == NULL || drvinfo == NULL)
1681 goto mem_msg; 1846 goto mem_msg;
1682 1847
1683 /* See if we're trying to update the "controller node"
1684 * this will happen the when the first logical drive gets
1685 * created by ACU.
1686 */
1687 was_only_controller_node = (drv_index == 0 &&
1688 h->drv[0].raid_level == -1);
1689
1690 /* testing to see if 16-byte CDBs are already being used */ 1848 /* testing to see if 16-byte CDBs are already being used */
1691 if (h->cciss_read == CCISS_READ_16) { 1849 if (h->cciss_read == CCISS_READ_16) {
1692 cciss_read_capacity_16(h->ctlr, drv_index, 1, 1850 cciss_read_capacity_16(h->ctlr, drv_index, 1,
@@ -1719,16 +1877,19 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1719 drvinfo->model, drvinfo->rev); 1877 drvinfo->model, drvinfo->rev);
1720 cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no, 1878 cciss_get_serial_no(ctlr, drv_index, 1, drvinfo->serial_no,
1721 sizeof(drvinfo->serial_no)); 1879 sizeof(drvinfo->serial_no));
1880 /* Save the lunid in case we deregister the disk, below. */
1881 memcpy(drvinfo->LunID, h->drv[drv_index]->LunID,
1882 sizeof(drvinfo->LunID));
1722 1883
1723 /* Is it the same disk we already know, and nothing's changed? */ 1884 /* Is it the same disk we already know, and nothing's changed? */
1724 if (h->drv[drv_index].raid_level != -1 && 1885 if (h->drv[drv_index]->raid_level != -1 &&
1725 ((memcmp(drvinfo->serial_no, 1886 ((memcmp(drvinfo->serial_no,
1726 h->drv[drv_index].serial_no, 16) == 0) && 1887 h->drv[drv_index]->serial_no, 16) == 0) &&
1727 drvinfo->block_size == h->drv[drv_index].block_size && 1888 drvinfo->block_size == h->drv[drv_index]->block_size &&
1728 drvinfo->nr_blocks == h->drv[drv_index].nr_blocks && 1889 drvinfo->nr_blocks == h->drv[drv_index]->nr_blocks &&
1729 drvinfo->heads == h->drv[drv_index].heads && 1890 drvinfo->heads == h->drv[drv_index]->heads &&
1730 drvinfo->sectors == h->drv[drv_index].sectors && 1891 drvinfo->sectors == h->drv[drv_index]->sectors &&
1731 drvinfo->cylinders == h->drv[drv_index].cylinders)) 1892 drvinfo->cylinders == h->drv[drv_index]->cylinders))
1732 /* The disk is unchanged, nothing to update */ 1893 /* The disk is unchanged, nothing to update */
1733 goto freeret; 1894 goto freeret;
1734 1895
@@ -1738,18 +1899,17 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1738 * If the disk already exists then deregister it before proceeding 1899 * If the disk already exists then deregister it before proceeding
1739 * (unless it's the first disk (for the controller node). 1900 * (unless it's the first disk (for the controller node).
1740 */ 1901 */
1741 if (h->drv[drv_index].raid_level != -1 && drv_index != 0) { 1902 if (h->drv[drv_index]->raid_level != -1 && drv_index != 0) {
1742 printk(KERN_WARNING "disk %d has changed.\n", drv_index); 1903 printk(KERN_WARNING "disk %d has changed.\n", drv_index);
1743 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 1904 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1744 h->drv[drv_index].busy_configuring = 1; 1905 h->drv[drv_index]->busy_configuring = 1;
1745 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 1906 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1746 1907
1747 /* deregister_disk sets h->drv[drv_index].queue = NULL 1908 /* deregister_disk sets h->drv[drv_index]->queue = NULL
1748 * which keeps the interrupt handler from starting 1909 * which keeps the interrupt handler from starting
1749 * the queue. 1910 * the queue.
1750 */ 1911 */
1751 ret = deregister_disk(h, drv_index, 0); 1912 ret = deregister_disk(h, drv_index, 0, via_ioctl);
1752 h->drv[drv_index].busy_configuring = 0;
1753 } 1913 }
1754 1914
1755 /* If the disk is in use return */ 1915 /* If the disk is in use return */
@@ -1757,22 +1917,31 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1757 goto freeret; 1917 goto freeret;
1758 1918
1759 /* Save the new information from cciss_geometry_inquiry 1919 /* Save the new information from cciss_geometry_inquiry
1760 * and serial number inquiry. 1920 * and serial number inquiry. If the disk was deregistered
1921 * above, then h->drv[drv_index] will be NULL.
1761 */ 1922 */
1762 h->drv[drv_index].block_size = drvinfo->block_size; 1923 if (h->drv[drv_index] == NULL) {
1763 h->drv[drv_index].nr_blocks = drvinfo->nr_blocks; 1924 drvinfo->device_initialized = 0;
1764 h->drv[drv_index].heads = drvinfo->heads; 1925 h->drv[drv_index] = drvinfo;
1765 h->drv[drv_index].sectors = drvinfo->sectors; 1926 drvinfo = NULL; /* so it won't be freed below. */
1766 h->drv[drv_index].cylinders = drvinfo->cylinders; 1927 } else {
1767 h->drv[drv_index].raid_level = drvinfo->raid_level; 1928 /* special case for cxd0 */
1768 memcpy(h->drv[drv_index].serial_no, drvinfo->serial_no, 16); 1929 h->drv[drv_index]->block_size = drvinfo->block_size;
1769 memcpy(h->drv[drv_index].vendor, drvinfo->vendor, VENDOR_LEN + 1); 1930 h->drv[drv_index]->nr_blocks = drvinfo->nr_blocks;
1770 memcpy(h->drv[drv_index].model, drvinfo->model, MODEL_LEN + 1); 1931 h->drv[drv_index]->heads = drvinfo->heads;
1771 memcpy(h->drv[drv_index].rev, drvinfo->rev, REV_LEN + 1); 1932 h->drv[drv_index]->sectors = drvinfo->sectors;
1933 h->drv[drv_index]->cylinders = drvinfo->cylinders;
1934 h->drv[drv_index]->raid_level = drvinfo->raid_level;
1935 memcpy(h->drv[drv_index]->serial_no, drvinfo->serial_no, 16);
1936 memcpy(h->drv[drv_index]->vendor, drvinfo->vendor,
1937 VENDOR_LEN + 1);
1938 memcpy(h->drv[drv_index]->model, drvinfo->model, MODEL_LEN + 1);
1939 memcpy(h->drv[drv_index]->rev, drvinfo->rev, REV_LEN + 1);
1940 }
1772 1941
1773 ++h->num_luns; 1942 ++h->num_luns;
1774 disk = h->gendisk[drv_index]; 1943 disk = h->gendisk[drv_index];
1775 set_capacity(disk, h->drv[drv_index].nr_blocks); 1944 set_capacity(disk, h->drv[drv_index]->nr_blocks);
1776 1945
1777 /* If it's not disk 0 (drv_index != 0) 1946 /* If it's not disk 0 (drv_index != 0)
1778 * or if it was disk 0, but there was previously 1947 * or if it was disk 0, but there was previously
@@ -1780,8 +1949,15 @@ static void cciss_update_drive_info(int ctlr, int drv_index, int first_time)
1780 * (raid_leve == -1) then we want to update the 1949 * (raid_leve == -1) then we want to update the
1781 * logical drive's information. 1950 * logical drive's information.
1782 */ 1951 */
1783 if (drv_index || first_time) 1952 if (drv_index || first_time) {
1784 cciss_add_disk(h, disk, drv_index); 1953 if (cciss_add_disk(h, disk, drv_index) != 0) {
1954 cciss_free_gendisk(h, drv_index);
1955 cciss_free_drive_info(h, drv_index);
1956 printk(KERN_WARNING "cciss:%d could not update "
1957 "disk %d\n", h->ctlr, drv_index);
1958 --h->num_luns;
1959 }
1960 }
1785 1961
1786freeret: 1962freeret:
1787 kfree(inq_buff); 1963 kfree(inq_buff);
@@ -1793,28 +1969,70 @@ mem_msg:
1793} 1969}
1794 1970
1795/* This function will find the first index of the controllers drive array 1971/* This function will find the first index of the controllers drive array
1796 * that has a -1 for the raid_level and will return that index. This is 1972 * that has a null drv pointer and allocate the drive info struct and
1797 * where new drives will be added. If the index to be returned is greater 1973 * will return that index This is where new drives will be added.
1798 * than the highest_lun index for the controller then highest_lun is set 1974 * If the index to be returned is greater than the highest_lun index for
1799 * to this new index. If there are no available indexes then -1 is returned. 1975 * the controller then highest_lun is set * to this new index.
1800 * "controller_node" is used to know if this is a real logical drive, or just 1976 * If there are no available indexes or if tha allocation fails, then -1
1801 * the controller node, which determines if this counts towards highest_lun. 1977 * is returned. * "controller_node" is used to know if this is a real
1978 * logical drive, or just the controller node, which determines if this
1979 * counts towards highest_lun.
1802 */ 1980 */
1803static int cciss_find_free_drive_index(int ctlr, int controller_node) 1981static int cciss_alloc_drive_info(ctlr_info_t *h, int controller_node)
1804{ 1982{
1805 int i; 1983 int i;
1984 drive_info_struct *drv;
1806 1985
1986 /* Search for an empty slot for our drive info */
1807 for (i = 0; i < CISS_MAX_LUN; i++) { 1987 for (i = 0; i < CISS_MAX_LUN; i++) {
1808 if (hba[ctlr]->drv[i].raid_level == -1) { 1988
1809 if (i > hba[ctlr]->highest_lun) 1989 /* if not cxd0 case, and it's occupied, skip it. */
1810 if (!controller_node) 1990 if (h->drv[i] && i != 0)
1811 hba[ctlr]->highest_lun = i; 1991 continue;
1992 /*
1993 * If it's cxd0 case, and drv is alloc'ed already, and a
1994 * disk is configured there, skip it.
1995 */
1996 if (i == 0 && h->drv[i] && h->drv[i]->raid_level != -1)
1997 continue;
1998
1999 /*
2000 * We've found an empty slot. Update highest_lun
2001 * provided this isn't just the fake cxd0 controller node.
2002 */
2003 if (i > h->highest_lun && !controller_node)
2004 h->highest_lun = i;
2005
2006 /* If adding a real disk at cxd0, and it's already alloc'ed */
2007 if (i == 0 && h->drv[i] != NULL)
1812 return i; 2008 return i;
1813 } 2009
2010 /*
2011 * Found an empty slot, not already alloc'ed. Allocate it.
2012 * Mark it with raid_level == -1, so we know it's new later on.
2013 */
2014 drv = kzalloc(sizeof(*drv), GFP_KERNEL);
2015 if (!drv)
2016 return -1;
2017 drv->raid_level = -1; /* so we know it's new */
2018 h->drv[i] = drv;
2019 return i;
1814 } 2020 }
1815 return -1; 2021 return -1;
1816} 2022}
1817 2023
2024static void cciss_free_drive_info(ctlr_info_t *h, int drv_index)
2025{
2026 kfree(h->drv[drv_index]);
2027 h->drv[drv_index] = NULL;
2028}
2029
2030static void cciss_free_gendisk(ctlr_info_t *h, int drv_index)
2031{
2032 put_disk(h->gendisk[drv_index]);
2033 h->gendisk[drv_index] = NULL;
2034}
2035
1818/* cciss_add_gendisk finds a free hba[]->drv structure 2036/* cciss_add_gendisk finds a free hba[]->drv structure
1819 * and allocates a gendisk if needed, and sets the lunid 2037 * and allocates a gendisk if needed, and sets the lunid
1820 * in the drvinfo structure. It returns the index into 2038 * in the drvinfo structure. It returns the index into
@@ -1824,13 +2042,15 @@ static int cciss_find_free_drive_index(int ctlr, int controller_node)
1824 * a means to talk to the controller in case no logical 2042 * a means to talk to the controller in case no logical
1825 * drives have yet been configured. 2043 * drives have yet been configured.
1826 */ 2044 */
1827static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node) 2045static int cciss_add_gendisk(ctlr_info_t *h, unsigned char lunid[],
2046 int controller_node)
1828{ 2047{
1829 int drv_index; 2048 int drv_index;
1830 2049
1831 drv_index = cciss_find_free_drive_index(h->ctlr, controller_node); 2050 drv_index = cciss_alloc_drive_info(h, controller_node);
1832 if (drv_index == -1) 2051 if (drv_index == -1)
1833 return -1; 2052 return -1;
2053
1834 /*Check if the gendisk needs to be allocated */ 2054 /*Check if the gendisk needs to be allocated */
1835 if (!h->gendisk[drv_index]) { 2055 if (!h->gendisk[drv_index]) {
1836 h->gendisk[drv_index] = 2056 h->gendisk[drv_index] =
@@ -1839,23 +2059,24 @@ static int cciss_add_gendisk(ctlr_info_t *h, __u32 lunid, int controller_node)
1839 printk(KERN_ERR "cciss%d: could not " 2059 printk(KERN_ERR "cciss%d: could not "
1840 "allocate a new disk %d\n", 2060 "allocate a new disk %d\n",
1841 h->ctlr, drv_index); 2061 h->ctlr, drv_index);
1842 return -1; 2062 goto err_free_drive_info;
1843 } 2063 }
1844 } 2064 }
1845 h->drv[drv_index].LunID = lunid; 2065 memcpy(h->drv[drv_index]->LunID, lunid,
1846 if (cciss_create_ld_sysfs_entry(h, &h->drv[drv_index], drv_index)) 2066 sizeof(h->drv[drv_index]->LunID));
2067 if (cciss_create_ld_sysfs_entry(h, drv_index))
1847 goto err_free_disk; 2068 goto err_free_disk;
1848
1849 /* Don't need to mark this busy because nobody */ 2069 /* Don't need to mark this busy because nobody */
1850 /* else knows about this disk yet to contend */ 2070 /* else knows about this disk yet to contend */
1851 /* for access to it. */ 2071 /* for access to it. */
1852 h->drv[drv_index].busy_configuring = 0; 2072 h->drv[drv_index]->busy_configuring = 0;
1853 wmb(); 2073 wmb();
1854 return drv_index; 2074 return drv_index;
1855 2075
1856err_free_disk: 2076err_free_disk:
1857 put_disk(h->gendisk[drv_index]); 2077 cciss_free_gendisk(h, drv_index);
1858 h->gendisk[drv_index] = NULL; 2078err_free_drive_info:
2079 cciss_free_drive_info(h, drv_index);
1859 return -1; 2080 return -1;
1860} 2081}
1861 2082
@@ -1872,21 +2093,25 @@ static void cciss_add_controller_node(ctlr_info_t *h)
1872 if (h->gendisk[0] != NULL) /* already did this? Then bail. */ 2093 if (h->gendisk[0] != NULL) /* already did this? Then bail. */
1873 return; 2094 return;
1874 2095
1875 drv_index = cciss_add_gendisk(h, 0, 1); 2096 drv_index = cciss_add_gendisk(h, CTLR_LUNID, 1);
1876 if (drv_index == -1) { 2097 if (drv_index == -1)
1877 printk(KERN_WARNING "cciss%d: could not " 2098 goto error;
1878 "add disk 0.\n", h->ctlr); 2099 h->drv[drv_index]->block_size = 512;
1879 return; 2100 h->drv[drv_index]->nr_blocks = 0;
1880 } 2101 h->drv[drv_index]->heads = 0;
1881 h->drv[drv_index].block_size = 512; 2102 h->drv[drv_index]->sectors = 0;
1882 h->drv[drv_index].nr_blocks = 0; 2103 h->drv[drv_index]->cylinders = 0;
1883 h->drv[drv_index].heads = 0; 2104 h->drv[drv_index]->raid_level = -1;
1884 h->drv[drv_index].sectors = 0; 2105 memset(h->drv[drv_index]->serial_no, 0, 16);
1885 h->drv[drv_index].cylinders = 0;
1886 h->drv[drv_index].raid_level = -1;
1887 memset(h->drv[drv_index].serial_no, 0, 16);
1888 disk = h->gendisk[drv_index]; 2106 disk = h->gendisk[drv_index];
1889 cciss_add_disk(h, disk, drv_index); 2107 if (cciss_add_disk(h, disk, drv_index) == 0)
2108 return;
2109 cciss_free_gendisk(h, drv_index);
2110 cciss_free_drive_info(h, drv_index);
2111error:
2112 printk(KERN_WARNING "cciss%d: could not "
2113 "add disk 0.\n", h->ctlr);
2114 return;
1890} 2115}
1891 2116
1892/* This function will add and remove logical drives from the Logical 2117/* This function will add and remove logical drives from the Logical
@@ -1897,7 +2122,8 @@ static void cciss_add_controller_node(ctlr_info_t *h)
1897 * INPUT 2122 * INPUT
1898 * h = The controller to perform the operations on 2123 * h = The controller to perform the operations on
1899 */ 2124 */
1900static int rebuild_lun_table(ctlr_info_t *h, int first_time) 2125static int rebuild_lun_table(ctlr_info_t *h, int first_time,
2126 int via_ioctl)
1901{ 2127{
1902 int ctlr = h->ctlr; 2128 int ctlr = h->ctlr;
1903 int num_luns; 2129 int num_luns;
@@ -1907,7 +2133,7 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
1907 int i; 2133 int i;
1908 int drv_found; 2134 int drv_found;
1909 int drv_index = 0; 2135 int drv_index = 0;
1910 __u32 lunid = 0; 2136 unsigned char lunid[8] = CTLR_LUNID;
1911 unsigned long flags; 2137 unsigned long flags;
1912 2138
1913 if (!capable(CAP_SYS_RAWIO)) 2139 if (!capable(CAP_SYS_RAWIO))
@@ -1960,13 +2186,13 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
1960 drv_found = 0; 2186 drv_found = 0;
1961 2187
1962 /* skip holes in the array from already deleted drives */ 2188 /* skip holes in the array from already deleted drives */
1963 if (h->drv[i].raid_level == -1) 2189 if (h->drv[i] == NULL)
1964 continue; 2190 continue;
1965 2191
1966 for (j = 0; j < num_luns; j++) { 2192 for (j = 0; j < num_luns; j++) {
1967 memcpy(&lunid, &ld_buff->LUN[j][0], 4); 2193 memcpy(lunid, &ld_buff->LUN[j][0], sizeof(lunid));
1968 lunid = le32_to_cpu(lunid); 2194 if (memcmp(h->drv[i]->LunID, lunid,
1969 if (h->drv[i].LunID == lunid) { 2195 sizeof(lunid)) == 0) {
1970 drv_found = 1; 2196 drv_found = 1;
1971 break; 2197 break;
1972 } 2198 }
@@ -1974,11 +2200,11 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
1974 if (!drv_found) { 2200 if (!drv_found) {
1975 /* Deregister it from the OS, it's gone. */ 2201 /* Deregister it from the OS, it's gone. */
1976 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags); 2202 spin_lock_irqsave(CCISS_LOCK(h->ctlr), flags);
1977 h->drv[i].busy_configuring = 1; 2203 h->drv[i]->busy_configuring = 1;
1978 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags); 2204 spin_unlock_irqrestore(CCISS_LOCK(h->ctlr), flags);
1979 return_code = deregister_disk(h, i, 1); 2205 return_code = deregister_disk(h, i, 1, via_ioctl);
1980 cciss_destroy_ld_sysfs_entry(&h->drv[i]); 2206 if (h->drv[i] != NULL)
1981 h->drv[i].busy_configuring = 0; 2207 h->drv[i]->busy_configuring = 0;
1982 } 2208 }
1983 } 2209 }
1984 2210
@@ -1992,17 +2218,16 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
1992 2218
1993 drv_found = 0; 2219 drv_found = 0;
1994 2220
1995 memcpy(&lunid, &ld_buff->LUN[i][0], 4); 2221 memcpy(lunid, &ld_buff->LUN[i][0], sizeof(lunid));
1996 lunid = le32_to_cpu(lunid);
1997
1998 /* Find if the LUN is already in the drive array 2222 /* Find if the LUN is already in the drive array
1999 * of the driver. If so then update its info 2223 * of the driver. If so then update its info
2000 * if not in use. If it does not exist then find 2224 * if not in use. If it does not exist then find
2001 * the first free index and add it. 2225 * the first free index and add it.
2002 */ 2226 */
2003 for (j = 0; j <= h->highest_lun; j++) { 2227 for (j = 0; j <= h->highest_lun; j++) {
2004 if (h->drv[j].raid_level != -1 && 2228 if (h->drv[j] != NULL &&
2005 h->drv[j].LunID == lunid) { 2229 memcmp(h->drv[j]->LunID, lunid,
2230 sizeof(h->drv[j]->LunID)) == 0) {
2006 drv_index = j; 2231 drv_index = j;
2007 drv_found = 1; 2232 drv_found = 1;
2008 break; 2233 break;
@@ -2015,7 +2240,8 @@ static int rebuild_lun_table(ctlr_info_t *h, int first_time)
2015 if (drv_index == -1) 2240 if (drv_index == -1)
2016 goto freeret; 2241 goto freeret;
2017 } 2242 }
2018 cciss_update_drive_info(ctlr, drv_index, first_time); 2243 cciss_update_drive_info(ctlr, drv_index, first_time,
2244 via_ioctl);
2019 } /* end for */ 2245 } /* end for */
2020 2246
2021freeret: 2247freeret:
@@ -2032,6 +2258,25 @@ mem_msg:
2032 goto freeret; 2258 goto freeret;
2033} 2259}
2034 2260
2261static void cciss_clear_drive_info(drive_info_struct *drive_info)
2262{
2263 /* zero out the disk size info */
2264 drive_info->nr_blocks = 0;
2265 drive_info->block_size = 0;
2266 drive_info->heads = 0;
2267 drive_info->sectors = 0;
2268 drive_info->cylinders = 0;
2269 drive_info->raid_level = -1;
2270 memset(drive_info->serial_no, 0, sizeof(drive_info->serial_no));
2271 memset(drive_info->model, 0, sizeof(drive_info->model));
2272 memset(drive_info->rev, 0, sizeof(drive_info->rev));
2273 memset(drive_info->vendor, 0, sizeof(drive_info->vendor));
2274 /*
2275 * don't clear the LUNID though, we need to remember which
2276 * one this one is.
2277 */
2278}
2279
2035/* This function will deregister the disk and it's queue from the 2280/* This function will deregister the disk and it's queue from the
2036 * kernel. It must be called with the controller lock held and the 2281 * kernel. It must be called with the controller lock held and the
2037 * drv structures busy_configuring flag set. It's parameters are: 2282 * drv structures busy_configuring flag set. It's parameters are:
@@ -2046,43 +2291,48 @@ mem_msg:
2046 * the disk in preparation for re-adding it. In this case 2291 * the disk in preparation for re-adding it. In this case
2047 * the highest_lun should be left unchanged and the LunID 2292 * the highest_lun should be left unchanged and the LunID
2048 * should not be cleared. 2293 * should not be cleared.
2294 * via_ioctl
2295 * This indicates whether we've reached this path via ioctl.
2296 * This affects the maximum usage count allowed for c0d0 to be messed with.
2297 * If this path is reached via ioctl(), then the max_usage_count will
2298 * be 1, as the process calling ioctl() has got to have the device open.
2299 * If we get here via sysfs, then the max usage count will be zero.
2049*/ 2300*/
2050static int deregister_disk(ctlr_info_t *h, int drv_index, 2301static int deregister_disk(ctlr_info_t *h, int drv_index,
2051 int clear_all) 2302 int clear_all, int via_ioctl)
2052{ 2303{
2053 int i; 2304 int i;
2054 struct gendisk *disk; 2305 struct gendisk *disk;
2055 drive_info_struct *drv; 2306 drive_info_struct *drv;
2307 int recalculate_highest_lun;
2056 2308
2057 if (!capable(CAP_SYS_RAWIO)) 2309 if (!capable(CAP_SYS_RAWIO))
2058 return -EPERM; 2310 return -EPERM;
2059 2311
2060 drv = &h->drv[drv_index]; 2312 drv = h->drv[drv_index];
2061 disk = h->gendisk[drv_index]; 2313 disk = h->gendisk[drv_index];
2062 2314
2063 /* make sure logical volume is NOT is use */ 2315 /* make sure logical volume is NOT is use */
2064 if (clear_all || (h->gendisk[0] == disk)) { 2316 if (clear_all || (h->gendisk[0] == disk)) {
2065 if (drv->usage_count > 1) 2317 if (drv->usage_count > via_ioctl)
2066 return -EBUSY; 2318 return -EBUSY;
2067 } else if (drv->usage_count > 0) 2319 } else if (drv->usage_count > 0)
2068 return -EBUSY; 2320 return -EBUSY;
2069 2321
2322 recalculate_highest_lun = (drv == h->drv[h->highest_lun]);
2323
2070 /* invalidate the devices and deregister the disk. If it is disk 2324 /* invalidate the devices and deregister the disk. If it is disk
2071 * zero do not deregister it but just zero out it's values. This 2325 * zero do not deregister it but just zero out it's values. This
2072 * allows us to delete disk zero but keep the controller registered. 2326 * allows us to delete disk zero but keep the controller registered.
2073 */ 2327 */
2074 if (h->gendisk[0] != disk) { 2328 if (h->gendisk[0] != disk) {
2075 struct request_queue *q = disk->queue; 2329 struct request_queue *q = disk->queue;
2076 if (disk->flags & GENHD_FL_UP) 2330 if (disk->flags & GENHD_FL_UP) {
2331 cciss_destroy_ld_sysfs_entry(h, drv_index, 0);
2077 del_gendisk(disk); 2332 del_gendisk(disk);
2078 if (q) {
2079 blk_cleanup_queue(q);
2080 /* Set drv->queue to NULL so that we do not try
2081 * to call blk_start_queue on this queue in the
2082 * interrupt handler
2083 */
2084 drv->queue = NULL;
2085 } 2333 }
2334 if (q)
2335 blk_cleanup_queue(q);
2086 /* If clear_all is set then we are deleting the logical 2336 /* If clear_all is set then we are deleting the logical
2087 * drive, not just refreshing its info. For drives 2337 * drive, not just refreshing its info. For drives
2088 * other than disk 0 we will call put_disk. We do not 2338 * other than disk 0 we will call put_disk. We do not
@@ -2105,34 +2355,20 @@ static int deregister_disk(ctlr_info_t *h, int drv_index,
2105 } 2355 }
2106 } else { 2356 } else {
2107 set_capacity(disk, 0); 2357 set_capacity(disk, 0);
2358 cciss_clear_drive_info(drv);
2108 } 2359 }
2109 2360
2110 --h->num_luns; 2361 --h->num_luns;
2111 /* zero out the disk size info */
2112 drv->nr_blocks = 0;
2113 drv->block_size = 0;
2114 drv->heads = 0;
2115 drv->sectors = 0;
2116 drv->cylinders = 0;
2117 drv->raid_level = -1; /* This can be used as a flag variable to
2118 * indicate that this element of the drive
2119 * array is free.
2120 */
2121
2122 if (clear_all) {
2123 /* check to see if it was the last disk */
2124 if (drv == h->drv + h->highest_lun) {
2125 /* if so, find the new hightest lun */
2126 int i, newhighest = -1;
2127 for (i = 0; i <= h->highest_lun; i++) {
2128 /* if the disk has size > 0, it is available */
2129 if (h->drv[i].heads)
2130 newhighest = i;
2131 }
2132 h->highest_lun = newhighest;
2133 }
2134 2362
2135 drv->LunID = 0; 2363 /* if it was the last disk, find the new hightest lun */
2364 if (clear_all && recalculate_highest_lun) {
2365 int i, newhighest = -1;
2366 for (i = 0; i <= h->highest_lun; i++) {
2367 /* if the disk has size > 0, it is available */
2368 if (h->drv[i] && h->drv[i]->heads)
2369 newhighest = i;
2370 }
2371 h->highest_lun = newhighest;
2136 } 2372 }
2137 return 0; 2373 return 0;
2138} 2374}
@@ -2479,8 +2715,6 @@ static void cciss_geometry_inquiry(int ctlr, int logvol,
2479 } else { /* Get geometry failed */ 2715 } else { /* Get geometry failed */
2480 printk(KERN_WARNING "cciss: reading geometry failed\n"); 2716 printk(KERN_WARNING "cciss: reading geometry failed\n");
2481 } 2717 }
2482 printk(KERN_INFO " heads=%d, sectors=%d, cylinders=%d\n\n",
2483 drv->heads, drv->sectors, drv->cylinders);
2484} 2718}
2485 2719
2486static void 2720static void
@@ -2514,9 +2748,6 @@ cciss_read_capacity(int ctlr, int logvol, int withirq, sector_t *total_size,
2514 *total_size = 0; 2748 *total_size = 0;
2515 *block_size = BLOCK_SIZE; 2749 *block_size = BLOCK_SIZE;
2516 } 2750 }
2517 if (*total_size != 0)
2518 printk(KERN_INFO " blocks= %llu block_size= %d\n",
2519 (unsigned long long)*total_size+1, *block_size);
2520 kfree(buf); 2751 kfree(buf);
2521} 2752}
2522 2753
@@ -2568,7 +2799,8 @@ static int cciss_revalidate(struct gendisk *disk)
2568 InquiryData_struct *inq_buff = NULL; 2799 InquiryData_struct *inq_buff = NULL;
2569 2800
2570 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) { 2801 for (logvol = 0; logvol < CISS_MAX_LUN; logvol++) {
2571 if (h->drv[logvol].LunID == drv->LunID) { 2802 if (memcmp(h->drv[logvol]->LunID, drv->LunID,
2803 sizeof(drv->LunID)) == 0) {
2572 FOUND = 1; 2804 FOUND = 1;
2573 break; 2805 break;
2574 } 2806 }
@@ -3053,8 +3285,7 @@ static void do_cciss_request(struct request_queue *q)
3053 /* The first 2 bits are reserved for controller error reporting. */ 3285 /* The first 2 bits are reserved for controller error reporting. */
3054 c->Header.Tag.lower = (c->cmdindex << 3); 3286 c->Header.Tag.lower = (c->cmdindex << 3);
3055 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */ 3287 c->Header.Tag.lower |= 0x04; /* flag for direct lookup. */
3056 c->Header.LUN.LogDev.VolId = drv->LunID; 3288 memcpy(&c->Header.LUN, drv->LunID, sizeof(drv->LunID));
3057 c->Header.LUN.LogDev.Mode = 1;
3058 c->Request.CDBLen = 10; // 12 byte commands not in FW yet; 3289 c->Request.CDBLen = 10; // 12 byte commands not in FW yet;
3059 c->Request.Type.Type = TYPE_CMD; // It is a command. 3290 c->Request.Type.Type = TYPE_CMD; // It is a command.
3060 c->Request.Type.Attribute = ATTR_SIMPLE; 3291 c->Request.Type.Attribute = ATTR_SIMPLE;
@@ -3232,20 +3463,121 @@ static irqreturn_t do_cciss_intr(int irq, void *dev_id)
3232 return IRQ_HANDLED; 3463 return IRQ_HANDLED;
3233} 3464}
3234 3465
3466/**
3467 * add_to_scan_list() - add controller to rescan queue
3468 * @h: Pointer to the controller.
3469 *
3470 * Adds the controller to the rescan queue if not already on the queue.
3471 *
3472 * returns 1 if added to the queue, 0 if skipped (could be on the
3473 * queue already, or the controller could be initializing or shutting
3474 * down).
3475 **/
3476static int add_to_scan_list(struct ctlr_info *h)
3477{
3478 struct ctlr_info *test_h;
3479 int found = 0;
3480 int ret = 0;
3481
3482 if (h->busy_initializing)
3483 return 0;
3484
3485 if (!mutex_trylock(&h->busy_shutting_down))
3486 return 0;
3487
3488 mutex_lock(&scan_mutex);
3489 list_for_each_entry(test_h, &scan_q, scan_list) {
3490 if (test_h == h) {
3491 found = 1;
3492 break;
3493 }
3494 }
3495 if (!found && !h->busy_scanning) {
3496 INIT_COMPLETION(h->scan_wait);
3497 list_add_tail(&h->scan_list, &scan_q);
3498 ret = 1;
3499 }
3500 mutex_unlock(&scan_mutex);
3501 mutex_unlock(&h->busy_shutting_down);
3502
3503 return ret;
3504}
3505
3506/**
3507 * remove_from_scan_list() - remove controller from rescan queue
3508 * @h: Pointer to the controller.
3509 *
3510 * Removes the controller from the rescan queue if present. Blocks if
3511 * the controller is currently conducting a rescan.
3512 **/
3513static void remove_from_scan_list(struct ctlr_info *h)
3514{
3515 struct ctlr_info *test_h, *tmp_h;
3516 int scanning = 0;
3517
3518 mutex_lock(&scan_mutex);
3519 list_for_each_entry_safe(test_h, tmp_h, &scan_q, scan_list) {
3520 if (test_h == h) {
3521 list_del(&h->scan_list);
3522 complete_all(&h->scan_wait);
3523 mutex_unlock(&scan_mutex);
3524 return;
3525 }
3526 }
3527 if (&h->busy_scanning)
3528 scanning = 0;
3529 mutex_unlock(&scan_mutex);
3530
3531 if (scanning)
3532 wait_for_completion(&h->scan_wait);
3533}
3534
3535/**
3536 * scan_thread() - kernel thread used to rescan controllers
3537 * @data: Ignored.
3538 *
3539 * A kernel thread used scan for drive topology changes on
3540 * controllers. The thread processes only one controller at a time
3541 * using a queue. Controllers are added to the queue using
3542 * add_to_scan_list() and removed from the queue either after done
3543 * processing or using remove_from_scan_list().
3544 *
3545 * returns 0.
3546 **/
3235static int scan_thread(void *data) 3547static int scan_thread(void *data)
3236{ 3548{
3237 ctlr_info_t *h = data; 3549 struct ctlr_info *h;
3238 int rc;
3239 DECLARE_COMPLETION_ONSTACK(wait);
3240 h->rescan_wait = &wait;
3241 3550
3242 for (;;) { 3551 while (1) {
3243 rc = wait_for_completion_interruptible(&wait); 3552 set_current_state(TASK_INTERRUPTIBLE);
3553 schedule();
3244 if (kthread_should_stop()) 3554 if (kthread_should_stop())
3245 break; 3555 break;
3246 if (!rc) 3556
3247 rebuild_lun_table(h, 0); 3557 while (1) {
3558 mutex_lock(&scan_mutex);
3559 if (list_empty(&scan_q)) {
3560 mutex_unlock(&scan_mutex);
3561 break;
3562 }
3563
3564 h = list_entry(scan_q.next,
3565 struct ctlr_info,
3566 scan_list);
3567 list_del(&h->scan_list);
3568 h->busy_scanning = 1;
3569 mutex_unlock(&scan_mutex);
3570
3571 if (h) {
3572 rebuild_lun_table(h, 0, 0);
3573 complete_all(&h->scan_wait);
3574 mutex_lock(&scan_mutex);
3575 h->busy_scanning = 0;
3576 mutex_unlock(&scan_mutex);
3577 }
3578 }
3248 } 3579 }
3580
3249 return 0; 3581 return 0;
3250} 3582}
3251 3583
@@ -3268,8 +3600,8 @@ static int check_for_unit_attention(ctlr_info_t *h, CommandList_struct *c)
3268 case REPORT_LUNS_CHANGED: 3600 case REPORT_LUNS_CHANGED:
3269 printk(KERN_WARNING "cciss%d: report LUN data " 3601 printk(KERN_WARNING "cciss%d: report LUN data "
3270 "changed\n", h->ctlr); 3602 "changed\n", h->ctlr);
3271 if (h->rescan_wait) 3603 add_to_scan_list(h);
3272 complete(h->rescan_wait); 3604 wake_up_process(cciss_scan_thread);
3273 return 1; 3605 return 1;
3274 break; 3606 break;
3275 case POWER_OR_RESET: 3607 case POWER_OR_RESET:
@@ -3489,7 +3821,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
3489 if (scratchpad == CCISS_FIRMWARE_READY) 3821 if (scratchpad == CCISS_FIRMWARE_READY)
3490 break; 3822 break;
3491 set_current_state(TASK_INTERRUPTIBLE); 3823 set_current_state(TASK_INTERRUPTIBLE);
3492 schedule_timeout(HZ / 10); /* wait 100ms */ 3824 schedule_timeout(msecs_to_jiffies(100)); /* wait 100ms */
3493 } 3825 }
3494 if (scratchpad != CCISS_FIRMWARE_READY) { 3826 if (scratchpad != CCISS_FIRMWARE_READY) {
3495 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n"); 3827 printk(KERN_WARNING "cciss: Board not ready. Timed out.\n");
@@ -3615,7 +3947,7 @@ static int __devinit cciss_pci_init(ctlr_info_t *c, struct pci_dev *pdev)
3615 break; 3947 break;
3616 /* delay and try again */ 3948 /* delay and try again */
3617 set_current_state(TASK_INTERRUPTIBLE); 3949 set_current_state(TASK_INTERRUPTIBLE);
3618 schedule_timeout(10); 3950 schedule_timeout(msecs_to_jiffies(1));
3619 } 3951 }
3620 3952
3621#ifdef CCISS_DEBUG 3953#ifdef CCISS_DEBUG
@@ -3669,15 +4001,16 @@ Enomem:
3669 return -1; 4001 return -1;
3670} 4002}
3671 4003
3672static void free_hba(int i) 4004static void free_hba(int n)
3673{ 4005{
3674 ctlr_info_t *p = hba[i]; 4006 ctlr_info_t *h = hba[n];
3675 int n; 4007 int i;
3676 4008
3677 hba[i] = NULL; 4009 hba[n] = NULL;
3678 for (n = 0; n < CISS_MAX_LUN; n++) 4010 for (i = 0; i < h->highest_lun + 1; i++)
3679 put_disk(p->gendisk[n]); 4011 if (h->gendisk[i] != NULL)
3680 kfree(p); 4012 put_disk(h->gendisk[i]);
4013 kfree(h);
3681} 4014}
3682 4015
3683/* Send a message CDB to the firmware. */ 4016/* Send a message CDB to the firmware. */
@@ -3918,6 +4251,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3918 hba[i]->busy_initializing = 1; 4251 hba[i]->busy_initializing = 1;
3919 INIT_HLIST_HEAD(&hba[i]->cmpQ); 4252 INIT_HLIST_HEAD(&hba[i]->cmpQ);
3920 INIT_HLIST_HEAD(&hba[i]->reqQ); 4253 INIT_HLIST_HEAD(&hba[i]->reqQ);
4254 mutex_init(&hba[i]->busy_shutting_down);
3921 4255
3922 if (cciss_pci_init(hba[i], pdev) != 0) 4256 if (cciss_pci_init(hba[i], pdev) != 0)
3923 goto clean0; 4257 goto clean0;
@@ -3926,6 +4260,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
3926 hba[i]->ctlr = i; 4260 hba[i]->ctlr = i;
3927 hba[i]->pdev = pdev; 4261 hba[i]->pdev = pdev;
3928 4262
4263 init_completion(&hba[i]->scan_wait);
4264
3929 if (cciss_create_hba_sysfs_entry(hba[i])) 4265 if (cciss_create_hba_sysfs_entry(hba[i]))
3930 goto clean0; 4266 goto clean0;
3931 4267
@@ -4001,8 +4337,7 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4001 hba[i]->num_luns = 0; 4337 hba[i]->num_luns = 0;
4002 hba[i]->highest_lun = -1; 4338 hba[i]->highest_lun = -1;
4003 for (j = 0; j < CISS_MAX_LUN; j++) { 4339 for (j = 0; j < CISS_MAX_LUN; j++) {
4004 hba[i]->drv[j].raid_level = -1; 4340 hba[i]->drv[j] = NULL;
4005 hba[i]->drv[j].queue = NULL;
4006 hba[i]->gendisk[j] = NULL; 4341 hba[i]->gendisk[j] = NULL;
4007 } 4342 }
4008 4343
@@ -4035,14 +4370,8 @@ static int __devinit cciss_init_one(struct pci_dev *pdev,
4035 4370
4036 hba[i]->cciss_max_sectors = 2048; 4371 hba[i]->cciss_max_sectors = 2048;
4037 4372
4373 rebuild_lun_table(hba[i], 1, 0);
4038 hba[i]->busy_initializing = 0; 4374 hba[i]->busy_initializing = 0;
4039
4040 rebuild_lun_table(hba[i], 1);
4041 hba[i]->cciss_scan_thread = kthread_run(scan_thread, hba[i],
4042 "cciss_scan%02d", i);
4043 if (IS_ERR(hba[i]->cciss_scan_thread))
4044 return PTR_ERR(hba[i]->cciss_scan_thread);
4045
4046 return 1; 4375 return 1;
4047 4376
4048clean4: 4377clean4:
@@ -4063,12 +4392,7 @@ clean1:
4063 cciss_destroy_hba_sysfs_entry(hba[i]); 4392 cciss_destroy_hba_sysfs_entry(hba[i]);
4064clean0: 4393clean0:
4065 hba[i]->busy_initializing = 0; 4394 hba[i]->busy_initializing = 0;
4066 /* cleanup any queues that may have been initialized */ 4395
4067 for (j=0; j <= hba[i]->highest_lun; j++){
4068 drive_info_struct *drv = &(hba[i]->drv[j]);
4069 if (drv->queue)
4070 blk_cleanup_queue(drv->queue);
4071 }
4072 /* 4396 /*
4073 * Deliberately omit pci_disable_device(): it does something nasty to 4397 * Deliberately omit pci_disable_device(): it does something nasty to
4074 * Smart Array controllers that pci_enable_device does not undo 4398 * Smart Array controllers that pci_enable_device does not undo
@@ -4125,8 +4449,9 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
4125 return; 4449 return;
4126 } 4450 }
4127 4451
4128 kthread_stop(hba[i]->cciss_scan_thread); 4452 mutex_lock(&hba[i]->busy_shutting_down);
4129 4453
4454 remove_from_scan_list(hba[i]);
4130 remove_proc_entry(hba[i]->devname, proc_cciss); 4455 remove_proc_entry(hba[i]->devname, proc_cciss);
4131 unregister_blkdev(hba[i]->major, hba[i]->devname); 4456 unregister_blkdev(hba[i]->major, hba[i]->devname);
4132 4457
@@ -4136,8 +4461,10 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
4136 if (disk) { 4461 if (disk) {
4137 struct request_queue *q = disk->queue; 4462 struct request_queue *q = disk->queue;
4138 4463
4139 if (disk->flags & GENHD_FL_UP) 4464 if (disk->flags & GENHD_FL_UP) {
4465 cciss_destroy_ld_sysfs_entry(hba[i], j, 1);
4140 del_gendisk(disk); 4466 del_gendisk(disk);
4467 }
4141 if (q) 4468 if (q)
4142 blk_cleanup_queue(q); 4469 blk_cleanup_queue(q);
4143 } 4470 }
@@ -4170,6 +4497,7 @@ static void __devexit cciss_remove_one(struct pci_dev *pdev)
4170 pci_release_regions(pdev); 4497 pci_release_regions(pdev);
4171 pci_set_drvdata(pdev, NULL); 4498 pci_set_drvdata(pdev, NULL);
4172 cciss_destroy_hba_sysfs_entry(hba[i]); 4499 cciss_destroy_hba_sysfs_entry(hba[i]);
4500 mutex_unlock(&hba[i]->busy_shutting_down);
4173 free_hba(i); 4501 free_hba(i);
4174} 4502}
4175 4503
@@ -4202,15 +4530,25 @@ static int __init cciss_init(void)
4202 if (err) 4530 if (err)
4203 return err; 4531 return err;
4204 4532
4533 /* Start the scan thread */
4534 cciss_scan_thread = kthread_run(scan_thread, NULL, "cciss_scan");
4535 if (IS_ERR(cciss_scan_thread)) {
4536 err = PTR_ERR(cciss_scan_thread);
4537 goto err_bus_unregister;
4538 }
4539
4205 /* Register for our PCI devices */ 4540 /* Register for our PCI devices */
4206 err = pci_register_driver(&cciss_pci_driver); 4541 err = pci_register_driver(&cciss_pci_driver);
4207 if (err) 4542 if (err)
4208 goto err_bus_register; 4543 goto err_thread_stop;
4209 4544
4210 return 0; 4545 return err;
4211 4546
4212err_bus_register: 4547err_thread_stop:
4548 kthread_stop(cciss_scan_thread);
4549err_bus_unregister:
4213 bus_unregister(&cciss_bus_type); 4550 bus_unregister(&cciss_bus_type);
4551
4214 return err; 4552 return err;
4215} 4553}
4216 4554
@@ -4227,6 +4565,7 @@ static void __exit cciss_cleanup(void)
4227 cciss_remove_one(hba[i]->pdev); 4565 cciss_remove_one(hba[i]->pdev);
4228 } 4566 }
4229 } 4567 }
4568 kthread_stop(cciss_scan_thread);
4230 remove_proc_entry("driver/cciss", NULL); 4569 remove_proc_entry("driver/cciss", NULL);
4231 bus_unregister(&cciss_bus_type); 4570 bus_unregister(&cciss_bus_type);
4232} 4571}
diff --git a/drivers/block/cciss.h b/drivers/block/cciss.h
index 06a5db25b298..31524cf42c77 100644
--- a/drivers/block/cciss.h
+++ b/drivers/block/cciss.h
@@ -2,6 +2,7 @@
2#define CCISS_H 2#define CCISS_H
3 3
4#include <linux/genhd.h> 4#include <linux/genhd.h>
5#include <linux/mutex.h>
5 6
6#include "cciss_cmd.h" 7#include "cciss_cmd.h"
7 8
@@ -29,7 +30,7 @@ struct access_method {
29}; 30};
30typedef struct _drive_info_struct 31typedef struct _drive_info_struct
31{ 32{
32 __u32 LunID; 33 unsigned char LunID[8];
33 int usage_count; 34 int usage_count;
34 struct request_queue *queue; 35 struct request_queue *queue;
35 sector_t nr_blocks; 36 sector_t nr_blocks;
@@ -51,6 +52,7 @@ typedef struct _drive_info_struct
51 char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */ 52 char vendor[VENDOR_LEN + 1]; /* SCSI vendor string */
52 char model[MODEL_LEN + 1]; /* SCSI model string */ 53 char model[MODEL_LEN + 1]; /* SCSI model string */
53 char rev[REV_LEN + 1]; /* SCSI revision string */ 54 char rev[REV_LEN + 1]; /* SCSI revision string */
55 char device_initialized; /* indicates whether dev is initialized */
54} drive_info_struct; 56} drive_info_struct;
55 57
56struct ctlr_info 58struct ctlr_info
@@ -86,7 +88,7 @@ struct ctlr_info
86 BYTE cciss_read_capacity; 88 BYTE cciss_read_capacity;
87 89
88 // information about each logical volume 90 // information about each logical volume
89 drive_info_struct drv[CISS_MAX_LUN]; 91 drive_info_struct *drv[CISS_MAX_LUN];
90 92
91 struct access_method access; 93 struct access_method access;
92 94
@@ -108,6 +110,8 @@ struct ctlr_info
108 int nr_frees; 110 int nr_frees;
109 int busy_configuring; 111 int busy_configuring;
110 int busy_initializing; 112 int busy_initializing;
113 int busy_scanning;
114 struct mutex busy_shutting_down;
111 115
112 /* This element holds the zero based queue number of the last 116 /* This element holds the zero based queue number of the last
113 * queue to be started. It is used for fairness. 117 * queue to be started. It is used for fairness.
@@ -122,8 +126,8 @@ struct ctlr_info
122 /* and saved for later processing */ 126 /* and saved for later processing */
123#endif 127#endif
124 unsigned char alive; 128 unsigned char alive;
125 struct completion *rescan_wait; 129 struct list_head scan_list;
126 struct task_struct *cciss_scan_thread; 130 struct completion scan_wait;
127 struct device dev; 131 struct device dev;
128}; 132};
129 133
diff --git a/drivers/block/cpqarray.c b/drivers/block/cpqarray.c
index b82d438e2607..6422651ec364 100644
--- a/drivers/block/cpqarray.c
+++ b/drivers/block/cpqarray.c
@@ -32,6 +32,7 @@
32#include <linux/blkpg.h> 32#include <linux/blkpg.h>
33#include <linux/timer.h> 33#include <linux/timer.h>
34#include <linux/proc_fs.h> 34#include <linux/proc_fs.h>
35#include <linux/seq_file.h>
35#include <linux/init.h> 36#include <linux/init.h>
36#include <linux/hdreg.h> 37#include <linux/hdreg.h>
37#include <linux/spinlock.h> 38#include <linux/spinlock.h>
@@ -177,7 +178,6 @@ static int cpqarray_register_ctlr(int ctlr, struct pci_dev *pdev);
177 178
178#ifdef CONFIG_PROC_FS 179#ifdef CONFIG_PROC_FS
179static void ida_procinit(int i); 180static void ida_procinit(int i);
180static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data);
181#else 181#else
182static void ida_procinit(int i) {} 182static void ida_procinit(int i) {}
183#endif 183#endif
@@ -206,6 +206,7 @@ static const struct block_device_operations ida_fops = {
206#ifdef CONFIG_PROC_FS 206#ifdef CONFIG_PROC_FS
207 207
208static struct proc_dir_entry *proc_array; 208static struct proc_dir_entry *proc_array;
209static const struct file_operations ida_proc_fops;
209 210
210/* 211/*
211 * Get us a file in /proc/array that says something about each controller. 212 * Get us a file in /proc/array that says something about each controller.
@@ -218,19 +219,16 @@ static void __init ida_procinit(int i)
218 if (!proc_array) return; 219 if (!proc_array) return;
219 } 220 }
220 221
221 create_proc_read_entry(hba[i]->devname, 0, proc_array, 222 proc_create_data(hba[i]->devname, 0, proc_array, &ida_proc_fops, hba[i]);
222 ida_proc_get_info, hba[i]);
223} 223}
224 224
225/* 225/*
226 * Report information about this controller. 226 * Report information about this controller.
227 */ 227 */
228static int ida_proc_get_info(char *buffer, char **start, off_t offset, int length, int *eof, void *data) 228static int ida_proc_show(struct seq_file *m, void *v)
229{ 229{
230 off_t pos = 0; 230 int i, ctlr;
231 off_t len = 0; 231 ctlr_info_t *h = (ctlr_info_t*)m->private;
232 int size, i, ctlr;
233 ctlr_info_t *h = (ctlr_info_t*)data;
234 drv_info_t *drv; 232 drv_info_t *drv;
235#ifdef CPQ_PROC_PRINT_QUEUES 233#ifdef CPQ_PROC_PRINT_QUEUES
236 cmdlist_t *c; 234 cmdlist_t *c;
@@ -238,7 +236,7 @@ static int ida_proc_get_info(char *buffer, char **start, off_t offset, int lengt
238#endif 236#endif
239 237
240 ctlr = h->ctlr; 238 ctlr = h->ctlr;
241 size = sprintf(buffer, "%s: Compaq %s Controller\n" 239 seq_printf(m, "%s: Compaq %s Controller\n"
242 " Board ID: 0x%08lx\n" 240 " Board ID: 0x%08lx\n"
243 " Firmware Revision: %c%c%c%c\n" 241 " Firmware Revision: %c%c%c%c\n"
244 " Controller Sig: 0x%08lx\n" 242 " Controller Sig: 0x%08lx\n"
@@ -258,55 +256,54 @@ static int ida_proc_get_info(char *buffer, char **start, off_t offset, int lengt
258 h->log_drives, h->phys_drives, 256 h->log_drives, h->phys_drives,
259 h->Qdepth, h->maxQsinceinit); 257 h->Qdepth, h->maxQsinceinit);
260 258
261 pos += size; len += size; 259 seq_puts(m, "Logical Drive Info:\n");
262
263 size = sprintf(buffer+len, "Logical Drive Info:\n");
264 pos += size; len += size;
265 260
266 for(i=0; i<h->log_drives; i++) { 261 for(i=0; i<h->log_drives; i++) {
267 drv = &h->drv[i]; 262 drv = &h->drv[i];
268 size = sprintf(buffer+len, "ida/c%dd%d: blksz=%d nr_blks=%d\n", 263 seq_printf(m, "ida/c%dd%d: blksz=%d nr_blks=%d\n",
269 ctlr, i, drv->blk_size, drv->nr_blks); 264 ctlr, i, drv->blk_size, drv->nr_blks);
270 pos += size; len += size;
271 } 265 }
272 266
273#ifdef CPQ_PROC_PRINT_QUEUES 267#ifdef CPQ_PROC_PRINT_QUEUES
274 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags); 268 spin_lock_irqsave(IDA_LOCK(h->ctlr), flags);
275 size = sprintf(buffer+len, "\nCurrent Queues:\n"); 269 seq_puts(m, "\nCurrent Queues:\n");
276 pos += size; len += size;
277 270
278 c = h->reqQ; 271 c = h->reqQ;
279 size = sprintf(buffer+len, "reqQ = %p", c); pos += size; len += size; 272 seq_printf(m, "reqQ = %p", c);
280 if (c) c=c->next; 273 if (c) c=c->next;
281 while(c && c != h->reqQ) { 274 while(c && c != h->reqQ) {
282 size = sprintf(buffer+len, "->%p", c); 275 seq_printf(m, "->%p", c);
283 pos += size; len += size;
284 c=c->next; 276 c=c->next;
285 } 277 }
286 278
287 c = h->cmpQ; 279 c = h->cmpQ;
288 size = sprintf(buffer+len, "\ncmpQ = %p", c); pos += size; len += size; 280 seq_printf(m, "\ncmpQ = %p", c);
289 if (c) c=c->next; 281 if (c) c=c->next;
290 while(c && c != h->cmpQ) { 282 while(c && c != h->cmpQ) {
291 size = sprintf(buffer+len, "->%p", c); 283 seq_printf(m, "->%p", c);
292 pos += size; len += size;
293 c=c->next; 284 c=c->next;
294 } 285 }
295 286
296 size = sprintf(buffer+len, "\n"); pos += size; len += size; 287 seq_putc(m, '\n');
297 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags); 288 spin_unlock_irqrestore(IDA_LOCK(h->ctlr), flags);
298#endif 289#endif
299 size = sprintf(buffer+len, "nr_allocs = %d\nnr_frees = %d\n", 290 seq_printf(m, "nr_allocs = %d\nnr_frees = %d\n",
300 h->nr_allocs, h->nr_frees); 291 h->nr_allocs, h->nr_frees);
301 pos += size; len += size; 292 return 0;
302 293}
303 *eof = 1; 294
304 *start = buffer+offset; 295static int ida_proc_open(struct inode *inode, struct file *file)
305 len -= offset; 296{
306 if (len>length) 297 return single_open(file, ida_proc_show, PDE(inode)->data);
307 len = length;
308 return len;
309} 298}
299
300static const struct file_operations ida_proc_fops = {
301 .owner = THIS_MODULE,
302 .open = ida_proc_open,
303 .read = seq_read,
304 .llseek = seq_lseek,
305 .release = single_release,
306};
310#endif /* CONFIG_PROC_FS */ 307#endif /* CONFIG_PROC_FS */
311 308
312module_param_array(eisa, int, NULL, 0); 309module_param_array(eisa, int, NULL, 0);
diff --git a/drivers/char/agp/agp.h b/drivers/char/agp/agp.h
index d6f36c004d9b..870f12cfed93 100644
--- a/drivers/char/agp/agp.h
+++ b/drivers/char/agp/agp.h
@@ -131,7 +131,7 @@ struct agp_bridge_driver {
131struct agp_bridge_data { 131struct agp_bridge_data {
132 const struct agp_version *version; 132 const struct agp_version *version;
133 const struct agp_bridge_driver *driver; 133 const struct agp_bridge_driver *driver;
134 struct vm_operations_struct *vm_ops; 134 const struct vm_operations_struct *vm_ops;
135 void *previous_size; 135 void *previous_size;
136 void *current_size; 136 void *current_size;
137 void *dev_private_data; 137 void *dev_private_data;
diff --git a/drivers/char/agp/alpha-agp.c b/drivers/char/agp/alpha-agp.c
index 5ea4da8e9954..dd84af4d4f7e 100644
--- a/drivers/char/agp/alpha-agp.c
+++ b/drivers/char/agp/alpha-agp.c
@@ -40,7 +40,7 @@ static struct aper_size_info_fixed alpha_core_agp_sizes[] =
40 { 0, 0, 0 }, /* filled in by alpha_core_agp_setup */ 40 { 0, 0, 0 }, /* filled in by alpha_core_agp_setup */
41}; 41};
42 42
43struct vm_operations_struct alpha_core_agp_vm_ops = { 43static const struct vm_operations_struct alpha_core_agp_vm_ops = {
44 .fault = alpha_core_agp_vm_fault, 44 .fault = alpha_core_agp_vm_fault,
45}; 45};
46 46
diff --git a/drivers/char/apm-emulation.c b/drivers/char/apm-emulation.c
index aaca40283be9..4f568cb9af3f 100644
--- a/drivers/char/apm-emulation.c
+++ b/drivers/char/apm-emulation.c
@@ -393,7 +393,7 @@ static int apm_open(struct inode * inode, struct file * filp)
393 return as ? 0 : -ENOMEM; 393 return as ? 0 : -ENOMEM;
394} 394}
395 395
396static struct file_operations apm_bios_fops = { 396static const struct file_operations apm_bios_fops = {
397 .owner = THIS_MODULE, 397 .owner = THIS_MODULE,
398 .read = apm_read, 398 .read = apm_read,
399 .poll = apm_poll, 399 .poll = apm_poll,
diff --git a/drivers/char/bfin-otp.c b/drivers/char/bfin-otp.c
index e3dd24bff514..836d4f0a876f 100644
--- a/drivers/char/bfin-otp.c
+++ b/drivers/char/bfin-otp.c
@@ -217,7 +217,7 @@ static long bfin_otp_ioctl(struct file *filp, unsigned cmd, unsigned long arg)
217# define bfin_otp_ioctl NULL 217# define bfin_otp_ioctl NULL
218#endif 218#endif
219 219
220static struct file_operations bfin_otp_fops = { 220static const struct file_operations bfin_otp_fops = {
221 .owner = THIS_MODULE, 221 .owner = THIS_MODULE,
222 .unlocked_ioctl = bfin_otp_ioctl, 222 .unlocked_ioctl = bfin_otp_ioctl,
223 .read = bfin_otp_read, 223 .read = bfin_otp_read,
diff --git a/drivers/char/cyclades.c b/drivers/char/cyclades.c
index df5038bbcbc2..4254457d3911 100644
--- a/drivers/char/cyclades.c
+++ b/drivers/char/cyclades.c
@@ -3354,7 +3354,7 @@ static int __init cy_detect_isa(void)
3354 continue; 3354 continue;
3355 } 3355 }
3356#ifdef MODULE 3356#ifdef MODULE
3357 if (isparam && irq[i]) 3357 if (isparam && i < NR_CARDS && irq[i])
3358 cy_isa_irq = irq[i]; 3358 cy_isa_irq = irq[i];
3359 else 3359 else
3360#endif 3360#endif
diff --git a/drivers/char/dtlk.c b/drivers/char/dtlk.c
index 52e06589821d..045c930e6320 100644
--- a/drivers/char/dtlk.c
+++ b/drivers/char/dtlk.c
@@ -56,6 +56,7 @@
56#include <linux/errno.h> /* for -EBUSY */ 56#include <linux/errno.h> /* for -EBUSY */
57#include <linux/ioport.h> /* for request_region */ 57#include <linux/ioport.h> /* for request_region */
58#include <linux/delay.h> /* for loops_per_jiffy */ 58#include <linux/delay.h> /* for loops_per_jiffy */
59#include <linux/sched.h>
59#include <linux/smp_lock.h> /* cycle_kernel_lock() */ 60#include <linux/smp_lock.h> /* cycle_kernel_lock() */
60#include <asm/io.h> /* for inb_p, outb_p, inb, outb, etc. */ 61#include <asm/io.h> /* for inb_p, outb_p, inb, outb, etc. */
61#include <asm/uaccess.h> /* for get_user, etc. */ 62#include <asm/uaccess.h> /* for get_user, etc. */
diff --git a/drivers/char/hvc_console.c b/drivers/char/hvc_console.c
index 25ce15bb1c08..a632f25f144a 100644
--- a/drivers/char/hvc_console.c
+++ b/drivers/char/hvc_console.c
@@ -678,7 +678,7 @@ int hvc_poll(struct hvc_struct *hp)
678EXPORT_SYMBOL_GPL(hvc_poll); 678EXPORT_SYMBOL_GPL(hvc_poll);
679 679
680/** 680/**
681 * hvc_resize() - Update terminal window size information. 681 * __hvc_resize() - Update terminal window size information.
682 * @hp: HVC console pointer 682 * @hp: HVC console pointer
683 * @ws: Terminal window size structure 683 * @ws: Terminal window size structure
684 * 684 *
@@ -687,12 +687,12 @@ EXPORT_SYMBOL_GPL(hvc_poll);
687 * 687 *
688 * Locking: Locking free; the function MUST be called holding hp->lock 688 * Locking: Locking free; the function MUST be called holding hp->lock
689 */ 689 */
690void hvc_resize(struct hvc_struct *hp, struct winsize ws) 690void __hvc_resize(struct hvc_struct *hp, struct winsize ws)
691{ 691{
692 hp->ws = ws; 692 hp->ws = ws;
693 schedule_work(&hp->tty_resize); 693 schedule_work(&hp->tty_resize);
694} 694}
695EXPORT_SYMBOL_GPL(hvc_resize); 695EXPORT_SYMBOL_GPL(__hvc_resize);
696 696
697/* 697/*
698 * This kthread is either polling or interrupt driven. This is determined by 698 * This kthread is either polling or interrupt driven. This is determined by
diff --git a/drivers/char/hvc_console.h b/drivers/char/hvc_console.h
index 3c85d78c975c..10950ca706d8 100644
--- a/drivers/char/hvc_console.h
+++ b/drivers/char/hvc_console.h
@@ -28,6 +28,7 @@
28#define HVC_CONSOLE_H 28#define HVC_CONSOLE_H
29#include <linux/kref.h> 29#include <linux/kref.h>
30#include <linux/tty.h> 30#include <linux/tty.h>
31#include <linux/spinlock.h>
31 32
32/* 33/*
33 * This is the max number of console adapters that can/will be found as 34 * This is the max number of console adapters that can/will be found as
@@ -88,7 +89,16 @@ int hvc_poll(struct hvc_struct *hp);
88void hvc_kick(void); 89void hvc_kick(void);
89 90
90/* Resize hvc tty terminal window */ 91/* Resize hvc tty terminal window */
91extern void hvc_resize(struct hvc_struct *hp, struct winsize ws); 92extern void __hvc_resize(struct hvc_struct *hp, struct winsize ws);
93
94static inline void hvc_resize(struct hvc_struct *hp, struct winsize ws)
95{
96 unsigned long flags;
97
98 spin_lock_irqsave(&hp->lock, flags);
99 __hvc_resize(hp, ws);
100 spin_unlock_irqrestore(&hp->lock, flags);
101}
92 102
93/* default notifier for irq based notification */ 103/* default notifier for irq based notification */
94extern int notifier_add_irq(struct hvc_struct *hp, int data); 104extern int notifier_add_irq(struct hvc_struct *hp, int data);
diff --git a/drivers/char/hvc_iucv.c b/drivers/char/hvc_iucv.c
index 0ecac7e532f6..b8a5d654d3d0 100644
--- a/drivers/char/hvc_iucv.c
+++ b/drivers/char/hvc_iucv.c
@@ -273,7 +273,9 @@ static int hvc_iucv_write(struct hvc_iucv_private *priv,
273 case MSG_TYPE_WINSIZE: 273 case MSG_TYPE_WINSIZE:
274 if (rb->mbuf->datalen != sizeof(struct winsize)) 274 if (rb->mbuf->datalen != sizeof(struct winsize))
275 break; 275 break;
276 hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data)); 276 /* The caller must ensure that the hvc is locked, which
277 * is the case when called from hvc_iucv_get_chars() */
278 __hvc_resize(priv->hvc, *((struct winsize *) rb->mbuf->data));
277 break; 279 break;
278 280
279 case MSG_TYPE_ERROR: /* ignored ... */ 281 case MSG_TYPE_ERROR: /* ignored ... */
diff --git a/drivers/char/hw_random/omap-rng.c b/drivers/char/hw_random/omap-rng.c
index 00dd3de1be51..06aad0831c73 100644
--- a/drivers/char/hw_random/omap-rng.c
+++ b/drivers/char/hw_random/omap-rng.c
@@ -116,7 +116,7 @@ static int __devinit omap_rng_probe(struct platform_device *pdev)
116 if (!res) 116 if (!res)
117 return -ENOENT; 117 return -ENOENT;
118 118
119 mem = request_mem_region(res->start, res->end - res->start + 1, 119 mem = request_mem_region(res->start, resource_size(res),
120 pdev->name); 120 pdev->name);
121 if (mem == NULL) { 121 if (mem == NULL) {
122 ret = -EBUSY; 122 ret = -EBUSY;
@@ -124,7 +124,7 @@ static int __devinit omap_rng_probe(struct platform_device *pdev)
124 } 124 }
125 125
126 dev_set_drvdata(&pdev->dev, mem); 126 dev_set_drvdata(&pdev->dev, mem);
127 rng_base = ioremap(res->start, res->end - res->start + 1); 127 rng_base = ioremap(res->start, resource_size(res));
128 if (!rng_base) { 128 if (!rng_base) {
129 ret = -ENOMEM; 129 ret = -ENOMEM;
130 goto err_ioremap; 130 goto err_ioremap;
diff --git a/drivers/char/ipmi/ipmi_devintf.c b/drivers/char/ipmi/ipmi_devintf.c
index 41fc11dc921c..65545de3dbf4 100644
--- a/drivers/char/ipmi/ipmi_devintf.c
+++ b/drivers/char/ipmi/ipmi_devintf.c
@@ -36,6 +36,7 @@
36#include <linux/errno.h> 36#include <linux/errno.h>
37#include <asm/system.h> 37#include <asm/system.h>
38#include <linux/poll.h> 38#include <linux/poll.h>
39#include <linux/sched.h>
39#include <linux/spinlock.h> 40#include <linux/spinlock.h>
40#include <linux/slab.h> 41#include <linux/slab.h>
41#include <linux/ipmi.h> 42#include <linux/ipmi.h>
diff --git a/drivers/char/ipmi/ipmi_msghandler.c b/drivers/char/ipmi/ipmi_msghandler.c
index 09050797c76a..ec5e3f8df648 100644
--- a/drivers/char/ipmi/ipmi_msghandler.c
+++ b/drivers/char/ipmi/ipmi_msghandler.c
@@ -35,6 +35,7 @@
35#include <linux/errno.h> 35#include <linux/errno.h>
36#include <asm/system.h> 36#include <asm/system.h>
37#include <linux/poll.h> 37#include <linux/poll.h>
38#include <linux/sched.h>
38#include <linux/spinlock.h> 39#include <linux/spinlock.h>
39#include <linux/mutex.h> 40#include <linux/mutex.h>
40#include <linux/slab.h> 41#include <linux/slab.h>
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 6c8b65d069e5..a074fceb67d3 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -301,7 +301,7 @@ static inline int private_mapping_ok(struct vm_area_struct *vma)
301} 301}
302#endif 302#endif
303 303
304static struct vm_operations_struct mmap_mem_ops = { 304static const struct vm_operations_struct mmap_mem_ops = {
305#ifdef CONFIG_HAVE_IOREMAP_PROT 305#ifdef CONFIG_HAVE_IOREMAP_PROT
306 .access = generic_access_phys 306 .access = generic_access_phys
307#endif 307#endif
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
index 30f095a8c2d4..1997270bb6f4 100644
--- a/drivers/char/mspec.c
+++ b/drivers/char/mspec.c
@@ -239,7 +239,7 @@ mspec_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
239 return VM_FAULT_NOPAGE; 239 return VM_FAULT_NOPAGE;
240} 240}
241 241
242static struct vm_operations_struct mspec_vm_ops = { 242static const struct vm_operations_struct mspec_vm_ops = {
243 .open = mspec_open, 243 .open = mspec_open,
244 .close = mspec_close, 244 .close = mspec_close,
245 .fault = mspec_fault, 245 .fault = mspec_fault,
diff --git a/drivers/char/pty.c b/drivers/char/pty.c
index 53761cefa915..e066c4fdf81b 100644
--- a/drivers/char/pty.c
+++ b/drivers/char/pty.c
@@ -261,6 +261,9 @@ done:
261 return 0; 261 return 0;
262} 262}
263 263
264/* Traditional BSD devices */
265#ifdef CONFIG_LEGACY_PTYS
266
264static int pty_install(struct tty_driver *driver, struct tty_struct *tty) 267static int pty_install(struct tty_driver *driver, struct tty_struct *tty)
265{ 268{
266 struct tty_struct *o_tty; 269 struct tty_struct *o_tty;
@@ -310,24 +313,6 @@ free_mem_out:
310 return -ENOMEM; 313 return -ENOMEM;
311} 314}
312 315
313
314static const struct tty_operations pty_ops = {
315 .install = pty_install,
316 .open = pty_open,
317 .close = pty_close,
318 .write = pty_write,
319 .write_room = pty_write_room,
320 .flush_buffer = pty_flush_buffer,
321 .chars_in_buffer = pty_chars_in_buffer,
322 .unthrottle = pty_unthrottle,
323 .set_termios = pty_set_termios,
324 .resize = pty_resize
325};
326
327/* Traditional BSD devices */
328#ifdef CONFIG_LEGACY_PTYS
329static struct tty_driver *pty_driver, *pty_slave_driver;
330
331static int pty_bsd_ioctl(struct tty_struct *tty, struct file *file, 316static int pty_bsd_ioctl(struct tty_struct *tty, struct file *file,
332 unsigned int cmd, unsigned long arg) 317 unsigned int cmd, unsigned long arg)
333{ 318{
@@ -341,7 +326,12 @@ static int pty_bsd_ioctl(struct tty_struct *tty, struct file *file,
341static int legacy_count = CONFIG_LEGACY_PTY_COUNT; 326static int legacy_count = CONFIG_LEGACY_PTY_COUNT;
342module_param(legacy_count, int, 0); 327module_param(legacy_count, int, 0);
343 328
344static const struct tty_operations pty_ops_bsd = { 329/*
330 * The master side of a pty can do TIOCSPTLCK and thus
331 * has pty_bsd_ioctl.
332 */
333static const struct tty_operations master_pty_ops_bsd = {
334 .install = pty_install,
345 .open = pty_open, 335 .open = pty_open,
346 .close = pty_close, 336 .close = pty_close,
347 .write = pty_write, 337 .write = pty_write,
@@ -354,8 +344,23 @@ static const struct tty_operations pty_ops_bsd = {
354 .resize = pty_resize 344 .resize = pty_resize
355}; 345};
356 346
347static const struct tty_operations slave_pty_ops_bsd = {
348 .install = pty_install,
349 .open = pty_open,
350 .close = pty_close,
351 .write = pty_write,
352 .write_room = pty_write_room,
353 .flush_buffer = pty_flush_buffer,
354 .chars_in_buffer = pty_chars_in_buffer,
355 .unthrottle = pty_unthrottle,
356 .set_termios = pty_set_termios,
357 .resize = pty_resize
358};
359
357static void __init legacy_pty_init(void) 360static void __init legacy_pty_init(void)
358{ 361{
362 struct tty_driver *pty_driver, *pty_slave_driver;
363
359 if (legacy_count <= 0) 364 if (legacy_count <= 0)
360 return; 365 return;
361 366
@@ -383,7 +388,7 @@ static void __init legacy_pty_init(void)
383 pty_driver->init_termios.c_ospeed = 38400; 388 pty_driver->init_termios.c_ospeed = 38400;
384 pty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW; 389 pty_driver->flags = TTY_DRIVER_RESET_TERMIOS | TTY_DRIVER_REAL_RAW;
385 pty_driver->other = pty_slave_driver; 390 pty_driver->other = pty_slave_driver;
386 tty_set_operations(pty_driver, &pty_ops); 391 tty_set_operations(pty_driver, &master_pty_ops_bsd);
387 392
388 pty_slave_driver->owner = THIS_MODULE; 393 pty_slave_driver->owner = THIS_MODULE;
389 pty_slave_driver->driver_name = "pty_slave"; 394 pty_slave_driver->driver_name = "pty_slave";
@@ -399,7 +404,7 @@ static void __init legacy_pty_init(void)
399 pty_slave_driver->flags = TTY_DRIVER_RESET_TERMIOS | 404 pty_slave_driver->flags = TTY_DRIVER_RESET_TERMIOS |
400 TTY_DRIVER_REAL_RAW; 405 TTY_DRIVER_REAL_RAW;
401 pty_slave_driver->other = pty_driver; 406 pty_slave_driver->other = pty_driver;
402 tty_set_operations(pty_slave_driver, &pty_ops); 407 tty_set_operations(pty_slave_driver, &slave_pty_ops_bsd);
403 408
404 if (tty_register_driver(pty_driver)) 409 if (tty_register_driver(pty_driver))
405 panic("Couldn't register pty driver"); 410 panic("Couldn't register pty driver");
diff --git a/drivers/char/serial167.c b/drivers/char/serial167.c
index 5942a9d674c0..452370af95de 100644
--- a/drivers/char/serial167.c
+++ b/drivers/char/serial167.c
@@ -220,8 +220,7 @@ static inline int serial_paranoia_check(struct cyclades_port *info, char *name,
220 return 1; 220 return 1;
221 } 221 }
222 222
223 if ((long)info < (long)(&cy_port[0]) 223 if (info < &cy_port[0] || info >= &cy_port[NR_PORTS]) {
224 || (long)(&cy_port[NR_PORTS]) < (long)info) {
225 printk("Warning: cyclades_port out of range for (%s) in %s\n", 224 printk("Warning: cyclades_port out of range for (%s) in %s\n",
226 name, routine); 225 name, routine);
227 return 1; 226 return 1;
@@ -520,15 +519,13 @@ static irqreturn_t cd2401_tx_interrupt(int irq, void *dev_id)
520 panic("TxInt on debug port!!!"); 519 panic("TxInt on debug port!!!");
521 } 520 }
522#endif 521#endif
523
524 info = &cy_port[channel];
525
526 /* validate the port number (as configured and open) */ 522 /* validate the port number (as configured and open) */
527 if ((channel < 0) || (NR_PORTS <= channel)) { 523 if ((channel < 0) || (NR_PORTS <= channel)) {
528 base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy); 524 base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);
529 base_addr[CyTEOIR] = CyNOTRANS; 525 base_addr[CyTEOIR] = CyNOTRANS;
530 return IRQ_HANDLED; 526 return IRQ_HANDLED;
531 } 527 }
528 info = &cy_port[channel];
532 info->last_active = jiffies; 529 info->last_active = jiffies;
533 if (info->tty == 0) { 530 if (info->tty == 0) {
534 base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy); 531 base_addr[CyIER] &= ~(CyTxMpty | CyTxRdy);
diff --git a/drivers/char/tty_io.c b/drivers/char/tty_io.c
index ea18a129b0b5..59499ee0fe6a 100644
--- a/drivers/char/tty_io.c
+++ b/drivers/char/tty_io.c
@@ -1389,7 +1389,7 @@ EXPORT_SYMBOL(tty_shutdown);
1389 * of ttys that the driver keeps. 1389 * of ttys that the driver keeps.
1390 * 1390 *
1391 * This method gets called from a work queue so that the driver private 1391 * This method gets called from a work queue so that the driver private
1392 * shutdown ops can sleep (needed for USB at least) 1392 * cleanup ops can sleep (needed for USB at least)
1393 */ 1393 */
1394static void release_one_tty(struct work_struct *work) 1394static void release_one_tty(struct work_struct *work)
1395{ 1395{
@@ -1397,10 +1397,9 @@ static void release_one_tty(struct work_struct *work)
1397 container_of(work, struct tty_struct, hangup_work); 1397 container_of(work, struct tty_struct, hangup_work);
1398 struct tty_driver *driver = tty->driver; 1398 struct tty_driver *driver = tty->driver;
1399 1399
1400 if (tty->ops->shutdown) 1400 if (tty->ops->cleanup)
1401 tty->ops->shutdown(tty); 1401 tty->ops->cleanup(tty);
1402 else 1402
1403 tty_shutdown(tty);
1404 tty->magic = 0; 1403 tty->magic = 0;
1405 tty_driver_kref_put(driver); 1404 tty_driver_kref_put(driver);
1406 module_put(driver->owner); 1405 module_put(driver->owner);
@@ -1415,6 +1414,12 @@ static void release_one_tty(struct work_struct *work)
1415static void queue_release_one_tty(struct kref *kref) 1414static void queue_release_one_tty(struct kref *kref)
1416{ 1415{
1417 struct tty_struct *tty = container_of(kref, struct tty_struct, kref); 1416 struct tty_struct *tty = container_of(kref, struct tty_struct, kref);
1417
1418 if (tty->ops->shutdown)
1419 tty->ops->shutdown(tty);
1420 else
1421 tty_shutdown(tty);
1422
1418 /* The hangup queue is now free so we can reuse it rather than 1423 /* The hangup queue is now free so we can reuse it rather than
1419 waste a chunk of memory for each port */ 1424 waste a chunk of memory for each port */
1420 INIT_WORK(&tty->hangup_work, release_one_tty); 1425 INIT_WORK(&tty->hangup_work, release_one_tty);
diff --git a/drivers/char/tty_ldisc.c b/drivers/char/tty_ldisc.c
index aafdbaebc16a..feb55075819b 100644
--- a/drivers/char/tty_ldisc.c
+++ b/drivers/char/tty_ldisc.c
@@ -518,7 +518,7 @@ static void tty_ldisc_restore(struct tty_struct *tty, struct tty_ldisc *old)
518static int tty_ldisc_halt(struct tty_struct *tty) 518static int tty_ldisc_halt(struct tty_struct *tty)
519{ 519{
520 clear_bit(TTY_LDISC, &tty->flags); 520 clear_bit(TTY_LDISC, &tty->flags);
521 return cancel_delayed_work(&tty->buf.work); 521 return cancel_delayed_work_sync(&tty->buf.work);
522} 522}
523 523
524/** 524/**
@@ -756,12 +756,9 @@ void tty_ldisc_hangup(struct tty_struct *tty)
756 * N_TTY. 756 * N_TTY.
757 */ 757 */
758 if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) { 758 if (tty->driver->flags & TTY_DRIVER_RESET_TERMIOS) {
759 /* Make sure the old ldisc is quiescent */
760 tty_ldisc_halt(tty);
761 flush_scheduled_work();
762
763 /* Avoid racing set_ldisc or tty_ldisc_release */ 759 /* Avoid racing set_ldisc or tty_ldisc_release */
764 mutex_lock(&tty->ldisc_mutex); 760 mutex_lock(&tty->ldisc_mutex);
761 tty_ldisc_halt(tty);
765 if (tty->ldisc) { /* Not yet closed */ 762 if (tty->ldisc) { /* Not yet closed */
766 /* Switch back to N_TTY */ 763 /* Switch back to N_TTY */
767 tty_ldisc_reinit(tty); 764 tty_ldisc_reinit(tty);
diff --git a/drivers/char/vt_ioctl.c b/drivers/char/vt_ioctl.c
index 29c651ab0d78..6b36ee56e6fe 100644
--- a/drivers/char/vt_ioctl.c
+++ b/drivers/char/vt_ioctl.c
@@ -981,8 +981,10 @@ int vt_ioctl(struct tty_struct *tty, struct file * file,
981 goto eperm; 981 goto eperm;
982 982
983 if (copy_from_user(&vsa, (struct vt_setactivate __user *)arg, 983 if (copy_from_user(&vsa, (struct vt_setactivate __user *)arg,
984 sizeof(struct vt_setactivate))) 984 sizeof(struct vt_setactivate))) {
985 return -EFAULT; 985 ret = -EFAULT;
986 goto out;
987 }
986 if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES) 988 if (vsa.console == 0 || vsa.console > MAX_NR_CONSOLES)
987 ret = -ENXIO; 989 ret = -ENXIO;
988 else { 990 else {
diff --git a/drivers/char/xilinx_hwicap/xilinx_hwicap.c b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
index f40ab699860f..4846d50199f3 100644
--- a/drivers/char/xilinx_hwicap/xilinx_hwicap.c
+++ b/drivers/char/xilinx_hwicap/xilinx_hwicap.c
@@ -559,7 +559,7 @@ static int hwicap_release(struct inode *inode, struct file *file)
559 return status; 559 return status;
560} 560}
561 561
562static struct file_operations hwicap_fops = { 562static const struct file_operations hwicap_fops = {
563 .owner = THIS_MODULE, 563 .owner = THIS_MODULE,
564 .write = hwicap_write, 564 .write = hwicap_write,
565 .read = hwicap_read, 565 .read = hwicap_read,
diff --git a/drivers/firewire/core-cdev.c b/drivers/firewire/core-cdev.c
index ced186d7e9a9..5089331544ed 100644
--- a/drivers/firewire/core-cdev.c
+++ b/drivers/firewire/core-cdev.c
@@ -33,6 +33,7 @@
33#include <linux/mutex.h> 33#include <linux/mutex.h>
34#include <linux/poll.h> 34#include <linux/poll.h>
35#include <linux/preempt.h> 35#include <linux/preempt.h>
36#include <linux/sched.h>
36#include <linux/spinlock.h> 37#include <linux/spinlock.h>
37#include <linux/time.h> 38#include <linux/time.h>
38#include <linux/uaccess.h> 39#include <linux/uaccess.h>
diff --git a/drivers/gpio/gpiolib.c b/drivers/gpio/gpiolib.c
index bb11a429394a..662ed923d9eb 100644
--- a/drivers/gpio/gpiolib.c
+++ b/drivers/gpio/gpiolib.c
@@ -1487,7 +1487,7 @@ static int gpiolib_open(struct inode *inode, struct file *file)
1487 return single_open(file, gpiolib_show, NULL); 1487 return single_open(file, gpiolib_show, NULL);
1488} 1488}
1489 1489
1490static struct file_operations gpiolib_operations = { 1490static const struct file_operations gpiolib_operations = {
1491 .open = gpiolib_open, 1491 .open = gpiolib_open,
1492 .read = seq_read, 1492 .read = seq_read,
1493 .llseek = seq_lseek, 1493 .llseek = seq_lseek,
diff --git a/drivers/gpu/drm/drm_crtc.c b/drivers/gpu/drm/drm_crtc.c
index ba728ad77f2a..8e7b0ebece0c 100644
--- a/drivers/gpu/drm/drm_crtc.c
+++ b/drivers/gpu/drm/drm_crtc.c
@@ -482,6 +482,7 @@ void drm_connector_cleanup(struct drm_connector *connector)
482 list_for_each_entry_safe(mode, t, &connector->user_modes, head) 482 list_for_each_entry_safe(mode, t, &connector->user_modes, head)
483 drm_mode_remove(connector, mode); 483 drm_mode_remove(connector, mode);
484 484
485 kfree(connector->fb_helper_private);
485 mutex_lock(&dev->mode_config.mutex); 486 mutex_lock(&dev->mode_config.mutex);
486 drm_mode_object_put(dev, &connector->base); 487 drm_mode_object_put(dev, &connector->base);
487 list_del(&connector->head); 488 list_del(&connector->head);
diff --git a/drivers/gpu/drm/drm_crtc_helper.c b/drivers/gpu/drm/drm_crtc_helper.c
index fe8697447f32..1fe4e1d344fd 100644
--- a/drivers/gpu/drm/drm_crtc_helper.c
+++ b/drivers/gpu/drm/drm_crtc_helper.c
@@ -32,6 +32,7 @@
32#include "drmP.h" 32#include "drmP.h"
33#include "drm_crtc.h" 33#include "drm_crtc.h"
34#include "drm_crtc_helper.h" 34#include "drm_crtc_helper.h"
35#include "drm_fb_helper.h"
35 36
36static void drm_mode_validate_flag(struct drm_connector *connector, 37static void drm_mode_validate_flag(struct drm_connector *connector,
37 int flags) 38 int flags)
@@ -90,7 +91,15 @@ int drm_helper_probe_single_connector_modes(struct drm_connector *connector,
90 list_for_each_entry_safe(mode, t, &connector->modes, head) 91 list_for_each_entry_safe(mode, t, &connector->modes, head)
91 mode->status = MODE_UNVERIFIED; 92 mode->status = MODE_UNVERIFIED;
92 93
93 connector->status = connector->funcs->detect(connector); 94 if (connector->force) {
95 if (connector->force == DRM_FORCE_ON)
96 connector->status = connector_status_connected;
97 else
98 connector->status = connector_status_disconnected;
99 if (connector->funcs->force)
100 connector->funcs->force(connector);
101 } else
102 connector->status = connector->funcs->detect(connector);
94 103
95 if (connector->status == connector_status_disconnected) { 104 if (connector->status == connector_status_disconnected) {
96 DRM_DEBUG_KMS("%s is disconnected\n", 105 DRM_DEBUG_KMS("%s is disconnected\n",
@@ -267,6 +276,65 @@ static struct drm_display_mode *drm_has_preferred_mode(struct drm_connector *con
267 return NULL; 276 return NULL;
268} 277}
269 278
279static bool drm_has_cmdline_mode(struct drm_connector *connector)
280{
281 struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
282 struct drm_fb_helper_cmdline_mode *cmdline_mode;
283
284 if (!fb_help_conn)
285 return false;
286
287 cmdline_mode = &fb_help_conn->cmdline_mode;
288 return cmdline_mode->specified;
289}
290
291static struct drm_display_mode *drm_pick_cmdline_mode(struct drm_connector *connector, int width, int height)
292{
293 struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
294 struct drm_fb_helper_cmdline_mode *cmdline_mode;
295 struct drm_display_mode *mode = NULL;
296
297 if (!fb_help_conn)
298 return mode;
299
300 cmdline_mode = &fb_help_conn->cmdline_mode;
301 if (cmdline_mode->specified == false)
302 return mode;
303
304 /* attempt to find a matching mode in the list of modes
305 * we have gotten so far, if not add a CVT mode that conforms
306 */
307 if (cmdline_mode->rb || cmdline_mode->margins)
308 goto create_mode;
309
310 list_for_each_entry(mode, &connector->modes, head) {
311 /* check width/height */
312 if (mode->hdisplay != cmdline_mode->xres ||
313 mode->vdisplay != cmdline_mode->yres)
314 continue;
315
316 if (cmdline_mode->refresh_specified) {
317 if (mode->vrefresh != cmdline_mode->refresh)
318 continue;
319 }
320
321 if (cmdline_mode->interlace) {
322 if (!(mode->flags & DRM_MODE_FLAG_INTERLACE))
323 continue;
324 }
325 return mode;
326 }
327
328create_mode:
329 mode = drm_cvt_mode(connector->dev, cmdline_mode->xres,
330 cmdline_mode->yres,
331 cmdline_mode->refresh_specified ? cmdline_mode->refresh : 60,
332 cmdline_mode->rb, cmdline_mode->interlace,
333 cmdline_mode->margins);
334 list_add(&mode->head, &connector->modes);
335 return mode;
336}
337
270static bool drm_connector_enabled(struct drm_connector *connector, bool strict) 338static bool drm_connector_enabled(struct drm_connector *connector, bool strict)
271{ 339{
272 bool enable; 340 bool enable;
@@ -317,10 +385,16 @@ static bool drm_target_preferred(struct drm_device *dev,
317 continue; 385 continue;
318 } 386 }
319 387
320 DRM_DEBUG_KMS("looking for preferred mode on connector %d\n", 388 DRM_DEBUG_KMS("looking for cmdline mode on connector %d\n",
321 connector->base.id); 389 connector->base.id);
322 390
323 modes[i] = drm_has_preferred_mode(connector, width, height); 391 /* got for command line mode first */
392 modes[i] = drm_pick_cmdline_mode(connector, width, height);
393 if (!modes[i]) {
394 DRM_DEBUG_KMS("looking for preferred mode on connector %d\n",
395 connector->base.id);
396 modes[i] = drm_has_preferred_mode(connector, width, height);
397 }
324 /* No preferred modes, pick one off the list */ 398 /* No preferred modes, pick one off the list */
325 if (!modes[i] && !list_empty(&connector->modes)) { 399 if (!modes[i] && !list_empty(&connector->modes)) {
326 list_for_each_entry(modes[i], &connector->modes, head) 400 list_for_each_entry(modes[i], &connector->modes, head)
@@ -369,6 +443,8 @@ static int drm_pick_crtcs(struct drm_device *dev,
369 my_score = 1; 443 my_score = 1;
370 if (connector->status == connector_status_connected) 444 if (connector->status == connector_status_connected)
371 my_score++; 445 my_score++;
446 if (drm_has_cmdline_mode(connector))
447 my_score++;
372 if (drm_has_preferred_mode(connector, width, height)) 448 if (drm_has_preferred_mode(connector, width, height))
373 my_score++; 449 my_score++;
374 450
@@ -943,6 +1019,8 @@ bool drm_helper_initial_config(struct drm_device *dev)
943{ 1019{
944 int count = 0; 1020 int count = 0;
945 1021
1022 drm_fb_helper_parse_command_line(dev);
1023
946 count = drm_helper_probe_connector_modes(dev, 1024 count = drm_helper_probe_connector_modes(dev,
947 dev->mode_config.max_width, 1025 dev->mode_config.max_width,
948 dev->mode_config.max_height); 1026 dev->mode_config.max_height);
@@ -950,7 +1028,7 @@ bool drm_helper_initial_config(struct drm_device *dev)
950 /* 1028 /*
951 * we shouldn't end up with no modes here. 1029 * we shouldn't end up with no modes here.
952 */ 1030 */
953 WARN(!count, "Connected connector with 0 modes\n"); 1031 WARN(!count, "No connectors reported connected with modes\n");
954 1032
955 drm_setup_crtcs(dev); 1033 drm_setup_crtcs(dev);
956 1034
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 90d76bacff17..3c0d2b3aed76 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -109,7 +109,9 @@ static struct edid_quirk {
109 109
110 110
111/* Valid EDID header has these bytes */ 111/* Valid EDID header has these bytes */
112static u8 edid_header[] = { 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 }; 112static const u8 edid_header[] = {
113 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00
114};
113 115
114/** 116/**
115 * edid_is_valid - sanity check EDID data 117 * edid_is_valid - sanity check EDID data
@@ -500,6 +502,19 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
500 } 502 }
501 return mode; 503 return mode;
502} 504}
505
506/*
507 * 0 is reserved. The spec says 0x01 fill for unused timings. Some old
508 * monitors fill with ascii space (0x20) instead.
509 */
510static int
511bad_std_timing(u8 a, u8 b)
512{
513 return (a == 0x00 && b == 0x00) ||
514 (a == 0x01 && b == 0x01) ||
515 (a == 0x20 && b == 0x20);
516}
517
503/** 518/**
504 * drm_mode_std - convert standard mode info (width, height, refresh) into mode 519 * drm_mode_std - convert standard mode info (width, height, refresh) into mode
505 * @t: standard timing params 520 * @t: standard timing params
@@ -513,6 +528,7 @@ static struct drm_display_mode *drm_find_dmt(struct drm_device *dev,
513 */ 528 */
514struct drm_display_mode *drm_mode_std(struct drm_device *dev, 529struct drm_display_mode *drm_mode_std(struct drm_device *dev,
515 struct std_timing *t, 530 struct std_timing *t,
531 int revision,
516 int timing_level) 532 int timing_level)
517{ 533{
518 struct drm_display_mode *mode; 534 struct drm_display_mode *mode;
@@ -523,14 +539,20 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
523 unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK) 539 unsigned vfreq = (t->vfreq_aspect & EDID_TIMING_VFREQ_MASK)
524 >> EDID_TIMING_VFREQ_SHIFT; 540 >> EDID_TIMING_VFREQ_SHIFT;
525 541
542 if (bad_std_timing(t->hsize, t->vfreq_aspect))
543 return NULL;
544
526 /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */ 545 /* According to the EDID spec, the hdisplay = hsize * 8 + 248 */
527 hsize = t->hsize * 8 + 248; 546 hsize = t->hsize * 8 + 248;
528 /* vrefresh_rate = vfreq + 60 */ 547 /* vrefresh_rate = vfreq + 60 */
529 vrefresh_rate = vfreq + 60; 548 vrefresh_rate = vfreq + 60;
530 /* the vdisplay is calculated based on the aspect ratio */ 549 /* the vdisplay is calculated based on the aspect ratio */
531 if (aspect_ratio == 0) 550 if (aspect_ratio == 0) {
532 vsize = (hsize * 10) / 16; 551 if (revision < 3)
533 else if (aspect_ratio == 1) 552 vsize = hsize;
553 else
554 vsize = (hsize * 10) / 16;
555 } else if (aspect_ratio == 1)
534 vsize = (hsize * 3) / 4; 556 vsize = (hsize * 3) / 4;
535 else if (aspect_ratio == 2) 557 else if (aspect_ratio == 2)
536 vsize = (hsize * 4) / 5; 558 vsize = (hsize * 4) / 5;
@@ -538,7 +560,8 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
538 vsize = (hsize * 9) / 16; 560 vsize = (hsize * 9) / 16;
539 /* HDTV hack */ 561 /* HDTV hack */
540 if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) { 562 if (hsize == 1360 && vsize == 765 && vrefresh_rate == 60) {
541 mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); 563 mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
564 false);
542 mode->hdisplay = 1366; 565 mode->hdisplay = 1366;
543 mode->vsync_start = mode->vsync_start - 1; 566 mode->vsync_start = mode->vsync_start - 1;
544 mode->vsync_end = mode->vsync_end - 1; 567 mode->vsync_end = mode->vsync_end - 1;
@@ -557,7 +580,8 @@ struct drm_display_mode *drm_mode_std(struct drm_device *dev,
557 mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); 580 mode = drm_gtf_mode(dev, hsize, vsize, vrefresh_rate, 0, 0);
558 break; 581 break;
559 case LEVEL_CVT: 582 case LEVEL_CVT:
560 mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0); 583 mode = drm_cvt_mode(dev, hsize, vsize, vrefresh_rate, 0, 0,
584 false);
561 break; 585 break;
562 } 586 }
563 return mode; 587 return mode;
@@ -779,7 +803,7 @@ static int add_standard_modes(struct drm_connector *connector, struct edid *edid
779 continue; 803 continue;
780 804
781 newmode = drm_mode_std(dev, &edid->standard_timings[i], 805 newmode = drm_mode_std(dev, &edid->standard_timings[i],
782 timing_level); 806 edid->revision, timing_level);
783 if (newmode) { 807 if (newmode) {
784 drm_mode_probed_add(connector, newmode); 808 drm_mode_probed_add(connector, newmode);
785 modes++; 809 modes++;
@@ -829,13 +853,13 @@ static int add_detailed_info(struct drm_connector *connector,
829 case EDID_DETAIL_MONITOR_CPDATA: 853 case EDID_DETAIL_MONITOR_CPDATA:
830 break; 854 break;
831 case EDID_DETAIL_STD_MODES: 855 case EDID_DETAIL_STD_MODES:
832 /* Five modes per detailed section */ 856 for (j = 0; j < 6; i++) {
833 for (j = 0; j < 5; i++) {
834 struct std_timing *std; 857 struct std_timing *std;
835 struct drm_display_mode *newmode; 858 struct drm_display_mode *newmode;
836 859
837 std = &data->data.timings[j]; 860 std = &data->data.timings[j];
838 newmode = drm_mode_std(dev, std, 861 newmode = drm_mode_std(dev, std,
862 edid->revision,
839 timing_level); 863 timing_level);
840 if (newmode) { 864 if (newmode) {
841 drm_mode_probed_add(connector, newmode); 865 drm_mode_probed_add(connector, newmode);
@@ -964,7 +988,9 @@ static int add_detailed_info_eedid(struct drm_connector *connector,
964 struct drm_display_mode *newmode; 988 struct drm_display_mode *newmode;
965 989
966 std = &data->data.timings[j]; 990 std = &data->data.timings[j];
967 newmode = drm_mode_std(dev, std, timing_level); 991 newmode = drm_mode_std(dev, std,
992 edid->revision,
993 timing_level);
968 if (newmode) { 994 if (newmode) {
969 drm_mode_probed_add(connector, newmode); 995 drm_mode_probed_add(connector, newmode);
970 modes++; 996 modes++;
diff --git a/drivers/gpu/drm/drm_fb_helper.c b/drivers/gpu/drm/drm_fb_helper.c
index 2c4671314884..819ddcbfcce5 100644
--- a/drivers/gpu/drm/drm_fb_helper.c
+++ b/drivers/gpu/drm/drm_fb_helper.c
@@ -40,6 +40,199 @@ MODULE_LICENSE("GPL and additional rights");
40 40
41static LIST_HEAD(kernel_fb_helper_list); 41static LIST_HEAD(kernel_fb_helper_list);
42 42
43int drm_fb_helper_add_connector(struct drm_connector *connector)
44{
45 connector->fb_helper_private = kzalloc(sizeof(struct drm_fb_helper_connector), GFP_KERNEL);
46 if (!connector->fb_helper_private)
47 return -ENOMEM;
48
49 return 0;
50}
51EXPORT_SYMBOL(drm_fb_helper_add_connector);
52
53static int my_atoi(const char *name)
54{
55 int val = 0;
56
57 for (;; name++) {
58 switch (*name) {
59 case '0' ... '9':
60 val = 10*val+(*name-'0');
61 break;
62 default:
63 return val;
64 }
65 }
66}
67
68/**
69 * drm_fb_helper_connector_parse_command_line - parse command line for connector
70 * @connector - connector to parse line for
71 * @mode_option - per connector mode option
72 *
73 * This parses the connector specific then generic command lines for
74 * modes and options to configure the connector.
75 *
76 * This uses the same parameters as the fb modedb.c, except for extra
77 * <xres>x<yres>[M][R][-<bpp>][@<refresh>][i][m][eDd]
78 *
79 * enable/enable Digital/disable bit at the end
80 */
81static bool drm_fb_helper_connector_parse_command_line(struct drm_connector *connector,
82 const char *mode_option)
83{
84 const char *name;
85 unsigned int namelen;
86 int res_specified = 0, bpp_specified = 0, refresh_specified = 0;
87 unsigned int xres = 0, yres = 0, bpp = 32, refresh = 0;
88 int yres_specified = 0, cvt = 0, rb = 0, interlace = 0, margins = 0;
89 int i;
90 enum drm_connector_force force = DRM_FORCE_UNSPECIFIED;
91 struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
92 struct drm_fb_helper_cmdline_mode *cmdline_mode;
93
94 if (!fb_help_conn)
95 return false;
96
97 cmdline_mode = &fb_help_conn->cmdline_mode;
98 if (!mode_option)
99 mode_option = fb_mode_option;
100
101 if (!mode_option) {
102 cmdline_mode->specified = false;
103 return false;
104 }
105
106 name = mode_option;
107 namelen = strlen(name);
108 for (i = namelen-1; i >= 0; i--) {
109 switch (name[i]) {
110 case '@':
111 namelen = i;
112 if (!refresh_specified && !bpp_specified &&
113 !yres_specified) {
114 refresh = my_atoi(&name[i+1]);
115 refresh_specified = 1;
116 if (cvt || rb)
117 cvt = 0;
118 } else
119 goto done;
120 break;
121 case '-':
122 namelen = i;
123 if (!bpp_specified && !yres_specified) {
124 bpp = my_atoi(&name[i+1]);
125 bpp_specified = 1;
126 if (cvt || rb)
127 cvt = 0;
128 } else
129 goto done;
130 break;
131 case 'x':
132 if (!yres_specified) {
133 yres = my_atoi(&name[i+1]);
134 yres_specified = 1;
135 } else
136 goto done;
137 case '0' ... '9':
138 break;
139 case 'M':
140 if (!yres_specified)
141 cvt = 1;
142 break;
143 case 'R':
144 if (!cvt)
145 rb = 1;
146 break;
147 case 'm':
148 if (!cvt)
149 margins = 1;
150 break;
151 case 'i':
152 if (!cvt)
153 interlace = 1;
154 break;
155 case 'e':
156 force = DRM_FORCE_ON;
157 break;
158 case 'D':
159 if ((connector->connector_type != DRM_MODE_CONNECTOR_DVII) ||
160 (connector->connector_type != DRM_MODE_CONNECTOR_HDMIB))
161 force = DRM_FORCE_ON;
162 else
163 force = DRM_FORCE_ON_DIGITAL;
164 break;
165 case 'd':
166 force = DRM_FORCE_OFF;
167 break;
168 default:
169 goto done;
170 }
171 }
172 if (i < 0 && yres_specified) {
173 xres = my_atoi(name);
174 res_specified = 1;
175 }
176done:
177
178 DRM_DEBUG_KMS("cmdline mode for connector %s %dx%d@%dHz%s%s%s\n",
179 drm_get_connector_name(connector), xres, yres,
180 (refresh) ? refresh : 60, (rb) ? " reduced blanking" :
181 "", (margins) ? " with margins" : "", (interlace) ?
182 " interlaced" : "");
183
184 if (force) {
185 const char *s;
186 switch (force) {
187 case DRM_FORCE_OFF: s = "OFF"; break;
188 case DRM_FORCE_ON_DIGITAL: s = "ON - dig"; break;
189 default:
190 case DRM_FORCE_ON: s = "ON"; break;
191 }
192
193 DRM_INFO("forcing %s connector %s\n",
194 drm_get_connector_name(connector), s);
195 connector->force = force;
196 }
197
198 if (res_specified) {
199 cmdline_mode->specified = true;
200 cmdline_mode->xres = xres;
201 cmdline_mode->yres = yres;
202 }
203
204 if (refresh_specified) {
205 cmdline_mode->refresh_specified = true;
206 cmdline_mode->refresh = refresh;
207 }
208
209 if (bpp_specified) {
210 cmdline_mode->bpp_specified = true;
211 cmdline_mode->bpp = bpp;
212 }
213 cmdline_mode->rb = rb ? true : false;
214 cmdline_mode->cvt = cvt ? true : false;
215 cmdline_mode->interlace = interlace ? true : false;
216
217 return true;
218}
219
220int drm_fb_helper_parse_command_line(struct drm_device *dev)
221{
222 struct drm_connector *connector;
223
224 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
225 char *option = NULL;
226
227 /* do something on return - turn off connector maybe */
228 if (fb_get_options(drm_get_connector_name(connector), &option))
229 continue;
230
231 drm_fb_helper_connector_parse_command_line(connector, option);
232 }
233 return 0;
234}
235
43bool drm_fb_helper_force_kernel_mode(void) 236bool drm_fb_helper_force_kernel_mode(void)
44{ 237{
45 int i = 0; 238 int i = 0;
@@ -87,6 +280,7 @@ void drm_fb_helper_restore(void)
87} 280}
88EXPORT_SYMBOL(drm_fb_helper_restore); 281EXPORT_SYMBOL(drm_fb_helper_restore);
89 282
283#ifdef CONFIG_MAGIC_SYSRQ
90static void drm_fb_helper_restore_work_fn(struct work_struct *ignored) 284static void drm_fb_helper_restore_work_fn(struct work_struct *ignored)
91{ 285{
92 drm_fb_helper_restore(); 286 drm_fb_helper_restore();
@@ -103,6 +297,7 @@ static struct sysrq_key_op sysrq_drm_fb_helper_restore_op = {
103 .help_msg = "force-fb(V)", 297 .help_msg = "force-fb(V)",
104 .action_msg = "Restore framebuffer console", 298 .action_msg = "Restore framebuffer console",
105}; 299};
300#endif
106 301
107static void drm_fb_helper_on(struct fb_info *info) 302static void drm_fb_helper_on(struct fb_info *info)
108{ 303{
@@ -484,6 +679,8 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
484 uint32_t fb_height, 679 uint32_t fb_height,
485 uint32_t surface_width, 680 uint32_t surface_width,
486 uint32_t surface_height, 681 uint32_t surface_height,
682 uint32_t surface_depth,
683 uint32_t surface_bpp,
487 struct drm_framebuffer **fb_ptr)) 684 struct drm_framebuffer **fb_ptr))
488{ 685{
489 struct drm_crtc *crtc; 686 struct drm_crtc *crtc;
@@ -497,8 +694,43 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
497 struct drm_framebuffer *fb; 694 struct drm_framebuffer *fb;
498 struct drm_mode_set *modeset = NULL; 695 struct drm_mode_set *modeset = NULL;
499 struct drm_fb_helper *fb_helper; 696 struct drm_fb_helper *fb_helper;
697 uint32_t surface_depth = 24, surface_bpp = 32;
500 698
501 /* first up get a count of crtcs now in use and new min/maxes width/heights */ 699 /* first up get a count of crtcs now in use and new min/maxes width/heights */
700 list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
701 struct drm_fb_helper_connector *fb_help_conn = connector->fb_helper_private;
702
703 struct drm_fb_helper_cmdline_mode *cmdline_mode;
704
705 if (!fb_help_conn)
706 continue;
707
708 cmdline_mode = &fb_help_conn->cmdline_mode;
709
710 if (cmdline_mode->bpp_specified) {
711 switch (cmdline_mode->bpp) {
712 case 8:
713 surface_depth = surface_bpp = 8;
714 break;
715 case 15:
716 surface_depth = 15;
717 surface_bpp = 16;
718 break;
719 case 16:
720 surface_depth = surface_bpp = 16;
721 break;
722 case 24:
723 surface_depth = surface_bpp = 24;
724 break;
725 case 32:
726 surface_depth = 24;
727 surface_bpp = 32;
728 break;
729 }
730 break;
731 }
732 }
733
502 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) { 734 list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
503 if (drm_helper_crtc_in_use(crtc)) { 735 if (drm_helper_crtc_in_use(crtc)) {
504 if (crtc->desired_mode) { 736 if (crtc->desired_mode) {
@@ -527,7 +759,8 @@ int drm_fb_helper_single_fb_probe(struct drm_device *dev,
527 /* do we have an fb already? */ 759 /* do we have an fb already? */
528 if (list_empty(&dev->mode_config.fb_kernel_list)) { 760 if (list_empty(&dev->mode_config.fb_kernel_list)) {
529 ret = (*fb_create)(dev, fb_width, fb_height, surface_width, 761 ret = (*fb_create)(dev, fb_width, fb_height, surface_width,
530 surface_height, &fb); 762 surface_height, surface_depth, surface_bpp,
763 &fb);
531 if (ret) 764 if (ret)
532 return -EINVAL; 765 return -EINVAL;
533 new_fb = 1; 766 new_fb = 1;
diff --git a/drivers/gpu/drm/drm_modes.c b/drivers/gpu/drm/drm_modes.c
index 49404ce1666e..51f677215f1d 100644
--- a/drivers/gpu/drm/drm_modes.c
+++ b/drivers/gpu/drm/drm_modes.c
@@ -88,7 +88,7 @@ EXPORT_SYMBOL(drm_mode_debug_printmodeline);
88#define HV_FACTOR 1000 88#define HV_FACTOR 1000
89struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay, 89struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
90 int vdisplay, int vrefresh, 90 int vdisplay, int vrefresh,
91 bool reduced, bool interlaced) 91 bool reduced, bool interlaced, bool margins)
92{ 92{
93 /* 1) top/bottom margin size (% of height) - default: 1.8, */ 93 /* 1) top/bottom margin size (% of height) - default: 1.8, */
94#define CVT_MARGIN_PERCENTAGE 18 94#define CVT_MARGIN_PERCENTAGE 18
@@ -101,7 +101,6 @@ struct drm_display_mode *drm_cvt_mode(struct drm_device *dev, int hdisplay,
101 /* Pixel Clock step (kHz) */ 101 /* Pixel Clock step (kHz) */
102#define CVT_CLOCK_STEP 250 102#define CVT_CLOCK_STEP 250
103 struct drm_display_mode *drm_mode; 103 struct drm_display_mode *drm_mode;
104 bool margins = false;
105 unsigned int vfieldrate, hperiod; 104 unsigned int vfieldrate, hperiod;
106 int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync; 105 int hdisplay_rnd, hmargin, vdisplay_rnd, vmargin, vsync;
107 int interlace; 106 int interlace;
diff --git a/drivers/gpu/drm/drm_vm.c b/drivers/gpu/drm/drm_vm.c
index 7e1fbe5d4779..4ac900f4647f 100644
--- a/drivers/gpu/drm/drm_vm.c
+++ b/drivers/gpu/drm/drm_vm.c
@@ -369,28 +369,28 @@ static int drm_vm_sg_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
369} 369}
370 370
371/** AGP virtual memory operations */ 371/** AGP virtual memory operations */
372static struct vm_operations_struct drm_vm_ops = { 372static const struct vm_operations_struct drm_vm_ops = {
373 .fault = drm_vm_fault, 373 .fault = drm_vm_fault,
374 .open = drm_vm_open, 374 .open = drm_vm_open,
375 .close = drm_vm_close, 375 .close = drm_vm_close,
376}; 376};
377 377
378/** Shared virtual memory operations */ 378/** Shared virtual memory operations */
379static struct vm_operations_struct drm_vm_shm_ops = { 379static const struct vm_operations_struct drm_vm_shm_ops = {
380 .fault = drm_vm_shm_fault, 380 .fault = drm_vm_shm_fault,
381 .open = drm_vm_open, 381 .open = drm_vm_open,
382 .close = drm_vm_shm_close, 382 .close = drm_vm_shm_close,
383}; 383};
384 384
385/** DMA virtual memory operations */ 385/** DMA virtual memory operations */
386static struct vm_operations_struct drm_vm_dma_ops = { 386static const struct vm_operations_struct drm_vm_dma_ops = {
387 .fault = drm_vm_dma_fault, 387 .fault = drm_vm_dma_fault,
388 .open = drm_vm_open, 388 .open = drm_vm_open,
389 .close = drm_vm_close, 389 .close = drm_vm_close,
390}; 390};
391 391
392/** Scatter-gather virtual memory operations */ 392/** Scatter-gather virtual memory operations */
393static struct vm_operations_struct drm_vm_sg_ops = { 393static const struct vm_operations_struct drm_vm_sg_ops = {
394 .fault = drm_vm_sg_fault, 394 .fault = drm_vm_sg_fault,
395 .open = drm_vm_open, 395 .open = drm_vm_open,
396 .close = drm_vm_close, 396 .close = drm_vm_close,
diff --git a/drivers/gpu/drm/i915/intel_fb.c b/drivers/gpu/drm/i915/intel_fb.c
index 7ba4a232a97f..e85d7e9eed7d 100644
--- a/drivers/gpu/drm/i915/intel_fb.c
+++ b/drivers/gpu/drm/i915/intel_fb.c
@@ -110,6 +110,7 @@ EXPORT_SYMBOL(intelfb_resize);
110static int intelfb_create(struct drm_device *dev, uint32_t fb_width, 110static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
111 uint32_t fb_height, uint32_t surface_width, 111 uint32_t fb_height, uint32_t surface_width,
112 uint32_t surface_height, 112 uint32_t surface_height,
113 uint32_t surface_depth, uint32_t surface_bpp,
113 struct drm_framebuffer **fb_p) 114 struct drm_framebuffer **fb_p)
114{ 115{
115 struct fb_info *info; 116 struct fb_info *info;
@@ -125,9 +126,9 @@ static int intelfb_create(struct drm_device *dev, uint32_t fb_width,
125 mode_cmd.width = surface_width; 126 mode_cmd.width = surface_width;
126 mode_cmd.height = surface_height; 127 mode_cmd.height = surface_height;
127 128
128 mode_cmd.bpp = 32; 129 mode_cmd.bpp = surface_bpp;
129 mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64); 130 mode_cmd.pitch = ALIGN(mode_cmd.width * ((mode_cmd.bpp + 1) / 8), 64);
130 mode_cmd.depth = 24; 131 mode_cmd.depth = surface_depth;
131 132
132 size = mode_cmd.pitch * mode_cmd.height; 133 size = mode_cmd.pitch * mode_cmd.height;
133 size = ALIGN(size, PAGE_SIZE); 134 size = ALIGN(size, PAGE_SIZE);
diff --git a/drivers/gpu/drm/radeon/.gitignore b/drivers/gpu/drm/radeon/.gitignore
new file mode 100644
index 000000000000..403eb3a5891f
--- /dev/null
+++ b/drivers/gpu/drm/radeon/.gitignore
@@ -0,0 +1,3 @@
1mkregtable
2*_reg_safe.h
3
diff --git a/drivers/gpu/drm/radeon/avivod.h b/drivers/gpu/drm/radeon/avivod.h
index e2b92c445bab..d4e6e6e4a938 100644
--- a/drivers/gpu/drm/radeon/avivod.h
+++ b/drivers/gpu/drm/radeon/avivod.h
@@ -57,13 +57,4 @@
57#define VGA_RENDER_CONTROL 0x0300 57#define VGA_RENDER_CONTROL 0x0300
58#define VGA_VSTATUS_CNTL_MASK 0x00030000 58#define VGA_VSTATUS_CNTL_MASK 0x00030000
59 59
60/* AVIVO disable VGA rendering */
61static inline void radeon_avivo_vga_render_disable(struct radeon_device *rdev)
62{
63 u32 vga_render;
64 vga_render = RREG32(VGA_RENDER_CONTROL);
65 vga_render &= ~VGA_VSTATUS_CNTL_MASK;
66 WREG32(VGA_RENDER_CONTROL, vga_render);
67}
68
69#endif 60#endif
diff --git a/drivers/gpu/drm/radeon/r100.c b/drivers/gpu/drm/radeon/r100.c
index be51c5f7d0f6..e6cce24de802 100644
--- a/drivers/gpu/drm/radeon/r100.c
+++ b/drivers/gpu/drm/radeon/r100.c
@@ -863,13 +863,11 @@ int r100_cs_parse_packet0(struct radeon_cs_parser *p,
863void r100_cs_dump_packet(struct radeon_cs_parser *p, 863void r100_cs_dump_packet(struct radeon_cs_parser *p,
864 struct radeon_cs_packet *pkt) 864 struct radeon_cs_packet *pkt)
865{ 865{
866 struct radeon_cs_chunk *ib_chunk;
867 volatile uint32_t *ib; 866 volatile uint32_t *ib;
868 unsigned i; 867 unsigned i;
869 unsigned idx; 868 unsigned idx;
870 869
871 ib = p->ib->ptr; 870 ib = p->ib->ptr;
872 ib_chunk = &p->chunks[p->chunk_ib_idx];
873 idx = pkt->idx; 871 idx = pkt->idx;
874 for (i = 0; i <= (pkt->count + 1); i++, idx++) { 872 for (i = 0; i <= (pkt->count + 1); i++, idx++) {
875 DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]); 873 DRM_INFO("ib[%d]=0x%08X\n", idx, ib[idx]);
@@ -896,7 +894,7 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
896 idx, ib_chunk->length_dw); 894 idx, ib_chunk->length_dw);
897 return -EINVAL; 895 return -EINVAL;
898 } 896 }
899 header = ib_chunk->kdata[idx]; 897 header = radeon_get_ib_value(p, idx);
900 pkt->idx = idx; 898 pkt->idx = idx;
901 pkt->type = CP_PACKET_GET_TYPE(header); 899 pkt->type = CP_PACKET_GET_TYPE(header);
902 pkt->count = CP_PACKET_GET_COUNT(header); 900 pkt->count = CP_PACKET_GET_COUNT(header);
@@ -939,7 +937,6 @@ int r100_cs_packet_parse(struct radeon_cs_parser *p,
939 */ 937 */
940int r100_cs_packet_parse_vline(struct radeon_cs_parser *p) 938int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
941{ 939{
942 struct radeon_cs_chunk *ib_chunk;
943 struct drm_mode_object *obj; 940 struct drm_mode_object *obj;
944 struct drm_crtc *crtc; 941 struct drm_crtc *crtc;
945 struct radeon_crtc *radeon_crtc; 942 struct radeon_crtc *radeon_crtc;
@@ -947,8 +944,9 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
947 int crtc_id; 944 int crtc_id;
948 int r; 945 int r;
949 uint32_t header, h_idx, reg; 946 uint32_t header, h_idx, reg;
947 volatile uint32_t *ib;
950 948
951 ib_chunk = &p->chunks[p->chunk_ib_idx]; 949 ib = p->ib->ptr;
952 950
953 /* parse the wait until */ 951 /* parse the wait until */
954 r = r100_cs_packet_parse(p, &waitreloc, p->idx); 952 r = r100_cs_packet_parse(p, &waitreloc, p->idx);
@@ -963,24 +961,24 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
963 return r; 961 return r;
964 } 962 }
965 963
966 if (ib_chunk->kdata[waitreloc.idx + 1] != RADEON_WAIT_CRTC_VLINE) { 964 if (radeon_get_ib_value(p, waitreloc.idx + 1) != RADEON_WAIT_CRTC_VLINE) {
967 DRM_ERROR("vline wait had illegal wait until\n"); 965 DRM_ERROR("vline wait had illegal wait until\n");
968 r = -EINVAL; 966 r = -EINVAL;
969 return r; 967 return r;
970 } 968 }
971 969
972 /* jump over the NOP */ 970 /* jump over the NOP */
973 r = r100_cs_packet_parse(p, &p3reloc, p->idx); 971 r = r100_cs_packet_parse(p, &p3reloc, p->idx + waitreloc.count + 2);
974 if (r) 972 if (r)
975 return r; 973 return r;
976 974
977 h_idx = p->idx - 2; 975 h_idx = p->idx - 2;
978 p->idx += waitreloc.count; 976 p->idx += waitreloc.count + 2;
979 p->idx += p3reloc.count; 977 p->idx += p3reloc.count + 2;
980 978
981 header = ib_chunk->kdata[h_idx]; 979 header = radeon_get_ib_value(p, h_idx);
982 crtc_id = ib_chunk->kdata[h_idx + 5]; 980 crtc_id = radeon_get_ib_value(p, h_idx + 5);
983 reg = ib_chunk->kdata[h_idx] >> 2; 981 reg = header >> 2;
984 mutex_lock(&p->rdev->ddev->mode_config.mutex); 982 mutex_lock(&p->rdev->ddev->mode_config.mutex);
985 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC); 983 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
986 if (!obj) { 984 if (!obj) {
@@ -994,16 +992,16 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
994 992
995 if (!crtc->enabled) { 993 if (!crtc->enabled) {
996 /* if the CRTC isn't enabled - we need to nop out the wait until */ 994 /* if the CRTC isn't enabled - we need to nop out the wait until */
997 ib_chunk->kdata[h_idx + 2] = PACKET2(0); 995 ib[h_idx + 2] = PACKET2(0);
998 ib_chunk->kdata[h_idx + 3] = PACKET2(0); 996 ib[h_idx + 3] = PACKET2(0);
999 } else if (crtc_id == 1) { 997 } else if (crtc_id == 1) {
1000 switch (reg) { 998 switch (reg) {
1001 case AVIVO_D1MODE_VLINE_START_END: 999 case AVIVO_D1MODE_VLINE_START_END:
1002 header &= R300_CP_PACKET0_REG_MASK; 1000 header &= ~R300_CP_PACKET0_REG_MASK;
1003 header |= AVIVO_D2MODE_VLINE_START_END >> 2; 1001 header |= AVIVO_D2MODE_VLINE_START_END >> 2;
1004 break; 1002 break;
1005 case RADEON_CRTC_GUI_TRIG_VLINE: 1003 case RADEON_CRTC_GUI_TRIG_VLINE:
1006 header &= R300_CP_PACKET0_REG_MASK; 1004 header &= ~R300_CP_PACKET0_REG_MASK;
1007 header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2; 1005 header |= RADEON_CRTC2_GUI_TRIG_VLINE >> 2;
1008 break; 1006 break;
1009 default: 1007 default:
@@ -1011,8 +1009,8 @@ int r100_cs_packet_parse_vline(struct radeon_cs_parser *p)
1011 r = -EINVAL; 1009 r = -EINVAL;
1012 goto out; 1010 goto out;
1013 } 1011 }
1014 ib_chunk->kdata[h_idx] = header; 1012 ib[h_idx] = header;
1015 ib_chunk->kdata[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1; 1013 ib[h_idx + 3] |= RADEON_ENG_DISPLAY_SELECT_CRTC1;
1016 } 1014 }
1017out: 1015out:
1018 mutex_unlock(&p->rdev->ddev->mode_config.mutex); 1016 mutex_unlock(&p->rdev->ddev->mode_config.mutex);
@@ -1033,7 +1031,6 @@ out:
1033int r100_cs_packet_next_reloc(struct radeon_cs_parser *p, 1031int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
1034 struct radeon_cs_reloc **cs_reloc) 1032 struct radeon_cs_reloc **cs_reloc)
1035{ 1033{
1036 struct radeon_cs_chunk *ib_chunk;
1037 struct radeon_cs_chunk *relocs_chunk; 1034 struct radeon_cs_chunk *relocs_chunk;
1038 struct radeon_cs_packet p3reloc; 1035 struct radeon_cs_packet p3reloc;
1039 unsigned idx; 1036 unsigned idx;
@@ -1044,7 +1041,6 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
1044 return -EINVAL; 1041 return -EINVAL;
1045 } 1042 }
1046 *cs_reloc = NULL; 1043 *cs_reloc = NULL;
1047 ib_chunk = &p->chunks[p->chunk_ib_idx];
1048 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 1044 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
1049 r = r100_cs_packet_parse(p, &p3reloc, p->idx); 1045 r = r100_cs_packet_parse(p, &p3reloc, p->idx);
1050 if (r) { 1046 if (r) {
@@ -1057,7 +1053,7 @@ int r100_cs_packet_next_reloc(struct radeon_cs_parser *p,
1057 r100_cs_dump_packet(p, &p3reloc); 1053 r100_cs_dump_packet(p, &p3reloc);
1058 return -EINVAL; 1054 return -EINVAL;
1059 } 1055 }
1060 idx = ib_chunk->kdata[p3reloc.idx + 1]; 1056 idx = radeon_get_ib_value(p, p3reloc.idx + 1);
1061 if (idx >= relocs_chunk->length_dw) { 1057 if (idx >= relocs_chunk->length_dw) {
1062 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 1058 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
1063 idx, relocs_chunk->length_dw); 1059 idx, relocs_chunk->length_dw);
@@ -1126,7 +1122,6 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1126 struct radeon_cs_packet *pkt, 1122 struct radeon_cs_packet *pkt,
1127 unsigned idx, unsigned reg) 1123 unsigned idx, unsigned reg)
1128{ 1124{
1129 struct radeon_cs_chunk *ib_chunk;
1130 struct radeon_cs_reloc *reloc; 1125 struct radeon_cs_reloc *reloc;
1131 struct r100_cs_track *track; 1126 struct r100_cs_track *track;
1132 volatile uint32_t *ib; 1127 volatile uint32_t *ib;
@@ -1134,11 +1129,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1134 int r; 1129 int r;
1135 int i, face; 1130 int i, face;
1136 u32 tile_flags = 0; 1131 u32 tile_flags = 0;
1132 u32 idx_value;
1137 1133
1138 ib = p->ib->ptr; 1134 ib = p->ib->ptr;
1139 ib_chunk = &p->chunks[p->chunk_ib_idx];
1140 track = (struct r100_cs_track *)p->track; 1135 track = (struct r100_cs_track *)p->track;
1141 1136
1137 idx_value = radeon_get_ib_value(p, idx);
1138
1142 switch (reg) { 1139 switch (reg) {
1143 case RADEON_CRTC_GUI_TRIG_VLINE: 1140 case RADEON_CRTC_GUI_TRIG_VLINE:
1144 r = r100_cs_packet_parse_vline(p); 1141 r = r100_cs_packet_parse_vline(p);
@@ -1166,8 +1163,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1166 return r; 1163 return r;
1167 } 1164 }
1168 track->zb.robj = reloc->robj; 1165 track->zb.robj = reloc->robj;
1169 track->zb.offset = ib_chunk->kdata[idx]; 1166 track->zb.offset = idx_value;
1170 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1167 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1171 break; 1168 break;
1172 case RADEON_RB3D_COLOROFFSET: 1169 case RADEON_RB3D_COLOROFFSET:
1173 r = r100_cs_packet_next_reloc(p, &reloc); 1170 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1178,8 +1175,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1178 return r; 1175 return r;
1179 } 1176 }
1180 track->cb[0].robj = reloc->robj; 1177 track->cb[0].robj = reloc->robj;
1181 track->cb[0].offset = ib_chunk->kdata[idx]; 1178 track->cb[0].offset = idx_value;
1182 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1179 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1183 break; 1180 break;
1184 case RADEON_PP_TXOFFSET_0: 1181 case RADEON_PP_TXOFFSET_0:
1185 case RADEON_PP_TXOFFSET_1: 1182 case RADEON_PP_TXOFFSET_1:
@@ -1192,7 +1189,7 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1192 r100_cs_dump_packet(p, pkt); 1189 r100_cs_dump_packet(p, pkt);
1193 return r; 1190 return r;
1194 } 1191 }
1195 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1192 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1196 track->textures[i].robj = reloc->robj; 1193 track->textures[i].robj = reloc->robj;
1197 break; 1194 break;
1198 case RADEON_PP_CUBIC_OFFSET_T0_0: 1195 case RADEON_PP_CUBIC_OFFSET_T0_0:
@@ -1208,8 +1205,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1208 r100_cs_dump_packet(p, pkt); 1205 r100_cs_dump_packet(p, pkt);
1209 return r; 1206 return r;
1210 } 1207 }
1211 track->textures[0].cube_info[i].offset = ib_chunk->kdata[idx]; 1208 track->textures[0].cube_info[i].offset = idx_value;
1212 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1209 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1213 track->textures[0].cube_info[i].robj = reloc->robj; 1210 track->textures[0].cube_info[i].robj = reloc->robj;
1214 break; 1211 break;
1215 case RADEON_PP_CUBIC_OFFSET_T1_0: 1212 case RADEON_PP_CUBIC_OFFSET_T1_0:
@@ -1225,8 +1222,8 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1225 r100_cs_dump_packet(p, pkt); 1222 r100_cs_dump_packet(p, pkt);
1226 return r; 1223 return r;
1227 } 1224 }
1228 track->textures[1].cube_info[i].offset = ib_chunk->kdata[idx]; 1225 track->textures[1].cube_info[i].offset = idx_value;
1229 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1226 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1230 track->textures[1].cube_info[i].robj = reloc->robj; 1227 track->textures[1].cube_info[i].robj = reloc->robj;
1231 break; 1228 break;
1232 case RADEON_PP_CUBIC_OFFSET_T2_0: 1229 case RADEON_PP_CUBIC_OFFSET_T2_0:
@@ -1242,12 +1239,12 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1242 r100_cs_dump_packet(p, pkt); 1239 r100_cs_dump_packet(p, pkt);
1243 return r; 1240 return r;
1244 } 1241 }
1245 track->textures[2].cube_info[i].offset = ib_chunk->kdata[idx]; 1242 track->textures[2].cube_info[i].offset = idx_value;
1246 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1243 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1247 track->textures[2].cube_info[i].robj = reloc->robj; 1244 track->textures[2].cube_info[i].robj = reloc->robj;
1248 break; 1245 break;
1249 case RADEON_RE_WIDTH_HEIGHT: 1246 case RADEON_RE_WIDTH_HEIGHT:
1250 track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF); 1247 track->maxy = ((idx_value >> 16) & 0x7FF);
1251 break; 1248 break;
1252 case RADEON_RB3D_COLORPITCH: 1249 case RADEON_RB3D_COLORPITCH:
1253 r = r100_cs_packet_next_reloc(p, &reloc); 1250 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1263,17 +1260,17 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1263 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 1260 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
1264 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; 1261 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
1265 1262
1266 tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); 1263 tmp = idx_value & ~(0x7 << 16);
1267 tmp |= tile_flags; 1264 tmp |= tile_flags;
1268 ib[idx] = tmp; 1265 ib[idx] = tmp;
1269 1266
1270 track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK; 1267 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
1271 break; 1268 break;
1272 case RADEON_RB3D_DEPTHPITCH: 1269 case RADEON_RB3D_DEPTHPITCH:
1273 track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK; 1270 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
1274 break; 1271 break;
1275 case RADEON_RB3D_CNTL: 1272 case RADEON_RB3D_CNTL:
1276 switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { 1273 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
1277 case 7: 1274 case 7:
1278 case 8: 1275 case 8:
1279 case 9: 1276 case 9:
@@ -1291,13 +1288,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1291 break; 1288 break;
1292 default: 1289 default:
1293 DRM_ERROR("Invalid color buffer format (%d) !\n", 1290 DRM_ERROR("Invalid color buffer format (%d) !\n",
1294 ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); 1291 ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
1295 return -EINVAL; 1292 return -EINVAL;
1296 } 1293 }
1297 track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE); 1294 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
1298 break; 1295 break;
1299 case RADEON_RB3D_ZSTENCILCNTL: 1296 case RADEON_RB3D_ZSTENCILCNTL:
1300 switch (ib_chunk->kdata[idx] & 0xf) { 1297 switch (idx_value & 0xf) {
1301 case 0: 1298 case 0:
1302 track->zb.cpp = 2; 1299 track->zb.cpp = 2;
1303 break; 1300 break;
@@ -1321,44 +1318,44 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1321 r100_cs_dump_packet(p, pkt); 1318 r100_cs_dump_packet(p, pkt);
1322 return r; 1319 return r;
1323 } 1320 }
1324 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1321 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1325 break; 1322 break;
1326 case RADEON_PP_CNTL: 1323 case RADEON_PP_CNTL:
1327 { 1324 {
1328 uint32_t temp = ib_chunk->kdata[idx] >> 4; 1325 uint32_t temp = idx_value >> 4;
1329 for (i = 0; i < track->num_texture; i++) 1326 for (i = 0; i < track->num_texture; i++)
1330 track->textures[i].enabled = !!(temp & (1 << i)); 1327 track->textures[i].enabled = !!(temp & (1 << i));
1331 } 1328 }
1332 break; 1329 break;
1333 case RADEON_SE_VF_CNTL: 1330 case RADEON_SE_VF_CNTL:
1334 track->vap_vf_cntl = ib_chunk->kdata[idx]; 1331 track->vap_vf_cntl = idx_value;
1335 break; 1332 break;
1336 case RADEON_SE_VTX_FMT: 1333 case RADEON_SE_VTX_FMT:
1337 track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx]); 1334 track->vtx_size = r100_get_vtx_size(idx_value);
1338 break; 1335 break;
1339 case RADEON_PP_TEX_SIZE_0: 1336 case RADEON_PP_TEX_SIZE_0:
1340 case RADEON_PP_TEX_SIZE_1: 1337 case RADEON_PP_TEX_SIZE_1:
1341 case RADEON_PP_TEX_SIZE_2: 1338 case RADEON_PP_TEX_SIZE_2:
1342 i = (reg - RADEON_PP_TEX_SIZE_0) / 8; 1339 i = (reg - RADEON_PP_TEX_SIZE_0) / 8;
1343 track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1; 1340 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
1344 track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; 1341 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
1345 break; 1342 break;
1346 case RADEON_PP_TEX_PITCH_0: 1343 case RADEON_PP_TEX_PITCH_0:
1347 case RADEON_PP_TEX_PITCH_1: 1344 case RADEON_PP_TEX_PITCH_1:
1348 case RADEON_PP_TEX_PITCH_2: 1345 case RADEON_PP_TEX_PITCH_2:
1349 i = (reg - RADEON_PP_TEX_PITCH_0) / 8; 1346 i = (reg - RADEON_PP_TEX_PITCH_0) / 8;
1350 track->textures[i].pitch = ib_chunk->kdata[idx] + 32; 1347 track->textures[i].pitch = idx_value + 32;
1351 break; 1348 break;
1352 case RADEON_PP_TXFILTER_0: 1349 case RADEON_PP_TXFILTER_0:
1353 case RADEON_PP_TXFILTER_1: 1350 case RADEON_PP_TXFILTER_1:
1354 case RADEON_PP_TXFILTER_2: 1351 case RADEON_PP_TXFILTER_2:
1355 i = (reg - RADEON_PP_TXFILTER_0) / 24; 1352 i = (reg - RADEON_PP_TXFILTER_0) / 24;
1356 track->textures[i].num_levels = ((ib_chunk->kdata[idx] & RADEON_MAX_MIP_LEVEL_MASK) 1353 track->textures[i].num_levels = ((idx_value & RADEON_MAX_MIP_LEVEL_MASK)
1357 >> RADEON_MAX_MIP_LEVEL_SHIFT); 1354 >> RADEON_MAX_MIP_LEVEL_SHIFT);
1358 tmp = (ib_chunk->kdata[idx] >> 23) & 0x7; 1355 tmp = (idx_value >> 23) & 0x7;
1359 if (tmp == 2 || tmp == 6) 1356 if (tmp == 2 || tmp == 6)
1360 track->textures[i].roundup_w = false; 1357 track->textures[i].roundup_w = false;
1361 tmp = (ib_chunk->kdata[idx] >> 27) & 0x7; 1358 tmp = (idx_value >> 27) & 0x7;
1362 if (tmp == 2 || tmp == 6) 1359 if (tmp == 2 || tmp == 6)
1363 track->textures[i].roundup_h = false; 1360 track->textures[i].roundup_h = false;
1364 break; 1361 break;
@@ -1366,16 +1363,16 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1366 case RADEON_PP_TXFORMAT_1: 1363 case RADEON_PP_TXFORMAT_1:
1367 case RADEON_PP_TXFORMAT_2: 1364 case RADEON_PP_TXFORMAT_2:
1368 i = (reg - RADEON_PP_TXFORMAT_0) / 24; 1365 i = (reg - RADEON_PP_TXFORMAT_0) / 24;
1369 if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_NON_POWER2) { 1366 if (idx_value & RADEON_TXFORMAT_NON_POWER2) {
1370 track->textures[i].use_pitch = 1; 1367 track->textures[i].use_pitch = 1;
1371 } else { 1368 } else {
1372 track->textures[i].use_pitch = 0; 1369 track->textures[i].use_pitch = 0;
1373 track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); 1370 track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
1374 track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); 1371 track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
1375 } 1372 }
1376 if (ib_chunk->kdata[idx] & RADEON_TXFORMAT_CUBIC_MAP_ENABLE) 1373 if (idx_value & RADEON_TXFORMAT_CUBIC_MAP_ENABLE)
1377 track->textures[i].tex_coord_type = 2; 1374 track->textures[i].tex_coord_type = 2;
1378 switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) { 1375 switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
1379 case RADEON_TXFORMAT_I8: 1376 case RADEON_TXFORMAT_I8:
1380 case RADEON_TXFORMAT_RGB332: 1377 case RADEON_TXFORMAT_RGB332:
1381 case RADEON_TXFORMAT_Y8: 1378 case RADEON_TXFORMAT_Y8:
@@ -1402,13 +1399,13 @@ static int r100_packet0_check(struct radeon_cs_parser *p,
1402 track->textures[i].cpp = 4; 1399 track->textures[i].cpp = 4;
1403 break; 1400 break;
1404 } 1401 }
1405 track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf); 1402 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
1406 track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf); 1403 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
1407 break; 1404 break;
1408 case RADEON_PP_CUBIC_FACES_0: 1405 case RADEON_PP_CUBIC_FACES_0:
1409 case RADEON_PP_CUBIC_FACES_1: 1406 case RADEON_PP_CUBIC_FACES_1:
1410 case RADEON_PP_CUBIC_FACES_2: 1407 case RADEON_PP_CUBIC_FACES_2:
1411 tmp = ib_chunk->kdata[idx]; 1408 tmp = idx_value;
1412 i = (reg - RADEON_PP_CUBIC_FACES_0) / 4; 1409 i = (reg - RADEON_PP_CUBIC_FACES_0) / 4;
1413 for (face = 0; face < 4; face++) { 1410 for (face = 0; face < 4; face++) {
1414 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); 1411 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
@@ -1427,15 +1424,14 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
1427 struct radeon_cs_packet *pkt, 1424 struct radeon_cs_packet *pkt,
1428 struct radeon_object *robj) 1425 struct radeon_object *robj)
1429{ 1426{
1430 struct radeon_cs_chunk *ib_chunk;
1431 unsigned idx; 1427 unsigned idx;
1432 1428 u32 value;
1433 ib_chunk = &p->chunks[p->chunk_ib_idx];
1434 idx = pkt->idx + 1; 1429 idx = pkt->idx + 1;
1435 if ((ib_chunk->kdata[idx+2] + 1) > radeon_object_size(robj)) { 1430 value = radeon_get_ib_value(p, idx + 2);
1431 if ((value + 1) > radeon_object_size(robj)) {
1436 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER " 1432 DRM_ERROR("[drm] Buffer too small for PACKET3 INDX_BUFFER "
1437 "(need %u have %lu) !\n", 1433 "(need %u have %lu) !\n",
1438 ib_chunk->kdata[idx+2] + 1, 1434 value + 1,
1439 radeon_object_size(robj)); 1435 radeon_object_size(robj));
1440 return -EINVAL; 1436 return -EINVAL;
1441 } 1437 }
@@ -1445,59 +1441,20 @@ int r100_cs_track_check_pkt3_indx_buffer(struct radeon_cs_parser *p,
1445static int r100_packet3_check(struct radeon_cs_parser *p, 1441static int r100_packet3_check(struct radeon_cs_parser *p,
1446 struct radeon_cs_packet *pkt) 1442 struct radeon_cs_packet *pkt)
1447{ 1443{
1448 struct radeon_cs_chunk *ib_chunk;
1449 struct radeon_cs_reloc *reloc; 1444 struct radeon_cs_reloc *reloc;
1450 struct r100_cs_track *track; 1445 struct r100_cs_track *track;
1451 unsigned idx; 1446 unsigned idx;
1452 unsigned i, c;
1453 volatile uint32_t *ib; 1447 volatile uint32_t *ib;
1454 int r; 1448 int r;
1455 1449
1456 ib = p->ib->ptr; 1450 ib = p->ib->ptr;
1457 ib_chunk = &p->chunks[p->chunk_ib_idx];
1458 idx = pkt->idx + 1; 1451 idx = pkt->idx + 1;
1459 track = (struct r100_cs_track *)p->track; 1452 track = (struct r100_cs_track *)p->track;
1460 switch (pkt->opcode) { 1453 switch (pkt->opcode) {
1461 case PACKET3_3D_LOAD_VBPNTR: 1454 case PACKET3_3D_LOAD_VBPNTR:
1462 c = ib_chunk->kdata[idx++]; 1455 r = r100_packet3_load_vbpntr(p, pkt, idx);
1463 track->num_arrays = c; 1456 if (r)
1464 for (i = 0; i < (c - 1); i += 2, idx += 3) { 1457 return r;
1465 r = r100_cs_packet_next_reloc(p, &reloc);
1466 if (r) {
1467 DRM_ERROR("No reloc for packet3 %d\n",
1468 pkt->opcode);
1469 r100_cs_dump_packet(p, pkt);
1470 return r;
1471 }
1472 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1473 track->arrays[i + 0].robj = reloc->robj;
1474 track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
1475 track->arrays[i + 0].esize &= 0x7F;
1476 r = r100_cs_packet_next_reloc(p, &reloc);
1477 if (r) {
1478 DRM_ERROR("No reloc for packet3 %d\n",
1479 pkt->opcode);
1480 r100_cs_dump_packet(p, pkt);
1481 return r;
1482 }
1483 ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
1484 track->arrays[i + 1].robj = reloc->robj;
1485 track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
1486 track->arrays[i + 1].esize &= 0x7F;
1487 }
1488 if (c & 1) {
1489 r = r100_cs_packet_next_reloc(p, &reloc);
1490 if (r) {
1491 DRM_ERROR("No reloc for packet3 %d\n",
1492 pkt->opcode);
1493 r100_cs_dump_packet(p, pkt);
1494 return r;
1495 }
1496 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1497 track->arrays[i + 0].robj = reloc->robj;
1498 track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
1499 track->arrays[i + 0].esize &= 0x7F;
1500 }
1501 break; 1458 break;
1502 case PACKET3_INDX_BUFFER: 1459 case PACKET3_INDX_BUFFER:
1503 r = r100_cs_packet_next_reloc(p, &reloc); 1460 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1506,7 +1463,7 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1506 r100_cs_dump_packet(p, pkt); 1463 r100_cs_dump_packet(p, pkt);
1507 return r; 1464 return r;
1508 } 1465 }
1509 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); 1466 ib[idx+1] = radeon_get_ib_value(p, idx+1) + ((u32)reloc->lobj.gpu_offset);
1510 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); 1467 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
1511 if (r) { 1468 if (r) {
1512 return r; 1469 return r;
@@ -1520,27 +1477,27 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1520 r100_cs_dump_packet(p, pkt); 1477 r100_cs_dump_packet(p, pkt);
1521 return r; 1478 return r;
1522 } 1479 }
1523 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1480 ib[idx] = radeon_get_ib_value(p, idx) + ((u32)reloc->lobj.gpu_offset);
1524 track->num_arrays = 1; 1481 track->num_arrays = 1;
1525 track->vtx_size = r100_get_vtx_size(ib_chunk->kdata[idx+2]); 1482 track->vtx_size = r100_get_vtx_size(radeon_get_ib_value(p, idx + 2));
1526 1483
1527 track->arrays[0].robj = reloc->robj; 1484 track->arrays[0].robj = reloc->robj;
1528 track->arrays[0].esize = track->vtx_size; 1485 track->arrays[0].esize = track->vtx_size;
1529 1486
1530 track->max_indx = ib_chunk->kdata[idx+1]; 1487 track->max_indx = radeon_get_ib_value(p, idx+1);
1531 1488
1532 track->vap_vf_cntl = ib_chunk->kdata[idx+3]; 1489 track->vap_vf_cntl = radeon_get_ib_value(p, idx+3);
1533 track->immd_dwords = pkt->count - 1; 1490 track->immd_dwords = pkt->count - 1;
1534 r = r100_cs_track_check(p->rdev, track); 1491 r = r100_cs_track_check(p->rdev, track);
1535 if (r) 1492 if (r)
1536 return r; 1493 return r;
1537 break; 1494 break;
1538 case PACKET3_3D_DRAW_IMMD: 1495 case PACKET3_3D_DRAW_IMMD:
1539 if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) { 1496 if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
1540 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1497 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1541 return -EINVAL; 1498 return -EINVAL;
1542 } 1499 }
1543 track->vap_vf_cntl = ib_chunk->kdata[idx+1]; 1500 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1544 track->immd_dwords = pkt->count - 1; 1501 track->immd_dwords = pkt->count - 1;
1545 r = r100_cs_track_check(p->rdev, track); 1502 r = r100_cs_track_check(p->rdev, track);
1546 if (r) 1503 if (r)
@@ -1548,11 +1505,11 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1548 break; 1505 break;
1549 /* triggers drawing using in-packet vertex data */ 1506 /* triggers drawing using in-packet vertex data */
1550 case PACKET3_3D_DRAW_IMMD_2: 1507 case PACKET3_3D_DRAW_IMMD_2:
1551 if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) { 1508 if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
1552 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1509 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1553 return -EINVAL; 1510 return -EINVAL;
1554 } 1511 }
1555 track->vap_vf_cntl = ib_chunk->kdata[idx]; 1512 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1556 track->immd_dwords = pkt->count; 1513 track->immd_dwords = pkt->count;
1557 r = r100_cs_track_check(p->rdev, track); 1514 r = r100_cs_track_check(p->rdev, track);
1558 if (r) 1515 if (r)
@@ -1560,28 +1517,28 @@ static int r100_packet3_check(struct radeon_cs_parser *p,
1560 break; 1517 break;
1561 /* triggers drawing using in-packet vertex data */ 1518 /* triggers drawing using in-packet vertex data */
1562 case PACKET3_3D_DRAW_VBUF_2: 1519 case PACKET3_3D_DRAW_VBUF_2:
1563 track->vap_vf_cntl = ib_chunk->kdata[idx]; 1520 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1564 r = r100_cs_track_check(p->rdev, track); 1521 r = r100_cs_track_check(p->rdev, track);
1565 if (r) 1522 if (r)
1566 return r; 1523 return r;
1567 break; 1524 break;
1568 /* triggers drawing of vertex buffers setup elsewhere */ 1525 /* triggers drawing of vertex buffers setup elsewhere */
1569 case PACKET3_3D_DRAW_INDX_2: 1526 case PACKET3_3D_DRAW_INDX_2:
1570 track->vap_vf_cntl = ib_chunk->kdata[idx]; 1527 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1571 r = r100_cs_track_check(p->rdev, track); 1528 r = r100_cs_track_check(p->rdev, track);
1572 if (r) 1529 if (r)
1573 return r; 1530 return r;
1574 break; 1531 break;
1575 /* triggers drawing using indices to vertex buffer */ 1532 /* triggers drawing using indices to vertex buffer */
1576 case PACKET3_3D_DRAW_VBUF: 1533 case PACKET3_3D_DRAW_VBUF:
1577 track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; 1534 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1578 r = r100_cs_track_check(p->rdev, track); 1535 r = r100_cs_track_check(p->rdev, track);
1579 if (r) 1536 if (r)
1580 return r; 1537 return r;
1581 break; 1538 break;
1582 /* triggers drawing of vertex buffers setup elsewhere */ 1539 /* triggers drawing of vertex buffers setup elsewhere */
1583 case PACKET3_3D_DRAW_INDX: 1540 case PACKET3_3D_DRAW_INDX:
1584 track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; 1541 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1585 r = r100_cs_track_check(p->rdev, track); 1542 r = r100_cs_track_check(p->rdev, track);
1586 if (r) 1543 if (r)
1587 return r; 1544 return r;
diff --git a/drivers/gpu/drm/radeon/r100_track.h b/drivers/gpu/drm/radeon/r100_track.h
index 70a82eda394a..0daf0d76a891 100644
--- a/drivers/gpu/drm/radeon/r100_track.h
+++ b/drivers/gpu/drm/radeon/r100_track.h
@@ -84,6 +84,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
84 struct radeon_cs_packet *pkt, 84 struct radeon_cs_packet *pkt,
85 unsigned idx, unsigned reg); 85 unsigned idx, unsigned reg);
86 86
87
88
87static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p, 89static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
88 struct radeon_cs_packet *pkt, 90 struct radeon_cs_packet *pkt,
89 unsigned idx, 91 unsigned idx,
@@ -93,9 +95,7 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
93 u32 tile_flags = 0; 95 u32 tile_flags = 0;
94 u32 tmp; 96 u32 tmp;
95 struct radeon_cs_reloc *reloc; 97 struct radeon_cs_reloc *reloc;
96 struct radeon_cs_chunk *ib_chunk; 98 u32 value;
97
98 ib_chunk = &p->chunks[p->chunk_ib_idx];
99 99
100 r = r100_cs_packet_next_reloc(p, &reloc); 100 r = r100_cs_packet_next_reloc(p, &reloc);
101 if (r) { 101 if (r) {
@@ -104,7 +104,8 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
104 r100_cs_dump_packet(p, pkt); 104 r100_cs_dump_packet(p, pkt);
105 return r; 105 return r;
106 } 106 }
107 tmp = ib_chunk->kdata[idx] & 0x003fffff; 107 value = radeon_get_ib_value(p, idx);
108 tmp = value & 0x003fffff;
108 tmp += (((u32)reloc->lobj.gpu_offset) >> 10); 109 tmp += (((u32)reloc->lobj.gpu_offset) >> 10);
109 110
110 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO) 111 if (reloc->lobj.tiling_flags & RADEON_TILING_MACRO)
@@ -119,6 +120,64 @@ static inline int r100_reloc_pitch_offset(struct radeon_cs_parser *p,
119 } 120 }
120 121
121 tmp |= tile_flags; 122 tmp |= tile_flags;
122 p->ib->ptr[idx] = (ib_chunk->kdata[idx] & 0x3fc00000) | tmp; 123 p->ib->ptr[idx] = (value & 0x3fc00000) | tmp;
123 return 0; 124 return 0;
124} 125}
126
127static inline int r100_packet3_load_vbpntr(struct radeon_cs_parser *p,
128 struct radeon_cs_packet *pkt,
129 int idx)
130{
131 unsigned c, i;
132 struct radeon_cs_reloc *reloc;
133 struct r100_cs_track *track;
134 int r = 0;
135 volatile uint32_t *ib;
136 u32 idx_value;
137
138 ib = p->ib->ptr;
139 track = (struct r100_cs_track *)p->track;
140 c = radeon_get_ib_value(p, idx++) & 0x1F;
141 track->num_arrays = c;
142 for (i = 0; i < (c - 1); i+=2, idx+=3) {
143 r = r100_cs_packet_next_reloc(p, &reloc);
144 if (r) {
145 DRM_ERROR("No reloc for packet3 %d\n",
146 pkt->opcode);
147 r100_cs_dump_packet(p, pkt);
148 return r;
149 }
150 idx_value = radeon_get_ib_value(p, idx);
151 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
152
153 track->arrays[i + 0].esize = idx_value >> 8;
154 track->arrays[i + 0].robj = reloc->robj;
155 track->arrays[i + 0].esize &= 0x7F;
156 r = r100_cs_packet_next_reloc(p, &reloc);
157 if (r) {
158 DRM_ERROR("No reloc for packet3 %d\n",
159 pkt->opcode);
160 r100_cs_dump_packet(p, pkt);
161 return r;
162 }
163 ib[idx+2] = radeon_get_ib_value(p, idx + 2) + ((u32)reloc->lobj.gpu_offset);
164 track->arrays[i + 1].robj = reloc->robj;
165 track->arrays[i + 1].esize = idx_value >> 24;
166 track->arrays[i + 1].esize &= 0x7F;
167 }
168 if (c & 1) {
169 r = r100_cs_packet_next_reloc(p, &reloc);
170 if (r) {
171 DRM_ERROR("No reloc for packet3 %d\n",
172 pkt->opcode);
173 r100_cs_dump_packet(p, pkt);
174 return r;
175 }
176 idx_value = radeon_get_ib_value(p, idx);
177 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
178 track->arrays[i + 0].robj = reloc->robj;
179 track->arrays[i + 0].esize = idx_value >> 8;
180 track->arrays[i + 0].esize &= 0x7F;
181 }
182 return r;
183}
diff --git a/drivers/gpu/drm/radeon/r200.c b/drivers/gpu/drm/radeon/r200.c
index 568c74bfba3d..cf7fea5ff2e5 100644
--- a/drivers/gpu/drm/radeon/r200.c
+++ b/drivers/gpu/drm/radeon/r200.c
@@ -96,7 +96,6 @@ int r200_packet0_check(struct radeon_cs_parser *p,
96 struct radeon_cs_packet *pkt, 96 struct radeon_cs_packet *pkt,
97 unsigned idx, unsigned reg) 97 unsigned idx, unsigned reg)
98{ 98{
99 struct radeon_cs_chunk *ib_chunk;
100 struct radeon_cs_reloc *reloc; 99 struct radeon_cs_reloc *reloc;
101 struct r100_cs_track *track; 100 struct r100_cs_track *track;
102 volatile uint32_t *ib; 101 volatile uint32_t *ib;
@@ -105,11 +104,11 @@ int r200_packet0_check(struct radeon_cs_parser *p,
105 int i; 104 int i;
106 int face; 105 int face;
107 u32 tile_flags = 0; 106 u32 tile_flags = 0;
107 u32 idx_value;
108 108
109 ib = p->ib->ptr; 109 ib = p->ib->ptr;
110 ib_chunk = &p->chunks[p->chunk_ib_idx];
111 track = (struct r100_cs_track *)p->track; 110 track = (struct r100_cs_track *)p->track;
112 111 idx_value = radeon_get_ib_value(p, idx);
113 switch (reg) { 112 switch (reg) {
114 case RADEON_CRTC_GUI_TRIG_VLINE: 113 case RADEON_CRTC_GUI_TRIG_VLINE:
115 r = r100_cs_packet_parse_vline(p); 114 r = r100_cs_packet_parse_vline(p);
@@ -137,8 +136,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
137 return r; 136 return r;
138 } 137 }
139 track->zb.robj = reloc->robj; 138 track->zb.robj = reloc->robj;
140 track->zb.offset = ib_chunk->kdata[idx]; 139 track->zb.offset = idx_value;
141 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 140 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
142 break; 141 break;
143 case RADEON_RB3D_COLOROFFSET: 142 case RADEON_RB3D_COLOROFFSET:
144 r = r100_cs_packet_next_reloc(p, &reloc); 143 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -149,8 +148,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
149 return r; 148 return r;
150 } 149 }
151 track->cb[0].robj = reloc->robj; 150 track->cb[0].robj = reloc->robj;
152 track->cb[0].offset = ib_chunk->kdata[idx]; 151 track->cb[0].offset = idx_value;
153 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 152 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
154 break; 153 break;
155 case R200_PP_TXOFFSET_0: 154 case R200_PP_TXOFFSET_0:
156 case R200_PP_TXOFFSET_1: 155 case R200_PP_TXOFFSET_1:
@@ -166,7 +165,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
166 r100_cs_dump_packet(p, pkt); 165 r100_cs_dump_packet(p, pkt);
167 return r; 166 return r;
168 } 167 }
169 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 168 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
170 track->textures[i].robj = reloc->robj; 169 track->textures[i].robj = reloc->robj;
171 break; 170 break;
172 case R200_PP_CUBIC_OFFSET_F1_0: 171 case R200_PP_CUBIC_OFFSET_F1_0:
@@ -208,12 +207,12 @@ int r200_packet0_check(struct radeon_cs_parser *p,
208 r100_cs_dump_packet(p, pkt); 207 r100_cs_dump_packet(p, pkt);
209 return r; 208 return r;
210 } 209 }
211 track->textures[i].cube_info[face - 1].offset = ib_chunk->kdata[idx]; 210 track->textures[i].cube_info[face - 1].offset = idx_value;
212 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 211 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
213 track->textures[i].cube_info[face - 1].robj = reloc->robj; 212 track->textures[i].cube_info[face - 1].robj = reloc->robj;
214 break; 213 break;
215 case RADEON_RE_WIDTH_HEIGHT: 214 case RADEON_RE_WIDTH_HEIGHT:
216 track->maxy = ((ib_chunk->kdata[idx] >> 16) & 0x7FF); 215 track->maxy = ((idx_value >> 16) & 0x7FF);
217 break; 216 break;
218 case RADEON_RB3D_COLORPITCH: 217 case RADEON_RB3D_COLORPITCH:
219 r = r100_cs_packet_next_reloc(p, &reloc); 218 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -229,17 +228,17 @@ int r200_packet0_check(struct radeon_cs_parser *p,
229 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 228 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
230 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE; 229 tile_flags |= RADEON_COLOR_MICROTILE_ENABLE;
231 230
232 tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); 231 tmp = idx_value & ~(0x7 << 16);
233 tmp |= tile_flags; 232 tmp |= tile_flags;
234 ib[idx] = tmp; 233 ib[idx] = tmp;
235 234
236 track->cb[0].pitch = ib_chunk->kdata[idx] & RADEON_COLORPITCH_MASK; 235 track->cb[0].pitch = idx_value & RADEON_COLORPITCH_MASK;
237 break; 236 break;
238 case RADEON_RB3D_DEPTHPITCH: 237 case RADEON_RB3D_DEPTHPITCH:
239 track->zb.pitch = ib_chunk->kdata[idx] & RADEON_DEPTHPITCH_MASK; 238 track->zb.pitch = idx_value & RADEON_DEPTHPITCH_MASK;
240 break; 239 break;
241 case RADEON_RB3D_CNTL: 240 case RADEON_RB3D_CNTL:
242 switch ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) { 241 switch ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f) {
243 case 7: 242 case 7:
244 case 8: 243 case 8:
245 case 9: 244 case 9:
@@ -257,18 +256,18 @@ int r200_packet0_check(struct radeon_cs_parser *p,
257 break; 256 break;
258 default: 257 default:
259 DRM_ERROR("Invalid color buffer format (%d) !\n", 258 DRM_ERROR("Invalid color buffer format (%d) !\n",
260 ((ib_chunk->kdata[idx] >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f)); 259 ((idx_value >> RADEON_RB3D_COLOR_FORMAT_SHIFT) & 0x1f));
261 return -EINVAL; 260 return -EINVAL;
262 } 261 }
263 if (ib_chunk->kdata[idx] & RADEON_DEPTHXY_OFFSET_ENABLE) { 262 if (idx_value & RADEON_DEPTHXY_OFFSET_ENABLE) {
264 DRM_ERROR("No support for depth xy offset in kms\n"); 263 DRM_ERROR("No support for depth xy offset in kms\n");
265 return -EINVAL; 264 return -EINVAL;
266 } 265 }
267 266
268 track->z_enabled = !!(ib_chunk->kdata[idx] & RADEON_Z_ENABLE); 267 track->z_enabled = !!(idx_value & RADEON_Z_ENABLE);
269 break; 268 break;
270 case RADEON_RB3D_ZSTENCILCNTL: 269 case RADEON_RB3D_ZSTENCILCNTL:
271 switch (ib_chunk->kdata[idx] & 0xf) { 270 switch (idx_value & 0xf) {
272 case 0: 271 case 0:
273 track->zb.cpp = 2; 272 track->zb.cpp = 2;
274 break; 273 break;
@@ -292,27 +291,27 @@ int r200_packet0_check(struct radeon_cs_parser *p,
292 r100_cs_dump_packet(p, pkt); 291 r100_cs_dump_packet(p, pkt);
293 return r; 292 return r;
294 } 293 }
295 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 294 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
296 break; 295 break;
297 case RADEON_PP_CNTL: 296 case RADEON_PP_CNTL:
298 { 297 {
299 uint32_t temp = ib_chunk->kdata[idx] >> 4; 298 uint32_t temp = idx_value >> 4;
300 for (i = 0; i < track->num_texture; i++) 299 for (i = 0; i < track->num_texture; i++)
301 track->textures[i].enabled = !!(temp & (1 << i)); 300 track->textures[i].enabled = !!(temp & (1 << i));
302 } 301 }
303 break; 302 break;
304 case RADEON_SE_VF_CNTL: 303 case RADEON_SE_VF_CNTL:
305 track->vap_vf_cntl = ib_chunk->kdata[idx]; 304 track->vap_vf_cntl = idx_value;
306 break; 305 break;
307 case 0x210c: 306 case 0x210c:
308 /* VAP_VF_MAX_VTX_INDX */ 307 /* VAP_VF_MAX_VTX_INDX */
309 track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL; 308 track->max_indx = idx_value & 0x00FFFFFFUL;
310 break; 309 break;
311 case R200_SE_VTX_FMT_0: 310 case R200_SE_VTX_FMT_0:
312 track->vtx_size = r200_get_vtx_size_0(ib_chunk->kdata[idx]); 311 track->vtx_size = r200_get_vtx_size_0(idx_value);
313 break; 312 break;
314 case R200_SE_VTX_FMT_1: 313 case R200_SE_VTX_FMT_1:
315 track->vtx_size += r200_get_vtx_size_1(ib_chunk->kdata[idx]); 314 track->vtx_size += r200_get_vtx_size_1(idx_value);
316 break; 315 break;
317 case R200_PP_TXSIZE_0: 316 case R200_PP_TXSIZE_0:
318 case R200_PP_TXSIZE_1: 317 case R200_PP_TXSIZE_1:
@@ -321,8 +320,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
321 case R200_PP_TXSIZE_4: 320 case R200_PP_TXSIZE_4:
322 case R200_PP_TXSIZE_5: 321 case R200_PP_TXSIZE_5:
323 i = (reg - R200_PP_TXSIZE_0) / 32; 322 i = (reg - R200_PP_TXSIZE_0) / 32;
324 track->textures[i].width = (ib_chunk->kdata[idx] & RADEON_TEX_USIZE_MASK) + 1; 323 track->textures[i].width = (idx_value & RADEON_TEX_USIZE_MASK) + 1;
325 track->textures[i].height = ((ib_chunk->kdata[idx] & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1; 324 track->textures[i].height = ((idx_value & RADEON_TEX_VSIZE_MASK) >> RADEON_TEX_VSIZE_SHIFT) + 1;
326 break; 325 break;
327 case R200_PP_TXPITCH_0: 326 case R200_PP_TXPITCH_0:
328 case R200_PP_TXPITCH_1: 327 case R200_PP_TXPITCH_1:
@@ -331,7 +330,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
331 case R200_PP_TXPITCH_4: 330 case R200_PP_TXPITCH_4:
332 case R200_PP_TXPITCH_5: 331 case R200_PP_TXPITCH_5:
333 i = (reg - R200_PP_TXPITCH_0) / 32; 332 i = (reg - R200_PP_TXPITCH_0) / 32;
334 track->textures[i].pitch = ib_chunk->kdata[idx] + 32; 333 track->textures[i].pitch = idx_value + 32;
335 break; 334 break;
336 case R200_PP_TXFILTER_0: 335 case R200_PP_TXFILTER_0:
337 case R200_PP_TXFILTER_1: 336 case R200_PP_TXFILTER_1:
@@ -340,12 +339,12 @@ int r200_packet0_check(struct radeon_cs_parser *p,
340 case R200_PP_TXFILTER_4: 339 case R200_PP_TXFILTER_4:
341 case R200_PP_TXFILTER_5: 340 case R200_PP_TXFILTER_5:
342 i = (reg - R200_PP_TXFILTER_0) / 32; 341 i = (reg - R200_PP_TXFILTER_0) / 32;
343 track->textures[i].num_levels = ((ib_chunk->kdata[idx] & R200_MAX_MIP_LEVEL_MASK) 342 track->textures[i].num_levels = ((idx_value & R200_MAX_MIP_LEVEL_MASK)
344 >> R200_MAX_MIP_LEVEL_SHIFT); 343 >> R200_MAX_MIP_LEVEL_SHIFT);
345 tmp = (ib_chunk->kdata[idx] >> 23) & 0x7; 344 tmp = (idx_value >> 23) & 0x7;
346 if (tmp == 2 || tmp == 6) 345 if (tmp == 2 || tmp == 6)
347 track->textures[i].roundup_w = false; 346 track->textures[i].roundup_w = false;
348 tmp = (ib_chunk->kdata[idx] >> 27) & 0x7; 347 tmp = (idx_value >> 27) & 0x7;
349 if (tmp == 2 || tmp == 6) 348 if (tmp == 2 || tmp == 6)
350 track->textures[i].roundup_h = false; 349 track->textures[i].roundup_h = false;
351 break; 350 break;
@@ -364,8 +363,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
364 case R200_PP_TXFORMAT_X_4: 363 case R200_PP_TXFORMAT_X_4:
365 case R200_PP_TXFORMAT_X_5: 364 case R200_PP_TXFORMAT_X_5:
366 i = (reg - R200_PP_TXFORMAT_X_0) / 32; 365 i = (reg - R200_PP_TXFORMAT_X_0) / 32;
367 track->textures[i].txdepth = ib_chunk->kdata[idx] & 0x7; 366 track->textures[i].txdepth = idx_value & 0x7;
368 tmp = (ib_chunk->kdata[idx] >> 16) & 0x3; 367 tmp = (idx_value >> 16) & 0x3;
369 /* 2D, 3D, CUBE */ 368 /* 2D, 3D, CUBE */
370 switch (tmp) { 369 switch (tmp) {
371 case 0: 370 case 0:
@@ -389,14 +388,14 @@ int r200_packet0_check(struct radeon_cs_parser *p,
389 case R200_PP_TXFORMAT_4: 388 case R200_PP_TXFORMAT_4:
390 case R200_PP_TXFORMAT_5: 389 case R200_PP_TXFORMAT_5:
391 i = (reg - R200_PP_TXFORMAT_0) / 32; 390 i = (reg - R200_PP_TXFORMAT_0) / 32;
392 if (ib_chunk->kdata[idx] & R200_TXFORMAT_NON_POWER2) { 391 if (idx_value & R200_TXFORMAT_NON_POWER2) {
393 track->textures[i].use_pitch = 1; 392 track->textures[i].use_pitch = 1;
394 } else { 393 } else {
395 track->textures[i].use_pitch = 0; 394 track->textures[i].use_pitch = 0;
396 track->textures[i].width = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK); 395 track->textures[i].width = 1 << ((idx_value >> RADEON_TXFORMAT_WIDTH_SHIFT) & RADEON_TXFORMAT_WIDTH_MASK);
397 track->textures[i].height = 1 << ((ib_chunk->kdata[idx] >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK); 396 track->textures[i].height = 1 << ((idx_value >> RADEON_TXFORMAT_HEIGHT_SHIFT) & RADEON_TXFORMAT_HEIGHT_MASK);
398 } 397 }
399 switch ((ib_chunk->kdata[idx] & RADEON_TXFORMAT_FORMAT_MASK)) { 398 switch ((idx_value & RADEON_TXFORMAT_FORMAT_MASK)) {
400 case R200_TXFORMAT_I8: 399 case R200_TXFORMAT_I8:
401 case R200_TXFORMAT_RGB332: 400 case R200_TXFORMAT_RGB332:
402 case R200_TXFORMAT_Y8: 401 case R200_TXFORMAT_Y8:
@@ -424,8 +423,8 @@ int r200_packet0_check(struct radeon_cs_parser *p,
424 track->textures[i].cpp = 4; 423 track->textures[i].cpp = 4;
425 break; 424 break;
426 } 425 }
427 track->textures[i].cube_info[4].width = 1 << ((ib_chunk->kdata[idx] >> 16) & 0xf); 426 track->textures[i].cube_info[4].width = 1 << ((idx_value >> 16) & 0xf);
428 track->textures[i].cube_info[4].height = 1 << ((ib_chunk->kdata[idx] >> 20) & 0xf); 427 track->textures[i].cube_info[4].height = 1 << ((idx_value >> 20) & 0xf);
429 break; 428 break;
430 case R200_PP_CUBIC_FACES_0: 429 case R200_PP_CUBIC_FACES_0:
431 case R200_PP_CUBIC_FACES_1: 430 case R200_PP_CUBIC_FACES_1:
@@ -433,7 +432,7 @@ int r200_packet0_check(struct radeon_cs_parser *p,
433 case R200_PP_CUBIC_FACES_3: 432 case R200_PP_CUBIC_FACES_3:
434 case R200_PP_CUBIC_FACES_4: 433 case R200_PP_CUBIC_FACES_4:
435 case R200_PP_CUBIC_FACES_5: 434 case R200_PP_CUBIC_FACES_5:
436 tmp = ib_chunk->kdata[idx]; 435 tmp = idx_value;
437 i = (reg - R200_PP_CUBIC_FACES_0) / 32; 436 i = (reg - R200_PP_CUBIC_FACES_0) / 32;
438 for (face = 0; face < 4; face++) { 437 for (face = 0; face < 4; face++) {
439 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf); 438 track->textures[i].cube_info[face].width = 1 << ((tmp >> (face * 8)) & 0xf);
diff --git a/drivers/gpu/drm/radeon/r300.c b/drivers/gpu/drm/radeon/r300.c
index bb151ecdf8fc..1ebea8cc8c93 100644
--- a/drivers/gpu/drm/radeon/r300.c
+++ b/drivers/gpu/drm/radeon/r300.c
@@ -697,17 +697,18 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
697 struct radeon_cs_packet *pkt, 697 struct radeon_cs_packet *pkt,
698 unsigned idx, unsigned reg) 698 unsigned idx, unsigned reg)
699{ 699{
700 struct radeon_cs_chunk *ib_chunk;
701 struct radeon_cs_reloc *reloc; 700 struct radeon_cs_reloc *reloc;
702 struct r100_cs_track *track; 701 struct r100_cs_track *track;
703 volatile uint32_t *ib; 702 volatile uint32_t *ib;
704 uint32_t tmp, tile_flags = 0; 703 uint32_t tmp, tile_flags = 0;
705 unsigned i; 704 unsigned i;
706 int r; 705 int r;
706 u32 idx_value;
707 707
708 ib = p->ib->ptr; 708 ib = p->ib->ptr;
709 ib_chunk = &p->chunks[p->chunk_ib_idx];
710 track = (struct r100_cs_track *)p->track; 709 track = (struct r100_cs_track *)p->track;
710 idx_value = radeon_get_ib_value(p, idx);
711
711 switch(reg) { 712 switch(reg) {
712 case AVIVO_D1MODE_VLINE_START_END: 713 case AVIVO_D1MODE_VLINE_START_END:
713 case RADEON_CRTC_GUI_TRIG_VLINE: 714 case RADEON_CRTC_GUI_TRIG_VLINE:
@@ -738,8 +739,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
738 return r; 739 return r;
739 } 740 }
740 track->cb[i].robj = reloc->robj; 741 track->cb[i].robj = reloc->robj;
741 track->cb[i].offset = ib_chunk->kdata[idx]; 742 track->cb[i].offset = idx_value;
742 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 743 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
743 break; 744 break;
744 case R300_ZB_DEPTHOFFSET: 745 case R300_ZB_DEPTHOFFSET:
745 r = r100_cs_packet_next_reloc(p, &reloc); 746 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -750,8 +751,8 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
750 return r; 751 return r;
751 } 752 }
752 track->zb.robj = reloc->robj; 753 track->zb.robj = reloc->robj;
753 track->zb.offset = ib_chunk->kdata[idx]; 754 track->zb.offset = idx_value;
754 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 755 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
755 break; 756 break;
756 case R300_TX_OFFSET_0: 757 case R300_TX_OFFSET_0:
757 case R300_TX_OFFSET_0+4: 758 case R300_TX_OFFSET_0+4:
@@ -777,32 +778,32 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
777 r100_cs_dump_packet(p, pkt); 778 r100_cs_dump_packet(p, pkt);
778 return r; 779 return r;
779 } 780 }
780 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 781 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
781 track->textures[i].robj = reloc->robj; 782 track->textures[i].robj = reloc->robj;
782 break; 783 break;
783 /* Tracked registers */ 784 /* Tracked registers */
784 case 0x2084: 785 case 0x2084:
785 /* VAP_VF_CNTL */ 786 /* VAP_VF_CNTL */
786 track->vap_vf_cntl = ib_chunk->kdata[idx]; 787 track->vap_vf_cntl = idx_value;
787 break; 788 break;
788 case 0x20B4: 789 case 0x20B4:
789 /* VAP_VTX_SIZE */ 790 /* VAP_VTX_SIZE */
790 track->vtx_size = ib_chunk->kdata[idx] & 0x7F; 791 track->vtx_size = idx_value & 0x7F;
791 break; 792 break;
792 case 0x2134: 793 case 0x2134:
793 /* VAP_VF_MAX_VTX_INDX */ 794 /* VAP_VF_MAX_VTX_INDX */
794 track->max_indx = ib_chunk->kdata[idx] & 0x00FFFFFFUL; 795 track->max_indx = idx_value & 0x00FFFFFFUL;
795 break; 796 break;
796 case 0x43E4: 797 case 0x43E4:
797 /* SC_SCISSOR1 */ 798 /* SC_SCISSOR1 */
798 track->maxy = ((ib_chunk->kdata[idx] >> 13) & 0x1FFF) + 1; 799 track->maxy = ((idx_value >> 13) & 0x1FFF) + 1;
799 if (p->rdev->family < CHIP_RV515) { 800 if (p->rdev->family < CHIP_RV515) {
800 track->maxy -= 1440; 801 track->maxy -= 1440;
801 } 802 }
802 break; 803 break;
803 case 0x4E00: 804 case 0x4E00:
804 /* RB3D_CCTL */ 805 /* RB3D_CCTL */
805 track->num_cb = ((ib_chunk->kdata[idx] >> 5) & 0x3) + 1; 806 track->num_cb = ((idx_value >> 5) & 0x3) + 1;
806 break; 807 break;
807 case 0x4E38: 808 case 0x4E38:
808 case 0x4E3C: 809 case 0x4E3C:
@@ -825,13 +826,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
825 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 826 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
826 tile_flags |= R300_COLOR_MICROTILE_ENABLE; 827 tile_flags |= R300_COLOR_MICROTILE_ENABLE;
827 828
828 tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); 829 tmp = idx_value & ~(0x7 << 16);
829 tmp |= tile_flags; 830 tmp |= tile_flags;
830 ib[idx] = tmp; 831 ib[idx] = tmp;
831 832
832 i = (reg - 0x4E38) >> 2; 833 i = (reg - 0x4E38) >> 2;
833 track->cb[i].pitch = ib_chunk->kdata[idx] & 0x3FFE; 834 track->cb[i].pitch = idx_value & 0x3FFE;
834 switch (((ib_chunk->kdata[idx] >> 21) & 0xF)) { 835 switch (((idx_value >> 21) & 0xF)) {
835 case 9: 836 case 9:
836 case 11: 837 case 11:
837 case 12: 838 case 12:
@@ -854,13 +855,13 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
854 break; 855 break;
855 default: 856 default:
856 DRM_ERROR("Invalid color buffer format (%d) !\n", 857 DRM_ERROR("Invalid color buffer format (%d) !\n",
857 ((ib_chunk->kdata[idx] >> 21) & 0xF)); 858 ((idx_value >> 21) & 0xF));
858 return -EINVAL; 859 return -EINVAL;
859 } 860 }
860 break; 861 break;
861 case 0x4F00: 862 case 0x4F00:
862 /* ZB_CNTL */ 863 /* ZB_CNTL */
863 if (ib_chunk->kdata[idx] & 2) { 864 if (idx_value & 2) {
864 track->z_enabled = true; 865 track->z_enabled = true;
865 } else { 866 } else {
866 track->z_enabled = false; 867 track->z_enabled = false;
@@ -868,7 +869,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
868 break; 869 break;
869 case 0x4F10: 870 case 0x4F10:
870 /* ZB_FORMAT */ 871 /* ZB_FORMAT */
871 switch ((ib_chunk->kdata[idx] & 0xF)) { 872 switch ((idx_value & 0xF)) {
872 case 0: 873 case 0:
873 case 1: 874 case 1:
874 track->zb.cpp = 2; 875 track->zb.cpp = 2;
@@ -878,7 +879,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
878 break; 879 break;
879 default: 880 default:
880 DRM_ERROR("Invalid z buffer format (%d) !\n", 881 DRM_ERROR("Invalid z buffer format (%d) !\n",
881 (ib_chunk->kdata[idx] & 0xF)); 882 (idx_value & 0xF));
882 return -EINVAL; 883 return -EINVAL;
883 } 884 }
884 break; 885 break;
@@ -897,17 +898,17 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
897 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO) 898 if (reloc->lobj.tiling_flags & RADEON_TILING_MICRO)
898 tile_flags |= R300_DEPTHMICROTILE_TILED;; 899 tile_flags |= R300_DEPTHMICROTILE_TILED;;
899 900
900 tmp = ib_chunk->kdata[idx] & ~(0x7 << 16); 901 tmp = idx_value & ~(0x7 << 16);
901 tmp |= tile_flags; 902 tmp |= tile_flags;
902 ib[idx] = tmp; 903 ib[idx] = tmp;
903 904
904 track->zb.pitch = ib_chunk->kdata[idx] & 0x3FFC; 905 track->zb.pitch = idx_value & 0x3FFC;
905 break; 906 break;
906 case 0x4104: 907 case 0x4104:
907 for (i = 0; i < 16; i++) { 908 for (i = 0; i < 16; i++) {
908 bool enabled; 909 bool enabled;
909 910
910 enabled = !!(ib_chunk->kdata[idx] & (1 << i)); 911 enabled = !!(idx_value & (1 << i));
911 track->textures[i].enabled = enabled; 912 track->textures[i].enabled = enabled;
912 } 913 }
913 break; 914 break;
@@ -929,9 +930,9 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
929 case 0x44FC: 930 case 0x44FC:
930 /* TX_FORMAT1_[0-15] */ 931 /* TX_FORMAT1_[0-15] */
931 i = (reg - 0x44C0) >> 2; 932 i = (reg - 0x44C0) >> 2;
932 tmp = (ib_chunk->kdata[idx] >> 25) & 0x3; 933 tmp = (idx_value >> 25) & 0x3;
933 track->textures[i].tex_coord_type = tmp; 934 track->textures[i].tex_coord_type = tmp;
934 switch ((ib_chunk->kdata[idx] & 0x1F)) { 935 switch ((idx_value & 0x1F)) {
935 case R300_TX_FORMAT_X8: 936 case R300_TX_FORMAT_X8:
936 case R300_TX_FORMAT_Y4X4: 937 case R300_TX_FORMAT_Y4X4:
937 case R300_TX_FORMAT_Z3Y3X2: 938 case R300_TX_FORMAT_Z3Y3X2:
@@ -971,7 +972,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
971 break; 972 break;
972 default: 973 default:
973 DRM_ERROR("Invalid texture format %u\n", 974 DRM_ERROR("Invalid texture format %u\n",
974 (ib_chunk->kdata[idx] & 0x1F)); 975 (idx_value & 0x1F));
975 return -EINVAL; 976 return -EINVAL;
976 break; 977 break;
977 } 978 }
@@ -994,11 +995,11 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
994 case 0x443C: 995 case 0x443C:
995 /* TX_FILTER0_[0-15] */ 996 /* TX_FILTER0_[0-15] */
996 i = (reg - 0x4400) >> 2; 997 i = (reg - 0x4400) >> 2;
997 tmp = ib_chunk->kdata[idx] & 0x7; 998 tmp = idx_value & 0x7;
998 if (tmp == 2 || tmp == 4 || tmp == 6) { 999 if (tmp == 2 || tmp == 4 || tmp == 6) {
999 track->textures[i].roundup_w = false; 1000 track->textures[i].roundup_w = false;
1000 } 1001 }
1001 tmp = (ib_chunk->kdata[idx] >> 3) & 0x7; 1002 tmp = (idx_value >> 3) & 0x7;
1002 if (tmp == 2 || tmp == 4 || tmp == 6) { 1003 if (tmp == 2 || tmp == 4 || tmp == 6) {
1003 track->textures[i].roundup_h = false; 1004 track->textures[i].roundup_h = false;
1004 } 1005 }
@@ -1021,12 +1022,12 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1021 case 0x453C: 1022 case 0x453C:
1022 /* TX_FORMAT2_[0-15] */ 1023 /* TX_FORMAT2_[0-15] */
1023 i = (reg - 0x4500) >> 2; 1024 i = (reg - 0x4500) >> 2;
1024 tmp = ib_chunk->kdata[idx] & 0x3FFF; 1025 tmp = idx_value & 0x3FFF;
1025 track->textures[i].pitch = tmp + 1; 1026 track->textures[i].pitch = tmp + 1;
1026 if (p->rdev->family >= CHIP_RV515) { 1027 if (p->rdev->family >= CHIP_RV515) {
1027 tmp = ((ib_chunk->kdata[idx] >> 15) & 1) << 11; 1028 tmp = ((idx_value >> 15) & 1) << 11;
1028 track->textures[i].width_11 = tmp; 1029 track->textures[i].width_11 = tmp;
1029 tmp = ((ib_chunk->kdata[idx] >> 16) & 1) << 11; 1030 tmp = ((idx_value >> 16) & 1) << 11;
1030 track->textures[i].height_11 = tmp; 1031 track->textures[i].height_11 = tmp;
1031 } 1032 }
1032 break; 1033 break;
@@ -1048,15 +1049,15 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1048 case 0x44BC: 1049 case 0x44BC:
1049 /* TX_FORMAT0_[0-15] */ 1050 /* TX_FORMAT0_[0-15] */
1050 i = (reg - 0x4480) >> 2; 1051 i = (reg - 0x4480) >> 2;
1051 tmp = ib_chunk->kdata[idx] & 0x7FF; 1052 tmp = idx_value & 0x7FF;
1052 track->textures[i].width = tmp + 1; 1053 track->textures[i].width = tmp + 1;
1053 tmp = (ib_chunk->kdata[idx] >> 11) & 0x7FF; 1054 tmp = (idx_value >> 11) & 0x7FF;
1054 track->textures[i].height = tmp + 1; 1055 track->textures[i].height = tmp + 1;
1055 tmp = (ib_chunk->kdata[idx] >> 26) & 0xF; 1056 tmp = (idx_value >> 26) & 0xF;
1056 track->textures[i].num_levels = tmp; 1057 track->textures[i].num_levels = tmp;
1057 tmp = ib_chunk->kdata[idx] & (1 << 31); 1058 tmp = idx_value & (1 << 31);
1058 track->textures[i].use_pitch = !!tmp; 1059 track->textures[i].use_pitch = !!tmp;
1059 tmp = (ib_chunk->kdata[idx] >> 22) & 0xF; 1060 tmp = (idx_value >> 22) & 0xF;
1060 track->textures[i].txdepth = tmp; 1061 track->textures[i].txdepth = tmp;
1061 break; 1062 break;
1062 case R300_ZB_ZPASS_ADDR: 1063 case R300_ZB_ZPASS_ADDR:
@@ -1067,7 +1068,7 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1067 r100_cs_dump_packet(p, pkt); 1068 r100_cs_dump_packet(p, pkt);
1068 return r; 1069 return r;
1069 } 1070 }
1070 ib[idx] = ib_chunk->kdata[idx] + ((u32)reloc->lobj.gpu_offset); 1071 ib[idx] = idx_value + ((u32)reloc->lobj.gpu_offset);
1071 break; 1072 break;
1072 case 0x4be8: 1073 case 0x4be8:
1073 /* valid register only on RV530 */ 1074 /* valid register only on RV530 */
@@ -1085,60 +1086,20 @@ static int r300_packet0_check(struct radeon_cs_parser *p,
1085static int r300_packet3_check(struct radeon_cs_parser *p, 1086static int r300_packet3_check(struct radeon_cs_parser *p,
1086 struct radeon_cs_packet *pkt) 1087 struct radeon_cs_packet *pkt)
1087{ 1088{
1088 struct radeon_cs_chunk *ib_chunk;
1089
1090 struct radeon_cs_reloc *reloc; 1089 struct radeon_cs_reloc *reloc;
1091 struct r100_cs_track *track; 1090 struct r100_cs_track *track;
1092 volatile uint32_t *ib; 1091 volatile uint32_t *ib;
1093 unsigned idx; 1092 unsigned idx;
1094 unsigned i, c;
1095 int r; 1093 int r;
1096 1094
1097 ib = p->ib->ptr; 1095 ib = p->ib->ptr;
1098 ib_chunk = &p->chunks[p->chunk_ib_idx];
1099 idx = pkt->idx + 1; 1096 idx = pkt->idx + 1;
1100 track = (struct r100_cs_track *)p->track; 1097 track = (struct r100_cs_track *)p->track;
1101 switch(pkt->opcode) { 1098 switch(pkt->opcode) {
1102 case PACKET3_3D_LOAD_VBPNTR: 1099 case PACKET3_3D_LOAD_VBPNTR:
1103 c = ib_chunk->kdata[idx++] & 0x1F; 1100 r = r100_packet3_load_vbpntr(p, pkt, idx);
1104 track->num_arrays = c; 1101 if (r)
1105 for (i = 0; i < (c - 1); i+=2, idx+=3) { 1102 return r;
1106 r = r100_cs_packet_next_reloc(p, &reloc);
1107 if (r) {
1108 DRM_ERROR("No reloc for packet3 %d\n",
1109 pkt->opcode);
1110 r100_cs_dump_packet(p, pkt);
1111 return r;
1112 }
1113 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1114 track->arrays[i + 0].robj = reloc->robj;
1115 track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
1116 track->arrays[i + 0].esize &= 0x7F;
1117 r = r100_cs_packet_next_reloc(p, &reloc);
1118 if (r) {
1119 DRM_ERROR("No reloc for packet3 %d\n",
1120 pkt->opcode);
1121 r100_cs_dump_packet(p, pkt);
1122 return r;
1123 }
1124 ib[idx+2] = ib_chunk->kdata[idx+2] + ((u32)reloc->lobj.gpu_offset);
1125 track->arrays[i + 1].robj = reloc->robj;
1126 track->arrays[i + 1].esize = ib_chunk->kdata[idx] >> 24;
1127 track->arrays[i + 1].esize &= 0x7F;
1128 }
1129 if (c & 1) {
1130 r = r100_cs_packet_next_reloc(p, &reloc);
1131 if (r) {
1132 DRM_ERROR("No reloc for packet3 %d\n",
1133 pkt->opcode);
1134 r100_cs_dump_packet(p, pkt);
1135 return r;
1136 }
1137 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset);
1138 track->arrays[i + 0].robj = reloc->robj;
1139 track->arrays[i + 0].esize = ib_chunk->kdata[idx] >> 8;
1140 track->arrays[i + 0].esize &= 0x7F;
1141 }
1142 break; 1103 break;
1143 case PACKET3_INDX_BUFFER: 1104 case PACKET3_INDX_BUFFER:
1144 r = r100_cs_packet_next_reloc(p, &reloc); 1105 r = r100_cs_packet_next_reloc(p, &reloc);
@@ -1147,7 +1108,7 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1147 r100_cs_dump_packet(p, pkt); 1108 r100_cs_dump_packet(p, pkt);
1148 return r; 1109 return r;
1149 } 1110 }
1150 ib[idx+1] = ib_chunk->kdata[idx+1] + ((u32)reloc->lobj.gpu_offset); 1111 ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->lobj.gpu_offset);
1151 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj); 1112 r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
1152 if (r) { 1113 if (r) {
1153 return r; 1114 return r;
@@ -1158,11 +1119,11 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1158 /* Number of dwords is vtx_size * (num_vertices - 1) 1119 /* Number of dwords is vtx_size * (num_vertices - 1)
1159 * PRIM_WALK must be equal to 3 vertex data in embedded 1120 * PRIM_WALK must be equal to 3 vertex data in embedded
1160 * in cmd stream */ 1121 * in cmd stream */
1161 if (((ib_chunk->kdata[idx+1] >> 4) & 0x3) != 3) { 1122 if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
1162 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1123 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1163 return -EINVAL; 1124 return -EINVAL;
1164 } 1125 }
1165 track->vap_vf_cntl = ib_chunk->kdata[idx+1]; 1126 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1166 track->immd_dwords = pkt->count - 1; 1127 track->immd_dwords = pkt->count - 1;
1167 r = r100_cs_track_check(p->rdev, track); 1128 r = r100_cs_track_check(p->rdev, track);
1168 if (r) { 1129 if (r) {
@@ -1173,11 +1134,11 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1173 /* Number of dwords is vtx_size * (num_vertices - 1) 1134 /* Number of dwords is vtx_size * (num_vertices - 1)
1174 * PRIM_WALK must be equal to 3 vertex data in embedded 1135 * PRIM_WALK must be equal to 3 vertex data in embedded
1175 * in cmd stream */ 1136 * in cmd stream */
1176 if (((ib_chunk->kdata[idx] >> 4) & 0x3) != 3) { 1137 if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
1177 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n"); 1138 DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
1178 return -EINVAL; 1139 return -EINVAL;
1179 } 1140 }
1180 track->vap_vf_cntl = ib_chunk->kdata[idx]; 1141 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1181 track->immd_dwords = pkt->count; 1142 track->immd_dwords = pkt->count;
1182 r = r100_cs_track_check(p->rdev, track); 1143 r = r100_cs_track_check(p->rdev, track);
1183 if (r) { 1144 if (r) {
@@ -1185,28 +1146,28 @@ static int r300_packet3_check(struct radeon_cs_parser *p,
1185 } 1146 }
1186 break; 1147 break;
1187 case PACKET3_3D_DRAW_VBUF: 1148 case PACKET3_3D_DRAW_VBUF:
1188 track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; 1149 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1189 r = r100_cs_track_check(p->rdev, track); 1150 r = r100_cs_track_check(p->rdev, track);
1190 if (r) { 1151 if (r) {
1191 return r; 1152 return r;
1192 } 1153 }
1193 break; 1154 break;
1194 case PACKET3_3D_DRAW_VBUF_2: 1155 case PACKET3_3D_DRAW_VBUF_2:
1195 track->vap_vf_cntl = ib_chunk->kdata[idx]; 1156 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1196 r = r100_cs_track_check(p->rdev, track); 1157 r = r100_cs_track_check(p->rdev, track);
1197 if (r) { 1158 if (r) {
1198 return r; 1159 return r;
1199 } 1160 }
1200 break; 1161 break;
1201 case PACKET3_3D_DRAW_INDX: 1162 case PACKET3_3D_DRAW_INDX:
1202 track->vap_vf_cntl = ib_chunk->kdata[idx + 1]; 1163 track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
1203 r = r100_cs_track_check(p->rdev, track); 1164 r = r100_cs_track_check(p->rdev, track);
1204 if (r) { 1165 if (r) {
1205 return r; 1166 return r;
1206 } 1167 }
1207 break; 1168 break;
1208 case PACKET3_3D_DRAW_INDX_2: 1169 case PACKET3_3D_DRAW_INDX_2:
1209 track->vap_vf_cntl = ib_chunk->kdata[idx]; 1170 track->vap_vf_cntl = radeon_get_ib_value(p, idx);
1210 r = r100_cs_track_check(p->rdev, track); 1171 r = r100_cs_track_check(p->rdev, track);
1211 if (r) { 1172 if (r) {
1212 return r; 1173 return r;
diff --git a/drivers/gpu/drm/radeon/r500_reg.h b/drivers/gpu/drm/radeon/r500_reg.h
index e1d5e0331e19..868add6e166d 100644
--- a/drivers/gpu/drm/radeon/r500_reg.h
+++ b/drivers/gpu/drm/radeon/r500_reg.h
@@ -445,6 +445,8 @@
445#define AVIVO_D1MODE_VBLANK_STATUS 0x6534 445#define AVIVO_D1MODE_VBLANK_STATUS 0x6534
446# define AVIVO_VBLANK_ACK (1 << 4) 446# define AVIVO_VBLANK_ACK (1 << 4)
447#define AVIVO_D1MODE_VLINE_START_END 0x6538 447#define AVIVO_D1MODE_VLINE_START_END 0x6538
448#define AVIVO_D1MODE_VLINE_STATUS 0x653c
449# define AVIVO_D1MODE_VLINE_STAT (1 << 12)
448#define AVIVO_DxMODE_INT_MASK 0x6540 450#define AVIVO_DxMODE_INT_MASK 0x6540
449# define AVIVO_D1MODE_INT_MASK (1 << 0) 451# define AVIVO_D1MODE_INT_MASK (1 << 0)
450# define AVIVO_D2MODE_INT_MASK (1 << 8) 452# define AVIVO_D2MODE_INT_MASK (1 << 8)
@@ -502,6 +504,7 @@
502 504
503#define AVIVO_D2MODE_VBLANK_STATUS 0x6d34 505#define AVIVO_D2MODE_VBLANK_STATUS 0x6d34
504#define AVIVO_D2MODE_VLINE_START_END 0x6d38 506#define AVIVO_D2MODE_VLINE_START_END 0x6d38
507#define AVIVO_D2MODE_VLINE_STATUS 0x6d3c
505#define AVIVO_D2MODE_VIEWPORT_START 0x6d80 508#define AVIVO_D2MODE_VIEWPORT_START 0x6d80
506#define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84 509#define AVIVO_D2MODE_VIEWPORT_SIZE 0x6d84
507#define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88 510#define AVIVO_D2MODE_EXT_OVERSCAN_LEFT_RIGHT 0x6d88
diff --git a/drivers/gpu/drm/radeon/r520.c b/drivers/gpu/drm/radeon/r520.c
index d4b0b9d2e39b..0bf13fccdaf2 100644
--- a/drivers/gpu/drm/radeon/r520.c
+++ b/drivers/gpu/drm/radeon/r520.c
@@ -26,108 +26,13 @@
26 * Jerome Glisse 26 * Jerome Glisse
27 */ 27 */
28#include "drmP.h" 28#include "drmP.h"
29#include "radeon_reg.h"
30#include "radeon.h" 29#include "radeon.h"
30#include "atom.h"
31#include "r520d.h"
31 32
32/* r520,rv530,rv560,rv570,r580 depends on : */ 33/* This files gather functions specifics to: r520,rv530,rv560,rv570,r580 */
33void r100_hdp_reset(struct radeon_device *rdev);
34void r420_pipes_init(struct radeon_device *rdev);
35void rs600_mc_disable_clients(struct radeon_device *rdev);
36void rs600_disable_vga(struct radeon_device *rdev);
37int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
38int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
39 34
40/* This files gather functions specifics to: 35static int r520_mc_wait_for_idle(struct radeon_device *rdev)
41 * r520,rv530,rv560,rv570,r580
42 *
43 * Some of these functions might be used by newer ASICs.
44 */
45void r520_gpu_init(struct radeon_device *rdev);
46int r520_mc_wait_for_idle(struct radeon_device *rdev);
47
48
49/*
50 * MC
51 */
52int r520_mc_init(struct radeon_device *rdev)
53{
54 uint32_t tmp;
55 int r;
56
57 if (r100_debugfs_rbbm_init(rdev)) {
58 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
59 }
60 if (rv515_debugfs_pipes_info_init(rdev)) {
61 DRM_ERROR("Failed to register debugfs file for pipes !\n");
62 }
63 if (rv515_debugfs_ga_info_init(rdev)) {
64 DRM_ERROR("Failed to register debugfs file for pipes !\n");
65 }
66
67 r520_gpu_init(rdev);
68 rv370_pcie_gart_disable(rdev);
69
70 /* Setup GPU memory space */
71 rdev->mc.vram_location = 0xFFFFFFFFUL;
72 rdev->mc.gtt_location = 0xFFFFFFFFUL;
73 if (rdev->flags & RADEON_IS_AGP) {
74 r = radeon_agp_init(rdev);
75 if (r) {
76 printk(KERN_WARNING "[drm] Disabling AGP\n");
77 rdev->flags &= ~RADEON_IS_AGP;
78 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
79 } else {
80 rdev->mc.gtt_location = rdev->mc.agp_base;
81 }
82 }
83 r = radeon_mc_setup(rdev);
84 if (r) {
85 return r;
86 }
87
88 /* Program GPU memory space */
89 rs600_mc_disable_clients(rdev);
90 if (r520_mc_wait_for_idle(rdev)) {
91 printk(KERN_WARNING "Failed to wait MC idle while "
92 "programming pipes. Bad things might happen.\n");
93 }
94 /* Write VRAM size in case we are limiting it */
95 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
96 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
97 tmp = REG_SET(R520_MC_FB_TOP, tmp >> 16);
98 tmp |= REG_SET(R520_MC_FB_START, rdev->mc.vram_location >> 16);
99 WREG32_MC(R520_MC_FB_LOCATION, tmp);
100 WREG32(RS690_HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
101 WREG32(0x310, rdev->mc.vram_location);
102 if (rdev->flags & RADEON_IS_AGP) {
103 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
104 tmp = REG_SET(R520_MC_AGP_TOP, tmp >> 16);
105 tmp |= REG_SET(R520_MC_AGP_START, rdev->mc.gtt_location >> 16);
106 WREG32_MC(R520_MC_AGP_LOCATION, tmp);
107 WREG32_MC(R520_MC_AGP_BASE, rdev->mc.agp_base);
108 WREG32_MC(R520_MC_AGP_BASE_2, 0);
109 } else {
110 WREG32_MC(R520_MC_AGP_LOCATION, 0x0FFFFFFF);
111 WREG32_MC(R520_MC_AGP_BASE, 0);
112 WREG32_MC(R520_MC_AGP_BASE_2, 0);
113 }
114 return 0;
115}
116
117void r520_mc_fini(struct radeon_device *rdev)
118{
119}
120
121
122/*
123 * Global GPU functions
124 */
125void r520_errata(struct radeon_device *rdev)
126{
127 rdev->pll_errata = 0;
128}
129
130int r520_mc_wait_for_idle(struct radeon_device *rdev)
131{ 36{
132 unsigned i; 37 unsigned i;
133 uint32_t tmp; 38 uint32_t tmp;
@@ -143,12 +48,12 @@ int r520_mc_wait_for_idle(struct radeon_device *rdev)
143 return -1; 48 return -1;
144} 49}
145 50
146void r520_gpu_init(struct radeon_device *rdev) 51static void r520_gpu_init(struct radeon_device *rdev)
147{ 52{
148 unsigned pipe_select_current, gb_pipe_select, tmp; 53 unsigned pipe_select_current, gb_pipe_select, tmp;
149 54
150 r100_hdp_reset(rdev); 55 r100_hdp_reset(rdev);
151 rs600_disable_vga(rdev); 56 rv515_vga_render_disable(rdev);
152 /* 57 /*
153 * DST_PIPE_CONFIG 0x170C 58 * DST_PIPE_CONFIG 0x170C
154 * GB_TILE_CONFIG 0x4018 59 * GB_TILE_CONFIG 0x4018
@@ -186,10 +91,6 @@ void r520_gpu_init(struct radeon_device *rdev)
186 } 91 }
187} 92}
188 93
189
190/*
191 * VRAM info
192 */
193static void r520_vram_get_type(struct radeon_device *rdev) 94static void r520_vram_get_type(struct radeon_device *rdev)
194{ 95{
195 uint32_t tmp; 96 uint32_t tmp;
@@ -233,7 +134,168 @@ void r520_vram_info(struct radeon_device *rdev)
233 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); 134 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
234} 135}
235 136
236void r520_bandwidth_update(struct radeon_device *rdev) 137void r520_mc_program(struct radeon_device *rdev)
138{
139 struct rv515_mc_save save;
140
141 /* Stops all mc clients */
142 rv515_mc_stop(rdev, &save);
143
144 /* Wait for mc idle */
145 if (r520_mc_wait_for_idle(rdev))
146 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
147 /* Write VRAM size in case we are limiting it */
148 WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
149 /* Program MC, should be a 32bits limited address space */
150 WREG32_MC(R_000004_MC_FB_LOCATION,
151 S_000004_MC_FB_START(rdev->mc.vram_start >> 16) |
152 S_000004_MC_FB_TOP(rdev->mc.vram_end >> 16));
153 WREG32(R_000134_HDP_FB_LOCATION,
154 S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
155 if (rdev->flags & RADEON_IS_AGP) {
156 WREG32_MC(R_000005_MC_AGP_LOCATION,
157 S_000005_MC_AGP_START(rdev->mc.gtt_start >> 16) |
158 S_000005_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
159 WREG32_MC(R_000006_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
160 WREG32_MC(R_000007_AGP_BASE_2,
161 S_000007_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
162 } else {
163 WREG32_MC(R_000005_MC_AGP_LOCATION, 0xFFFFFFFF);
164 WREG32_MC(R_000006_AGP_BASE, 0);
165 WREG32_MC(R_000007_AGP_BASE_2, 0);
166 }
167
168 rv515_mc_resume(rdev, &save);
169}
170
171static int r520_startup(struct radeon_device *rdev)
172{
173 int r;
174
175 r520_mc_program(rdev);
176 /* Resume clock */
177 rv515_clock_startup(rdev);
178 /* Initialize GPU configuration (# pipes, ...) */
179 r520_gpu_init(rdev);
180 /* Initialize GART (initialize after TTM so we can allocate
181 * memory through TTM but finalize after TTM) */
182 if (rdev->flags & RADEON_IS_PCIE) {
183 r = rv370_pcie_gart_enable(rdev);
184 if (r)
185 return r;
186 }
187 /* Enable IRQ */
188 rdev->irq.sw_int = true;
189 r100_irq_set(rdev);
190 /* 1M ring buffer */
191 r = r100_cp_init(rdev, 1024 * 1024);
192 if (r) {
193 dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
194 return r;
195 }
196 r = r100_wb_init(rdev);
197 if (r)
198 dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
199 r = r100_ib_init(rdev);
200 if (r) {
201 dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
202 return r;
203 }
204 return 0;
205}
206
207int r520_resume(struct radeon_device *rdev)
237{ 208{
238 rv515_bandwidth_avivo_update(rdev); 209 /* Make sur GART are not working */
210 if (rdev->flags & RADEON_IS_PCIE)
211 rv370_pcie_gart_disable(rdev);
212 /* Resume clock before doing reset */
213 rv515_clock_startup(rdev);
214 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
215 if (radeon_gpu_reset(rdev)) {
216 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
217 RREG32(R_000E40_RBBM_STATUS),
218 RREG32(R_0007C0_CP_STAT));
219 }
220 /* post */
221 atom_asic_init(rdev->mode_info.atom_context);
222 /* Resume clock after posting */
223 rv515_clock_startup(rdev);
224 return r520_startup(rdev);
225}
226
227int r520_init(struct radeon_device *rdev)
228{
229 int r;
230
231 rdev->new_init_path = true;
232 /* Initialize scratch registers */
233 radeon_scratch_init(rdev);
234 /* Initialize surface registers */
235 radeon_surface_init(rdev);
236 /* TODO: disable VGA need to use VGA request */
237 /* BIOS*/
238 if (!radeon_get_bios(rdev)) {
239 if (ASIC_IS_AVIVO(rdev))
240 return -EINVAL;
241 }
242 if (rdev->is_atom_bios) {
243 r = radeon_atombios_init(rdev);
244 if (r)
245 return r;
246 } else {
247 dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
248 return -EINVAL;
249 }
250 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
251 if (radeon_gpu_reset(rdev)) {
252 dev_warn(rdev->dev,
253 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
254 RREG32(R_000E40_RBBM_STATUS),
255 RREG32(R_0007C0_CP_STAT));
256 }
257 /* check if cards are posted or not */
258 if (!radeon_card_posted(rdev) && rdev->bios) {
259 DRM_INFO("GPU not posted. posting now...\n");
260 atom_asic_init(rdev->mode_info.atom_context);
261 }
262 /* Initialize clocks */
263 radeon_get_clock_info(rdev->ddev);
264 /* Get vram informations */
265 r520_vram_info(rdev);
266 /* Initialize memory controller (also test AGP) */
267 r = r420_mc_init(rdev);
268 if (r)
269 return r;
270 rv515_debugfs(rdev);
271 /* Fence driver */
272 r = radeon_fence_driver_init(rdev);
273 if (r)
274 return r;
275 r = radeon_irq_kms_init(rdev);
276 if (r)
277 return r;
278 /* Memory manager */
279 r = radeon_object_init(rdev);
280 if (r)
281 return r;
282 r = rv370_pcie_gart_init(rdev);
283 if (r)
284 return r;
285 rv515_set_safe_registers(rdev);
286 rdev->accel_working = true;
287 r = r520_startup(rdev);
288 if (r) {
289 /* Somethings want wront with the accel init stop accel */
290 dev_err(rdev->dev, "Disabling GPU acceleration\n");
291 rv515_suspend(rdev);
292 r100_cp_fini(rdev);
293 r100_wb_fini(rdev);
294 r100_ib_fini(rdev);
295 rv370_pcie_gart_fini(rdev);
296 radeon_agp_fini(rdev);
297 radeon_irq_kms_fini(rdev);
298 rdev->accel_working = false;
299 }
300 return 0;
239} 301}
diff --git a/drivers/gpu/drm/radeon/r520d.h b/drivers/gpu/drm/radeon/r520d.h
new file mode 100644
index 000000000000..61af61f644bc
--- /dev/null
+++ b/drivers/gpu/drm/radeon/r520d.h
@@ -0,0 +1,187 @@
1/*
2 * Copyright 2008 Advanced Micro Devices, Inc.
3 * Copyright 2008 Red Hat Inc.
4 * Copyright 2009 Jerome Glisse.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 * Authors: Dave Airlie
25 * Alex Deucher
26 * Jerome Glisse
27 */
28#ifndef __R520D_H__
29#define __R520D_H__
30
31/* Registers */
32#define R_0000F8_CONFIG_MEMSIZE 0x0000F8
33#define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0)
34#define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF)
35#define C_0000F8_CONFIG_MEMSIZE 0x00000000
36#define R_000134_HDP_FB_LOCATION 0x000134
37#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
38#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
39#define C_000134_HDP_FB_START 0xFFFF0000
40#define R_0007C0_CP_STAT 0x0007C0
41#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
42#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
43#define C_0007C0_MRU_BUSY 0xFFFFFFFE
44#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
45#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
46#define C_0007C0_MWU_BUSY 0xFFFFFFFD
47#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
48#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
49#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
50#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
51#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
52#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
53#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
54#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
55#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
56#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
57#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
58#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
59#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
60#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
61#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
62#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
63#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
64#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
65#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
66#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
67#define C_0007C0_CSI_BUSY 0xFFFFDFFF
68#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
69#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
70#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
71#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
72#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
73#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
74#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
75#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
76#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
77#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
78#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
79#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
80#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
81#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
82#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
83#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
84#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
85#define C_0007C0_CP_BUSY 0x7FFFFFFF
86#define R_000E40_RBBM_STATUS 0x000E40
87#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
88#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
89#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
90#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
91#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
92#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
93#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
94#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
95#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
96#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
97#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
98#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
99#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
100#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
101#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
102#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
103#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
104#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
105#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
106#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
107#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
108#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
109#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
110#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
111#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
112#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
113#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
114#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
115#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
116#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
117#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
118#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
119#define C_000E40_E2_BUSY 0xFFFDFFFF
120#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
121#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
122#define C_000E40_RB2D_BUSY 0xFFFBFFFF
123#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
124#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
125#define C_000E40_RB3D_BUSY 0xFFF7FFFF
126#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
127#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
128#define C_000E40_VAP_BUSY 0xFFEFFFFF
129#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
130#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
131#define C_000E40_RE_BUSY 0xFFDFFFFF
132#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
133#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
134#define C_000E40_TAM_BUSY 0xFFBFFFFF
135#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
136#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
137#define C_000E40_TDM_BUSY 0xFF7FFFFF
138#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
139#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
140#define C_000E40_PB_BUSY 0xFEFFFFFF
141#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
142#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
143#define C_000E40_TIM_BUSY 0xFDFFFFFF
144#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
145#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
146#define C_000E40_GA_BUSY 0xFBFFFFFF
147#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
148#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
149#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
150#define S_000E40_RBBM_HIBUSY(x) (((x) & 0x1) << 28)
151#define G_000E40_RBBM_HIBUSY(x) (((x) >> 28) & 0x1)
152#define C_000E40_RBBM_HIBUSY 0xEFFFFFFF
153#define S_000E40_SKID_CFBUSY(x) (((x) & 0x1) << 29)
154#define G_000E40_SKID_CFBUSY(x) (((x) >> 29) & 0x1)
155#define C_000E40_SKID_CFBUSY 0xDFFFFFFF
156#define S_000E40_VAP_VF_BUSY(x) (((x) & 0x1) << 30)
157#define G_000E40_VAP_VF_BUSY(x) (((x) >> 30) & 0x1)
158#define C_000E40_VAP_VF_BUSY 0xBFFFFFFF
159#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
160#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
161#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
162
163
164#define R_000004_MC_FB_LOCATION 0x000004
165#define S_000004_MC_FB_START(x) (((x) & 0xFFFF) << 0)
166#define G_000004_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
167#define C_000004_MC_FB_START 0xFFFF0000
168#define S_000004_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
169#define G_000004_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
170#define C_000004_MC_FB_TOP 0x0000FFFF
171#define R_000005_MC_AGP_LOCATION 0x000005
172#define S_000005_MC_AGP_START(x) (((x) & 0xFFFF) << 0)
173#define G_000005_MC_AGP_START(x) (((x) >> 0) & 0xFFFF)
174#define C_000005_MC_AGP_START 0xFFFF0000
175#define S_000005_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16)
176#define G_000005_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF)
177#define C_000005_MC_AGP_TOP 0x0000FFFF
178#define R_000006_AGP_BASE 0x000006
179#define S_000006_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
180#define G_000006_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
181#define C_000006_AGP_BASE_ADDR 0x00000000
182#define R_000007_AGP_BASE_2 0x000007
183#define S_000007_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0)
184#define G_000007_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF)
185#define C_000007_AGP_BASE_ADDR_2 0xFFFFFFF0
186
187#endif
diff --git a/drivers/gpu/drm/radeon/r600.c b/drivers/gpu/drm/radeon/r600.c
index eab31c1d6df1..2e4e60edbff4 100644
--- a/drivers/gpu/drm/radeon/r600.c
+++ b/drivers/gpu/drm/radeon/r600.c
@@ -33,8 +33,8 @@
33#include "radeon.h" 33#include "radeon.h"
34#include "radeon_mode.h" 34#include "radeon_mode.h"
35#include "r600d.h" 35#include "r600d.h"
36#include "avivod.h"
37#include "atom.h" 36#include "atom.h"
37#include "avivod.h"
38 38
39#define PFP_UCODE_SIZE 576 39#define PFP_UCODE_SIZE 576
40#define PM4_UCODE_SIZE 1792 40#define PM4_UCODE_SIZE 1792
@@ -342,7 +342,7 @@ static void r600_mc_resume(struct radeon_device *rdev)
342 342
343 /* we need to own VRAM, so turn off the VGA renderer here 343 /* we need to own VRAM, so turn off the VGA renderer here
344 * to stop it overwriting our objects */ 344 * to stop it overwriting our objects */
345 radeon_avivo_vga_render_disable(rdev); 345 rv515_vga_render_disable(rdev);
346} 346}
347 347
348int r600_mc_init(struct radeon_device *rdev) 348int r600_mc_init(struct radeon_device *rdev)
@@ -380,6 +380,13 @@ int r600_mc_init(struct radeon_device *rdev)
380 /* Setup GPU memory space */ 380 /* Setup GPU memory space */
381 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 381 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
382 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 382 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
383
384 if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
385 rdev->mc.mc_vram_size = rdev->mc.aper_size;
386
387 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
388 rdev->mc.real_vram_size = rdev->mc.aper_size;
389
383 if (rdev->flags & RADEON_IS_AGP) { 390 if (rdev->flags & RADEON_IS_AGP) {
384 r = radeon_agp_init(rdev); 391 r = radeon_agp_init(rdev);
385 if (r) 392 if (r)
diff --git a/drivers/gpu/drm/radeon/r600_cs.c b/drivers/gpu/drm/radeon/r600_cs.c
index 33b89cd8743e..d28970db6a2d 100644
--- a/drivers/gpu/drm/radeon/r600_cs.c
+++ b/drivers/gpu/drm/radeon/r600_cs.c
@@ -28,7 +28,6 @@
28#include "drmP.h" 28#include "drmP.h"
29#include "radeon.h" 29#include "radeon.h"
30#include "r600d.h" 30#include "r600d.h"
31#include "avivod.h"
32 31
33static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, 32static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
34 struct radeon_cs_reloc **cs_reloc); 33 struct radeon_cs_reloc **cs_reloc);
@@ -57,7 +56,7 @@ int r600_cs_packet_parse(struct radeon_cs_parser *p,
57 idx, ib_chunk->length_dw); 56 idx, ib_chunk->length_dw);
58 return -EINVAL; 57 return -EINVAL;
59 } 58 }
60 header = ib_chunk->kdata[idx]; 59 header = radeon_get_ib_value(p, idx);
61 pkt->idx = idx; 60 pkt->idx = idx;
62 pkt->type = CP_PACKET_GET_TYPE(header); 61 pkt->type = CP_PACKET_GET_TYPE(header);
63 pkt->count = CP_PACKET_GET_COUNT(header); 62 pkt->count = CP_PACKET_GET_COUNT(header);
@@ -98,7 +97,6 @@ int r600_cs_packet_parse(struct radeon_cs_parser *p,
98static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p, 97static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
99 struct radeon_cs_reloc **cs_reloc) 98 struct radeon_cs_reloc **cs_reloc)
100{ 99{
101 struct radeon_cs_chunk *ib_chunk;
102 struct radeon_cs_chunk *relocs_chunk; 100 struct radeon_cs_chunk *relocs_chunk;
103 struct radeon_cs_packet p3reloc; 101 struct radeon_cs_packet p3reloc;
104 unsigned idx; 102 unsigned idx;
@@ -109,7 +107,6 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
109 return -EINVAL; 107 return -EINVAL;
110 } 108 }
111 *cs_reloc = NULL; 109 *cs_reloc = NULL;
112 ib_chunk = &p->chunks[p->chunk_ib_idx];
113 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 110 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
114 r = r600_cs_packet_parse(p, &p3reloc, p->idx); 111 r = r600_cs_packet_parse(p, &p3reloc, p->idx);
115 if (r) { 112 if (r) {
@@ -121,7 +118,7 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
121 p3reloc.idx); 118 p3reloc.idx);
122 return -EINVAL; 119 return -EINVAL;
123 } 120 }
124 idx = ib_chunk->kdata[p3reloc.idx + 1]; 121 idx = radeon_get_ib_value(p, p3reloc.idx + 1);
125 if (idx >= relocs_chunk->length_dw) { 122 if (idx >= relocs_chunk->length_dw) {
126 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 123 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
127 idx, relocs_chunk->length_dw); 124 idx, relocs_chunk->length_dw);
@@ -146,7 +143,6 @@ static int r600_cs_packet_next_reloc_mm(struct radeon_cs_parser *p,
146static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p, 143static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
147 struct radeon_cs_reloc **cs_reloc) 144 struct radeon_cs_reloc **cs_reloc)
148{ 145{
149 struct radeon_cs_chunk *ib_chunk;
150 struct radeon_cs_chunk *relocs_chunk; 146 struct radeon_cs_chunk *relocs_chunk;
151 struct radeon_cs_packet p3reloc; 147 struct radeon_cs_packet p3reloc;
152 unsigned idx; 148 unsigned idx;
@@ -157,7 +153,6 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
157 return -EINVAL; 153 return -EINVAL;
158 } 154 }
159 *cs_reloc = NULL; 155 *cs_reloc = NULL;
160 ib_chunk = &p->chunks[p->chunk_ib_idx];
161 relocs_chunk = &p->chunks[p->chunk_relocs_idx]; 156 relocs_chunk = &p->chunks[p->chunk_relocs_idx];
162 r = r600_cs_packet_parse(p, &p3reloc, p->idx); 157 r = r600_cs_packet_parse(p, &p3reloc, p->idx);
163 if (r) { 158 if (r) {
@@ -169,7 +164,7 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
169 p3reloc.idx); 164 p3reloc.idx);
170 return -EINVAL; 165 return -EINVAL;
171 } 166 }
172 idx = ib_chunk->kdata[p3reloc.idx + 1]; 167 idx = radeon_get_ib_value(p, p3reloc.idx + 1);
173 if (idx >= relocs_chunk->length_dw) { 168 if (idx >= relocs_chunk->length_dw) {
174 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n", 169 DRM_ERROR("Relocs at %d after relocations chunk end %d !\n",
175 idx, relocs_chunk->length_dw); 170 idx, relocs_chunk->length_dw);
@@ -181,13 +176,136 @@ static int r600_cs_packet_next_reloc_nomm(struct radeon_cs_parser *p,
181 return 0; 176 return 0;
182} 177}
183 178
179/**
180 * r600_cs_packet_next_vline() - parse userspace VLINE packet
181 * @parser: parser structure holding parsing context.
182 *
183 * Userspace sends a special sequence for VLINE waits.
184 * PACKET0 - VLINE_START_END + value
185 * PACKET3 - WAIT_REG_MEM poll vline status reg
186 * RELOC (P3) - crtc_id in reloc.
187 *
188 * This function parses this and relocates the VLINE START END
189 * and WAIT_REG_MEM packets to the correct crtc.
190 * It also detects a switched off crtc and nulls out the
191 * wait in that case.
192 */
193static int r600_cs_packet_parse_vline(struct radeon_cs_parser *p)
194{
195 struct drm_mode_object *obj;
196 struct drm_crtc *crtc;
197 struct radeon_crtc *radeon_crtc;
198 struct radeon_cs_packet p3reloc, wait_reg_mem;
199 int crtc_id;
200 int r;
201 uint32_t header, h_idx, reg, wait_reg_mem_info;
202 volatile uint32_t *ib;
203
204 ib = p->ib->ptr;
205
206 /* parse the WAIT_REG_MEM */
207 r = r600_cs_packet_parse(p, &wait_reg_mem, p->idx);
208 if (r)
209 return r;
210
211 /* check its a WAIT_REG_MEM */
212 if (wait_reg_mem.type != PACKET_TYPE3 ||
213 wait_reg_mem.opcode != PACKET3_WAIT_REG_MEM) {
214 DRM_ERROR("vline wait missing WAIT_REG_MEM segment\n");
215 r = -EINVAL;
216 return r;
217 }
218
219 wait_reg_mem_info = radeon_get_ib_value(p, wait_reg_mem.idx + 1);
220 /* bit 4 is reg (0) or mem (1) */
221 if (wait_reg_mem_info & 0x10) {
222 DRM_ERROR("vline WAIT_REG_MEM waiting on MEM rather than REG\n");
223 r = -EINVAL;
224 return r;
225 }
226 /* waiting for value to be equal */
227 if ((wait_reg_mem_info & 0x7) != 0x3) {
228 DRM_ERROR("vline WAIT_REG_MEM function not equal\n");
229 r = -EINVAL;
230 return r;
231 }
232 if ((radeon_get_ib_value(p, wait_reg_mem.idx + 2) << 2) != AVIVO_D1MODE_VLINE_STATUS) {
233 DRM_ERROR("vline WAIT_REG_MEM bad reg\n");
234 r = -EINVAL;
235 return r;
236 }
237
238 if (radeon_get_ib_value(p, wait_reg_mem.idx + 5) != AVIVO_D1MODE_VLINE_STAT) {
239 DRM_ERROR("vline WAIT_REG_MEM bad bit mask\n");
240 r = -EINVAL;
241 return r;
242 }
243
244 /* jump over the NOP */
245 r = r600_cs_packet_parse(p, &p3reloc, p->idx + wait_reg_mem.count + 2);
246 if (r)
247 return r;
248
249 h_idx = p->idx - 2;
250 p->idx += wait_reg_mem.count + 2;
251 p->idx += p3reloc.count + 2;
252
253 header = radeon_get_ib_value(p, h_idx);
254 crtc_id = radeon_get_ib_value(p, h_idx + 2 + 7 + 1);
255 reg = header >> 2;
256 mutex_lock(&p->rdev->ddev->mode_config.mutex);
257 obj = drm_mode_object_find(p->rdev->ddev, crtc_id, DRM_MODE_OBJECT_CRTC);
258 if (!obj) {
259 DRM_ERROR("cannot find crtc %d\n", crtc_id);
260 r = -EINVAL;
261 goto out;
262 }
263 crtc = obj_to_crtc(obj);
264 radeon_crtc = to_radeon_crtc(crtc);
265 crtc_id = radeon_crtc->crtc_id;
266
267 if (!crtc->enabled) {
268 /* if the CRTC isn't enabled - we need to nop out the WAIT_REG_MEM */
269 ib[h_idx + 2] = PACKET2(0);
270 ib[h_idx + 3] = PACKET2(0);
271 ib[h_idx + 4] = PACKET2(0);
272 ib[h_idx + 5] = PACKET2(0);
273 ib[h_idx + 6] = PACKET2(0);
274 ib[h_idx + 7] = PACKET2(0);
275 ib[h_idx + 8] = PACKET2(0);
276 } else if (crtc_id == 1) {
277 switch (reg) {
278 case AVIVO_D1MODE_VLINE_START_END:
279 header &= ~R600_CP_PACKET0_REG_MASK;
280 header |= AVIVO_D2MODE_VLINE_START_END >> 2;
281 break;
282 default:
283 DRM_ERROR("unknown crtc reloc\n");
284 r = -EINVAL;
285 goto out;
286 }
287 ib[h_idx] = header;
288 ib[h_idx + 4] = AVIVO_D2MODE_VLINE_STATUS >> 2;
289 }
290out:
291 mutex_unlock(&p->rdev->ddev->mode_config.mutex);
292 return r;
293}
294
184static int r600_packet0_check(struct radeon_cs_parser *p, 295static int r600_packet0_check(struct radeon_cs_parser *p,
185 struct radeon_cs_packet *pkt, 296 struct radeon_cs_packet *pkt,
186 unsigned idx, unsigned reg) 297 unsigned idx, unsigned reg)
187{ 298{
299 int r;
300
188 switch (reg) { 301 switch (reg) {
189 case AVIVO_D1MODE_VLINE_START_END: 302 case AVIVO_D1MODE_VLINE_START_END:
190 case AVIVO_D2MODE_VLINE_START_END: 303 r = r600_cs_packet_parse_vline(p);
304 if (r) {
305 DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
306 idx, reg);
307 return r;
308 }
191 break; 309 break;
192 default: 310 default:
193 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n", 311 printk(KERN_ERR "Forbidden register 0x%04X in cs at %d\n",
@@ -218,17 +336,18 @@ static int r600_cs_parse_packet0(struct radeon_cs_parser *p,
218static int r600_packet3_check(struct radeon_cs_parser *p, 336static int r600_packet3_check(struct radeon_cs_parser *p,
219 struct radeon_cs_packet *pkt) 337 struct radeon_cs_packet *pkt)
220{ 338{
221 struct radeon_cs_chunk *ib_chunk;
222 struct radeon_cs_reloc *reloc; 339 struct radeon_cs_reloc *reloc;
223 volatile u32 *ib; 340 volatile u32 *ib;
224 unsigned idx; 341 unsigned idx;
225 unsigned i; 342 unsigned i;
226 unsigned start_reg, end_reg, reg; 343 unsigned start_reg, end_reg, reg;
227 int r; 344 int r;
345 u32 idx_value;
228 346
229 ib = p->ib->ptr; 347 ib = p->ib->ptr;
230 ib_chunk = &p->chunks[p->chunk_ib_idx];
231 idx = pkt->idx + 1; 348 idx = pkt->idx + 1;
349 idx_value = radeon_get_ib_value(p, idx);
350
232 switch (pkt->opcode) { 351 switch (pkt->opcode) {
233 case PACKET3_START_3D_CMDBUF: 352 case PACKET3_START_3D_CMDBUF:
234 if (p->family >= CHIP_RV770 || pkt->count) { 353 if (p->family >= CHIP_RV770 || pkt->count) {
@@ -259,8 +378,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
259 DRM_ERROR("bad DRAW_INDEX\n"); 378 DRM_ERROR("bad DRAW_INDEX\n");
260 return -EINVAL; 379 return -EINVAL;
261 } 380 }
262 ib[idx+0] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); 381 ib[idx+0] = idx_value + (u32)(reloc->lobj.gpu_offset & 0xffffffff);
263 ib[idx+1] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 382 ib[idx+1] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
264 break; 383 break;
265 case PACKET3_DRAW_INDEX_AUTO: 384 case PACKET3_DRAW_INDEX_AUTO:
266 if (pkt->count != 1) { 385 if (pkt->count != 1) {
@@ -281,14 +400,14 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
281 return -EINVAL; 400 return -EINVAL;
282 } 401 }
283 /* bit 4 is reg (0) or mem (1) */ 402 /* bit 4 is reg (0) or mem (1) */
284 if (ib_chunk->kdata[idx+0] & 0x10) { 403 if (idx_value & 0x10) {
285 r = r600_cs_packet_next_reloc(p, &reloc); 404 r = r600_cs_packet_next_reloc(p, &reloc);
286 if (r) { 405 if (r) {
287 DRM_ERROR("bad WAIT_REG_MEM\n"); 406 DRM_ERROR("bad WAIT_REG_MEM\n");
288 return -EINVAL; 407 return -EINVAL;
289 } 408 }
290 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); 409 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
291 ib[idx+2] = upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 410 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
292 } 411 }
293 break; 412 break;
294 case PACKET3_SURFACE_SYNC: 413 case PACKET3_SURFACE_SYNC:
@@ -297,8 +416,8 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
297 return -EINVAL; 416 return -EINVAL;
298 } 417 }
299 /* 0xffffffff/0x0 is flush all cache flag */ 418 /* 0xffffffff/0x0 is flush all cache flag */
300 if (ib_chunk->kdata[idx+1] != 0xffffffff || 419 if (radeon_get_ib_value(p, idx + 1) != 0xffffffff ||
301 ib_chunk->kdata[idx+2] != 0) { 420 radeon_get_ib_value(p, idx + 2) != 0) {
302 r = r600_cs_packet_next_reloc(p, &reloc); 421 r = r600_cs_packet_next_reloc(p, &reloc);
303 if (r) { 422 if (r) {
304 DRM_ERROR("bad SURFACE_SYNC\n"); 423 DRM_ERROR("bad SURFACE_SYNC\n");
@@ -319,7 +438,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
319 return -EINVAL; 438 return -EINVAL;
320 } 439 }
321 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); 440 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
322 ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 441 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
323 } 442 }
324 break; 443 break;
325 case PACKET3_EVENT_WRITE_EOP: 444 case PACKET3_EVENT_WRITE_EOP:
@@ -333,10 +452,10 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
333 return -EINVAL; 452 return -EINVAL;
334 } 453 }
335 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff); 454 ib[idx+1] += (u32)(reloc->lobj.gpu_offset & 0xffffffff);
336 ib[idx+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 455 ib[idx+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
337 break; 456 break;
338 case PACKET3_SET_CONFIG_REG: 457 case PACKET3_SET_CONFIG_REG:
339 start_reg = (ib[idx+0] << 2) + PACKET3_SET_CONFIG_REG_OFFSET; 458 start_reg = (idx_value << 2) + PACKET3_SET_CONFIG_REG_OFFSET;
340 end_reg = 4 * pkt->count + start_reg - 4; 459 end_reg = 4 * pkt->count + start_reg - 4;
341 if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) || 460 if ((start_reg < PACKET3_SET_CONFIG_REG_OFFSET) ||
342 (start_reg >= PACKET3_SET_CONFIG_REG_END) || 461 (start_reg >= PACKET3_SET_CONFIG_REG_END) ||
@@ -356,7 +475,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
356 } 475 }
357 break; 476 break;
358 case PACKET3_SET_CONTEXT_REG: 477 case PACKET3_SET_CONTEXT_REG:
359 start_reg = (ib[idx+0] << 2) + PACKET3_SET_CONTEXT_REG_OFFSET; 478 start_reg = (idx_value << 2) + PACKET3_SET_CONTEXT_REG_OFFSET;
360 end_reg = 4 * pkt->count + start_reg - 4; 479 end_reg = 4 * pkt->count + start_reg - 4;
361 if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) || 480 if ((start_reg < PACKET3_SET_CONTEXT_REG_OFFSET) ||
362 (start_reg >= PACKET3_SET_CONTEXT_REG_END) || 481 (start_reg >= PACKET3_SET_CONTEXT_REG_END) ||
@@ -421,7 +540,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
421 DRM_ERROR("bad SET_RESOURCE\n"); 540 DRM_ERROR("bad SET_RESOURCE\n");
422 return -EINVAL; 541 return -EINVAL;
423 } 542 }
424 start_reg = (ib[idx+0] << 2) + PACKET3_SET_RESOURCE_OFFSET; 543 start_reg = (idx_value << 2) + PACKET3_SET_RESOURCE_OFFSET;
425 end_reg = 4 * pkt->count + start_reg - 4; 544 end_reg = 4 * pkt->count + start_reg - 4;
426 if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) || 545 if ((start_reg < PACKET3_SET_RESOURCE_OFFSET) ||
427 (start_reg >= PACKET3_SET_RESOURCE_END) || 546 (start_reg >= PACKET3_SET_RESOURCE_END) ||
@@ -430,7 +549,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
430 return -EINVAL; 549 return -EINVAL;
431 } 550 }
432 for (i = 0; i < (pkt->count / 7); i++) { 551 for (i = 0; i < (pkt->count / 7); i++) {
433 switch (G__SQ_VTX_CONSTANT_TYPE(ib[idx+(i*7)+6+1])) { 552 switch (G__SQ_VTX_CONSTANT_TYPE(radeon_get_ib_value(p, idx+(i*7)+6+1))) {
434 case SQ_TEX_VTX_VALID_TEXTURE: 553 case SQ_TEX_VTX_VALID_TEXTURE:
435 /* tex base */ 554 /* tex base */
436 r = r600_cs_packet_next_reloc(p, &reloc); 555 r = r600_cs_packet_next_reloc(p, &reloc);
@@ -455,7 +574,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
455 return -EINVAL; 574 return -EINVAL;
456 } 575 }
457 ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff); 576 ib[idx+1+(i*7)+0] += (u32)((reloc->lobj.gpu_offset) & 0xffffffff);
458 ib[idx+1+(i*7)+2] |= upper_32_bits(reloc->lobj.gpu_offset) & 0xff; 577 ib[idx+1+(i*7)+2] += upper_32_bits(reloc->lobj.gpu_offset) & 0xff;
459 break; 578 break;
460 case SQ_TEX_VTX_INVALID_TEXTURE: 579 case SQ_TEX_VTX_INVALID_TEXTURE:
461 case SQ_TEX_VTX_INVALID_BUFFER: 580 case SQ_TEX_VTX_INVALID_BUFFER:
@@ -466,7 +585,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
466 } 585 }
467 break; 586 break;
468 case PACKET3_SET_ALU_CONST: 587 case PACKET3_SET_ALU_CONST:
469 start_reg = (ib[idx+0] << 2) + PACKET3_SET_ALU_CONST_OFFSET; 588 start_reg = (idx_value << 2) + PACKET3_SET_ALU_CONST_OFFSET;
470 end_reg = 4 * pkt->count + start_reg - 4; 589 end_reg = 4 * pkt->count + start_reg - 4;
471 if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) || 590 if ((start_reg < PACKET3_SET_ALU_CONST_OFFSET) ||
472 (start_reg >= PACKET3_SET_ALU_CONST_END) || 591 (start_reg >= PACKET3_SET_ALU_CONST_END) ||
@@ -476,7 +595,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
476 } 595 }
477 break; 596 break;
478 case PACKET3_SET_BOOL_CONST: 597 case PACKET3_SET_BOOL_CONST:
479 start_reg = (ib[idx+0] << 2) + PACKET3_SET_BOOL_CONST_OFFSET; 598 start_reg = (idx_value << 2) + PACKET3_SET_BOOL_CONST_OFFSET;
480 end_reg = 4 * pkt->count + start_reg - 4; 599 end_reg = 4 * pkt->count + start_reg - 4;
481 if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) || 600 if ((start_reg < PACKET3_SET_BOOL_CONST_OFFSET) ||
482 (start_reg >= PACKET3_SET_BOOL_CONST_END) || 601 (start_reg >= PACKET3_SET_BOOL_CONST_END) ||
@@ -486,7 +605,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
486 } 605 }
487 break; 606 break;
488 case PACKET3_SET_LOOP_CONST: 607 case PACKET3_SET_LOOP_CONST:
489 start_reg = (ib[idx+0] << 2) + PACKET3_SET_LOOP_CONST_OFFSET; 608 start_reg = (idx_value << 2) + PACKET3_SET_LOOP_CONST_OFFSET;
490 end_reg = 4 * pkt->count + start_reg - 4; 609 end_reg = 4 * pkt->count + start_reg - 4;
491 if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) || 610 if ((start_reg < PACKET3_SET_LOOP_CONST_OFFSET) ||
492 (start_reg >= PACKET3_SET_LOOP_CONST_END) || 611 (start_reg >= PACKET3_SET_LOOP_CONST_END) ||
@@ -496,7 +615,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
496 } 615 }
497 break; 616 break;
498 case PACKET3_SET_CTL_CONST: 617 case PACKET3_SET_CTL_CONST:
499 start_reg = (ib[idx+0] << 2) + PACKET3_SET_CTL_CONST_OFFSET; 618 start_reg = (idx_value << 2) + PACKET3_SET_CTL_CONST_OFFSET;
500 end_reg = 4 * pkt->count + start_reg - 4; 619 end_reg = 4 * pkt->count + start_reg - 4;
501 if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) || 620 if ((start_reg < PACKET3_SET_CTL_CONST_OFFSET) ||
502 (start_reg >= PACKET3_SET_CTL_CONST_END) || 621 (start_reg >= PACKET3_SET_CTL_CONST_END) ||
@@ -510,7 +629,7 @@ static int r600_packet3_check(struct radeon_cs_parser *p,
510 DRM_ERROR("bad SET_SAMPLER\n"); 629 DRM_ERROR("bad SET_SAMPLER\n");
511 return -EINVAL; 630 return -EINVAL;
512 } 631 }
513 start_reg = (ib[idx+0] << 2) + PACKET3_SET_SAMPLER_OFFSET; 632 start_reg = (idx_value << 2) + PACKET3_SET_SAMPLER_OFFSET;
514 end_reg = 4 * pkt->count + start_reg - 4; 633 end_reg = 4 * pkt->count + start_reg - 4;
515 if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) || 634 if ((start_reg < PACKET3_SET_SAMPLER_OFFSET) ||
516 (start_reg >= PACKET3_SET_SAMPLER_END) || 635 (start_reg >= PACKET3_SET_SAMPLER_END) ||
@@ -602,6 +721,8 @@ static void r600_cs_parser_fini(struct radeon_cs_parser *parser, int error)
602 kfree(parser->relocs); 721 kfree(parser->relocs);
603 for (i = 0; i < parser->nchunks; i++) { 722 for (i = 0; i < parser->nchunks; i++) {
604 kfree(parser->chunks[i].kdata); 723 kfree(parser->chunks[i].kdata);
724 kfree(parser->chunks[i].kpage[0]);
725 kfree(parser->chunks[i].kpage[1]);
605 } 726 }
606 kfree(parser->chunks); 727 kfree(parser->chunks);
607 kfree(parser->chunks_array); 728 kfree(parser->chunks_array);
@@ -639,7 +760,6 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
639 * uncached). */ 760 * uncached). */
640 ib_chunk = &parser.chunks[parser.chunk_ib_idx]; 761 ib_chunk = &parser.chunks[parser.chunk_ib_idx];
641 parser.ib->length_dw = ib_chunk->length_dw; 762 parser.ib->length_dw = ib_chunk->length_dw;
642 memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4);
643 *l = parser.ib->length_dw; 763 *l = parser.ib->length_dw;
644 r = r600_cs_parse(&parser); 764 r = r600_cs_parse(&parser);
645 if (r) { 765 if (r) {
@@ -647,6 +767,12 @@ int r600_cs_legacy(struct drm_device *dev, void *data, struct drm_file *filp,
647 r600_cs_parser_fini(&parser, r); 767 r600_cs_parser_fini(&parser, r);
648 return r; 768 return r;
649 } 769 }
770 r = radeon_cs_finish_pages(&parser);
771 if (r) {
772 DRM_ERROR("Invalid command stream !\n");
773 r600_cs_parser_fini(&parser, r);
774 return r;
775 }
650 r600_cs_parser_fini(&parser, r); 776 r600_cs_parser_fini(&parser, r);
651 return r; 777 return r;
652} 778}
diff --git a/drivers/gpu/drm/radeon/radeon.h b/drivers/gpu/drm/radeon/radeon.h
index 6311b1362594..950b346e343f 100644
--- a/drivers/gpu/drm/radeon/radeon.h
+++ b/drivers/gpu/drm/radeon/radeon.h
@@ -44,6 +44,24 @@
44 * - TESTING, TESTING, TESTING 44 * - TESTING, TESTING, TESTING
45 */ 45 */
46 46
47/* Initialization path:
48 * We expect that acceleration initialization might fail for various
49 * reasons even thought we work hard to make it works on most
50 * configurations. In order to still have a working userspace in such
51 * situation the init path must succeed up to the memory controller
52 * initialization point. Failure before this point are considered as
53 * fatal error. Here is the init callchain :
54 * radeon_device_init perform common structure, mutex initialization
55 * asic_init setup the GPU memory layout and perform all
56 * one time initialization (failure in this
57 * function are considered fatal)
58 * asic_startup setup the GPU acceleration, in order to
59 * follow guideline the first thing this
60 * function should do is setting the GPU
61 * memory controller (only MC setup failure
62 * are considered as fatal)
63 */
64
47#include <asm/atomic.h> 65#include <asm/atomic.h>
48#include <linux/wait.h> 66#include <linux/wait.h>
49#include <linux/list.h> 67#include <linux/list.h>
@@ -342,7 +360,7 @@ struct radeon_ib {
342 unsigned long idx; 360 unsigned long idx;
343 uint64_t gpu_addr; 361 uint64_t gpu_addr;
344 struct radeon_fence *fence; 362 struct radeon_fence *fence;
345 volatile uint32_t *ptr; 363 uint32_t *ptr;
346 uint32_t length_dw; 364 uint32_t length_dw;
347}; 365};
348 366
@@ -415,7 +433,12 @@ struct radeon_cs_reloc {
415struct radeon_cs_chunk { 433struct radeon_cs_chunk {
416 uint32_t chunk_id; 434 uint32_t chunk_id;
417 uint32_t length_dw; 435 uint32_t length_dw;
436 int kpage_idx[2];
437 uint32_t *kpage[2];
418 uint32_t *kdata; 438 uint32_t *kdata;
439 void __user *user_ptr;
440 int last_copied_page;
441 int last_page_index;
419}; 442};
420 443
421struct radeon_cs_parser { 444struct radeon_cs_parser {
@@ -438,8 +461,38 @@ struct radeon_cs_parser {
438 struct radeon_ib *ib; 461 struct radeon_ib *ib;
439 void *track; 462 void *track;
440 unsigned family; 463 unsigned family;
464 int parser_error;
441}; 465};
442 466
467extern int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx);
468extern int radeon_cs_finish_pages(struct radeon_cs_parser *p);
469
470
471static inline u32 radeon_get_ib_value(struct radeon_cs_parser *p, int idx)
472{
473 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
474 u32 pg_idx, pg_offset;
475 u32 idx_value = 0;
476 int new_page;
477
478 pg_idx = (idx * 4) / PAGE_SIZE;
479 pg_offset = (idx * 4) % PAGE_SIZE;
480
481 if (ibc->kpage_idx[0] == pg_idx)
482 return ibc->kpage[0][pg_offset/4];
483 if (ibc->kpage_idx[1] == pg_idx)
484 return ibc->kpage[1][pg_offset/4];
485
486 new_page = radeon_cs_update_pages(p, pg_idx);
487 if (new_page < 0) {
488 p->parser_error = new_page;
489 return 0;
490 }
491
492 idx_value = ibc->kpage[new_page][pg_offset/4];
493 return idx_value;
494}
495
443struct radeon_cs_packet { 496struct radeon_cs_packet {
444 unsigned idx; 497 unsigned idx;
445 unsigned type; 498 unsigned type;
@@ -943,6 +996,7 @@ extern void radeon_clocks_fini(struct radeon_device *rdev);
943extern void radeon_scratch_init(struct radeon_device *rdev); 996extern void radeon_scratch_init(struct radeon_device *rdev);
944extern void radeon_surface_init(struct radeon_device *rdev); 997extern void radeon_surface_init(struct radeon_device *rdev);
945extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data); 998extern int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data);
999extern void radeon_atom_set_clock_gating(struct radeon_device *rdev, int enable);
946 1000
947/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */ 1001/* r100,rv100,rs100,rv200,rs200,r200,rv250,rs300,rv280 */
948struct r100_mc_save { 1002struct r100_mc_save {
@@ -974,6 +1028,9 @@ extern void r100_vram_init_sizes(struct radeon_device *rdev);
974extern void r100_wb_disable(struct radeon_device *rdev); 1028extern void r100_wb_disable(struct radeon_device *rdev);
975extern void r100_wb_fini(struct radeon_device *rdev); 1029extern void r100_wb_fini(struct radeon_device *rdev);
976extern int r100_wb_init(struct radeon_device *rdev); 1030extern int r100_wb_init(struct radeon_device *rdev);
1031extern void r100_hdp_reset(struct radeon_device *rdev);
1032extern int r100_rb2d_reset(struct radeon_device *rdev);
1033extern int r100_cp_reset(struct radeon_device *rdev);
977 1034
978/* r300,r350,rv350,rv370,rv380 */ 1035/* r300,r350,rv350,rv370,rv380 */
979extern void r300_set_reg_safe(struct radeon_device *rdev); 1036extern void r300_set_reg_safe(struct radeon_device *rdev);
@@ -985,12 +1042,29 @@ extern int rv370_pcie_gart_enable(struct radeon_device *rdev);
985extern void rv370_pcie_gart_disable(struct radeon_device *rdev); 1042extern void rv370_pcie_gart_disable(struct radeon_device *rdev);
986 1043
987/* r420,r423,rv410 */ 1044/* r420,r423,rv410 */
1045extern int r420_mc_init(struct radeon_device *rdev);
988extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg); 1046extern u32 r420_mc_rreg(struct radeon_device *rdev, u32 reg);
989extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v); 1047extern void r420_mc_wreg(struct radeon_device *rdev, u32 reg, u32 v);
990extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev); 1048extern int r420_debugfs_pipes_info_init(struct radeon_device *rdev);
1049extern void r420_pipes_init(struct radeon_device *rdev);
991 1050
992/* rv515 */ 1051/* rv515 */
1052struct rv515_mc_save {
1053 u32 d1vga_control;
1054 u32 d2vga_control;
1055 u32 vga_render_control;
1056 u32 vga_hdp_control;
1057 u32 d1crtc_control;
1058 u32 d2crtc_control;
1059};
993extern void rv515_bandwidth_avivo_update(struct radeon_device *rdev); 1060extern void rv515_bandwidth_avivo_update(struct radeon_device *rdev);
1061extern void rv515_vga_render_disable(struct radeon_device *rdev);
1062extern void rv515_set_safe_registers(struct radeon_device *rdev);
1063extern void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save);
1064extern void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save);
1065extern void rv515_clock_startup(struct radeon_device *rdev);
1066extern void rv515_debugfs(struct radeon_device *rdev);
1067extern int rv515_suspend(struct radeon_device *rdev);
994 1068
995/* rs690, rs740 */ 1069/* rs690, rs740 */
996extern void rs690_line_buffer_adjust(struct radeon_device *rdev, 1070extern void rs690_line_buffer_adjust(struct radeon_device *rdev,
diff --git a/drivers/gpu/drm/radeon/radeon_asic.h b/drivers/gpu/drm/radeon/radeon_asic.h
index 8968f78fa1e3..c8a4e7b5663d 100644
--- a/drivers/gpu/drm/radeon/radeon_asic.h
+++ b/drivers/gpu/drm/radeon/radeon_asic.h
@@ -420,41 +420,43 @@ static struct radeon_asic rs690_asic = {
420 * rv515 420 * rv515
421 */ 421 */
422int rv515_init(struct radeon_device *rdev); 422int rv515_init(struct radeon_device *rdev);
423void rv515_errata(struct radeon_device *rdev); 423void rv515_fini(struct radeon_device *rdev);
424void rv515_vram_info(struct radeon_device *rdev);
425int rv515_gpu_reset(struct radeon_device *rdev); 424int rv515_gpu_reset(struct radeon_device *rdev);
426int rv515_mc_init(struct radeon_device *rdev);
427void rv515_mc_fini(struct radeon_device *rdev);
428uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg); 425uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg);
429void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 426void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
430void rv515_ring_start(struct radeon_device *rdev); 427void rv515_ring_start(struct radeon_device *rdev);
431uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg); 428uint32_t rv515_pcie_rreg(struct radeon_device *rdev, uint32_t reg);
432void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v); 429void rv515_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v);
433void rv515_bandwidth_update(struct radeon_device *rdev); 430void rv515_bandwidth_update(struct radeon_device *rdev);
431int rv515_resume(struct radeon_device *rdev);
432int rv515_suspend(struct radeon_device *rdev);
434static struct radeon_asic rv515_asic = { 433static struct radeon_asic rv515_asic = {
435 .init = &rv515_init, 434 .init = &rv515_init,
436 .errata = &rv515_errata, 435 .fini = &rv515_fini,
437 .vram_info = &rv515_vram_info, 436 .suspend = &rv515_suspend,
437 .resume = &rv515_resume,
438 .errata = NULL,
439 .vram_info = NULL,
438 .vga_set_state = &r100_vga_set_state, 440 .vga_set_state = &r100_vga_set_state,
439 .gpu_reset = &rv515_gpu_reset, 441 .gpu_reset = &rv515_gpu_reset,
440 .mc_init = &rv515_mc_init, 442 .mc_init = NULL,
441 .mc_fini = &rv515_mc_fini, 443 .mc_fini = NULL,
442 .wb_init = &r100_wb_init, 444 .wb_init = NULL,
443 .wb_fini = &r100_wb_fini, 445 .wb_fini = NULL,
444 .gart_init = &rv370_pcie_gart_init, 446 .gart_init = &rv370_pcie_gart_init,
445 .gart_fini = &rv370_pcie_gart_fini, 447 .gart_fini = &rv370_pcie_gart_fini,
446 .gart_enable = &rv370_pcie_gart_enable, 448 .gart_enable = NULL,
447 .gart_disable = &rv370_pcie_gart_disable, 449 .gart_disable = NULL,
448 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 450 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
449 .gart_set_page = &rv370_pcie_gart_set_page, 451 .gart_set_page = &rv370_pcie_gart_set_page,
450 .cp_init = &r100_cp_init, 452 .cp_init = NULL,
451 .cp_fini = &r100_cp_fini, 453 .cp_fini = NULL,
452 .cp_disable = &r100_cp_disable, 454 .cp_disable = NULL,
453 .cp_commit = &r100_cp_commit, 455 .cp_commit = &r100_cp_commit,
454 .ring_start = &rv515_ring_start, 456 .ring_start = &rv515_ring_start,
455 .ring_test = &r100_ring_test, 457 .ring_test = &r100_ring_test,
456 .ring_ib_execute = &r100_ring_ib_execute, 458 .ring_ib_execute = &r100_ring_ib_execute,
457 .ib_test = &r100_ib_test, 459 .ib_test = NULL,
458 .irq_set = &rs600_irq_set, 460 .irq_set = &rs600_irq_set,
459 .irq_process = &rs600_irq_process, 461 .irq_process = &rs600_irq_process,
460 .get_vblank_counter = &rs600_get_vblank_counter, 462 .get_vblank_counter = &rs600_get_vblank_counter,
@@ -476,35 +478,35 @@ static struct radeon_asic rv515_asic = {
476/* 478/*
477 * r520,rv530,rv560,rv570,r580 479 * r520,rv530,rv560,rv570,r580
478 */ 480 */
479void r520_errata(struct radeon_device *rdev); 481int r520_init(struct radeon_device *rdev);
480void r520_vram_info(struct radeon_device *rdev); 482int r520_resume(struct radeon_device *rdev);
481int r520_mc_init(struct radeon_device *rdev);
482void r520_mc_fini(struct radeon_device *rdev);
483void r520_bandwidth_update(struct radeon_device *rdev);
484static struct radeon_asic r520_asic = { 483static struct radeon_asic r520_asic = {
485 .init = &rv515_init, 484 .init = &r520_init,
486 .errata = &r520_errata, 485 .fini = &rv515_fini,
487 .vram_info = &r520_vram_info, 486 .suspend = &rv515_suspend,
487 .resume = &r520_resume,
488 .errata = NULL,
489 .vram_info = NULL,
488 .vga_set_state = &r100_vga_set_state, 490 .vga_set_state = &r100_vga_set_state,
489 .gpu_reset = &rv515_gpu_reset, 491 .gpu_reset = &rv515_gpu_reset,
490 .mc_init = &r520_mc_init, 492 .mc_init = NULL,
491 .mc_fini = &r520_mc_fini, 493 .mc_fini = NULL,
492 .wb_init = &r100_wb_init, 494 .wb_init = NULL,
493 .wb_fini = &r100_wb_fini, 495 .wb_fini = NULL,
494 .gart_init = &rv370_pcie_gart_init, 496 .gart_init = NULL,
495 .gart_fini = &rv370_pcie_gart_fini, 497 .gart_fini = NULL,
496 .gart_enable = &rv370_pcie_gart_enable, 498 .gart_enable = NULL,
497 .gart_disable = &rv370_pcie_gart_disable, 499 .gart_disable = NULL,
498 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush, 500 .gart_tlb_flush = &rv370_pcie_gart_tlb_flush,
499 .gart_set_page = &rv370_pcie_gart_set_page, 501 .gart_set_page = &rv370_pcie_gart_set_page,
500 .cp_init = &r100_cp_init, 502 .cp_init = NULL,
501 .cp_fini = &r100_cp_fini, 503 .cp_fini = NULL,
502 .cp_disable = &r100_cp_disable, 504 .cp_disable = NULL,
503 .cp_commit = &r100_cp_commit, 505 .cp_commit = &r100_cp_commit,
504 .ring_start = &rv515_ring_start, 506 .ring_start = &rv515_ring_start,
505 .ring_test = &r100_ring_test, 507 .ring_test = &r100_ring_test,
506 .ring_ib_execute = &r100_ring_ib_execute, 508 .ring_ib_execute = &r100_ring_ib_execute,
507 .ib_test = &r100_ib_test, 509 .ib_test = NULL,
508 .irq_set = &rs600_irq_set, 510 .irq_set = &rs600_irq_set,
509 .irq_process = &rs600_irq_process, 511 .irq_process = &rs600_irq_process,
510 .get_vblank_counter = &rs600_get_vblank_counter, 512 .get_vblank_counter = &rs600_get_vblank_counter,
@@ -519,7 +521,7 @@ static struct radeon_asic r520_asic = {
519 .set_clock_gating = &radeon_atom_set_clock_gating, 521 .set_clock_gating = &radeon_atom_set_clock_gating,
520 .set_surface_reg = r100_set_surface_reg, 522 .set_surface_reg = r100_set_surface_reg,
521 .clear_surface_reg = r100_clear_surface_reg, 523 .clear_surface_reg = r100_clear_surface_reg,
522 .bandwidth_update = &r520_bandwidth_update, 524 .bandwidth_update = &rv515_bandwidth_update,
523}; 525};
524 526
525/* 527/*
@@ -596,7 +598,7 @@ static struct radeon_asic r600_asic = {
596 .set_clock_gating = &radeon_atom_set_clock_gating, 598 .set_clock_gating = &radeon_atom_set_clock_gating,
597 .set_surface_reg = r600_set_surface_reg, 599 .set_surface_reg = r600_set_surface_reg,
598 .clear_surface_reg = r600_clear_surface_reg, 600 .clear_surface_reg = r600_clear_surface_reg,
599 .bandwidth_update = &r520_bandwidth_update, 601 .bandwidth_update = &rv515_bandwidth_update,
600}; 602};
601 603
602/* 604/*
@@ -646,7 +648,7 @@ static struct radeon_asic rv770_asic = {
646 .set_clock_gating = &radeon_atom_set_clock_gating, 648 .set_clock_gating = &radeon_atom_set_clock_gating,
647 .set_surface_reg = r600_set_surface_reg, 649 .set_surface_reg = r600_set_surface_reg,
648 .clear_surface_reg = r600_clear_surface_reg, 650 .clear_surface_reg = r600_clear_surface_reg,
649 .bandwidth_update = &r520_bandwidth_update, 651 .bandwidth_update = &rv515_bandwidth_update,
650}; 652};
651 653
652#endif 654#endif
diff --git a/drivers/gpu/drm/radeon/radeon_atombios.c b/drivers/gpu/drm/radeon/radeon_atombios.c
index 743742128307..5b6c08cee40e 100644
--- a/drivers/gpu/drm/radeon/radeon_atombios.c
+++ b/drivers/gpu/drm/radeon/radeon_atombios.c
@@ -272,12 +272,9 @@ bool radeon_get_atom_connector_info_from_object_table(struct drm_device *dev)
272 (le16_to_cpu(path->usConnObjectId) & 272 (le16_to_cpu(path->usConnObjectId) &
273 OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT; 273 OBJECT_TYPE_MASK) >> OBJECT_TYPE_SHIFT;
274 274
275 if ((le16_to_cpu(path->usDeviceTag) == 275 /* TODO CV support */
276 ATOM_DEVICE_TV1_SUPPORT) 276 if (le16_to_cpu(path->usDeviceTag) ==
277 || (le16_to_cpu(path->usDeviceTag) == 277 ATOM_DEVICE_CV_SUPPORT)
278 ATOM_DEVICE_TV2_SUPPORT)
279 || (le16_to_cpu(path->usDeviceTag) ==
280 ATOM_DEVICE_CV_SUPPORT))
281 continue; 278 continue;
282 279
283 if ((rdev->family == CHIP_RS780) && 280 if ((rdev->family == CHIP_RS780) &&
diff --git a/drivers/gpu/drm/radeon/radeon_connectors.c b/drivers/gpu/drm/radeon/radeon_connectors.c
index af1d551f1a8f..e376be47a4a0 100644
--- a/drivers/gpu/drm/radeon/radeon_connectors.c
+++ b/drivers/gpu/drm/radeon/radeon_connectors.c
@@ -26,6 +26,7 @@
26#include "drmP.h" 26#include "drmP.h"
27#include "drm_edid.h" 27#include "drm_edid.h"
28#include "drm_crtc_helper.h" 28#include "drm_crtc_helper.h"
29#include "drm_fb_helper.h"
29#include "radeon_drm.h" 30#include "radeon_drm.h"
30#include "radeon.h" 31#include "radeon.h"
31#include "atom.h" 32#include "atom.h"
@@ -245,7 +246,7 @@ static void radeon_add_common_modes(struct drm_encoder *encoder, struct drm_conn
245 if (common_modes[i].w < 320 || common_modes[i].h < 200) 246 if (common_modes[i].w < 320 || common_modes[i].h < 200)
246 continue; 247 continue;
247 248
248 mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false); 249 mode = drm_cvt_mode(dev, common_modes[i].w, common_modes[i].h, 60, false, false, false);
249 drm_mode_probed_add(connector, mode); 250 drm_mode_probed_add(connector, mode);
250 } 251 }
251} 252}
@@ -559,7 +560,7 @@ static int radeon_tv_get_modes(struct drm_connector *connector)
559 radeon_add_common_modes(encoder, connector); 560 radeon_add_common_modes(encoder, connector);
560 else { 561 else {
561 /* only 800x600 is supported right now on pre-avivo chips */ 562 /* only 800x600 is supported right now on pre-avivo chips */
562 tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false); 563 tv_mode = drm_cvt_mode(dev, 800, 600, 60, false, false, false);
563 tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED; 564 tv_mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
564 drm_mode_probed_add(connector, tv_mode); 565 drm_mode_probed_add(connector, tv_mode);
565 } 566 }
@@ -743,6 +744,15 @@ struct drm_encoder *radeon_dvi_encoder(struct drm_connector *connector)
743 return NULL; 744 return NULL;
744} 745}
745 746
747static void radeon_dvi_force(struct drm_connector *connector)
748{
749 struct radeon_connector *radeon_connector = to_radeon_connector(connector);
750 if (connector->force == DRM_FORCE_ON)
751 radeon_connector->use_digital = false;
752 if (connector->force == DRM_FORCE_ON_DIGITAL)
753 radeon_connector->use_digital = true;
754}
755
746struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = { 756struct drm_connector_helper_funcs radeon_dvi_connector_helper_funcs = {
747 .get_modes = radeon_dvi_get_modes, 757 .get_modes = radeon_dvi_get_modes,
748 .mode_valid = radeon_vga_mode_valid, 758 .mode_valid = radeon_vga_mode_valid,
@@ -755,6 +765,7 @@ struct drm_connector_funcs radeon_dvi_connector_funcs = {
755 .fill_modes = drm_helper_probe_single_connector_modes, 765 .fill_modes = drm_helper_probe_single_connector_modes,
756 .set_property = radeon_connector_set_property, 766 .set_property = radeon_connector_set_property,
757 .destroy = radeon_connector_destroy, 767 .destroy = radeon_connector_destroy,
768 .force = radeon_dvi_force,
758}; 769};
759 770
760void 771void
@@ -771,6 +782,7 @@ radeon_add_atom_connector(struct drm_device *dev,
771 struct radeon_connector *radeon_connector; 782 struct radeon_connector *radeon_connector;
772 struct radeon_connector_atom_dig *radeon_dig_connector; 783 struct radeon_connector_atom_dig *radeon_dig_connector;
773 uint32_t subpixel_order = SubPixelNone; 784 uint32_t subpixel_order = SubPixelNone;
785 int ret;
774 786
775 /* fixme - tv/cv/din */ 787 /* fixme - tv/cv/din */
776 if (connector_type == DRM_MODE_CONNECTOR_Unknown) 788 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -796,24 +808,30 @@ radeon_add_atom_connector(struct drm_device *dev,
796 switch (connector_type) { 808 switch (connector_type) {
797 case DRM_MODE_CONNECTOR_VGA: 809 case DRM_MODE_CONNECTOR_VGA:
798 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 810 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
799 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); 811 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
812 if (ret)
813 goto failed;
800 if (i2c_bus->valid) { 814 if (i2c_bus->valid) {
801 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA"); 815 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
802 if (!radeon_connector->ddc_bus) 816 if (!radeon_connector->ddc_bus)
803 goto failed; 817 goto failed;
804 } 818 }
819 radeon_connector->dac_load_detect = true;
805 drm_connector_attach_property(&radeon_connector->base, 820 drm_connector_attach_property(&radeon_connector->base,
806 rdev->mode_info.load_detect_property, 821 rdev->mode_info.load_detect_property,
807 1); 822 1);
808 break; 823 break;
809 case DRM_MODE_CONNECTOR_DVIA: 824 case DRM_MODE_CONNECTOR_DVIA:
810 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 825 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
811 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); 826 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
827 if (ret)
828 goto failed;
812 if (i2c_bus->valid) { 829 if (i2c_bus->valid) {
813 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); 830 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
814 if (!radeon_connector->ddc_bus) 831 if (!radeon_connector->ddc_bus)
815 goto failed; 832 goto failed;
816 } 833 }
834 radeon_connector->dac_load_detect = true;
817 drm_connector_attach_property(&radeon_connector->base, 835 drm_connector_attach_property(&radeon_connector->base,
818 rdev->mode_info.load_detect_property, 836 rdev->mode_info.load_detect_property,
819 1); 837 1);
@@ -827,7 +845,9 @@ radeon_add_atom_connector(struct drm_device *dev,
827 radeon_dig_connector->igp_lane_info = igp_lane_info; 845 radeon_dig_connector->igp_lane_info = igp_lane_info;
828 radeon_connector->con_priv = radeon_dig_connector; 846 radeon_connector->con_priv = radeon_dig_connector;
829 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 847 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
830 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); 848 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
849 if (ret)
850 goto failed;
831 if (i2c_bus->valid) { 851 if (i2c_bus->valid) {
832 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); 852 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
833 if (!radeon_connector->ddc_bus) 853 if (!radeon_connector->ddc_bus)
@@ -837,6 +857,7 @@ radeon_add_atom_connector(struct drm_device *dev,
837 drm_connector_attach_property(&radeon_connector->base, 857 drm_connector_attach_property(&radeon_connector->base,
838 rdev->mode_info.coherent_mode_property, 858 rdev->mode_info.coherent_mode_property,
839 1); 859 1);
860 radeon_connector->dac_load_detect = true;
840 drm_connector_attach_property(&radeon_connector->base, 861 drm_connector_attach_property(&radeon_connector->base,
841 rdev->mode_info.load_detect_property, 862 rdev->mode_info.load_detect_property,
842 1); 863 1);
@@ -850,7 +871,9 @@ radeon_add_atom_connector(struct drm_device *dev,
850 radeon_dig_connector->igp_lane_info = igp_lane_info; 871 radeon_dig_connector->igp_lane_info = igp_lane_info;
851 radeon_connector->con_priv = radeon_dig_connector; 872 radeon_connector->con_priv = radeon_dig_connector;
852 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 873 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
853 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); 874 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
875 if (ret)
876 goto failed;
854 if (i2c_bus->valid) { 877 if (i2c_bus->valid) {
855 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI"); 878 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "HDMI");
856 if (!radeon_connector->ddc_bus) 879 if (!radeon_connector->ddc_bus)
@@ -869,7 +892,9 @@ radeon_add_atom_connector(struct drm_device *dev,
869 radeon_dig_connector->igp_lane_info = igp_lane_info; 892 radeon_dig_connector->igp_lane_info = igp_lane_info;
870 radeon_connector->con_priv = radeon_dig_connector; 893 radeon_connector->con_priv = radeon_dig_connector;
871 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 894 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
872 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); 895 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
896 if (ret)
897 goto failed;
873 if (i2c_bus->valid) { 898 if (i2c_bus->valid) {
874 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP"); 899 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DP");
875 if (!radeon_connector->ddc_bus) 900 if (!radeon_connector->ddc_bus)
@@ -882,11 +907,14 @@ radeon_add_atom_connector(struct drm_device *dev,
882 case DRM_MODE_CONNECTOR_9PinDIN: 907 case DRM_MODE_CONNECTOR_9PinDIN:
883 if (radeon_tv == 1) { 908 if (radeon_tv == 1) {
884 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); 909 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
885 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); 910 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
911 if (ret)
912 goto failed;
913 radeon_connector->dac_load_detect = true;
914 drm_connector_attach_property(&radeon_connector->base,
915 rdev->mode_info.load_detect_property,
916 1);
886 } 917 }
887 drm_connector_attach_property(&radeon_connector->base,
888 rdev->mode_info.load_detect_property,
889 1);
890 break; 918 break;
891 case DRM_MODE_CONNECTOR_LVDS: 919 case DRM_MODE_CONNECTOR_LVDS:
892 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL); 920 radeon_dig_connector = kzalloc(sizeof(struct radeon_connector_atom_dig), GFP_KERNEL);
@@ -896,7 +924,9 @@ radeon_add_atom_connector(struct drm_device *dev,
896 radeon_dig_connector->igp_lane_info = igp_lane_info; 924 radeon_dig_connector->igp_lane_info = igp_lane_info;
897 radeon_connector->con_priv = radeon_dig_connector; 925 radeon_connector->con_priv = radeon_dig_connector;
898 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); 926 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
899 drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); 927 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
928 if (ret)
929 goto failed;
900 if (i2c_bus->valid) { 930 if (i2c_bus->valid) {
901 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS"); 931 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
902 if (!radeon_connector->ddc_bus) 932 if (!radeon_connector->ddc_bus)
@@ -932,6 +962,7 @@ radeon_add_legacy_connector(struct drm_device *dev,
932 struct drm_connector *connector; 962 struct drm_connector *connector;
933 struct radeon_connector *radeon_connector; 963 struct radeon_connector *radeon_connector;
934 uint32_t subpixel_order = SubPixelNone; 964 uint32_t subpixel_order = SubPixelNone;
965 int ret;
935 966
936 /* fixme - tv/cv/din */ 967 /* fixme - tv/cv/din */
937 if (connector_type == DRM_MODE_CONNECTOR_Unknown) 968 if (connector_type == DRM_MODE_CONNECTOR_Unknown)
@@ -957,24 +988,30 @@ radeon_add_legacy_connector(struct drm_device *dev,
957 switch (connector_type) { 988 switch (connector_type) {
958 case DRM_MODE_CONNECTOR_VGA: 989 case DRM_MODE_CONNECTOR_VGA:
959 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 990 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
960 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); 991 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
992 if (ret)
993 goto failed;
961 if (i2c_bus->valid) { 994 if (i2c_bus->valid) {
962 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA"); 995 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "VGA");
963 if (!radeon_connector->ddc_bus) 996 if (!radeon_connector->ddc_bus)
964 goto failed; 997 goto failed;
965 } 998 }
999 radeon_connector->dac_load_detect = true;
966 drm_connector_attach_property(&radeon_connector->base, 1000 drm_connector_attach_property(&radeon_connector->base,
967 rdev->mode_info.load_detect_property, 1001 rdev->mode_info.load_detect_property,
968 1); 1002 1);
969 break; 1003 break;
970 case DRM_MODE_CONNECTOR_DVIA: 1004 case DRM_MODE_CONNECTOR_DVIA:
971 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type); 1005 drm_connector_init(dev, &radeon_connector->base, &radeon_vga_connector_funcs, connector_type);
972 drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs); 1006 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_vga_connector_helper_funcs);
1007 if (ret)
1008 goto failed;
973 if (i2c_bus->valid) { 1009 if (i2c_bus->valid) {
974 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); 1010 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
975 if (!radeon_connector->ddc_bus) 1011 if (!radeon_connector->ddc_bus)
976 goto failed; 1012 goto failed;
977 } 1013 }
1014 radeon_connector->dac_load_detect = true;
978 drm_connector_attach_property(&radeon_connector->base, 1015 drm_connector_attach_property(&radeon_connector->base,
979 rdev->mode_info.load_detect_property, 1016 rdev->mode_info.load_detect_property,
980 1); 1017 1);
@@ -982,11 +1019,14 @@ radeon_add_legacy_connector(struct drm_device *dev,
982 case DRM_MODE_CONNECTOR_DVII: 1019 case DRM_MODE_CONNECTOR_DVII:
983 case DRM_MODE_CONNECTOR_DVID: 1020 case DRM_MODE_CONNECTOR_DVID:
984 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type); 1021 drm_connector_init(dev, &radeon_connector->base, &radeon_dvi_connector_funcs, connector_type);
985 drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs); 1022 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_dvi_connector_helper_funcs);
1023 if (ret)
1024 goto failed;
986 if (i2c_bus->valid) { 1025 if (i2c_bus->valid) {
987 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI"); 1026 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "DVI");
988 if (!radeon_connector->ddc_bus) 1027 if (!radeon_connector->ddc_bus)
989 goto failed; 1028 goto failed;
1029 radeon_connector->dac_load_detect = true;
990 drm_connector_attach_property(&radeon_connector->base, 1030 drm_connector_attach_property(&radeon_connector->base,
991 rdev->mode_info.load_detect_property, 1031 rdev->mode_info.load_detect_property,
992 1); 1032 1);
@@ -998,7 +1038,10 @@ radeon_add_legacy_connector(struct drm_device *dev,
998 case DRM_MODE_CONNECTOR_9PinDIN: 1038 case DRM_MODE_CONNECTOR_9PinDIN:
999 if (radeon_tv == 1) { 1039 if (radeon_tv == 1) {
1000 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type); 1040 drm_connector_init(dev, &radeon_connector->base, &radeon_tv_connector_funcs, connector_type);
1001 drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs); 1041 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_tv_connector_helper_funcs);
1042 if (ret)
1043 goto failed;
1044 radeon_connector->dac_load_detect = true;
1002 drm_connector_attach_property(&radeon_connector->base, 1045 drm_connector_attach_property(&radeon_connector->base,
1003 rdev->mode_info.load_detect_property, 1046 rdev->mode_info.load_detect_property,
1004 1); 1047 1);
@@ -1006,7 +1049,9 @@ radeon_add_legacy_connector(struct drm_device *dev,
1006 break; 1049 break;
1007 case DRM_MODE_CONNECTOR_LVDS: 1050 case DRM_MODE_CONNECTOR_LVDS:
1008 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type); 1051 drm_connector_init(dev, &radeon_connector->base, &radeon_lvds_connector_funcs, connector_type);
1009 drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs); 1052 ret = drm_connector_helper_add(&radeon_connector->base, &radeon_lvds_connector_helper_funcs);
1053 if (ret)
1054 goto failed;
1010 if (i2c_bus->valid) { 1055 if (i2c_bus->valid) {
1011 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS"); 1056 radeon_connector->ddc_bus = radeon_i2c_create(dev, i2c_bus, "LVDS");
1012 if (!radeon_connector->ddc_bus) 1057 if (!radeon_connector->ddc_bus)
diff --git a/drivers/gpu/drm/radeon/radeon_cs.c b/drivers/gpu/drm/radeon/radeon_cs.c
index 12f5990c2d2a..5ab2cf96a264 100644
--- a/drivers/gpu/drm/radeon/radeon_cs.c
+++ b/drivers/gpu/drm/radeon/radeon_cs.c
@@ -142,15 +142,31 @@ int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
142 } 142 }
143 143
144 p->chunks[i].length_dw = user_chunk.length_dw; 144 p->chunks[i].length_dw = user_chunk.length_dw;
145 cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data; 145 p->chunks[i].user_ptr = (void __user *)(unsigned long)user_chunk.chunk_data;
146 146
147 size = p->chunks[i].length_dw * sizeof(uint32_t); 147 cdata = (uint32_t *)(unsigned long)user_chunk.chunk_data;
148 p->chunks[i].kdata = kmalloc(size, GFP_KERNEL); 148 if (p->chunks[i].chunk_id != RADEON_CHUNK_ID_IB) {
149 if (p->chunks[i].kdata == NULL) { 149 size = p->chunks[i].length_dw * sizeof(uint32_t);
150 return -ENOMEM; 150 p->chunks[i].kdata = kmalloc(size, GFP_KERNEL);
151 } 151 if (p->chunks[i].kdata == NULL) {
152 if (DRM_COPY_FROM_USER(p->chunks[i].kdata, cdata, size)) { 152 return -ENOMEM;
153 return -EFAULT; 153 }
154 if (DRM_COPY_FROM_USER(p->chunks[i].kdata,
155 p->chunks[i].user_ptr, size)) {
156 return -EFAULT;
157 }
158 } else {
159 p->chunks[i].kpage[0] = kmalloc(PAGE_SIZE, GFP_KERNEL);
160 p->chunks[i].kpage[1] = kmalloc(PAGE_SIZE, GFP_KERNEL);
161 if (p->chunks[i].kpage[0] == NULL || p->chunks[i].kpage[1] == NULL) {
162 kfree(p->chunks[i].kpage[0]);
163 kfree(p->chunks[i].kpage[1]);
164 return -ENOMEM;
165 }
166 p->chunks[i].kpage_idx[0] = -1;
167 p->chunks[i].kpage_idx[1] = -1;
168 p->chunks[i].last_copied_page = -1;
169 p->chunks[i].last_page_index = ((p->chunks[i].length_dw * 4) - 1) / PAGE_SIZE;
154 } 170 }
155 } 171 }
156 if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) { 172 if (p->chunks[p->chunk_ib_idx].length_dw > (16 * 1024)) {
@@ -190,6 +206,8 @@ static void radeon_cs_parser_fini(struct radeon_cs_parser *parser, int error)
190 kfree(parser->relocs_ptr); 206 kfree(parser->relocs_ptr);
191 for (i = 0; i < parser->nchunks; i++) { 207 for (i = 0; i < parser->nchunks; i++) {
192 kfree(parser->chunks[i].kdata); 208 kfree(parser->chunks[i].kdata);
209 kfree(parser->chunks[i].kpage[0]);
210 kfree(parser->chunks[i].kpage[1]);
193 } 211 }
194 kfree(parser->chunks); 212 kfree(parser->chunks);
195 kfree(parser->chunks_array); 213 kfree(parser->chunks_array);
@@ -238,8 +256,14 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
238 * uncached). */ 256 * uncached). */
239 ib_chunk = &parser.chunks[parser.chunk_ib_idx]; 257 ib_chunk = &parser.chunks[parser.chunk_ib_idx];
240 parser.ib->length_dw = ib_chunk->length_dw; 258 parser.ib->length_dw = ib_chunk->length_dw;
241 memcpy((void *)parser.ib->ptr, ib_chunk->kdata, ib_chunk->length_dw*4);
242 r = radeon_cs_parse(&parser); 259 r = radeon_cs_parse(&parser);
260 if (r || parser.parser_error) {
261 DRM_ERROR("Invalid command stream !\n");
262 radeon_cs_parser_fini(&parser, r);
263 mutex_unlock(&rdev->cs_mutex);
264 return r;
265 }
266 r = radeon_cs_finish_pages(&parser);
243 if (r) { 267 if (r) {
244 DRM_ERROR("Invalid command stream !\n"); 268 DRM_ERROR("Invalid command stream !\n");
245 radeon_cs_parser_fini(&parser, r); 269 radeon_cs_parser_fini(&parser, r);
@@ -254,3 +278,64 @@ int radeon_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
254 mutex_unlock(&rdev->cs_mutex); 278 mutex_unlock(&rdev->cs_mutex);
255 return r; 279 return r;
256} 280}
281
282int radeon_cs_finish_pages(struct radeon_cs_parser *p)
283{
284 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
285 int i;
286 int size = PAGE_SIZE;
287
288 for (i = ibc->last_copied_page + 1; i <= ibc->last_page_index; i++) {
289 if (i == ibc->last_page_index) {
290 size = (ibc->length_dw * 4) % PAGE_SIZE;
291 if (size == 0)
292 size = PAGE_SIZE;
293 }
294
295 if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
296 ibc->user_ptr + (i * PAGE_SIZE),
297 size))
298 return -EFAULT;
299 }
300 return 0;
301}
302
303int radeon_cs_update_pages(struct radeon_cs_parser *p, int pg_idx)
304{
305 int new_page;
306 struct radeon_cs_chunk *ibc = &p->chunks[p->chunk_ib_idx];
307 int i;
308 int size = PAGE_SIZE;
309
310 for (i = ibc->last_copied_page + 1; i < pg_idx; i++) {
311 if (DRM_COPY_FROM_USER(p->ib->ptr + (i * (PAGE_SIZE/4)),
312 ibc->user_ptr + (i * PAGE_SIZE),
313 PAGE_SIZE)) {
314 p->parser_error = -EFAULT;
315 return 0;
316 }
317 }
318
319 new_page = ibc->kpage_idx[0] < ibc->kpage_idx[1] ? 0 : 1;
320
321 if (pg_idx == ibc->last_page_index) {
322 size = (ibc->length_dw * 4) % PAGE_SIZE;
323 if (size == 0)
324 size = PAGE_SIZE;
325 }
326
327 if (DRM_COPY_FROM_USER(ibc->kpage[new_page],
328 ibc->user_ptr + (pg_idx * PAGE_SIZE),
329 size)) {
330 p->parser_error = -EFAULT;
331 return 0;
332 }
333
334 /* copy to IB here */
335 memcpy((void *)(p->ib->ptr+(pg_idx*(PAGE_SIZE/4))), ibc->kpage[new_page], size);
336
337 ibc->last_copied_page = pg_idx;
338 ibc->kpage_idx[new_page] = pg_idx;
339
340 return new_page;
341}
diff --git a/drivers/gpu/drm/radeon/radeon_device.c b/drivers/gpu/drm/radeon/radeon_device.c
index daf5db780956..ec835d56d30a 100644
--- a/drivers/gpu/drm/radeon/radeon_device.c
+++ b/drivers/gpu/drm/radeon/radeon_device.c
@@ -532,10 +532,13 @@ int radeon_device_init(struct radeon_device *rdev,
532 532
533 if (radeon_agpmode == -1) { 533 if (radeon_agpmode == -1) {
534 rdev->flags &= ~RADEON_IS_AGP; 534 rdev->flags &= ~RADEON_IS_AGP;
535 if (rdev->family >= CHIP_RV515 || 535 if (rdev->family >= CHIP_R600) {
536 rdev->family == CHIP_RV380 || 536 DRM_INFO("Forcing AGP to PCIE mode\n");
537 rdev->family == CHIP_RV410 || 537 rdev->flags |= RADEON_IS_PCIE;
538 rdev->family == CHIP_R423) { 538 } else if (rdev->family >= CHIP_RV515 ||
539 rdev->family == CHIP_RV380 ||
540 rdev->family == CHIP_RV410 ||
541 rdev->family == CHIP_R423) {
539 DRM_INFO("Forcing AGP to PCIE mode\n"); 542 DRM_INFO("Forcing AGP to PCIE mode\n");
540 rdev->flags |= RADEON_IS_PCIE; 543 rdev->flags |= RADEON_IS_PCIE;
541 rdev->asic->gart_init = &rv370_pcie_gart_init; 544 rdev->asic->gart_init = &rv370_pcie_gart_init;
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 50fce498910c..7f50fb864af8 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -62,9 +62,6 @@ void radeon_driver_irq_preinstall_kms(struct drm_device *dev);
62int radeon_driver_irq_postinstall_kms(struct drm_device *dev); 62int radeon_driver_irq_postinstall_kms(struct drm_device *dev);
63void radeon_driver_irq_uninstall_kms(struct drm_device *dev); 63void radeon_driver_irq_uninstall_kms(struct drm_device *dev);
64irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS); 64irqreturn_t radeon_driver_irq_handler_kms(DRM_IRQ_ARGS);
65int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master);
66void radeon_master_destroy_kms(struct drm_device *dev,
67 struct drm_master *master);
68int radeon_dma_ioctl_kms(struct drm_device *dev, void *data, 65int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
69 struct drm_file *file_priv); 66 struct drm_file *file_priv);
70int radeon_gem_object_init(struct drm_gem_object *obj); 67int radeon_gem_object_init(struct drm_gem_object *obj);
@@ -260,8 +257,6 @@ static struct drm_driver kms_driver = {
260 .get_vblank_counter = radeon_get_vblank_counter_kms, 257 .get_vblank_counter = radeon_get_vblank_counter_kms,
261 .enable_vblank = radeon_enable_vblank_kms, 258 .enable_vblank = radeon_enable_vblank_kms,
262 .disable_vblank = radeon_disable_vblank_kms, 259 .disable_vblank = radeon_disable_vblank_kms,
263 .master_create = radeon_master_create_kms,
264 .master_destroy = radeon_master_destroy_kms,
265#if defined(CONFIG_DEBUG_FS) 260#if defined(CONFIG_DEBUG_FS)
266 .debugfs_init = radeon_debugfs_init, 261 .debugfs_init = radeon_debugfs_init,
267 .debugfs_cleanup = radeon_debugfs_cleanup, 262 .debugfs_cleanup = radeon_debugfs_cleanup,
diff --git a/drivers/gpu/drm/radeon/radeon_fb.c b/drivers/gpu/drm/radeon/radeon_fb.c
index 944e4fa78db5..1ba704eedefb 100644
--- a/drivers/gpu/drm/radeon/radeon_fb.c
+++ b/drivers/gpu/drm/radeon/radeon_fb.c
@@ -128,6 +128,7 @@ static struct drm_fb_helper_funcs radeon_fb_helper_funcs = {
128int radeonfb_create(struct drm_device *dev, 128int radeonfb_create(struct drm_device *dev,
129 uint32_t fb_width, uint32_t fb_height, 129 uint32_t fb_width, uint32_t fb_height,
130 uint32_t surface_width, uint32_t surface_height, 130 uint32_t surface_width, uint32_t surface_height,
131 uint32_t surface_depth, uint32_t surface_bpp,
131 struct drm_framebuffer **fb_p) 132 struct drm_framebuffer **fb_p)
132{ 133{
133 struct radeon_device *rdev = dev->dev_private; 134 struct radeon_device *rdev = dev->dev_private;
@@ -148,10 +149,10 @@ int radeonfb_create(struct drm_device *dev,
148 149
149 mode_cmd.width = surface_width; 150 mode_cmd.width = surface_width;
150 mode_cmd.height = surface_height; 151 mode_cmd.height = surface_height;
151 mode_cmd.bpp = 32; 152 mode_cmd.bpp = surface_bpp;
152 /* need to align pitch with crtc limits */ 153 /* need to align pitch with crtc limits */
153 mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8); 154 mode_cmd.pitch = radeon_align_pitch(rdev, mode_cmd.width, mode_cmd.bpp, fb_tiled) * ((mode_cmd.bpp + 1) / 8);
154 mode_cmd.depth = 24; 155 mode_cmd.depth = surface_depth;
155 156
156 size = mode_cmd.pitch * mode_cmd.height; 157 size = mode_cmd.pitch * mode_cmd.height;
157 aligned_size = ALIGN(size, PAGE_SIZE); 158 aligned_size = ALIGN(size, PAGE_SIZE);
@@ -290,13 +291,26 @@ out:
290 return ret; 291 return ret;
291} 292}
292 293
294static char *mode_option;
295int radeon_parse_options(char *options)
296{
297 char *this_opt;
298
299 if (!options || !*options)
300 return 0;
301
302 while ((this_opt = strsep(&options, ",")) != NULL) {
303 if (!*this_opt)
304 continue;
305 mode_option = this_opt;
306 }
307 return 0;
308}
309
293int radeonfb_probe(struct drm_device *dev) 310int radeonfb_probe(struct drm_device *dev)
294{ 311{
295 int ret; 312 return drm_fb_helper_single_fb_probe(dev, &radeonfb_create);
296 ret = drm_fb_helper_single_fb_probe(dev, &radeonfb_create);
297 return ret;
298} 313}
299EXPORT_SYMBOL(radeonfb_probe);
300 314
301int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb) 315int radeonfb_remove(struct drm_device *dev, struct drm_framebuffer *fb)
302{ 316{
diff --git a/drivers/gpu/drm/radeon/radeon_kms.c b/drivers/gpu/drm/radeon/radeon_kms.c
index 709bd892b3a9..ba128621057a 100644
--- a/drivers/gpu/drm/radeon/radeon_kms.c
+++ b/drivers/gpu/drm/radeon/radeon_kms.c
@@ -201,55 +201,6 @@ void radeon_disable_vblank_kms(struct drm_device *dev, int crtc)
201 201
202 202
203/* 203/*
204 * For multiple master (like multiple X).
205 */
206struct drm_radeon_master_private {
207 drm_local_map_t *sarea;
208 drm_radeon_sarea_t *sarea_priv;
209};
210
211int radeon_master_create_kms(struct drm_device *dev, struct drm_master *master)
212{
213 struct drm_radeon_master_private *master_priv;
214 unsigned long sareapage;
215 int ret;
216
217 master_priv = kzalloc(sizeof(*master_priv), GFP_KERNEL);
218 if (master_priv == NULL) {
219 return -ENOMEM;
220 }
221 /* prebuild the SAREA */
222 sareapage = max_t(unsigned long, SAREA_MAX, PAGE_SIZE);
223 ret = drm_addmap(dev, 0, sareapage, _DRM_SHM,
224 _DRM_CONTAINS_LOCK,
225 &master_priv->sarea);
226 if (ret) {
227 DRM_ERROR("SAREA setup failed\n");
228 return ret;
229 }
230 master_priv->sarea_priv = master_priv->sarea->handle + sizeof(struct drm_sarea);
231 master_priv->sarea_priv->pfCurrentPage = 0;
232 master->driver_priv = master_priv;
233 return 0;
234}
235
236void radeon_master_destroy_kms(struct drm_device *dev,
237 struct drm_master *master)
238{
239 struct drm_radeon_master_private *master_priv = master->driver_priv;
240
241 if (master_priv == NULL) {
242 return;
243 }
244 if (master_priv->sarea) {
245 drm_rmmap_locked(dev, master_priv->sarea);
246 }
247 kfree(master_priv);
248 master->driver_priv = NULL;
249}
250
251
252/*
253 * IOCTL. 204 * IOCTL.
254 */ 205 */
255int radeon_dma_ioctl_kms(struct drm_device *dev, void *data, 206int radeon_dma_ioctl_kms(struct drm_device *dev, void *data,
diff --git a/drivers/gpu/drm/radeon/radeon_reg.h b/drivers/gpu/drm/radeon/radeon_reg.h
index 21da871a793c..bfa1ab9c93e1 100644
--- a/drivers/gpu/drm/radeon/radeon_reg.h
+++ b/drivers/gpu/drm/radeon/radeon_reg.h
@@ -3333,6 +3333,7 @@
3333# define RADEON_CP_PACKET_MAX_DWORDS (1 << 12) 3333# define RADEON_CP_PACKET_MAX_DWORDS (1 << 12)
3334# define RADEON_CP_PACKET0_REG_MASK 0x000007ff 3334# define RADEON_CP_PACKET0_REG_MASK 0x000007ff
3335# define R300_CP_PACKET0_REG_MASK 0x00001fff 3335# define R300_CP_PACKET0_REG_MASK 0x00001fff
3336# define R600_CP_PACKET0_REG_MASK 0x0000ffff
3336# define RADEON_CP_PACKET1_REG0_MASK 0x000007ff 3337# define RADEON_CP_PACKET1_REG0_MASK 0x000007ff
3337# define RADEON_CP_PACKET1_REG1_MASK 0x003ff800 3338# define RADEON_CP_PACKET1_REG1_MASK 0x003ff800
3338 3339
diff --git a/drivers/gpu/drm/radeon/radeon_ttm.c b/drivers/gpu/drm/radeon/radeon_ttm.c
index acd889c94549..765bd184b6fc 100644
--- a/drivers/gpu/drm/radeon/radeon_ttm.c
+++ b/drivers/gpu/drm/radeon/radeon_ttm.c
@@ -530,7 +530,7 @@ void radeon_ttm_fini(struct radeon_device *rdev)
530} 530}
531 531
532static struct vm_operations_struct radeon_ttm_vm_ops; 532static struct vm_operations_struct radeon_ttm_vm_ops;
533static struct vm_operations_struct *ttm_vm_ops = NULL; 533static const struct vm_operations_struct *ttm_vm_ops = NULL;
534 534
535static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf) 535static int radeon_ttm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
536{ 536{
@@ -689,9 +689,6 @@ struct ttm_backend *radeon_ttm_backend_create(struct radeon_device *rdev)
689 689
690#define RADEON_DEBUGFS_MEM_TYPES 2 690#define RADEON_DEBUGFS_MEM_TYPES 2
691 691
692static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
693static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
694
695#if defined(CONFIG_DEBUG_FS) 692#if defined(CONFIG_DEBUG_FS)
696static int radeon_mm_dump_table(struct seq_file *m, void *data) 693static int radeon_mm_dump_table(struct seq_file *m, void *data)
697{ 694{
@@ -711,9 +708,11 @@ static int radeon_mm_dump_table(struct seq_file *m, void *data)
711 708
712static int radeon_ttm_debugfs_init(struct radeon_device *rdev) 709static int radeon_ttm_debugfs_init(struct radeon_device *rdev)
713{ 710{
711#if defined(CONFIG_DEBUG_FS)
712 static struct drm_info_list radeon_mem_types_list[RADEON_DEBUGFS_MEM_TYPES];
713 static char radeon_mem_types_names[RADEON_DEBUGFS_MEM_TYPES][32];
714 unsigned i; 714 unsigned i;
715 715
716#if defined(CONFIG_DEBUG_FS)
717 for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) { 716 for (i = 0; i < RADEON_DEBUGFS_MEM_TYPES; i++) {
718 if (i == 0) 717 if (i == 0)
719 sprintf(radeon_mem_types_names[i], "radeon_vram_mm"); 718 sprintf(radeon_mem_types_names[i], "radeon_vram_mm");
diff --git a/drivers/gpu/drm/radeon/rs600.c b/drivers/gpu/drm/radeon/rs600.c
index 0e791e26def3..4a4fe1cb131c 100644
--- a/drivers/gpu/drm/radeon/rs600.c
+++ b/drivers/gpu/drm/radeon/rs600.c
@@ -28,7 +28,6 @@
28#include "drmP.h" 28#include "drmP.h"
29#include "radeon_reg.h" 29#include "radeon_reg.h"
30#include "radeon.h" 30#include "radeon.h"
31#include "avivod.h"
32 31
33#include "rs600_reg_safe.h" 32#include "rs600_reg_safe.h"
34 33
@@ -45,7 +44,6 @@ void r420_pipes_init(struct radeon_device *rdev);
45 */ 44 */
46void rs600_gpu_init(struct radeon_device *rdev); 45void rs600_gpu_init(struct radeon_device *rdev);
47int rs600_mc_wait_for_idle(struct radeon_device *rdev); 46int rs600_mc_wait_for_idle(struct radeon_device *rdev);
48void rs600_disable_vga(struct radeon_device *rdev);
49 47
50 48
51/* 49/*
@@ -198,7 +196,7 @@ void rs600_mc_disable_clients(struct radeon_device *rdev)
198 "programming pipes. Bad things might happen.\n"); 196 "programming pipes. Bad things might happen.\n");
199 } 197 }
200 198
201 radeon_avivo_vga_render_disable(rdev); 199 rv515_vga_render_disable(rdev);
202 200
203 tmp = RREG32(AVIVO_D1VGA_CONTROL); 201 tmp = RREG32(AVIVO_D1VGA_CONTROL);
204 WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE); 202 WREG32(AVIVO_D1VGA_CONTROL, tmp & ~AVIVO_DVGA_CONTROL_MODE_ENABLE);
@@ -346,20 +344,6 @@ u32 rs600_get_vblank_counter(struct radeon_device *rdev, int crtc)
346/* 344/*
347 * Global GPU functions 345 * Global GPU functions
348 */ 346 */
349void rs600_disable_vga(struct radeon_device *rdev)
350{
351 unsigned tmp;
352
353 WREG32(0x330, 0);
354 WREG32(0x338, 0);
355 tmp = RREG32(0x300);
356 tmp &= ~(3 << 16);
357 WREG32(0x300, tmp);
358 WREG32(0x308, (1 << 8));
359 WREG32(0x310, rdev->mc.vram_location);
360 WREG32(0x594, 0);
361}
362
363int rs600_mc_wait_for_idle(struct radeon_device *rdev) 347int rs600_mc_wait_for_idle(struct radeon_device *rdev)
364{ 348{
365 unsigned i; 349 unsigned i;
@@ -385,7 +369,7 @@ void rs600_gpu_init(struct radeon_device *rdev)
385{ 369{
386 /* FIXME: HDP same place on rs600 ? */ 370 /* FIXME: HDP same place on rs600 ? */
387 r100_hdp_reset(rdev); 371 r100_hdp_reset(rdev);
388 rs600_disable_vga(rdev); 372 rv515_vga_render_disable(rdev);
389 /* FIXME: is this correct ? */ 373 /* FIXME: is this correct ? */
390 r420_pipes_init(rdev); 374 r420_pipes_init(rdev);
391 if (rs600_mc_wait_for_idle(rdev)) { 375 if (rs600_mc_wait_for_idle(rdev)) {
diff --git a/drivers/gpu/drm/radeon/rs690.c b/drivers/gpu/drm/radeon/rs690.c
index 0f585ca8276d..7a0098ddf977 100644
--- a/drivers/gpu/drm/radeon/rs690.c
+++ b/drivers/gpu/drm/radeon/rs690.c
@@ -40,7 +40,6 @@ void rs400_gart_disable(struct radeon_device *rdev);
40int rs400_gart_enable(struct radeon_device *rdev); 40int rs400_gart_enable(struct radeon_device *rdev);
41void rs400_gart_adjust_size(struct radeon_device *rdev); 41void rs400_gart_adjust_size(struct radeon_device *rdev);
42void rs600_mc_disable_clients(struct radeon_device *rdev); 42void rs600_mc_disable_clients(struct radeon_device *rdev);
43void rs600_disable_vga(struct radeon_device *rdev);
44 43
45/* This files gather functions specifics to : 44/* This files gather functions specifics to :
46 * rs690,rs740 45 * rs690,rs740
@@ -125,7 +124,7 @@ void rs690_gpu_init(struct radeon_device *rdev)
125{ 124{
126 /* FIXME: HDP same place on rs690 ? */ 125 /* FIXME: HDP same place on rs690 ? */
127 r100_hdp_reset(rdev); 126 r100_hdp_reset(rdev);
128 rs600_disable_vga(rdev); 127 rv515_vga_render_disable(rdev);
129 /* FIXME: is this correct ? */ 128 /* FIXME: is this correct ? */
130 r420_pipes_init(rdev); 129 r420_pipes_init(rdev);
131 if (rs690_mc_wait_for_idle(rdev)) { 130 if (rs690_mc_wait_for_idle(rdev)) {
diff --git a/drivers/gpu/drm/radeon/rv515.c b/drivers/gpu/drm/radeon/rv515.c
index fd799748e7d8..e53b5ca7a253 100644
--- a/drivers/gpu/drm/radeon/rv515.c
+++ b/drivers/gpu/drm/radeon/rv515.c
@@ -29,37 +29,17 @@
29#include "drmP.h" 29#include "drmP.h"
30#include "rv515d.h" 30#include "rv515d.h"
31#include "radeon.h" 31#include "radeon.h"
32 32#include "atom.h"
33#include "rv515_reg_safe.h" 33#include "rv515_reg_safe.h"
34/* rv515 depends on : */ 34
35void r100_hdp_reset(struct radeon_device *rdev); 35/* This files gather functions specifics to: rv515 */
36int r100_cp_reset(struct radeon_device *rdev);
37int r100_rb2d_reset(struct radeon_device *rdev);
38int r100_gui_wait_for_idle(struct radeon_device *rdev);
39int r100_cp_init(struct radeon_device *rdev, unsigned ring_size);
40void r420_pipes_init(struct radeon_device *rdev);
41void rs600_mc_disable_clients(struct radeon_device *rdev);
42void rs600_disable_vga(struct radeon_device *rdev);
43
44/* This files gather functions specifics to:
45 * rv515
46 *
47 * Some of these functions might be used by newer ASICs.
48 */
49int rv515_debugfs_pipes_info_init(struct radeon_device *rdev); 36int rv515_debugfs_pipes_info_init(struct radeon_device *rdev);
50int rv515_debugfs_ga_info_init(struct radeon_device *rdev); 37int rv515_debugfs_ga_info_init(struct radeon_device *rdev);
51void rv515_gpu_init(struct radeon_device *rdev); 38void rv515_gpu_init(struct radeon_device *rdev);
52int rv515_mc_wait_for_idle(struct radeon_device *rdev); 39int rv515_mc_wait_for_idle(struct radeon_device *rdev);
53 40
54 41void rv515_debugfs(struct radeon_device *rdev)
55/*
56 * MC
57 */
58int rv515_mc_init(struct radeon_device *rdev)
59{ 42{
60 uint32_t tmp;
61 int r;
62
63 if (r100_debugfs_rbbm_init(rdev)) { 43 if (r100_debugfs_rbbm_init(rdev)) {
64 DRM_ERROR("Failed to register debugfs file for RBBM !\n"); 44 DRM_ERROR("Failed to register debugfs file for RBBM !\n");
65 } 45 }
@@ -69,67 +49,8 @@ int rv515_mc_init(struct radeon_device *rdev)
69 if (rv515_debugfs_ga_info_init(rdev)) { 49 if (rv515_debugfs_ga_info_init(rdev)) {
70 DRM_ERROR("Failed to register debugfs file for pipes !\n"); 50 DRM_ERROR("Failed to register debugfs file for pipes !\n");
71 } 51 }
72
73 rv515_gpu_init(rdev);
74 rv370_pcie_gart_disable(rdev);
75
76 /* Setup GPU memory space */
77 rdev->mc.vram_location = 0xFFFFFFFFUL;
78 rdev->mc.gtt_location = 0xFFFFFFFFUL;
79 if (rdev->flags & RADEON_IS_AGP) {
80 r = radeon_agp_init(rdev);
81 if (r) {
82 printk(KERN_WARNING "[drm] Disabling AGP\n");
83 rdev->flags &= ~RADEON_IS_AGP;
84 rdev->mc.gtt_size = radeon_gart_size * 1024 * 1024;
85 } else {
86 rdev->mc.gtt_location = rdev->mc.agp_base;
87 }
88 }
89 r = radeon_mc_setup(rdev);
90 if (r) {
91 return r;
92 }
93
94 /* Program GPU memory space */
95 rs600_mc_disable_clients(rdev);
96 if (rv515_mc_wait_for_idle(rdev)) {
97 printk(KERN_WARNING "Failed to wait MC idle while "
98 "programming pipes. Bad things might happen.\n");
99 }
100 /* Write VRAM size in case we are limiting it */
101 WREG32(RADEON_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
102 tmp = REG_SET(MC_FB_START, rdev->mc.vram_location >> 16);
103 WREG32(0x134, tmp);
104 tmp = rdev->mc.vram_location + rdev->mc.mc_vram_size - 1;
105 tmp = REG_SET(MC_FB_TOP, tmp >> 16);
106 tmp |= REG_SET(MC_FB_START, rdev->mc.vram_location >> 16);
107 WREG32_MC(MC_FB_LOCATION, tmp);
108 WREG32(HDP_FB_LOCATION, rdev->mc.vram_location >> 16);
109 WREG32(0x310, rdev->mc.vram_location);
110 if (rdev->flags & RADEON_IS_AGP) {
111 tmp = rdev->mc.gtt_location + rdev->mc.gtt_size - 1;
112 tmp = REG_SET(MC_AGP_TOP, tmp >> 16);
113 tmp |= REG_SET(MC_AGP_START, rdev->mc.gtt_location >> 16);
114 WREG32_MC(MC_AGP_LOCATION, tmp);
115 WREG32_MC(MC_AGP_BASE, rdev->mc.agp_base);
116 WREG32_MC(MC_AGP_BASE_2, 0);
117 } else {
118 WREG32_MC(MC_AGP_LOCATION, 0x0FFFFFFF);
119 WREG32_MC(MC_AGP_BASE, 0);
120 WREG32_MC(MC_AGP_BASE_2, 0);
121 }
122 return 0;
123}
124
125void rv515_mc_fini(struct radeon_device *rdev)
126{
127} 52}
128 53
129
130/*
131 * Global GPU functions
132 */
133void rv515_ring_start(struct radeon_device *rdev) 54void rv515_ring_start(struct radeon_device *rdev)
134{ 55{
135 int r; 56 int r;
@@ -198,11 +119,6 @@ void rv515_ring_start(struct radeon_device *rdev)
198 radeon_ring_unlock_commit(rdev); 119 radeon_ring_unlock_commit(rdev);
199} 120}
200 121
201void rv515_errata(struct radeon_device *rdev)
202{
203 rdev->pll_errata = 0;
204}
205
206int rv515_mc_wait_for_idle(struct radeon_device *rdev) 122int rv515_mc_wait_for_idle(struct radeon_device *rdev)
207{ 123{
208 unsigned i; 124 unsigned i;
@@ -219,6 +135,12 @@ int rv515_mc_wait_for_idle(struct radeon_device *rdev)
219 return -1; 135 return -1;
220} 136}
221 137
138void rv515_vga_render_disable(struct radeon_device *rdev)
139{
140 WREG32(R_000300_VGA_RENDER_CONTROL,
141 RREG32(R_000300_VGA_RENDER_CONTROL) & C_000300_VGA_VSTATUS_CNTL);
142}
143
222void rv515_gpu_init(struct radeon_device *rdev) 144void rv515_gpu_init(struct radeon_device *rdev)
223{ 145{
224 unsigned pipe_select_current, gb_pipe_select, tmp; 146 unsigned pipe_select_current, gb_pipe_select, tmp;
@@ -231,7 +153,7 @@ void rv515_gpu_init(struct radeon_device *rdev)
231 "reseting GPU. Bad things might happen.\n"); 153 "reseting GPU. Bad things might happen.\n");
232 } 154 }
233 155
234 rs600_disable_vga(rdev); 156 rv515_vga_render_disable(rdev);
235 157
236 r420_pipes_init(rdev); 158 r420_pipes_init(rdev);
237 gb_pipe_select = RREG32(0x402C); 159 gb_pipe_select = RREG32(0x402C);
@@ -335,10 +257,6 @@ int rv515_gpu_reset(struct radeon_device *rdev)
335 return 0; 257 return 0;
336} 258}
337 259
338
339/*
340 * VRAM info
341 */
342static void rv515_vram_get_type(struct radeon_device *rdev) 260static void rv515_vram_get_type(struct radeon_device *rdev)
343{ 261{
344 uint32_t tmp; 262 uint32_t tmp;
@@ -374,10 +292,6 @@ void rv515_vram_info(struct radeon_device *rdev)
374 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a); 292 rdev->pm.sclk.full = rfixed_div(rdev->pm.sclk, a);
375} 293}
376 294
377
378/*
379 * Indirect registers accessor
380 */
381uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg) 295uint32_t rv515_mc_rreg(struct radeon_device *rdev, uint32_t reg)
382{ 296{
383 uint32_t r; 297 uint32_t r;
@@ -395,9 +309,6 @@ void rv515_mc_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v)
395 WREG32(MC_IND_INDEX, 0); 309 WREG32(MC_IND_INDEX, 0);
396} 310}
397 311
398/*
399 * Debugfs info
400 */
401#if defined(CONFIG_DEBUG_FS) 312#if defined(CONFIG_DEBUG_FS)
402static int rv515_debugfs_pipes_info(struct seq_file *m, void *data) 313static int rv515_debugfs_pipes_info(struct seq_file *m, void *data)
403{ 314{
@@ -459,13 +370,258 @@ int rv515_debugfs_ga_info_init(struct radeon_device *rdev)
459#endif 370#endif
460} 371}
461 372
462/* 373void rv515_mc_stop(struct radeon_device *rdev, struct rv515_mc_save *save)
463 * Asic initialization 374{
464 */ 375 save->d1vga_control = RREG32(R_000330_D1VGA_CONTROL);
465int rv515_init(struct radeon_device *rdev) 376 save->d2vga_control = RREG32(R_000338_D2VGA_CONTROL);
377 save->vga_render_control = RREG32(R_000300_VGA_RENDER_CONTROL);
378 save->vga_hdp_control = RREG32(R_000328_VGA_HDP_CONTROL);
379 save->d1crtc_control = RREG32(R_006080_D1CRTC_CONTROL);
380 save->d2crtc_control = RREG32(R_006880_D2CRTC_CONTROL);
381
382 /* Stop all video */
383 WREG32(R_000330_D1VGA_CONTROL, 0);
384 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
385 WREG32(R_000300_VGA_RENDER_CONTROL, 0);
386 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
387 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
388 WREG32(R_006080_D1CRTC_CONTROL, 0);
389 WREG32(R_006880_D2CRTC_CONTROL, 0);
390 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
391 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
392}
393
394void rv515_mc_resume(struct radeon_device *rdev, struct rv515_mc_save *save)
395{
396 WREG32(R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start);
397 WREG32(R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start);
398 WREG32(R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS, rdev->mc.vram_start);
399 WREG32(R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS, rdev->mc.vram_start);
400 WREG32(R_000310_VGA_MEMORY_BASE_ADDRESS, rdev->mc.vram_start);
401 /* Unlock host access */
402 WREG32(R_000328_VGA_HDP_CONTROL, save->vga_hdp_control);
403 mdelay(1);
404 /* Restore video state */
405 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 1);
406 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 1);
407 WREG32(R_006080_D1CRTC_CONTROL, save->d1crtc_control);
408 WREG32(R_006880_D2CRTC_CONTROL, save->d2crtc_control);
409 WREG32(R_0060E8_D1CRTC_UPDATE_LOCK, 0);
410 WREG32(R_0068E8_D2CRTC_UPDATE_LOCK, 0);
411 WREG32(R_000330_D1VGA_CONTROL, save->d1vga_control);
412 WREG32(R_000338_D2VGA_CONTROL, save->d2vga_control);
413 WREG32(R_000300_VGA_RENDER_CONTROL, save->vga_render_control);
414}
415
416void rv515_mc_program(struct radeon_device *rdev)
417{
418 struct rv515_mc_save save;
419
420 /* Stops all mc clients */
421 rv515_mc_stop(rdev, &save);
422
423 /* Wait for mc idle */
424 if (rv515_mc_wait_for_idle(rdev))
425 dev_warn(rdev->dev, "Wait MC idle timeout before updating MC.\n");
426 /* Write VRAM size in case we are limiting it */
427 WREG32(R_0000F8_CONFIG_MEMSIZE, rdev->mc.real_vram_size);
428 /* Program MC, should be a 32bits limited address space */
429 WREG32_MC(R_000001_MC_FB_LOCATION,
430 S_000001_MC_FB_START(rdev->mc.vram_start >> 16) |
431 S_000001_MC_FB_TOP(rdev->mc.vram_end >> 16));
432 WREG32(R_000134_HDP_FB_LOCATION,
433 S_000134_HDP_FB_START(rdev->mc.vram_start >> 16));
434 if (rdev->flags & RADEON_IS_AGP) {
435 WREG32_MC(R_000002_MC_AGP_LOCATION,
436 S_000002_MC_AGP_START(rdev->mc.gtt_start >> 16) |
437 S_000002_MC_AGP_TOP(rdev->mc.gtt_end >> 16));
438 WREG32_MC(R_000003_MC_AGP_BASE, lower_32_bits(rdev->mc.agp_base));
439 WREG32_MC(R_000004_MC_AGP_BASE_2,
440 S_000004_AGP_BASE_ADDR_2(upper_32_bits(rdev->mc.agp_base)));
441 } else {
442 WREG32_MC(R_000002_MC_AGP_LOCATION, 0xFFFFFFFF);
443 WREG32_MC(R_000003_MC_AGP_BASE, 0);
444 WREG32_MC(R_000004_MC_AGP_BASE_2, 0);
445 }
446
447 rv515_mc_resume(rdev, &save);
448}
449
450void rv515_clock_startup(struct radeon_device *rdev)
451{
452 if (radeon_dynclks != -1 && radeon_dynclks)
453 radeon_atom_set_clock_gating(rdev, 1);
454 /* We need to force on some of the block */
455 WREG32_PLL(R_00000F_CP_DYN_CNTL,
456 RREG32_PLL(R_00000F_CP_DYN_CNTL) | S_00000F_CP_FORCEON(1));
457 WREG32_PLL(R_000011_E2_DYN_CNTL,
458 RREG32_PLL(R_000011_E2_DYN_CNTL) | S_000011_E2_FORCEON(1));
459 WREG32_PLL(R_000013_IDCT_DYN_CNTL,
460 RREG32_PLL(R_000013_IDCT_DYN_CNTL) | S_000013_IDCT_FORCEON(1));
461}
462
463static int rv515_startup(struct radeon_device *rdev)
464{
465 int r;
466
467 rv515_mc_program(rdev);
468 /* Resume clock */
469 rv515_clock_startup(rdev);
470 /* Initialize GPU configuration (# pipes, ...) */
471 rv515_gpu_init(rdev);
472 /* Initialize GART (initialize after TTM so we can allocate
473 * memory through TTM but finalize after TTM) */
474 if (rdev->flags & RADEON_IS_PCIE) {
475 r = rv370_pcie_gart_enable(rdev);
476 if (r)
477 return r;
478 }
479 /* Enable IRQ */
480 rdev->irq.sw_int = true;
481 r100_irq_set(rdev);
482 /* 1M ring buffer */
483 r = r100_cp_init(rdev, 1024 * 1024);
484 if (r) {
485 dev_err(rdev->dev, "failled initializing CP (%d).\n", r);
486 return r;
487 }
488 r = r100_wb_init(rdev);
489 if (r)
490 dev_err(rdev->dev, "failled initializing WB (%d).\n", r);
491 r = r100_ib_init(rdev);
492 if (r) {
493 dev_err(rdev->dev, "failled initializing IB (%d).\n", r);
494 return r;
495 }
496 return 0;
497}
498
499int rv515_resume(struct radeon_device *rdev)
500{
501 /* Make sur GART are not working */
502 if (rdev->flags & RADEON_IS_PCIE)
503 rv370_pcie_gart_disable(rdev);
504 /* Resume clock before doing reset */
505 rv515_clock_startup(rdev);
506 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
507 if (radeon_gpu_reset(rdev)) {
508 dev_warn(rdev->dev, "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
509 RREG32(R_000E40_RBBM_STATUS),
510 RREG32(R_0007C0_CP_STAT));
511 }
512 /* post */
513 atom_asic_init(rdev->mode_info.atom_context);
514 /* Resume clock after posting */
515 rv515_clock_startup(rdev);
516 return rv515_startup(rdev);
517}
518
519int rv515_suspend(struct radeon_device *rdev)
520{
521 r100_cp_disable(rdev);
522 r100_wb_disable(rdev);
523 r100_irq_disable(rdev);
524 if (rdev->flags & RADEON_IS_PCIE)
525 rv370_pcie_gart_disable(rdev);
526 return 0;
527}
528
529void rv515_set_safe_registers(struct radeon_device *rdev)
466{ 530{
467 rdev->config.r300.reg_safe_bm = rv515_reg_safe_bm; 531 rdev->config.r300.reg_safe_bm = rv515_reg_safe_bm;
468 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rv515_reg_safe_bm); 532 rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(rv515_reg_safe_bm);
533}
534
535void rv515_fini(struct radeon_device *rdev)
536{
537 rv515_suspend(rdev);
538 r100_cp_fini(rdev);
539 r100_wb_fini(rdev);
540 r100_ib_fini(rdev);
541 radeon_gem_fini(rdev);
542 rv370_pcie_gart_fini(rdev);
543 radeon_agp_fini(rdev);
544 radeon_irq_kms_fini(rdev);
545 radeon_fence_driver_fini(rdev);
546 radeon_object_fini(rdev);
547 radeon_atombios_fini(rdev);
548 kfree(rdev->bios);
549 rdev->bios = NULL;
550}
551
552int rv515_init(struct radeon_device *rdev)
553{
554 int r;
555
556 rdev->new_init_path = true;
557 /* Initialize scratch registers */
558 radeon_scratch_init(rdev);
559 /* Initialize surface registers */
560 radeon_surface_init(rdev);
561 /* TODO: disable VGA need to use VGA request */
562 /* BIOS*/
563 if (!radeon_get_bios(rdev)) {
564 if (ASIC_IS_AVIVO(rdev))
565 return -EINVAL;
566 }
567 if (rdev->is_atom_bios) {
568 r = radeon_atombios_init(rdev);
569 if (r)
570 return r;
571 } else {
572 dev_err(rdev->dev, "Expecting atombios for RV515 GPU\n");
573 return -EINVAL;
574 }
575 /* Reset gpu before posting otherwise ATOM will enter infinite loop */
576 if (radeon_gpu_reset(rdev)) {
577 dev_warn(rdev->dev,
578 "GPU reset failed ! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
579 RREG32(R_000E40_RBBM_STATUS),
580 RREG32(R_0007C0_CP_STAT));
581 }
582 /* check if cards are posted or not */
583 if (!radeon_card_posted(rdev) && rdev->bios) {
584 DRM_INFO("GPU not posted. posting now...\n");
585 atom_asic_init(rdev->mode_info.atom_context);
586 }
587 /* Initialize clocks */
588 radeon_get_clock_info(rdev->ddev);
589 /* Get vram informations */
590 rv515_vram_info(rdev);
591 /* Initialize memory controller (also test AGP) */
592 r = r420_mc_init(rdev);
593 if (r)
594 return r;
595 rv515_debugfs(rdev);
596 /* Fence driver */
597 r = radeon_fence_driver_init(rdev);
598 if (r)
599 return r;
600 r = radeon_irq_kms_init(rdev);
601 if (r)
602 return r;
603 /* Memory manager */
604 r = radeon_object_init(rdev);
605 if (r)
606 return r;
607 r = rv370_pcie_gart_init(rdev);
608 if (r)
609 return r;
610 rv515_set_safe_registers(rdev);
611 rdev->accel_working = true;
612 r = rv515_startup(rdev);
613 if (r) {
614 /* Somethings want wront with the accel init stop accel */
615 dev_err(rdev->dev, "Disabling GPU acceleration\n");
616 rv515_suspend(rdev);
617 r100_cp_fini(rdev);
618 r100_wb_fini(rdev);
619 r100_ib_fini(rdev);
620 rv370_pcie_gart_fini(rdev);
621 radeon_agp_fini(rdev);
622 radeon_irq_kms_fini(rdev);
623 rdev->accel_working = false;
624 }
469 return 0; 625 return 0;
470} 626}
471 627
diff --git a/drivers/gpu/drm/radeon/rv515d.h b/drivers/gpu/drm/radeon/rv515d.h
index a65e17ec1c08..fc216e49384d 100644
--- a/drivers/gpu/drm/radeon/rv515d.h
+++ b/drivers/gpu/drm/radeon/rv515d.h
@@ -216,5 +216,388 @@
216#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1) 216#define CP_PACKET0_GET_ONE_REG_WR(h) (((h) >> 15) & 1)
217#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF) 217#define CP_PACKET3_GET_OPCODE(h) (((h) >> 8) & 0xFF)
218 218
219#endif 219/* Registers */
220#define R_0000F8_CONFIG_MEMSIZE 0x0000F8
221#define S_0000F8_CONFIG_MEMSIZE(x) (((x) & 0xFFFFFFFF) << 0)
222#define G_0000F8_CONFIG_MEMSIZE(x) (((x) >> 0) & 0xFFFFFFFF)
223#define C_0000F8_CONFIG_MEMSIZE 0x00000000
224#define R_000134_HDP_FB_LOCATION 0x000134
225#define S_000134_HDP_FB_START(x) (((x) & 0xFFFF) << 0)
226#define G_000134_HDP_FB_START(x) (((x) >> 0) & 0xFFFF)
227#define C_000134_HDP_FB_START 0xFFFF0000
228#define R_000300_VGA_RENDER_CONTROL 0x000300
229#define S_000300_VGA_BLINK_RATE(x) (((x) & 0x1F) << 0)
230#define G_000300_VGA_BLINK_RATE(x) (((x) >> 0) & 0x1F)
231#define C_000300_VGA_BLINK_RATE 0xFFFFFFE0
232#define S_000300_VGA_BLINK_MODE(x) (((x) & 0x3) << 5)
233#define G_000300_VGA_BLINK_MODE(x) (((x) >> 5) & 0x3)
234#define C_000300_VGA_BLINK_MODE 0xFFFFFF9F
235#define S_000300_VGA_CURSOR_BLINK_INVERT(x) (((x) & 0x1) << 7)
236#define G_000300_VGA_CURSOR_BLINK_INVERT(x) (((x) >> 7) & 0x1)
237#define C_000300_VGA_CURSOR_BLINK_INVERT 0xFFFFFF7F
238#define S_000300_VGA_EXTD_ADDR_COUNT_ENABLE(x) (((x) & 0x1) << 8)
239#define G_000300_VGA_EXTD_ADDR_COUNT_ENABLE(x) (((x) >> 8) & 0x1)
240#define C_000300_VGA_EXTD_ADDR_COUNT_ENABLE 0xFFFFFEFF
241#define S_000300_VGA_VSTATUS_CNTL(x) (((x) & 0x3) << 16)
242#define G_000300_VGA_VSTATUS_CNTL(x) (((x) >> 16) & 0x3)
243#define C_000300_VGA_VSTATUS_CNTL 0xFFFCFFFF
244#define S_000300_VGA_LOCK_8DOT(x) (((x) & 0x1) << 24)
245#define G_000300_VGA_LOCK_8DOT(x) (((x) >> 24) & 0x1)
246#define C_000300_VGA_LOCK_8DOT 0xFEFFFFFF
247#define S_000300_VGAREG_LINECMP_COMPATIBILITY_SEL(x) (((x) & 0x1) << 25)
248#define G_000300_VGAREG_LINECMP_COMPATIBILITY_SEL(x) (((x) >> 25) & 0x1)
249#define C_000300_VGAREG_LINECMP_COMPATIBILITY_SEL 0xFDFFFFFF
250#define R_000310_VGA_MEMORY_BASE_ADDRESS 0x000310
251#define S_000310_VGA_MEMORY_BASE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
252#define G_000310_VGA_MEMORY_BASE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
253#define C_000310_VGA_MEMORY_BASE_ADDRESS 0x00000000
254#define R_000328_VGA_HDP_CONTROL 0x000328
255#define S_000328_VGA_MEM_PAGE_SELECT_EN(x) (((x) & 0x1) << 0)
256#define G_000328_VGA_MEM_PAGE_SELECT_EN(x) (((x) >> 0) & 0x1)
257#define C_000328_VGA_MEM_PAGE_SELECT_EN 0xFFFFFFFE
258#define S_000328_VGA_RBBM_LOCK_DISABLE(x) (((x) & 0x1) << 8)
259#define G_000328_VGA_RBBM_LOCK_DISABLE(x) (((x) >> 8) & 0x1)
260#define C_000328_VGA_RBBM_LOCK_DISABLE 0xFFFFFEFF
261#define S_000328_VGA_SOFT_RESET(x) (((x) & 0x1) << 16)
262#define G_000328_VGA_SOFT_RESET(x) (((x) >> 16) & 0x1)
263#define C_000328_VGA_SOFT_RESET 0xFFFEFFFF
264#define S_000328_VGA_TEST_RESET_CONTROL(x) (((x) & 0x1) << 24)
265#define G_000328_VGA_TEST_RESET_CONTROL(x) (((x) >> 24) & 0x1)
266#define C_000328_VGA_TEST_RESET_CONTROL 0xFEFFFFFF
267#define R_000330_D1VGA_CONTROL 0x000330
268#define S_000330_D1VGA_MODE_ENABLE(x) (((x) & 0x1) << 0)
269#define G_000330_D1VGA_MODE_ENABLE(x) (((x) >> 0) & 0x1)
270#define C_000330_D1VGA_MODE_ENABLE 0xFFFFFFFE
271#define S_000330_D1VGA_TIMING_SELECT(x) (((x) & 0x1) << 8)
272#define G_000330_D1VGA_TIMING_SELECT(x) (((x) >> 8) & 0x1)
273#define C_000330_D1VGA_TIMING_SELECT 0xFFFFFEFF
274#define S_000330_D1VGA_SYNC_POLARITY_SELECT(x) (((x) & 0x1) << 9)
275#define G_000330_D1VGA_SYNC_POLARITY_SELECT(x) (((x) >> 9) & 0x1)
276#define C_000330_D1VGA_SYNC_POLARITY_SELECT 0xFFFFFDFF
277#define S_000330_D1VGA_OVERSCAN_TIMING_SELECT(x) (((x) & 0x1) << 10)
278#define G_000330_D1VGA_OVERSCAN_TIMING_SELECT(x) (((x) >> 10) & 0x1)
279#define C_000330_D1VGA_OVERSCAN_TIMING_SELECT 0xFFFFFBFF
280#define S_000330_D1VGA_OVERSCAN_COLOR_EN(x) (((x) & 0x1) << 16)
281#define G_000330_D1VGA_OVERSCAN_COLOR_EN(x) (((x) >> 16) & 0x1)
282#define C_000330_D1VGA_OVERSCAN_COLOR_EN 0xFFFEFFFF
283#define S_000330_D1VGA_ROTATE(x) (((x) & 0x3) << 24)
284#define G_000330_D1VGA_ROTATE(x) (((x) >> 24) & 0x3)
285#define C_000330_D1VGA_ROTATE 0xFCFFFFFF
286#define R_000338_D2VGA_CONTROL 0x000338
287#define S_000338_D2VGA_MODE_ENABLE(x) (((x) & 0x1) << 0)
288#define G_000338_D2VGA_MODE_ENABLE(x) (((x) >> 0) & 0x1)
289#define C_000338_D2VGA_MODE_ENABLE 0xFFFFFFFE
290#define S_000338_D2VGA_TIMING_SELECT(x) (((x) & 0x1) << 8)
291#define G_000338_D2VGA_TIMING_SELECT(x) (((x) >> 8) & 0x1)
292#define C_000338_D2VGA_TIMING_SELECT 0xFFFFFEFF
293#define S_000338_D2VGA_SYNC_POLARITY_SELECT(x) (((x) & 0x1) << 9)
294#define G_000338_D2VGA_SYNC_POLARITY_SELECT(x) (((x) >> 9) & 0x1)
295#define C_000338_D2VGA_SYNC_POLARITY_SELECT 0xFFFFFDFF
296#define S_000338_D2VGA_OVERSCAN_TIMING_SELECT(x) (((x) & 0x1) << 10)
297#define G_000338_D2VGA_OVERSCAN_TIMING_SELECT(x) (((x) >> 10) & 0x1)
298#define C_000338_D2VGA_OVERSCAN_TIMING_SELECT 0xFFFFFBFF
299#define S_000338_D2VGA_OVERSCAN_COLOR_EN(x) (((x) & 0x1) << 16)
300#define G_000338_D2VGA_OVERSCAN_COLOR_EN(x) (((x) >> 16) & 0x1)
301#define C_000338_D2VGA_OVERSCAN_COLOR_EN 0xFFFEFFFF
302#define S_000338_D2VGA_ROTATE(x) (((x) & 0x3) << 24)
303#define G_000338_D2VGA_ROTATE(x) (((x) >> 24) & 0x3)
304#define C_000338_D2VGA_ROTATE 0xFCFFFFFF
305#define R_0007C0_CP_STAT 0x0007C0
306#define S_0007C0_MRU_BUSY(x) (((x) & 0x1) << 0)
307#define G_0007C0_MRU_BUSY(x) (((x) >> 0) & 0x1)
308#define C_0007C0_MRU_BUSY 0xFFFFFFFE
309#define S_0007C0_MWU_BUSY(x) (((x) & 0x1) << 1)
310#define G_0007C0_MWU_BUSY(x) (((x) >> 1) & 0x1)
311#define C_0007C0_MWU_BUSY 0xFFFFFFFD
312#define S_0007C0_RSIU_BUSY(x) (((x) & 0x1) << 2)
313#define G_0007C0_RSIU_BUSY(x) (((x) >> 2) & 0x1)
314#define C_0007C0_RSIU_BUSY 0xFFFFFFFB
315#define S_0007C0_RCIU_BUSY(x) (((x) & 0x1) << 3)
316#define G_0007C0_RCIU_BUSY(x) (((x) >> 3) & 0x1)
317#define C_0007C0_RCIU_BUSY 0xFFFFFFF7
318#define S_0007C0_CSF_PRIMARY_BUSY(x) (((x) & 0x1) << 9)
319#define G_0007C0_CSF_PRIMARY_BUSY(x) (((x) >> 9) & 0x1)
320#define C_0007C0_CSF_PRIMARY_BUSY 0xFFFFFDFF
321#define S_0007C0_CSF_INDIRECT_BUSY(x) (((x) & 0x1) << 10)
322#define G_0007C0_CSF_INDIRECT_BUSY(x) (((x) >> 10) & 0x1)
323#define C_0007C0_CSF_INDIRECT_BUSY 0xFFFFFBFF
324#define S_0007C0_CSQ_PRIMARY_BUSY(x) (((x) & 0x1) << 11)
325#define G_0007C0_CSQ_PRIMARY_BUSY(x) (((x) >> 11) & 0x1)
326#define C_0007C0_CSQ_PRIMARY_BUSY 0xFFFFF7FF
327#define S_0007C0_CSQ_INDIRECT_BUSY(x) (((x) & 0x1) << 12)
328#define G_0007C0_CSQ_INDIRECT_BUSY(x) (((x) >> 12) & 0x1)
329#define C_0007C0_CSQ_INDIRECT_BUSY 0xFFFFEFFF
330#define S_0007C0_CSI_BUSY(x) (((x) & 0x1) << 13)
331#define G_0007C0_CSI_BUSY(x) (((x) >> 13) & 0x1)
332#define C_0007C0_CSI_BUSY 0xFFFFDFFF
333#define S_0007C0_CSF_INDIRECT2_BUSY(x) (((x) & 0x1) << 14)
334#define G_0007C0_CSF_INDIRECT2_BUSY(x) (((x) >> 14) & 0x1)
335#define C_0007C0_CSF_INDIRECT2_BUSY 0xFFFFBFFF
336#define S_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) & 0x1) << 15)
337#define G_0007C0_CSQ_INDIRECT2_BUSY(x) (((x) >> 15) & 0x1)
338#define C_0007C0_CSQ_INDIRECT2_BUSY 0xFFFF7FFF
339#define S_0007C0_GUIDMA_BUSY(x) (((x) & 0x1) << 28)
340#define G_0007C0_GUIDMA_BUSY(x) (((x) >> 28) & 0x1)
341#define C_0007C0_GUIDMA_BUSY 0xEFFFFFFF
342#define S_0007C0_VIDDMA_BUSY(x) (((x) & 0x1) << 29)
343#define G_0007C0_VIDDMA_BUSY(x) (((x) >> 29) & 0x1)
344#define C_0007C0_VIDDMA_BUSY 0xDFFFFFFF
345#define S_0007C0_CMDSTRM_BUSY(x) (((x) & 0x1) << 30)
346#define G_0007C0_CMDSTRM_BUSY(x) (((x) >> 30) & 0x1)
347#define C_0007C0_CMDSTRM_BUSY 0xBFFFFFFF
348#define S_0007C0_CP_BUSY(x) (((x) & 0x1) << 31)
349#define G_0007C0_CP_BUSY(x) (((x) >> 31) & 0x1)
350#define C_0007C0_CP_BUSY 0x7FFFFFFF
351#define R_000E40_RBBM_STATUS 0x000E40
352#define S_000E40_CMDFIFO_AVAIL(x) (((x) & 0x7F) << 0)
353#define G_000E40_CMDFIFO_AVAIL(x) (((x) >> 0) & 0x7F)
354#define C_000E40_CMDFIFO_AVAIL 0xFFFFFF80
355#define S_000E40_HIRQ_ON_RBB(x) (((x) & 0x1) << 8)
356#define G_000E40_HIRQ_ON_RBB(x) (((x) >> 8) & 0x1)
357#define C_000E40_HIRQ_ON_RBB 0xFFFFFEFF
358#define S_000E40_CPRQ_ON_RBB(x) (((x) & 0x1) << 9)
359#define G_000E40_CPRQ_ON_RBB(x) (((x) >> 9) & 0x1)
360#define C_000E40_CPRQ_ON_RBB 0xFFFFFDFF
361#define S_000E40_CFRQ_ON_RBB(x) (((x) & 0x1) << 10)
362#define G_000E40_CFRQ_ON_RBB(x) (((x) >> 10) & 0x1)
363#define C_000E40_CFRQ_ON_RBB 0xFFFFFBFF
364#define S_000E40_HIRQ_IN_RTBUF(x) (((x) & 0x1) << 11)
365#define G_000E40_HIRQ_IN_RTBUF(x) (((x) >> 11) & 0x1)
366#define C_000E40_HIRQ_IN_RTBUF 0xFFFFF7FF
367#define S_000E40_CPRQ_IN_RTBUF(x) (((x) & 0x1) << 12)
368#define G_000E40_CPRQ_IN_RTBUF(x) (((x) >> 12) & 0x1)
369#define C_000E40_CPRQ_IN_RTBUF 0xFFFFEFFF
370#define S_000E40_CFRQ_IN_RTBUF(x) (((x) & 0x1) << 13)
371#define G_000E40_CFRQ_IN_RTBUF(x) (((x) >> 13) & 0x1)
372#define C_000E40_CFRQ_IN_RTBUF 0xFFFFDFFF
373#define S_000E40_CF_PIPE_BUSY(x) (((x) & 0x1) << 14)
374#define G_000E40_CF_PIPE_BUSY(x) (((x) >> 14) & 0x1)
375#define C_000E40_CF_PIPE_BUSY 0xFFFFBFFF
376#define S_000E40_ENG_EV_BUSY(x) (((x) & 0x1) << 15)
377#define G_000E40_ENG_EV_BUSY(x) (((x) >> 15) & 0x1)
378#define C_000E40_ENG_EV_BUSY 0xFFFF7FFF
379#define S_000E40_CP_CMDSTRM_BUSY(x) (((x) & 0x1) << 16)
380#define G_000E40_CP_CMDSTRM_BUSY(x) (((x) >> 16) & 0x1)
381#define C_000E40_CP_CMDSTRM_BUSY 0xFFFEFFFF
382#define S_000E40_E2_BUSY(x) (((x) & 0x1) << 17)
383#define G_000E40_E2_BUSY(x) (((x) >> 17) & 0x1)
384#define C_000E40_E2_BUSY 0xFFFDFFFF
385#define S_000E40_RB2D_BUSY(x) (((x) & 0x1) << 18)
386#define G_000E40_RB2D_BUSY(x) (((x) >> 18) & 0x1)
387#define C_000E40_RB2D_BUSY 0xFFFBFFFF
388#define S_000E40_RB3D_BUSY(x) (((x) & 0x1) << 19)
389#define G_000E40_RB3D_BUSY(x) (((x) >> 19) & 0x1)
390#define C_000E40_RB3D_BUSY 0xFFF7FFFF
391#define S_000E40_VAP_BUSY(x) (((x) & 0x1) << 20)
392#define G_000E40_VAP_BUSY(x) (((x) >> 20) & 0x1)
393#define C_000E40_VAP_BUSY 0xFFEFFFFF
394#define S_000E40_RE_BUSY(x) (((x) & 0x1) << 21)
395#define G_000E40_RE_BUSY(x) (((x) >> 21) & 0x1)
396#define C_000E40_RE_BUSY 0xFFDFFFFF
397#define S_000E40_TAM_BUSY(x) (((x) & 0x1) << 22)
398#define G_000E40_TAM_BUSY(x) (((x) >> 22) & 0x1)
399#define C_000E40_TAM_BUSY 0xFFBFFFFF
400#define S_000E40_TDM_BUSY(x) (((x) & 0x1) << 23)
401#define G_000E40_TDM_BUSY(x) (((x) >> 23) & 0x1)
402#define C_000E40_TDM_BUSY 0xFF7FFFFF
403#define S_000E40_PB_BUSY(x) (((x) & 0x1) << 24)
404#define G_000E40_PB_BUSY(x) (((x) >> 24) & 0x1)
405#define C_000E40_PB_BUSY 0xFEFFFFFF
406#define S_000E40_TIM_BUSY(x) (((x) & 0x1) << 25)
407#define G_000E40_TIM_BUSY(x) (((x) >> 25) & 0x1)
408#define C_000E40_TIM_BUSY 0xFDFFFFFF
409#define S_000E40_GA_BUSY(x) (((x) & 0x1) << 26)
410#define G_000E40_GA_BUSY(x) (((x) >> 26) & 0x1)
411#define C_000E40_GA_BUSY 0xFBFFFFFF
412#define S_000E40_CBA2D_BUSY(x) (((x) & 0x1) << 27)
413#define G_000E40_CBA2D_BUSY(x) (((x) >> 27) & 0x1)
414#define C_000E40_CBA2D_BUSY 0xF7FFFFFF
415#define S_000E40_RBBM_HIBUSY(x) (((x) & 0x1) << 28)
416#define G_000E40_RBBM_HIBUSY(x) (((x) >> 28) & 0x1)
417#define C_000E40_RBBM_HIBUSY 0xEFFFFFFF
418#define S_000E40_SKID_CFBUSY(x) (((x) & 0x1) << 29)
419#define G_000E40_SKID_CFBUSY(x) (((x) >> 29) & 0x1)
420#define C_000E40_SKID_CFBUSY 0xDFFFFFFF
421#define S_000E40_VAP_VF_BUSY(x) (((x) & 0x1) << 30)
422#define G_000E40_VAP_VF_BUSY(x) (((x) >> 30) & 0x1)
423#define C_000E40_VAP_VF_BUSY 0xBFFFFFFF
424#define S_000E40_GUI_ACTIVE(x) (((x) & 0x1) << 31)
425#define G_000E40_GUI_ACTIVE(x) (((x) >> 31) & 0x1)
426#define C_000E40_GUI_ACTIVE 0x7FFFFFFF
427#define R_006080_D1CRTC_CONTROL 0x006080
428#define S_006080_D1CRTC_MASTER_EN(x) (((x) & 0x1) << 0)
429#define G_006080_D1CRTC_MASTER_EN(x) (((x) >> 0) & 0x1)
430#define C_006080_D1CRTC_MASTER_EN 0xFFFFFFFE
431#define S_006080_D1CRTC_SYNC_RESET_SEL(x) (((x) & 0x1) << 4)
432#define G_006080_D1CRTC_SYNC_RESET_SEL(x) (((x) >> 4) & 0x1)
433#define C_006080_D1CRTC_SYNC_RESET_SEL 0xFFFFFFEF
434#define S_006080_D1CRTC_DISABLE_POINT_CNTL(x) (((x) & 0x3) << 8)
435#define G_006080_D1CRTC_DISABLE_POINT_CNTL(x) (((x) >> 8) & 0x3)
436#define C_006080_D1CRTC_DISABLE_POINT_CNTL 0xFFFFFCFF
437#define S_006080_D1CRTC_CURRENT_MASTER_EN_STATE(x) (((x) & 0x1) << 16)
438#define G_006080_D1CRTC_CURRENT_MASTER_EN_STATE(x) (((x) >> 16) & 0x1)
439#define C_006080_D1CRTC_CURRENT_MASTER_EN_STATE 0xFFFEFFFF
440#define S_006080_D1CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) & 0x1) << 24)
441#define G_006080_D1CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) >> 24) & 0x1)
442#define C_006080_D1CRTC_DISP_READ_REQUEST_DISABLE 0xFEFFFFFF
443#define R_0060E8_D1CRTC_UPDATE_LOCK 0x0060E8
444#define S_0060E8_D1CRTC_UPDATE_LOCK(x) (((x) & 0x1) << 0)
445#define G_0060E8_D1CRTC_UPDATE_LOCK(x) (((x) >> 0) & 0x1)
446#define C_0060E8_D1CRTC_UPDATE_LOCK 0xFFFFFFFE
447#define R_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x006110
448#define S_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
449#define G_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
450#define C_006110_D1GRPH_PRIMARY_SURFACE_ADDRESS 0x00000000
451#define R_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS 0x006118
452#define S_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
453#define G_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
454#define C_006118_D1GRPH_SECONDARY_SURFACE_ADDRESS 0x00000000
455#define R_006880_D2CRTC_CONTROL 0x006880
456#define S_006880_D2CRTC_MASTER_EN(x) (((x) & 0x1) << 0)
457#define G_006880_D2CRTC_MASTER_EN(x) (((x) >> 0) & 0x1)
458#define C_006880_D2CRTC_MASTER_EN 0xFFFFFFFE
459#define S_006880_D2CRTC_SYNC_RESET_SEL(x) (((x) & 0x1) << 4)
460#define G_006880_D2CRTC_SYNC_RESET_SEL(x) (((x) >> 4) & 0x1)
461#define C_006880_D2CRTC_SYNC_RESET_SEL 0xFFFFFFEF
462#define S_006880_D2CRTC_DISABLE_POINT_CNTL(x) (((x) & 0x3) << 8)
463#define G_006880_D2CRTC_DISABLE_POINT_CNTL(x) (((x) >> 8) & 0x3)
464#define C_006880_D2CRTC_DISABLE_POINT_CNTL 0xFFFFFCFF
465#define S_006880_D2CRTC_CURRENT_MASTER_EN_STATE(x) (((x) & 0x1) << 16)
466#define G_006880_D2CRTC_CURRENT_MASTER_EN_STATE(x) (((x) >> 16) & 0x1)
467#define C_006880_D2CRTC_CURRENT_MASTER_EN_STATE 0xFFFEFFFF
468#define S_006880_D2CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) & 0x1) << 24)
469#define G_006880_D2CRTC_DISP_READ_REQUEST_DISABLE(x) (((x) >> 24) & 0x1)
470#define C_006880_D2CRTC_DISP_READ_REQUEST_DISABLE 0xFEFFFFFF
471#define R_0068E8_D2CRTC_UPDATE_LOCK 0x0068E8
472#define S_0068E8_D2CRTC_UPDATE_LOCK(x) (((x) & 0x1) << 0)
473#define G_0068E8_D2CRTC_UPDATE_LOCK(x) (((x) >> 0) & 0x1)
474#define C_0068E8_D2CRTC_UPDATE_LOCK 0xFFFFFFFE
475#define R_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS 0x006910
476#define S_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
477#define G_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
478#define C_006910_D2GRPH_PRIMARY_SURFACE_ADDRESS 0x00000000
479#define R_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS 0x006918
480#define S_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) & 0xFFFFFFFF) << 0)
481#define G_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS(x) (((x) >> 0) & 0xFFFFFFFF)
482#define C_006918_D2GRPH_SECONDARY_SURFACE_ADDRESS 0x00000000
483
484
485#define R_000001_MC_FB_LOCATION 0x000001
486#define S_000001_MC_FB_START(x) (((x) & 0xFFFF) << 0)
487#define G_000001_MC_FB_START(x) (((x) >> 0) & 0xFFFF)
488#define C_000001_MC_FB_START 0xFFFF0000
489#define S_000001_MC_FB_TOP(x) (((x) & 0xFFFF) << 16)
490#define G_000001_MC_FB_TOP(x) (((x) >> 16) & 0xFFFF)
491#define C_000001_MC_FB_TOP 0x0000FFFF
492#define R_000002_MC_AGP_LOCATION 0x000002
493#define S_000002_MC_AGP_START(x) (((x) & 0xFFFF) << 0)
494#define G_000002_MC_AGP_START(x) (((x) >> 0) & 0xFFFF)
495#define C_000002_MC_AGP_START 0xFFFF0000
496#define S_000002_MC_AGP_TOP(x) (((x) & 0xFFFF) << 16)
497#define G_000002_MC_AGP_TOP(x) (((x) >> 16) & 0xFFFF)
498#define C_000002_MC_AGP_TOP 0x0000FFFF
499#define R_000003_MC_AGP_BASE 0x000003
500#define S_000003_AGP_BASE_ADDR(x) (((x) & 0xFFFFFFFF) << 0)
501#define G_000003_AGP_BASE_ADDR(x) (((x) >> 0) & 0xFFFFFFFF)
502#define C_000003_AGP_BASE_ADDR 0x00000000
503#define R_000004_MC_AGP_BASE_2 0x000004
504#define S_000004_AGP_BASE_ADDR_2(x) (((x) & 0xF) << 0)
505#define G_000004_AGP_BASE_ADDR_2(x) (((x) >> 0) & 0xF)
506#define C_000004_AGP_BASE_ADDR_2 0xFFFFFFF0
220 507
508
509#define R_00000F_CP_DYN_CNTL 0x00000F
510#define S_00000F_CP_FORCEON(x) (((x) & 0x1) << 0)
511#define G_00000F_CP_FORCEON(x) (((x) >> 0) & 0x1)
512#define C_00000F_CP_FORCEON 0xFFFFFFFE
513#define S_00000F_CP_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 1)
514#define G_00000F_CP_MAX_DYN_STOP_LAT(x) (((x) >> 1) & 0x1)
515#define C_00000F_CP_MAX_DYN_STOP_LAT 0xFFFFFFFD
516#define S_00000F_CP_CLOCK_STATUS(x) (((x) & 0x1) << 2)
517#define G_00000F_CP_CLOCK_STATUS(x) (((x) >> 2) & 0x1)
518#define C_00000F_CP_CLOCK_STATUS 0xFFFFFFFB
519#define S_00000F_CP_PROG_SHUTOFF(x) (((x) & 0x1) << 3)
520#define G_00000F_CP_PROG_SHUTOFF(x) (((x) >> 3) & 0x1)
521#define C_00000F_CP_PROG_SHUTOFF 0xFFFFFFF7
522#define S_00000F_CP_PROG_DELAY_VALUE(x) (((x) & 0xFF) << 4)
523#define G_00000F_CP_PROG_DELAY_VALUE(x) (((x) >> 4) & 0xFF)
524#define C_00000F_CP_PROG_DELAY_VALUE 0xFFFFF00F
525#define S_00000F_CP_LOWER_POWER_IDLE(x) (((x) & 0xFF) << 12)
526#define G_00000F_CP_LOWER_POWER_IDLE(x) (((x) >> 12) & 0xFF)
527#define C_00000F_CP_LOWER_POWER_IDLE 0xFFF00FFF
528#define S_00000F_CP_LOWER_POWER_IGNORE(x) (((x) & 0x1) << 20)
529#define G_00000F_CP_LOWER_POWER_IGNORE(x) (((x) >> 20) & 0x1)
530#define C_00000F_CP_LOWER_POWER_IGNORE 0xFFEFFFFF
531#define S_00000F_CP_NORMAL_POWER_IGNORE(x) (((x) & 0x1) << 21)
532#define G_00000F_CP_NORMAL_POWER_IGNORE(x) (((x) >> 21) & 0x1)
533#define C_00000F_CP_NORMAL_POWER_IGNORE 0xFFDFFFFF
534#define S_00000F_SPARE(x) (((x) & 0x3) << 22)
535#define G_00000F_SPARE(x) (((x) >> 22) & 0x3)
536#define C_00000F_SPARE 0xFF3FFFFF
537#define S_00000F_CP_NORMAL_POWER_BUSY(x) (((x) & 0xFF) << 24)
538#define G_00000F_CP_NORMAL_POWER_BUSY(x) (((x) >> 24) & 0xFF)
539#define C_00000F_CP_NORMAL_POWER_BUSY 0x00FFFFFF
540#define R_000011_E2_DYN_CNTL 0x000011
541#define S_000011_E2_FORCEON(x) (((x) & 0x1) << 0)
542#define G_000011_E2_FORCEON(x) (((x) >> 0) & 0x1)
543#define C_000011_E2_FORCEON 0xFFFFFFFE
544#define S_000011_E2_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 1)
545#define G_000011_E2_MAX_DYN_STOP_LAT(x) (((x) >> 1) & 0x1)
546#define C_000011_E2_MAX_DYN_STOP_LAT 0xFFFFFFFD
547#define S_000011_E2_CLOCK_STATUS(x) (((x) & 0x1) << 2)
548#define G_000011_E2_CLOCK_STATUS(x) (((x) >> 2) & 0x1)
549#define C_000011_E2_CLOCK_STATUS 0xFFFFFFFB
550#define S_000011_E2_PROG_SHUTOFF(x) (((x) & 0x1) << 3)
551#define G_000011_E2_PROG_SHUTOFF(x) (((x) >> 3) & 0x1)
552#define C_000011_E2_PROG_SHUTOFF 0xFFFFFFF7
553#define S_000011_E2_PROG_DELAY_VALUE(x) (((x) & 0xFF) << 4)
554#define G_000011_E2_PROG_DELAY_VALUE(x) (((x) >> 4) & 0xFF)
555#define C_000011_E2_PROG_DELAY_VALUE 0xFFFFF00F
556#define S_000011_E2_LOWER_POWER_IDLE(x) (((x) & 0xFF) << 12)
557#define G_000011_E2_LOWER_POWER_IDLE(x) (((x) >> 12) & 0xFF)
558#define C_000011_E2_LOWER_POWER_IDLE 0xFFF00FFF
559#define S_000011_E2_LOWER_POWER_IGNORE(x) (((x) & 0x1) << 20)
560#define G_000011_E2_LOWER_POWER_IGNORE(x) (((x) >> 20) & 0x1)
561#define C_000011_E2_LOWER_POWER_IGNORE 0xFFEFFFFF
562#define S_000011_E2_NORMAL_POWER_IGNORE(x) (((x) & 0x1) << 21)
563#define G_000011_E2_NORMAL_POWER_IGNORE(x) (((x) >> 21) & 0x1)
564#define C_000011_E2_NORMAL_POWER_IGNORE 0xFFDFFFFF
565#define S_000011_SPARE(x) (((x) & 0x3) << 22)
566#define G_000011_SPARE(x) (((x) >> 22) & 0x3)
567#define C_000011_SPARE 0xFF3FFFFF
568#define S_000011_E2_NORMAL_POWER_BUSY(x) (((x) & 0xFF) << 24)
569#define G_000011_E2_NORMAL_POWER_BUSY(x) (((x) >> 24) & 0xFF)
570#define C_000011_E2_NORMAL_POWER_BUSY 0x00FFFFFF
571#define R_000013_IDCT_DYN_CNTL 0x000013
572#define S_000013_IDCT_FORCEON(x) (((x) & 0x1) << 0)
573#define G_000013_IDCT_FORCEON(x) (((x) >> 0) & 0x1)
574#define C_000013_IDCT_FORCEON 0xFFFFFFFE
575#define S_000013_IDCT_MAX_DYN_STOP_LAT(x) (((x) & 0x1) << 1)
576#define G_000013_IDCT_MAX_DYN_STOP_LAT(x) (((x) >> 1) & 0x1)
577#define C_000013_IDCT_MAX_DYN_STOP_LAT 0xFFFFFFFD
578#define S_000013_IDCT_CLOCK_STATUS(x) (((x) & 0x1) << 2)
579#define G_000013_IDCT_CLOCK_STATUS(x) (((x) >> 2) & 0x1)
580#define C_000013_IDCT_CLOCK_STATUS 0xFFFFFFFB
581#define S_000013_IDCT_PROG_SHUTOFF(x) (((x) & 0x1) << 3)
582#define G_000013_IDCT_PROG_SHUTOFF(x) (((x) >> 3) & 0x1)
583#define C_000013_IDCT_PROG_SHUTOFF 0xFFFFFFF7
584#define S_000013_IDCT_PROG_DELAY_VALUE(x) (((x) & 0xFF) << 4)
585#define G_000013_IDCT_PROG_DELAY_VALUE(x) (((x) >> 4) & 0xFF)
586#define C_000013_IDCT_PROG_DELAY_VALUE 0xFFFFF00F
587#define S_000013_IDCT_LOWER_POWER_IDLE(x) (((x) & 0xFF) << 12)
588#define G_000013_IDCT_LOWER_POWER_IDLE(x) (((x) >> 12) & 0xFF)
589#define C_000013_IDCT_LOWER_POWER_IDLE 0xFFF00FFF
590#define S_000013_IDCT_LOWER_POWER_IGNORE(x) (((x) & 0x1) << 20)
591#define G_000013_IDCT_LOWER_POWER_IGNORE(x) (((x) >> 20) & 0x1)
592#define C_000013_IDCT_LOWER_POWER_IGNORE 0xFFEFFFFF
593#define S_000013_IDCT_NORMAL_POWER_IGNORE(x) (((x) & 0x1) << 21)
594#define G_000013_IDCT_NORMAL_POWER_IGNORE(x) (((x) >> 21) & 0x1)
595#define C_000013_IDCT_NORMAL_POWER_IGNORE 0xFFDFFFFF
596#define S_000013_SPARE(x) (((x) & 0x3) << 22)
597#define G_000013_SPARE(x) (((x) >> 22) & 0x3)
598#define C_000013_SPARE 0xFF3FFFFF
599#define S_000013_IDCT_NORMAL_POWER_BUSY(x) (((x) & 0xFF) << 24)
600#define G_000013_IDCT_NORMAL_POWER_BUSY(x) (((x) >> 24) & 0xFF)
601#define C_000013_IDCT_NORMAL_POWER_BUSY 0x00FFFFFF
602
603#endif
diff --git a/drivers/gpu/drm/radeon/rv770.c b/drivers/gpu/drm/radeon/rv770.c
index b574c73a5109..e0b97d161397 100644
--- a/drivers/gpu/drm/radeon/rv770.c
+++ b/drivers/gpu/drm/radeon/rv770.c
@@ -31,8 +31,8 @@
31#include "radeon.h" 31#include "radeon.h"
32#include "radeon_drm.h" 32#include "radeon_drm.h"
33#include "rv770d.h" 33#include "rv770d.h"
34#include "avivod.h"
35#include "atom.h" 34#include "atom.h"
35#include "avivod.h"
36 36
37#define R700_PFP_UCODE_SIZE 848 37#define R700_PFP_UCODE_SIZE 848
38#define R700_PM4_UCODE_SIZE 1360 38#define R700_PM4_UCODE_SIZE 1360
@@ -231,7 +231,7 @@ static void rv770_mc_resume(struct radeon_device *rdev)
231 231
232 /* we need to own VRAM, so turn off the VGA renderer here 232 /* we need to own VRAM, so turn off the VGA renderer here
233 * to stop it overwriting our objects */ 233 * to stop it overwriting our objects */
234 radeon_avivo_vga_render_disable(rdev); 234 rv515_vga_render_disable(rdev);
235} 235}
236 236
237 237
@@ -801,6 +801,13 @@ int rv770_mc_init(struct radeon_device *rdev)
801 /* Setup GPU memory space */ 801 /* Setup GPU memory space */
802 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE); 802 rdev->mc.mc_vram_size = RREG32(CONFIG_MEMSIZE);
803 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE); 803 rdev->mc.real_vram_size = RREG32(CONFIG_MEMSIZE);
804
805 if (rdev->mc.mc_vram_size > rdev->mc.aper_size)
806 rdev->mc.mc_vram_size = rdev->mc.aper_size;
807
808 if (rdev->mc.real_vram_size > rdev->mc.aper_size)
809 rdev->mc.real_vram_size = rdev->mc.aper_size;
810
804 if (rdev->flags & RADEON_IS_AGP) { 811 if (rdev->flags & RADEON_IS_AGP) {
805 r = radeon_agp_init(rdev); 812 r = radeon_agp_init(rdev);
806 if (r) 813 if (r)
diff --git a/drivers/gpu/drm/ttm/ttm_bo_vm.c b/drivers/gpu/drm/ttm/ttm_bo_vm.c
index 33de7637c0c6..1c040d040338 100644
--- a/drivers/gpu/drm/ttm/ttm_bo_vm.c
+++ b/drivers/gpu/drm/ttm/ttm_bo_vm.c
@@ -228,7 +228,7 @@ static void ttm_bo_vm_close(struct vm_area_struct *vma)
228 vma->vm_private_data = NULL; 228 vma->vm_private_data = NULL;
229} 229}
230 230
231static struct vm_operations_struct ttm_bo_vm_ops = { 231static const struct vm_operations_struct ttm_bo_vm_ops = {
232 .fault = ttm_bo_vm_fault, 232 .fault = ttm_bo_vm_fault,
233 .open = ttm_bo_vm_open, 233 .open = ttm_bo_vm_open,
234 .close = ttm_bo_vm_close 234 .close = ttm_bo_vm_close
diff --git a/drivers/hid/hidraw.c b/drivers/hid/hidraw.c
index 0c6639ea03dd..ba05275e5104 100644
--- a/drivers/hid/hidraw.c
+++ b/drivers/hid/hidraw.c
@@ -30,6 +30,7 @@
30#include <linux/major.h> 30#include <linux/major.h>
31#include <linux/hid.h> 31#include <linux/hid.h>
32#include <linux/mutex.h> 32#include <linux/mutex.h>
33#include <linux/sched.h>
33#include <linux/smp_lock.h> 34#include <linux/smp_lock.h>
34 35
35#include <linux/hidraw.h> 36#include <linux/hidraw.h>
diff --git a/drivers/hwmon/fschmd.c b/drivers/hwmon/fschmd.c
index ea955edde87e..2a7a85a6dc36 100644
--- a/drivers/hwmon/fschmd.c
+++ b/drivers/hwmon/fschmd.c
@@ -915,7 +915,7 @@ static int watchdog_ioctl(struct inode *inode, struct file *filp,
915 return ret; 915 return ret;
916} 916}
917 917
918static struct file_operations watchdog_fops = { 918static const struct file_operations watchdog_fops = {
919 .owner = THIS_MODULE, 919 .owner = THIS_MODULE,
920 .llseek = no_llseek, 920 .llseek = no_llseek,
921 .open = watchdog_open, 921 .open = watchdog_open,
diff --git a/drivers/hwmon/ltc4215.c b/drivers/hwmon/ltc4215.c
index 6c9a04136e0a..00d975eb5b83 100644
--- a/drivers/hwmon/ltc4215.c
+++ b/drivers/hwmon/ltc4215.c
@@ -20,11 +20,6 @@
20#include <linux/hwmon.h> 20#include <linux/hwmon.h>
21#include <linux/hwmon-sysfs.h> 21#include <linux/hwmon-sysfs.h>
22 22
23static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
24
25/* Insmod parameters */
26I2C_CLIENT_INSMOD_1(ltc4215);
27
28/* Here are names of the chip's registers (a.k.a. commands) */ 23/* Here are names of the chip's registers (a.k.a. commands) */
29enum ltc4215_cmd { 24enum ltc4215_cmd {
30 LTC4215_CONTROL = 0x00, /* rw */ 25 LTC4215_CONTROL = 0x00, /* rw */
@@ -246,9 +241,13 @@ static const struct attribute_group ltc4215_group = {
246static int ltc4215_probe(struct i2c_client *client, 241static int ltc4215_probe(struct i2c_client *client,
247 const struct i2c_device_id *id) 242 const struct i2c_device_id *id)
248{ 243{
244 struct i2c_adapter *adapter = client->adapter;
249 struct ltc4215_data *data; 245 struct ltc4215_data *data;
250 int ret; 246 int ret;
251 247
248 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
249 return -ENODEV;
250
252 data = kzalloc(sizeof(*data), GFP_KERNEL); 251 data = kzalloc(sizeof(*data), GFP_KERNEL);
253 if (!data) { 252 if (!data) {
254 ret = -ENOMEM; 253 ret = -ENOMEM;
@@ -294,56 +293,20 @@ static int ltc4215_remove(struct i2c_client *client)
294 return 0; 293 return 0;
295} 294}
296 295
297static int ltc4215_detect(struct i2c_client *client,
298 int kind,
299 struct i2c_board_info *info)
300{
301 struct i2c_adapter *adapter = client->adapter;
302
303 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
304 return -ENODEV;
305
306 if (kind < 0) { /* probed detection - check the chip type */
307 s32 v; /* 8 bits from the chip, or -ERRNO */
308
309 /*
310 * Register 0x01 bit b7 is reserved, expect 0
311 * Register 0x03 bit b6 and b7 are reserved, expect 0
312 */
313 v = i2c_smbus_read_byte_data(client, LTC4215_ALERT);
314 if (v < 0 || (v & (1 << 7)) != 0)
315 return -ENODEV;
316
317 v = i2c_smbus_read_byte_data(client, LTC4215_FAULT);
318 if (v < 0 || (v & ((1 << 6) | (1 << 7))) != 0)
319 return -ENODEV;
320 }
321
322 strlcpy(info->type, "ltc4215", I2C_NAME_SIZE);
323 dev_info(&adapter->dev, "ltc4215 %s at address 0x%02x\n",
324 kind < 0 ? "probed" : "forced",
325 client->addr);
326
327 return 0;
328}
329
330static const struct i2c_device_id ltc4215_id[] = { 296static const struct i2c_device_id ltc4215_id[] = {
331 { "ltc4215", ltc4215 }, 297 { "ltc4215", 0 },
332 { } 298 { }
333}; 299};
334MODULE_DEVICE_TABLE(i2c, ltc4215_id); 300MODULE_DEVICE_TABLE(i2c, ltc4215_id);
335 301
336/* This is the driver that will be inserted */ 302/* This is the driver that will be inserted */
337static struct i2c_driver ltc4215_driver = { 303static struct i2c_driver ltc4215_driver = {
338 .class = I2C_CLASS_HWMON,
339 .driver = { 304 .driver = {
340 .name = "ltc4215", 305 .name = "ltc4215",
341 }, 306 },
342 .probe = ltc4215_probe, 307 .probe = ltc4215_probe,
343 .remove = ltc4215_remove, 308 .remove = ltc4215_remove,
344 .id_table = ltc4215_id, 309 .id_table = ltc4215_id,
345 .detect = ltc4215_detect,
346 .address_data = &addr_data,
347}; 310};
348 311
349static int __init ltc4215_init(void) 312static int __init ltc4215_init(void)
diff --git a/drivers/hwmon/ltc4245.c b/drivers/hwmon/ltc4245.c
index e38964333612..65c232a9d0c5 100644
--- a/drivers/hwmon/ltc4245.c
+++ b/drivers/hwmon/ltc4245.c
@@ -22,15 +22,6 @@
22#include <linux/hwmon.h> 22#include <linux/hwmon.h>
23#include <linux/hwmon-sysfs.h> 23#include <linux/hwmon-sysfs.h>
24 24
25/* Valid addresses are 0x20 - 0x3f
26 *
27 * For now, we do not probe, since some of these addresses
28 * are known to be unfriendly to probing */
29static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
30
31/* Insmod parameters */
32I2C_CLIENT_INSMOD_1(ltc4245);
33
34/* Here are names of the chip's registers (a.k.a. commands) */ 25/* Here are names of the chip's registers (a.k.a. commands) */
35enum ltc4245_cmd { 26enum ltc4245_cmd {
36 LTC4245_STATUS = 0x00, /* readonly */ 27 LTC4245_STATUS = 0x00, /* readonly */
@@ -369,9 +360,13 @@ static const struct attribute_group ltc4245_group = {
369static int ltc4245_probe(struct i2c_client *client, 360static int ltc4245_probe(struct i2c_client *client,
370 const struct i2c_device_id *id) 361 const struct i2c_device_id *id)
371{ 362{
363 struct i2c_adapter *adapter = client->adapter;
372 struct ltc4245_data *data; 364 struct ltc4245_data *data;
373 int ret; 365 int ret;
374 366
367 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
368 return -ENODEV;
369
375 data = kzalloc(sizeof(*data), GFP_KERNEL); 370 data = kzalloc(sizeof(*data), GFP_KERNEL);
376 if (!data) { 371 if (!data) {
377 ret = -ENOMEM; 372 ret = -ENOMEM;
@@ -418,136 +413,20 @@ static int ltc4245_remove(struct i2c_client *client)
418 return 0; 413 return 0;
419} 414}
420 415
421/* Check that some bits in a control register appear at all possible
422 * locations without changing value
423 *
424 * @client: the i2c client to use
425 * @reg: the register to read
426 * @bits: the bits to check (0xff checks all bits,
427 * 0x03 checks only the last two bits)
428 *
429 * return -ERRNO if the register read failed
430 * return -ENODEV if the register value doesn't stay constant at all
431 * possible addresses
432 *
433 * return 0 for success
434 */
435static int ltc4245_check_control_reg(struct i2c_client *client, u8 reg, u8 bits)
436{
437 int i;
438 s32 v, voff1, voff2;
439
440 /* Read register and check for error */
441 v = i2c_smbus_read_byte_data(client, reg);
442 if (v < 0)
443 return v;
444
445 v &= bits;
446
447 for (i = 0x00; i < 0xff; i += 0x20) {
448
449 voff1 = i2c_smbus_read_byte_data(client, reg + i);
450 if (voff1 < 0)
451 return voff1;
452
453 voff2 = i2c_smbus_read_byte_data(client, reg + i + 0x08);
454 if (voff2 < 0)
455 return voff2;
456
457 voff1 &= bits;
458 voff2 &= bits;
459
460 if (v != voff1 || v != voff2)
461 return -ENODEV;
462 }
463
464 return 0;
465}
466
467static int ltc4245_detect(struct i2c_client *client,
468 int kind,
469 struct i2c_board_info *info)
470{
471 struct i2c_adapter *adapter = client->adapter;
472
473 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE_DATA))
474 return -ENODEV;
475
476 if (kind < 0) { /* probed detection - check the chip type */
477 s32 v; /* 8 bits from the chip, or -ERRNO */
478
479 /* Chip registers 0x00-0x07 are control registers
480 * Chip registers 0x10-0x1f are data registers
481 *
482 * Address bits b7-b5 are ignored. This makes the chip "repeat"
483 * in steps of 0x20. Any control registers should appear with
484 * the same values across all duplicated addresses.
485 *
486 * Register 0x02 bit b2 is reserved, expect 0
487 * Register 0x07 bits b7 to b4 are reserved, expect 0
488 *
489 * Registers 0x01, 0x02 are control registers and should not
490 * change on their own.
491 *
492 * Register 0x06 bits b6 and b7 are control bits, and should
493 * not change on their own.
494 *
495 * Register 0x07 bits b3 to b0 are control bits, and should
496 * not change on their own.
497 */
498
499 /* read register 0x02 reserved bit, expect 0 */
500 v = i2c_smbus_read_byte_data(client, LTC4245_CONTROL);
501 if (v < 0 || (v & 0x04) != 0)
502 return -ENODEV;
503
504 /* read register 0x07 reserved bits, expect 0 */
505 v = i2c_smbus_read_byte_data(client, LTC4245_ADCADR);
506 if (v < 0 || (v & 0xf0) != 0)
507 return -ENODEV;
508
509 /* check that the alert register appears at all locations */
510 if (ltc4245_check_control_reg(client, LTC4245_ALERT, 0xff))
511 return -ENODEV;
512
513 /* check that the control register appears at all locations */
514 if (ltc4245_check_control_reg(client, LTC4245_CONTROL, 0xff))
515 return -ENODEV;
516
517 /* check that register 0x06 bits b6 and b7 stay constant */
518 if (ltc4245_check_control_reg(client, LTC4245_GPIO, 0xc0))
519 return -ENODEV;
520
521 /* check that register 0x07 bits b3-b0 stay constant */
522 if (ltc4245_check_control_reg(client, LTC4245_ADCADR, 0x0f))
523 return -ENODEV;
524 }
525
526 strlcpy(info->type, "ltc4245", I2C_NAME_SIZE);
527 dev_info(&adapter->dev, "ltc4245 %s at address 0x%02x\n",
528 kind < 0 ? "probed" : "forced",
529 client->addr);
530
531 return 0;
532}
533
534static const struct i2c_device_id ltc4245_id[] = { 416static const struct i2c_device_id ltc4245_id[] = {
535 { "ltc4245", ltc4245 }, 417 { "ltc4245", 0 },
536 { } 418 { }
537}; 419};
538MODULE_DEVICE_TABLE(i2c, ltc4245_id); 420MODULE_DEVICE_TABLE(i2c, ltc4245_id);
539 421
540/* This is the driver that will be inserted */ 422/* This is the driver that will be inserted */
541static struct i2c_driver ltc4245_driver = { 423static struct i2c_driver ltc4245_driver = {
542 .class = I2C_CLASS_HWMON,
543 .driver = { 424 .driver = {
544 .name = "ltc4245", 425 .name = "ltc4245",
545 }, 426 },
546 .probe = ltc4245_probe, 427 .probe = ltc4245_probe,
547 .remove = ltc4245_remove, 428 .remove = ltc4245_remove,
548 .id_table = ltc4245_id, 429 .id_table = ltc4245_id,
549 .detect = ltc4245_detect,
550 .address_data = &addr_data,
551}; 430};
552 431
553static int __init ltc4245_init(void) 432static int __init ltc4245_init(void)
diff --git a/drivers/i2c/busses/Kconfig b/drivers/i2c/busses/Kconfig
index 6bedd2fcfc15..737335ff2b21 100644
--- a/drivers/i2c/busses/Kconfig
+++ b/drivers/i2c/busses/Kconfig
@@ -477,8 +477,8 @@ config I2C_PNX
477 will be called i2c-pnx. 477 will be called i2c-pnx.
478 478
479config I2C_PXA 479config I2C_PXA
480 tristate "Intel PXA2XX I2C adapter (EXPERIMENTAL)" 480 tristate "Intel PXA2XX I2C adapter"
481 depends on EXPERIMENTAL && ARCH_PXA 481 depends on ARCH_PXA || ARCH_MMP
482 help 482 help
483 If you have devices in the PXA I2C bus, say yes to this option. 483 If you have devices in the PXA I2C bus, say yes to this option.
484 This driver can also be built as a module. If so, the module 484 This driver can also be built as a module. If so, the module
diff --git a/drivers/i2c/busses/i2c-amd756.c b/drivers/i2c/busses/i2c-amd756.c
index f7d6fe9c49ba..8f0b90ef8c76 100644
--- a/drivers/i2c/busses/i2c-amd756.c
+++ b/drivers/i2c/busses/i2c-amd756.c
@@ -364,7 +364,7 @@ static int __devinit amd756_probe(struct pci_dev *pdev,
364 error = acpi_check_region(amd756_ioport, SMB_IOSIZE, 364 error = acpi_check_region(amd756_ioport, SMB_IOSIZE,
365 amd756_driver.name); 365 amd756_driver.name);
366 if (error) 366 if (error)
367 return error; 367 return -ENODEV;
368 368
369 if (!request_region(amd756_ioport, SMB_IOSIZE, amd756_driver.name)) { 369 if (!request_region(amd756_ioport, SMB_IOSIZE, amd756_driver.name)) {
370 dev_err(&pdev->dev, "SMB region 0x%x already in use!\n", 370 dev_err(&pdev->dev, "SMB region 0x%x already in use!\n",
diff --git a/drivers/i2c/busses/i2c-amd8111.c b/drivers/i2c/busses/i2c-amd8111.c
index a7c59908c457..5b4ad86ca166 100644
--- a/drivers/i2c/busses/i2c-amd8111.c
+++ b/drivers/i2c/busses/i2c-amd8111.c
@@ -376,8 +376,10 @@ static int __devinit amd8111_probe(struct pci_dev *dev,
376 smbus->size = pci_resource_len(dev, 0); 376 smbus->size = pci_resource_len(dev, 0);
377 377
378 error = acpi_check_resource_conflict(&dev->resource[0]); 378 error = acpi_check_resource_conflict(&dev->resource[0]);
379 if (error) 379 if (error) {
380 error = -ENODEV;
380 goto out_kfree; 381 goto out_kfree;
382 }
381 383
382 if (!request_region(smbus->base, smbus->size, amd8111_driver.name)) { 384 if (!request_region(smbus->base, smbus->size, amd8111_driver.name)) {
383 error = -EBUSY; 385 error = -EBUSY;
diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c
index 9d2c5adf5d4f..55edcfe5b851 100644
--- a/drivers/i2c/busses/i2c-i801.c
+++ b/drivers/i2c/busses/i2c-i801.c
@@ -732,8 +732,10 @@ static int __devinit i801_probe(struct pci_dev *dev, const struct pci_device_id
732 } 732 }
733 733
734 err = acpi_check_resource_conflict(&dev->resource[SMBBAR]); 734 err = acpi_check_resource_conflict(&dev->resource[SMBBAR]);
735 if (err) 735 if (err) {
736 err = -ENODEV;
736 goto exit; 737 goto exit;
738 }
737 739
738 err = pci_request_region(dev, SMBBAR, i801_driver.name); 740 err = pci_request_region(dev, SMBBAR, i801_driver.name);
739 if (err) { 741 if (err) {
diff --git a/drivers/i2c/busses/i2c-isch.c b/drivers/i2c/busses/i2c-isch.c
index 9f6b8e0f8632..dba6eb053e2f 100644
--- a/drivers/i2c/busses/i2c-isch.c
+++ b/drivers/i2c/busses/i2c-isch.c
@@ -281,7 +281,7 @@ static int __devinit sch_probe(struct pci_dev *dev,
281 return -ENODEV; 281 return -ENODEV;
282 } 282 }
283 if (acpi_check_region(sch_smba, SMBIOSIZE, sch_driver.name)) 283 if (acpi_check_region(sch_smba, SMBIOSIZE, sch_driver.name))
284 return -EBUSY; 284 return -ENODEV;
285 if (!request_region(sch_smba, SMBIOSIZE, sch_driver.name)) { 285 if (!request_region(sch_smba, SMBIOSIZE, sch_driver.name)) {
286 dev_err(&dev->dev, "SMBus region 0x%x already in use!\n", 286 dev_err(&dev->dev, "SMBus region 0x%x already in use!\n",
287 sch_smba); 287 sch_smba);
diff --git a/drivers/i2c/busses/i2c-piix4.c b/drivers/i2c/busses/i2c-piix4.c
index a782c7a08f9e..d26a972aacaa 100644
--- a/drivers/i2c/busses/i2c-piix4.c
+++ b/drivers/i2c/busses/i2c-piix4.c
@@ -169,7 +169,7 @@ static int __devinit piix4_setup(struct pci_dev *PIIX4_dev,
169 } 169 }
170 170
171 if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) 171 if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
172 return -EBUSY; 172 return -ENODEV;
173 173
174 if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) { 174 if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) {
175 dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n", 175 dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n",
@@ -260,7 +260,7 @@ static int __devinit piix4_setup_sb800(struct pci_dev *PIIX4_dev,
260 260
261 piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0; 261 piix4_smba = ((smba_en_hi << 8) | smba_en_lo) & 0xffe0;
262 if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) 262 if (acpi_check_region(piix4_smba, SMBIOSIZE, piix4_driver.name))
263 return -EBUSY; 263 return -ENODEV;
264 264
265 if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) { 265 if (!request_region(piix4_smba, SMBIOSIZE, piix4_driver.name)) {
266 dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n", 266 dev_err(&PIIX4_dev->dev, "SMBus region 0x%x already in use!\n",
diff --git a/drivers/i2c/busses/i2c-scmi.c b/drivers/i2c/busses/i2c-scmi.c
index 276a046ac93f..b4a55d407bf5 100644
--- a/drivers/i2c/busses/i2c-scmi.c
+++ b/drivers/i2c/busses/i2c-scmi.c
@@ -369,9 +369,8 @@ static int acpi_smbus_cmi_add(struct acpi_device *device)
369 goto err; 369 goto err;
370 370
371 snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name), 371 snprintf(smbus_cmi->adapter.name, sizeof(smbus_cmi->adapter.name),
372 "SMBus CMI adapter %s (%s)", 372 "SMBus CMI adapter %s",
373 acpi_device_name(device), 373 acpi_device_name(device));
374 acpi_device_uid(device));
375 smbus_cmi->adapter.owner = THIS_MODULE; 374 smbus_cmi->adapter.owner = THIS_MODULE;
376 smbus_cmi->adapter.algo = &acpi_smbus_cmi_algorithm; 375 smbus_cmi->adapter.algo = &acpi_smbus_cmi_algorithm;
377 smbus_cmi->adapter.algo_data = smbus_cmi; 376 smbus_cmi->adapter.algo_data = smbus_cmi;
diff --git a/drivers/i2c/busses/i2c-sis96x.c b/drivers/i2c/busses/i2c-sis96x.c
index 8295885b2fdb..1649963b00dc 100644
--- a/drivers/i2c/busses/i2c-sis96x.c
+++ b/drivers/i2c/busses/i2c-sis96x.c
@@ -280,7 +280,7 @@ static int __devinit sis96x_probe(struct pci_dev *dev,
280 280
281 retval = acpi_check_resource_conflict(&dev->resource[SIS96x_BAR]); 281 retval = acpi_check_resource_conflict(&dev->resource[SIS96x_BAR]);
282 if (retval) 282 if (retval)
283 return retval; 283 return -ENODEV;
284 284
285 /* Everything is happy, let's grab the memory and set things up. */ 285 /* Everything is happy, let's grab the memory and set things up. */
286 if (!request_region(sis96x_smbus_base, SMB_IOSIZE, 286 if (!request_region(sis96x_smbus_base, SMB_IOSIZE,
diff --git a/drivers/i2c/busses/i2c-viapro.c b/drivers/i2c/busses/i2c-viapro.c
index 54d810a4d00f..e4b1543015af 100644
--- a/drivers/i2c/busses/i2c-viapro.c
+++ b/drivers/i2c/busses/i2c-viapro.c
@@ -365,7 +365,7 @@ static int __devinit vt596_probe(struct pci_dev *pdev,
365found: 365found:
366 error = acpi_check_region(vt596_smba, 8, vt596_driver.name); 366 error = acpi_check_region(vt596_smba, 8, vt596_driver.name);
367 if (error) 367 if (error)
368 return error; 368 return -ENODEV;
369 369
370 if (!request_region(vt596_smba, 8, vt596_driver.name)) { 370 if (!request_region(vt596_smba, 8, vt596_driver.name)) {
371 dev_err(&pdev->dev, "SMBus region 0x%x already in use!\n", 371 dev_err(&pdev->dev, "SMBus region 0x%x already in use!\n",
diff --git a/drivers/ieee1394/dma.c b/drivers/ieee1394/dma.c
index 1aba8c13fe8f..8e7e3344c4b3 100644
--- a/drivers/ieee1394/dma.c
+++ b/drivers/ieee1394/dma.c
@@ -247,7 +247,7 @@ static int dma_region_pagefault(struct vm_area_struct *vma,
247 return 0; 247 return 0;
248} 248}
249 249
250static struct vm_operations_struct dma_region_vm_ops = { 250static const struct vm_operations_struct dma_region_vm_ops = {
251 .fault = dma_region_pagefault, 251 .fault = dma_region_pagefault,
252}; 252};
253 253
diff --git a/drivers/ieee802154/fakehard.c b/drivers/ieee802154/fakehard.c
index 96a2959ce877..f877f13e3ab3 100644
--- a/drivers/ieee802154/fakehard.c
+++ b/drivers/ieee802154/fakehard.c
@@ -32,9 +32,29 @@
32#include <net/nl802154.h> 32#include <net/nl802154.h>
33#include <net/wpan-phy.h> 33#include <net/wpan-phy.h>
34 34
35struct wpan_phy *net_to_phy(struct net_device *dev) 35struct fakehard_priv {
36 struct wpan_phy *phy;
37};
38
39static struct wpan_phy *fake_to_phy(const struct net_device *dev)
36{ 40{
37 return container_of(dev->dev.parent, struct wpan_phy, dev); 41 struct fakehard_priv *priv = netdev_priv(dev);
42 return priv->phy;
43}
44
45/**
46 * fake_get_phy - Return a phy corresponding to this device.
47 * @dev: The network device for which to return the wan-phy object
48 *
49 * This function returns a wpan-phy object corresponding to the passed
50 * network device. Reference counter for wpan-phy object is incremented,
51 * so when the wpan-phy isn't necessary, you should drop the reference
52 * via @wpan_phy_put() call.
53 */
54static struct wpan_phy *fake_get_phy(const struct net_device *dev)
55{
56 struct wpan_phy *phy = fake_to_phy(dev);
57 return to_phy(get_device(&phy->dev));
38} 58}
39 59
40/** 60/**
@@ -43,7 +63,7 @@ struct wpan_phy *net_to_phy(struct net_device *dev)
43 * 63 *
44 * Return the ID of the PAN from the PIB. 64 * Return the ID of the PAN from the PIB.
45 */ 65 */
46static u16 fake_get_pan_id(struct net_device *dev) 66static u16 fake_get_pan_id(const struct net_device *dev)
47{ 67{
48 BUG_ON(dev->type != ARPHRD_IEEE802154); 68 BUG_ON(dev->type != ARPHRD_IEEE802154);
49 69
@@ -58,7 +78,7 @@ static u16 fake_get_pan_id(struct net_device *dev)
58 * device. If the device has not yet had a short address assigned 78 * device. If the device has not yet had a short address assigned
59 * then this should return 0xFFFF to indicate a lack of association. 79 * then this should return 0xFFFF to indicate a lack of association.
60 */ 80 */
61static u16 fake_get_short_addr(struct net_device *dev) 81static u16 fake_get_short_addr(const struct net_device *dev)
62{ 82{
63 BUG_ON(dev->type != ARPHRD_IEEE802154); 83 BUG_ON(dev->type != ARPHRD_IEEE802154);
64 84
@@ -78,7 +98,7 @@ static u16 fake_get_short_addr(struct net_device *dev)
78 * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006 98 * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006
79 * document. 99 * document.
80 */ 100 */
81static u8 fake_get_dsn(struct net_device *dev) 101static u8 fake_get_dsn(const struct net_device *dev)
82{ 102{
83 BUG_ON(dev->type != ARPHRD_IEEE802154); 103 BUG_ON(dev->type != ARPHRD_IEEE802154);
84 104
@@ -98,7 +118,7 @@ static u8 fake_get_dsn(struct net_device *dev)
98 * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006 118 * Note: This is in section 7.2.1.2 of the IEEE 802.15.4-2006
99 * document. 119 * document.
100 */ 120 */
101static u8 fake_get_bsn(struct net_device *dev) 121static u8 fake_get_bsn(const struct net_device *dev)
102{ 122{
103 BUG_ON(dev->type != ARPHRD_IEEE802154); 123 BUG_ON(dev->type != ARPHRD_IEEE802154);
104 124
@@ -121,7 +141,7 @@ static u8 fake_get_bsn(struct net_device *dev)
121static int fake_assoc_req(struct net_device *dev, 141static int fake_assoc_req(struct net_device *dev,
122 struct ieee802154_addr *addr, u8 channel, u8 page, u8 cap) 142 struct ieee802154_addr *addr, u8 channel, u8 page, u8 cap)
123{ 143{
124 struct wpan_phy *phy = net_to_phy(dev); 144 struct wpan_phy *phy = fake_to_phy(dev);
125 145
126 mutex_lock(&phy->pib_lock); 146 mutex_lock(&phy->pib_lock);
127 phy->current_channel = channel; 147 phy->current_channel = channel;
@@ -196,7 +216,7 @@ static int fake_start_req(struct net_device *dev, struct ieee802154_addr *addr,
196 u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx, 216 u8 bcn_ord, u8 sf_ord, u8 pan_coord, u8 blx,
197 u8 coord_realign) 217 u8 coord_realign)
198{ 218{
199 struct wpan_phy *phy = net_to_phy(dev); 219 struct wpan_phy *phy = fake_to_phy(dev);
200 220
201 mutex_lock(&phy->pib_lock); 221 mutex_lock(&phy->pib_lock);
202 phy->current_channel = channel; 222 phy->current_channel = channel;
@@ -239,6 +259,8 @@ static struct ieee802154_mlme_ops fake_mlme = {
239 .start_req = fake_start_req, 259 .start_req = fake_start_req,
240 .scan_req = fake_scan_req, 260 .scan_req = fake_scan_req,
241 261
262 .get_phy = fake_get_phy,
263
242 .get_pan_id = fake_get_pan_id, 264 .get_pan_id = fake_get_pan_id,
243 .get_short_addr = fake_get_short_addr, 265 .get_short_addr = fake_get_short_addr,
244 .get_dsn = fake_get_dsn, 266 .get_dsn = fake_get_dsn,
@@ -313,7 +335,7 @@ static const struct net_device_ops fake_ops = {
313 335
314static void ieee802154_fake_destruct(struct net_device *dev) 336static void ieee802154_fake_destruct(struct net_device *dev)
315{ 337{
316 struct wpan_phy *phy = net_to_phy(dev); 338 struct wpan_phy *phy = fake_to_phy(dev);
317 339
318 wpan_phy_unregister(phy); 340 wpan_phy_unregister(phy);
319 free_netdev(dev); 341 free_netdev(dev);
@@ -338,13 +360,14 @@ static void ieee802154_fake_setup(struct net_device *dev)
338static int __devinit ieee802154fake_probe(struct platform_device *pdev) 360static int __devinit ieee802154fake_probe(struct platform_device *pdev)
339{ 361{
340 struct net_device *dev; 362 struct net_device *dev;
363 struct fakehard_priv *priv;
341 struct wpan_phy *phy = wpan_phy_alloc(0); 364 struct wpan_phy *phy = wpan_phy_alloc(0);
342 int err; 365 int err;
343 366
344 if (!phy) 367 if (!phy)
345 return -ENOMEM; 368 return -ENOMEM;
346 369
347 dev = alloc_netdev(0, "hardwpan%d", ieee802154_fake_setup); 370 dev = alloc_netdev(sizeof(struct fakehard_priv), "hardwpan%d", ieee802154_fake_setup);
348 if (!dev) { 371 if (!dev) {
349 wpan_phy_free(phy); 372 wpan_phy_free(phy);
350 return -ENOMEM; 373 return -ENOMEM;
@@ -356,12 +379,23 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
356 dev->addr_len); 379 dev->addr_len);
357 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len); 380 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
358 381
359 phy->channels_supported = (1 << 27) - 1; 382 /*
383 * For now we'd like to emulate 2.4 GHz-only device,
384 * both O-QPSK and CSS
385 */
386 /* 2.4 GHz O-QPSK 802.15.4-2003 */
387 phy->channels_supported[0] |= 0x7FFF800;
388 /* 2.4 GHz CSS 802.15.4a-2007 */
389 phy->channels_supported[3] |= 0x3fff;
390
360 phy->transmit_power = 0xbf; 391 phy->transmit_power = 0xbf;
361 392
362 dev->netdev_ops = &fake_ops; 393 dev->netdev_ops = &fake_ops;
363 dev->ml_priv = &fake_mlme; 394 dev->ml_priv = &fake_mlme;
364 395
396 priv = netdev_priv(dev);
397 priv->phy = phy;
398
365 /* 399 /*
366 * If the name is a format string the caller wants us to do a 400 * If the name is a format string the caller wants us to do a
367 * name allocation. 401 * name allocation.
@@ -372,11 +406,12 @@ static int __devinit ieee802154fake_probe(struct platform_device *pdev)
372 goto out; 406 goto out;
373 } 407 }
374 408
409 wpan_phy_set_dev(phy, &pdev->dev);
375 SET_NETDEV_DEV(dev, &phy->dev); 410 SET_NETDEV_DEV(dev, &phy->dev);
376 411
377 platform_set_drvdata(pdev, dev); 412 platform_set_drvdata(pdev, dev);
378 413
379 err = wpan_phy_register(&pdev->dev, phy); 414 err = wpan_phy_register(phy);
380 if (err) 415 if (err)
381 goto out; 416 goto out;
382 417
diff --git a/drivers/infiniband/core/mad_rmpp.c b/drivers/infiniband/core/mad_rmpp.c
index 57a3c6f947b2..4e0f2829e0e5 100644
--- a/drivers/infiniband/core/mad_rmpp.c
+++ b/drivers/infiniband/core/mad_rmpp.c
@@ -37,7 +37,8 @@
37enum rmpp_state { 37enum rmpp_state {
38 RMPP_STATE_ACTIVE, 38 RMPP_STATE_ACTIVE,
39 RMPP_STATE_TIMEOUT, 39 RMPP_STATE_TIMEOUT,
40 RMPP_STATE_COMPLETE 40 RMPP_STATE_COMPLETE,
41 RMPP_STATE_CANCELING
41}; 42};
42 43
43struct mad_rmpp_recv { 44struct mad_rmpp_recv {
@@ -87,18 +88,22 @@ void ib_cancel_rmpp_recvs(struct ib_mad_agent_private *agent)
87 88
88 spin_lock_irqsave(&agent->lock, flags); 89 spin_lock_irqsave(&agent->lock, flags);
89 list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) { 90 list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
91 if (rmpp_recv->state != RMPP_STATE_COMPLETE)
92 ib_free_recv_mad(rmpp_recv->rmpp_wc);
93 rmpp_recv->state = RMPP_STATE_CANCELING;
94 }
95 spin_unlock_irqrestore(&agent->lock, flags);
96
97 list_for_each_entry(rmpp_recv, &agent->rmpp_list, list) {
90 cancel_delayed_work(&rmpp_recv->timeout_work); 98 cancel_delayed_work(&rmpp_recv->timeout_work);
91 cancel_delayed_work(&rmpp_recv->cleanup_work); 99 cancel_delayed_work(&rmpp_recv->cleanup_work);
92 } 100 }
93 spin_unlock_irqrestore(&agent->lock, flags);
94 101
95 flush_workqueue(agent->qp_info->port_priv->wq); 102 flush_workqueue(agent->qp_info->port_priv->wq);
96 103
97 list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv, 104 list_for_each_entry_safe(rmpp_recv, temp_rmpp_recv,
98 &agent->rmpp_list, list) { 105 &agent->rmpp_list, list) {
99 list_del(&rmpp_recv->list); 106 list_del(&rmpp_recv->list);
100 if (rmpp_recv->state != RMPP_STATE_COMPLETE)
101 ib_free_recv_mad(rmpp_recv->rmpp_wc);
102 destroy_rmpp_recv(rmpp_recv); 107 destroy_rmpp_recv(rmpp_recv);
103 } 108 }
104} 109}
@@ -260,6 +265,10 @@ static void recv_cleanup_handler(struct work_struct *work)
260 unsigned long flags; 265 unsigned long flags;
261 266
262 spin_lock_irqsave(&rmpp_recv->agent->lock, flags); 267 spin_lock_irqsave(&rmpp_recv->agent->lock, flags);
268 if (rmpp_recv->state == RMPP_STATE_CANCELING) {
269 spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
270 return;
271 }
263 list_del(&rmpp_recv->list); 272 list_del(&rmpp_recv->list);
264 spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags); 273 spin_unlock_irqrestore(&rmpp_recv->agent->lock, flags);
265 destroy_rmpp_recv(rmpp_recv); 274 destroy_rmpp_recv(rmpp_recv);
diff --git a/drivers/infiniband/core/ucm.c b/drivers/infiniband/core/ucm.c
index 51bd9669cb1f..f504c9b00c1b 100644
--- a/drivers/infiniband/core/ucm.c
+++ b/drivers/infiniband/core/ucm.c
@@ -38,6 +38,7 @@
38#include <linux/device.h> 38#include <linux/device.h>
39#include <linux/err.h> 39#include <linux/err.h>
40#include <linux/poll.h> 40#include <linux/poll.h>
41#include <linux/sched.h>
41#include <linux/file.h> 42#include <linux/file.h>
42#include <linux/mount.h> 43#include <linux/mount.h>
43#include <linux/cdev.h> 44#include <linux/cdev.h>
diff --git a/drivers/infiniband/core/user_mad.c b/drivers/infiniband/core/user_mad.c
index 8c46f2257098..7de02969ed7d 100644
--- a/drivers/infiniband/core/user_mad.c
+++ b/drivers/infiniband/core/user_mad.c
@@ -44,6 +44,7 @@
44#include <linux/mutex.h> 44#include <linux/mutex.h>
45#include <linux/kref.h> 45#include <linux/kref.h>
46#include <linux/compat.h> 46#include <linux/compat.h>
47#include <linux/sched.h>
47#include <linux/semaphore.h> 48#include <linux/semaphore.h>
48 49
49#include <asm/uaccess.h> 50#include <asm/uaccess.h>
diff --git a/drivers/infiniband/core/uverbs_main.c b/drivers/infiniband/core/uverbs_main.c
index d3fff9e008a3..aec0fbdfe7f0 100644
--- a/drivers/infiniband/core/uverbs_main.c
+++ b/drivers/infiniband/core/uverbs_main.c
@@ -40,6 +40,7 @@
40#include <linux/err.h> 40#include <linux/err.h>
41#include <linux/fs.h> 41#include <linux/fs.h>
42#include <linux/poll.h> 42#include <linux/poll.h>
43#include <linux/sched.h>
43#include <linux/file.h> 44#include <linux/file.h>
44#include <linux/mount.h> 45#include <linux/mount.h>
45#include <linux/cdev.h> 46#include <linux/cdev.h>
diff --git a/drivers/infiniband/hw/ehca/ehca_uverbs.c b/drivers/infiniband/hw/ehca/ehca_uverbs.c
index 3cb688d29131..f1565cae8ec6 100644
--- a/drivers/infiniband/hw/ehca/ehca_uverbs.c
+++ b/drivers/infiniband/hw/ehca/ehca_uverbs.c
@@ -95,7 +95,7 @@ static void ehca_mm_close(struct vm_area_struct *vma)
95 vma->vm_start, vma->vm_end, *count); 95 vma->vm_start, vma->vm_end, *count);
96} 96}
97 97
98static struct vm_operations_struct vm_ops = { 98static const struct vm_operations_struct vm_ops = {
99 .open = ehca_mm_open, 99 .open = ehca_mm_open,
100 .close = ehca_mm_close, 100 .close = ehca_mm_close,
101}; 101};
diff --git a/drivers/infiniband/hw/ipath/ipath_file_ops.c b/drivers/infiniband/hw/ipath/ipath_file_ops.c
index 38a287006612..40dbe54056c7 100644
--- a/drivers/infiniband/hw/ipath/ipath_file_ops.c
+++ b/drivers/infiniband/hw/ipath/ipath_file_ops.c
@@ -1151,7 +1151,7 @@ static int ipath_file_vma_fault(struct vm_area_struct *vma,
1151 return 0; 1151 return 0;
1152} 1152}
1153 1153
1154static struct vm_operations_struct ipath_file_vm_ops = { 1154static const struct vm_operations_struct ipath_file_vm_ops = {
1155 .fault = ipath_file_vma_fault, 1155 .fault = ipath_file_vma_fault,
1156}; 1156};
1157 1157
diff --git a/drivers/infiniband/hw/ipath/ipath_mmap.c b/drivers/infiniband/hw/ipath/ipath_mmap.c
index fa830e22002f..b28865faf435 100644
--- a/drivers/infiniband/hw/ipath/ipath_mmap.c
+++ b/drivers/infiniband/hw/ipath/ipath_mmap.c
@@ -74,7 +74,7 @@ static void ipath_vma_close(struct vm_area_struct *vma)
74 kref_put(&ip->ref, ipath_release_mmap_info); 74 kref_put(&ip->ref, ipath_release_mmap_info);
75} 75}
76 76
77static struct vm_operations_struct ipath_vm_ops = { 77static const struct vm_operations_struct ipath_vm_ops = {
78 .open = ipath_vma_open, 78 .open = ipath_vma_open,
79 .close = ipath_vma_close, 79 .close = ipath_vma_close,
80}; 80};
diff --git a/drivers/infiniband/hw/mthca/mthca_catas.c b/drivers/infiniband/hw/mthca/mthca_catas.c
index 056b2a4c6970..0aa0110e4b6c 100644
--- a/drivers/infiniband/hw/mthca/mthca_catas.c
+++ b/drivers/infiniband/hw/mthca/mthca_catas.c
@@ -68,11 +68,16 @@ static void catas_reset(struct work_struct *work)
68 spin_unlock_irq(&catas_lock); 68 spin_unlock_irq(&catas_lock);
69 69
70 list_for_each_entry_safe(dev, tmpdev, &tlist, catas_err.list) { 70 list_for_each_entry_safe(dev, tmpdev, &tlist, catas_err.list) {
71 struct pci_dev *pdev = dev->pdev;
71 ret = __mthca_restart_one(dev->pdev); 72 ret = __mthca_restart_one(dev->pdev);
73 /* 'dev' now is not valid */
72 if (ret) 74 if (ret)
73 mthca_err(dev, "Reset failed (%d)\n", ret); 75 printk(KERN_ERR "mthca %s: Reset failed (%d)\n",
74 else 76 pci_name(pdev), ret);
75 mthca_dbg(dev, "Reset succeeded\n"); 77 else {
78 struct mthca_dev *d = pci_get_drvdata(pdev);
79 mthca_dbg(d, "Reset succeeded\n");
80 }
76 } 81 }
77 82
78 mutex_unlock(&mthca_device_mutex); 83 mutex_unlock(&mthca_device_mutex);
diff --git a/drivers/infiniband/hw/nes/nes_nic.c b/drivers/infiniband/hw/nes/nes_nic.c
index 538e409d4515..de18fdfdadf2 100644
--- a/drivers/infiniband/hw/nes/nes_nic.c
+++ b/drivers/infiniband/hw/nes/nes_nic.c
@@ -1080,11 +1080,14 @@ static int nes_netdev_set_rx_csum(struct net_device *netdev, u32 enable)
1080 1080
1081 1081
1082/** 1082/**
1083 * nes_netdev_get_stats_count 1083 * nes_netdev_get_sset_count
1084 */ 1084 */
1085static int nes_netdev_get_stats_count(struct net_device *netdev) 1085static int nes_netdev_get_sset_count(struct net_device *netdev, int stringset)
1086{ 1086{
1087 return NES_ETHTOOL_STAT_COUNT; 1087 if (stringset == ETH_SS_STATS)
1088 return NES_ETHTOOL_STAT_COUNT;
1089 else
1090 return -EINVAL;
1088} 1091}
1089 1092
1090 1093
@@ -1264,7 +1267,6 @@ static void nes_netdev_get_drvinfo(struct net_device *netdev,
1264 sprintf(drvinfo->fw_version, "%u.%u", nesadapter->firmware_version>>16, 1267 sprintf(drvinfo->fw_version, "%u.%u", nesadapter->firmware_version>>16,
1265 nesadapter->firmware_version & 0x000000ff); 1268 nesadapter->firmware_version & 0x000000ff);
1266 strcpy(drvinfo->version, DRV_VERSION); 1269 strcpy(drvinfo->version, DRV_VERSION);
1267 drvinfo->n_stats = nes_netdev_get_stats_count(netdev);
1268 drvinfo->testinfo_len = 0; 1270 drvinfo->testinfo_len = 0;
1269 drvinfo->eedump_len = 0; 1271 drvinfo->eedump_len = 0;
1270 drvinfo->regdump_len = 0; 1272 drvinfo->regdump_len = 0;
@@ -1516,7 +1518,7 @@ static const struct ethtool_ops nes_ethtool_ops = {
1516 .get_rx_csum = nes_netdev_get_rx_csum, 1518 .get_rx_csum = nes_netdev_get_rx_csum,
1517 .get_sg = ethtool_op_get_sg, 1519 .get_sg = ethtool_op_get_sg,
1518 .get_strings = nes_netdev_get_strings, 1520 .get_strings = nes_netdev_get_strings,
1519 .get_stats_count = nes_netdev_get_stats_count, 1521 .get_sset_count = nes_netdev_get_sset_count,
1520 .get_ethtool_stats = nes_netdev_get_ethtool_stats, 1522 .get_ethtool_stats = nes_netdev_get_ethtool_stats,
1521 .get_drvinfo = nes_netdev_get_drvinfo, 1523 .get_drvinfo = nes_netdev_get_drvinfo,
1522 .get_coalesce = nes_netdev_get_coalesce, 1524 .get_coalesce = nes_netdev_get_coalesce,
@@ -1566,7 +1568,6 @@ static const struct net_device_ops nes_netdev_ops = {
1566 .ndo_set_mac_address = nes_netdev_set_mac_address, 1568 .ndo_set_mac_address = nes_netdev_set_mac_address,
1567 .ndo_set_multicast_list = nes_netdev_set_multicast_list, 1569 .ndo_set_multicast_list = nes_netdev_set_multicast_list,
1568 .ndo_change_mtu = nes_netdev_change_mtu, 1570 .ndo_change_mtu = nes_netdev_change_mtu,
1569 .ndo_set_mac_address = eth_mac_addr,
1570 .ndo_validate_addr = eth_validate_addr, 1571 .ndo_validate_addr = eth_validate_addr,
1571 .ndo_vlan_rx_register = nes_netdev_vlan_rx_register, 1572 .ndo_vlan_rx_register = nes_netdev_vlan_rx_register,
1572}; 1573};
diff --git a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
index 25874fc680c9..8763c1ea5eb4 100644
--- a/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
+++ b/drivers/infiniband/ulp/ipoib/ipoib_multicast.c
@@ -362,12 +362,19 @@ void ipoib_mcast_carrier_on_task(struct work_struct *work)
362{ 362{
363 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv, 363 struct ipoib_dev_priv *priv = container_of(work, struct ipoib_dev_priv,
364 carrier_on_task); 364 carrier_on_task);
365 struct ib_port_attr attr;
365 366
366 /* 367 /*
367 * Take rtnl_lock to avoid racing with ipoib_stop() and 368 * Take rtnl_lock to avoid racing with ipoib_stop() and
368 * turning the carrier back on while a device is being 369 * turning the carrier back on while a device is being
369 * removed. 370 * removed.
370 */ 371 */
372 if (ib_query_port(priv->ca, priv->port, &attr) ||
373 attr.state != IB_PORT_ACTIVE) {
374 ipoib_dbg(priv, "Keeping carrier off until IB port is active\n");
375 return;
376 }
377
371 rtnl_lock(); 378 rtnl_lock();
372 netif_carrier_on(priv->dev); 379 netif_carrier_on(priv->dev);
373 rtnl_unlock(); 380 rtnl_unlock();
diff --git a/drivers/input/evdev.c b/drivers/input/evdev.c
index 1148140d08a1..dee6706038aa 100644
--- a/drivers/input/evdev.c
+++ b/drivers/input/evdev.c
@@ -13,6 +13,7 @@
13#define EVDEV_BUFFER_SIZE 64 13#define EVDEV_BUFFER_SIZE 64
14 14
15#include <linux/poll.h> 15#include <linux/poll.h>
16#include <linux/sched.h>
16#include <linux/slab.h> 17#include <linux/slab.h>
17#include <linux/module.h> 18#include <linux/module.h>
18#include <linux/init.h> 19#include <linux/init.h>
diff --git a/drivers/input/input.c b/drivers/input/input.c
index e828aab7dace..c6f88ebb40c7 100644
--- a/drivers/input/input.c
+++ b/drivers/input/input.c
@@ -17,6 +17,7 @@
17#include <linux/random.h> 17#include <linux/random.h>
18#include <linux/major.h> 18#include <linux/major.h>
19#include <linux/proc_fs.h> 19#include <linux/proc_fs.h>
20#include <linux/sched.h>
20#include <linux/seq_file.h> 21#include <linux/seq_file.h>
21#include <linux/poll.h> 22#include <linux/poll.h>
22#include <linux/device.h> 23#include <linux/device.h>
@@ -1273,6 +1274,7 @@ static int input_dev_uevent(struct device *device, struct kobj_uevent_env *env)
1273 } \ 1274 } \
1274 } while (0) 1275 } while (0)
1275 1276
1277#ifdef CONFIG_PM
1276static void input_dev_reset(struct input_dev *dev, bool activate) 1278static void input_dev_reset(struct input_dev *dev, bool activate)
1277{ 1279{
1278 if (!dev->event) 1280 if (!dev->event)
@@ -1287,7 +1289,6 @@ static void input_dev_reset(struct input_dev *dev, bool activate)
1287 } 1289 }
1288} 1290}
1289 1291
1290#ifdef CONFIG_PM
1291static int input_dev_suspend(struct device *dev) 1292static int input_dev_suspend(struct device *dev)
1292{ 1293{
1293 struct input_dev *input_dev = to_input_dev(dev); 1294 struct input_dev *input_dev = to_input_dev(dev);
diff --git a/drivers/input/joydev.c b/drivers/input/joydev.c
index 901b2525993e..b1bd6dd32286 100644
--- a/drivers/input/joydev.c
+++ b/drivers/input/joydev.c
@@ -18,6 +18,7 @@
18#include <linux/input.h> 18#include <linux/input.h>
19#include <linux/kernel.h> 19#include <linux/kernel.h>
20#include <linux/major.h> 20#include <linux/major.h>
21#include <linux/sched.h>
21#include <linux/slab.h> 22#include <linux/slab.h>
22#include <linux/mm.h> 23#include <linux/mm.h>
23#include <linux/miscdevice.h> 24#include <linux/miscdevice.h>
diff --git a/drivers/input/misc/uinput.c b/drivers/input/misc/uinput.c
index c5a49aba418f..d3f57245420a 100644
--- a/drivers/input/misc/uinput.c
+++ b/drivers/input/misc/uinput.c
@@ -30,6 +30,7 @@
30 * - first public version 30 * - first public version
31 */ 31 */
32#include <linux/poll.h> 32#include <linux/poll.h>
33#include <linux/sched.h>
33#include <linux/slab.h> 34#include <linux/slab.h>
34#include <linux/module.h> 35#include <linux/module.h>
35#include <linux/init.h> 36#include <linux/init.h>
diff --git a/drivers/input/mousedev.c b/drivers/input/mousedev.c
index 966b8868f792..a13d80f7da17 100644
--- a/drivers/input/mousedev.c
+++ b/drivers/input/mousedev.c
@@ -13,6 +13,7 @@
13#define MOUSEDEV_MINORS 32 13#define MOUSEDEV_MINORS 32
14#define MOUSEDEV_MIX 31 14#define MOUSEDEV_MIX 31
15 15
16#include <linux/sched.h>
16#include <linux/slab.h> 17#include <linux/slab.h>
17#include <linux/smp_lock.h> 18#include <linux/smp_lock.h>
18#include <linux/poll.h> 19#include <linux/poll.h>
diff --git a/drivers/isdn/divert/divert_procfs.c b/drivers/isdn/divert/divert_procfs.c
index 8b256a617c8a..3697c409bec6 100644
--- a/drivers/isdn/divert/divert_procfs.c
+++ b/drivers/isdn/divert/divert_procfs.c
@@ -16,6 +16,7 @@
16#else 16#else
17#include <linux/fs.h> 17#include <linux/fs.h>
18#endif 18#endif
19#include <linux/sched.h>
19#include <linux/isdnif.h> 20#include <linux/isdnif.h>
20#include <net/net_namespace.h> 21#include <net/net_namespace.h>
21#include "isdn_divert.h" 22#include "isdn_divert.h"
diff --git a/drivers/isdn/gigaset/Kconfig b/drivers/isdn/gigaset/Kconfig
index 18ab8652aa57..dcefedc7044a 100644
--- a/drivers/isdn/gigaset/Kconfig
+++ b/drivers/isdn/gigaset/Kconfig
@@ -1,6 +1,5 @@
1menuconfig ISDN_DRV_GIGASET 1menuconfig ISDN_DRV_GIGASET
2 tristate "Siemens Gigaset support" 2 tristate "Siemens Gigaset support"
3 depends on ISDN_I4L
4 select CRC_CCITT 3 select CRC_CCITT
5 select BITREVERSE 4 select BITREVERSE
6 help 5 help
@@ -11,9 +10,33 @@ menuconfig ISDN_DRV_GIGASET
11 If you have one of these devices, say M here and for at least 10 If you have one of these devices, say M here and for at least
12 one of the connection specific parts that follow. 11 one of the connection specific parts that follow.
13 This will build a module called "gigaset". 12 This will build a module called "gigaset".
13 Note: If you build your ISDN subsystem (ISDN_CAPI or ISDN_I4L)
14 as a module, you have to build this driver as a module too,
15 otherwise the Gigaset device won't show up as an ISDN device.
14 16
15if ISDN_DRV_GIGASET 17if ISDN_DRV_GIGASET
16 18
19config GIGASET_CAPI
20 bool "Gigaset CAPI support (EXPERIMENTAL)"
21 depends on EXPERIMENTAL
22 depends on ISDN_CAPI='y'||(ISDN_CAPI='m'&&ISDN_DRV_GIGASET='m')
23 default ISDN_I4L='n'
24 help
25 Build the Gigaset driver as a CAPI 2.0 driver interfacing with
26 the Kernel CAPI subsystem. To use it with the old ISDN4Linux
27 subsystem you'll have to enable the capidrv glue driver.
28 (select ISDN_CAPI_CAPIDRV.)
29 Say N to build the old native ISDN4Linux variant.
30
31config GIGASET_I4L
32 bool
33 depends on ISDN_I4L='y'||(ISDN_I4L='m'&&ISDN_DRV_GIGASET='m')
34 default !GIGASET_CAPI
35
36config GIGASET_DUMMYLL
37 bool
38 default !GIGASET_CAPI&&!GIGASET_I4L
39
17config GIGASET_BASE 40config GIGASET_BASE
18 tristate "Gigaset base station support" 41 tristate "Gigaset base station support"
19 depends on USB 42 depends on USB
diff --git a/drivers/isdn/gigaset/Makefile b/drivers/isdn/gigaset/Makefile
index e9d3189f56b7..c453b72272a0 100644
--- a/drivers/isdn/gigaset/Makefile
+++ b/drivers/isdn/gigaset/Makefile
@@ -1,4 +1,7 @@
1gigaset-y := common.o interface.o proc.o ev-layer.o i4l.o asyncdata.o 1gigaset-y := common.o interface.o proc.o ev-layer.o asyncdata.o
2gigaset-$(CONFIG_GIGASET_CAPI) += capi.o
3gigaset-$(CONFIG_GIGASET_I4L) += i4l.o
4gigaset-$(CONFIG_GIGASET_DUMMYLL) += dummyll.o
2usb_gigaset-y := usb-gigaset.o 5usb_gigaset-y := usb-gigaset.o
3ser_gigaset-y := ser-gigaset.o 6ser_gigaset-y := ser-gigaset.o
4bas_gigaset-y := bas-gigaset.o isocdata.o 7bas_gigaset-y := bas-gigaset.o isocdata.o
diff --git a/drivers/isdn/gigaset/asyncdata.c b/drivers/isdn/gigaset/asyncdata.c
index 44a58e6f8f65..ccb2a7b7c41d 100644
--- a/drivers/isdn/gigaset/asyncdata.c
+++ b/drivers/isdn/gigaset/asyncdata.c
@@ -19,7 +19,7 @@
19 19
20/* check if byte must be stuffed/escaped 20/* check if byte must be stuffed/escaped
21 * I'm not sure which data should be encoded. 21 * I'm not sure which data should be encoded.
22 * Therefore I will go the hard way and decode every value 22 * Therefore I will go the hard way and encode every value
23 * less than 0x20, the flag sequence and the control escape char. 23 * less than 0x20, the flag sequence and the control escape char.
24 */ 24 */
25static inline int muststuff(unsigned char c) 25static inline int muststuff(unsigned char c)
@@ -35,303 +35,383 @@ static inline int muststuff(unsigned char c)
35 35
36/* == data input =========================================================== */ 36/* == data input =========================================================== */
37 37
38/* process a block of received bytes in command mode (modem response) 38/* process a block of received bytes in command mode
39 * (mstate != MS_LOCKED && (inputstate & INS_command))
40 * Append received bytes to the command response buffer and forward them
41 * line by line to the response handler. Exit whenever a mode/state change
42 * might have occurred.
39 * Return value: 43 * Return value:
40 * number of processed bytes 44 * number of processed bytes
41 */ 45 */
42static inline int cmd_loop(unsigned char c, unsigned char *src, int numbytes, 46static unsigned cmd_loop(unsigned numbytes, struct inbuf_t *inbuf)
43 struct inbuf_t *inbuf)
44{ 47{
48 unsigned char *src = inbuf->data + inbuf->head;
45 struct cardstate *cs = inbuf->cs; 49 struct cardstate *cs = inbuf->cs;
46 unsigned cbytes = cs->cbytes; 50 unsigned cbytes = cs->cbytes;
47 int inputstate = inbuf->inputstate; 51 unsigned procbytes = 0;
48 int startbytes = numbytes; 52 unsigned char c;
49 53
50 for (;;) { 54 while (procbytes < numbytes) {
51 cs->respdata[cbytes] = c; 55 c = *src++;
52 if (c == 10 || c == 13) { 56 procbytes++;
53 gig_dbg(DEBUG_TRANSCMD, "%s: End of Command (%d Bytes)", 57
58 switch (c) {
59 case '\n':
60 if (cbytes == 0 && cs->respdata[0] == '\r') {
61 /* collapse LF with preceding CR */
62 cs->respdata[0] = 0;
63 break;
64 }
65 /* --v-- fall through --v-- */
66 case '\r':
67 /* end of message line, pass to response handler */
68 gig_dbg(DEBUG_TRANSCMD, "%s: End of Message (%d Bytes)",
54 __func__, cbytes); 69 __func__, cbytes);
70 if (cbytes >= MAX_RESP_SIZE) {
71 dev_warn(cs->dev, "response too large (%d)\n",
72 cbytes);
73 cbytes = MAX_RESP_SIZE;
74 }
55 cs->cbytes = cbytes; 75 cs->cbytes = cbytes;
56 gigaset_handle_modem_response(cs); /* can change 76 gigaset_handle_modem_response(cs);
57 cs->dle */
58 cbytes = 0; 77 cbytes = 0;
59 78
60 if (cs->dle && 79 /* store EOL byte for CRLF collapsing */
61 !(inputstate & INS_DLE_command)) { 80 cs->respdata[0] = c;
62 inputstate &= ~INS_command;
63 break;
64 }
65 } else {
66 /* advance in line buffer, checking for overflow */
67 if (cbytes < MAX_RESP_SIZE - 1)
68 cbytes++;
69 else
70 dev_warn(cs->dev, "response too large\n");
71 }
72 81
73 if (!numbytes) 82 /* cs->dle may have changed */
74 break; 83 if (cs->dle && !(inbuf->inputstate & INS_DLE_command))
75 c = *src++; 84 inbuf->inputstate &= ~INS_command;
76 --numbytes; 85
77 if (c == DLE_FLAG && 86 /* return for reevaluating state */
78 (cs->dle || inputstate & INS_DLE_command)) { 87 goto exit;
79 inputstate |= INS_DLE_char; 88
80 break; 89 case DLE_FLAG:
90 if (inbuf->inputstate & INS_DLE_char) {
91 /* quoted DLE: clear quote flag */
92 inbuf->inputstate &= ~INS_DLE_char;
93 } else if (cs->dle ||
94 (inbuf->inputstate & INS_DLE_command)) {
95 /* DLE escape, pass up for handling */
96 inbuf->inputstate |= INS_DLE_char;
97 goto exit;
98 }
99 /* quoted or not in DLE mode: treat as regular data */
100 /* --v-- fall through --v-- */
101 default:
102 /* append to line buffer if possible */
103 if (cbytes < MAX_RESP_SIZE)
104 cs->respdata[cbytes] = c;
105 cbytes++;
81 } 106 }
82 } 107 }
83 108exit:
84 cs->cbytes = cbytes; 109 cs->cbytes = cbytes;
85 inbuf->inputstate = inputstate; 110 return procbytes;
86
87 return startbytes - numbytes;
88} 111}
89 112
90/* process a block of received bytes in lock mode (tty i/f) 113/* process a block of received bytes in lock mode
114 * All received bytes are passed unmodified to the tty i/f.
91 * Return value: 115 * Return value:
92 * number of processed bytes 116 * number of processed bytes
93 */ 117 */
94static inline int lock_loop(unsigned char *src, int numbytes, 118static unsigned lock_loop(unsigned numbytes, struct inbuf_t *inbuf)
95 struct inbuf_t *inbuf)
96{ 119{
97 struct cardstate *cs = inbuf->cs; 120 unsigned char *src = inbuf->data + inbuf->head;
98
99 gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response",
100 numbytes, src);
101 gigaset_if_receive(cs, src, numbytes);
102 121
122 gigaset_dbg_buffer(DEBUG_LOCKCMD, "received response", numbytes, src);
123 gigaset_if_receive(inbuf->cs, src, numbytes);
103 return numbytes; 124 return numbytes;
104} 125}
105 126
127/* set up next receive skb for data mode
128 */
129static void new_rcv_skb(struct bc_state *bcs)
130{
131 struct cardstate *cs = bcs->cs;
132 unsigned short hw_hdr_len = cs->hw_hdr_len;
133
134 if (bcs->ignore) {
135 bcs->skb = NULL;
136 return;
137 }
138
139 bcs->skb = dev_alloc_skb(SBUFSIZE + hw_hdr_len);
140 if (bcs->skb == NULL) {
141 dev_warn(cs->dev, "could not allocate new skb\n");
142 return;
143 }
144 skb_reserve(bcs->skb, hw_hdr_len);
145}
146
106/* process a block of received bytes in HDLC data mode 147/* process a block of received bytes in HDLC data mode
148 * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 == L2_HDLC)
107 * Collect HDLC frames, undoing byte stuffing and watching for DLE escapes. 149 * Collect HDLC frames, undoing byte stuffing and watching for DLE escapes.
108 * When a frame is complete, check the FCS and pass valid frames to the LL. 150 * When a frame is complete, check the FCS and pass valid frames to the LL.
109 * If DLE is encountered, return immediately to let the caller handle it. 151 * If DLE is encountered, return immediately to let the caller handle it.
110 * Return value: 152 * Return value:
111 * number of processed bytes 153 * number of processed bytes
112 * numbytes (all bytes processed) on error --FIXME
113 */ 154 */
114static inline int hdlc_loop(unsigned char c, unsigned char *src, int numbytes, 155static unsigned hdlc_loop(unsigned numbytes, struct inbuf_t *inbuf)
115 struct inbuf_t *inbuf)
116{ 156{
117 struct cardstate *cs = inbuf->cs; 157 struct cardstate *cs = inbuf->cs;
118 struct bc_state *bcs = inbuf->bcs; 158 struct bc_state *bcs = cs->bcs;
119 int inputstate = bcs->inputstate; 159 int inputstate = bcs->inputstate;
120 __u16 fcs = bcs->fcs; 160 __u16 fcs = bcs->fcs;
121 struct sk_buff *skb = bcs->skb; 161 struct sk_buff *skb = bcs->skb;
122 unsigned char error; 162 unsigned char *src = inbuf->data + inbuf->head;
123 struct sk_buff *compskb; 163 unsigned procbytes = 0;
124 int startbytes = numbytes; 164 unsigned char c;
125 int l;
126 165
127 if (unlikely(inputstate & INS_byte_stuff)) { 166 if (inputstate & INS_byte_stuff) {
167 if (!numbytes)
168 return 0;
128 inputstate &= ~INS_byte_stuff; 169 inputstate &= ~INS_byte_stuff;
129 goto byte_stuff; 170 goto byte_stuff;
130 } 171 }
131 for (;;) { 172
132 if (unlikely(c == PPP_ESCAPE)) { 173 while (procbytes < numbytes) {
133 if (unlikely(!numbytes)) { 174 c = *src++;
134 inputstate |= INS_byte_stuff; 175 procbytes++;
176 if (c == DLE_FLAG) {
177 if (inputstate & INS_DLE_char) {
178 /* quoted DLE: clear quote flag */
179 inputstate &= ~INS_DLE_char;
180 } else if (cs->dle || (inputstate & INS_DLE_command)) {
181 /* DLE escape, pass up for handling */
182 inputstate |= INS_DLE_char;
135 break; 183 break;
136 } 184 }
137 c = *src++; 185 }
138 --numbytes; 186
139 if (unlikely(c == DLE_FLAG && 187 if (c == PPP_ESCAPE) {
140 (cs->dle || 188 /* byte stuffing indicator: pull in next byte */
141 inbuf->inputstate & INS_DLE_command))) { 189 if (procbytes >= numbytes) {
142 inbuf->inputstate |= INS_DLE_char; 190 /* end of buffer, save for later processing */
143 inputstate |= INS_byte_stuff; 191 inputstate |= INS_byte_stuff;
144 break; 192 break;
145 } 193 }
146byte_stuff: 194byte_stuff:
195 c = *src++;
196 procbytes++;
197 if (c == DLE_FLAG) {
198 if (inputstate & INS_DLE_char) {
199 /* quoted DLE: clear quote flag */
200 inputstate &= ~INS_DLE_char;
201 } else if (cs->dle ||
202 (inputstate & INS_DLE_command)) {
203 /* DLE escape, pass up for handling */
204 inputstate |=
205 INS_DLE_char | INS_byte_stuff;
206 break;
207 }
208 }
147 c ^= PPP_TRANS; 209 c ^= PPP_TRANS;
148 if (unlikely(!muststuff(c)))
149 gig_dbg(DEBUG_HDLC, "byte stuffed: 0x%02x", c);
150 } else if (unlikely(c == PPP_FLAG)) {
151 if (unlikely(inputstate & INS_skip_frame)) {
152#ifdef CONFIG_GIGASET_DEBUG
153 if (!(inputstate & INS_have_data)) { /* 7E 7E */
154 ++bcs->emptycount;
155 } else
156 gig_dbg(DEBUG_HDLC,
157 "7e----------------------------");
158#endif
159
160 /* end of frame */
161 error = 1;
162 gigaset_rcv_error(NULL, cs, bcs);
163 } else if (!(inputstate & INS_have_data)) { /* 7E 7E */
164#ifdef CONFIG_GIGASET_DEBUG 210#ifdef CONFIG_GIGASET_DEBUG
165 ++bcs->emptycount; 211 if (!muststuff(c))
212 gig_dbg(DEBUG_HDLC, "byte stuffed: 0x%02x", c);
166#endif 213#endif
167 break; 214 } else if (c == PPP_FLAG) {
168 } else { 215 /* end of frame: process content if any */
216 if (inputstate & INS_have_data) {
169 gig_dbg(DEBUG_HDLC, 217 gig_dbg(DEBUG_HDLC,
170 "7e----------------------------"); 218 "7e----------------------------");
171 219
172 /* end of frame */ 220 /* check and pass received frame */
173 error = 0; 221 if (!skb) {
174 222 /* skipped frame */
175 if (unlikely(fcs != PPP_GOODFCS)) { 223 gigaset_isdn_rcv_err(bcs);
224 } else if (skb->len < 2) {
225 /* frame too short for FCS */
226 dev_warn(cs->dev,
227 "short frame (%d)\n",
228 skb->len);
229 gigaset_isdn_rcv_err(bcs);
230 dev_kfree_skb_any(skb);
231 } else if (fcs != PPP_GOODFCS) {
232 /* frame check error */
176 dev_err(cs->dev, 233 dev_err(cs->dev,
177 "Checksum failed, %u bytes corrupted!\n", 234 "Checksum failed, %u bytes corrupted!\n",
178 skb->len); 235 skb->len);
179 compskb = NULL; 236 gigaset_isdn_rcv_err(bcs);
180 gigaset_rcv_error(compskb, cs, bcs); 237 dev_kfree_skb_any(skb);
181 error = 1;
182 } else { 238 } else {
183 if (likely((l = skb->len) > 2)) { 239 /* good frame */
184 skb->tail -= 2; 240 __skb_trim(skb, skb->len - 2);
185 skb->len -= 2; 241 gigaset_skb_rcvd(bcs, skb);
186 } else {
187 dev_kfree_skb(skb);
188 skb = NULL;
189 inputstate |= INS_skip_frame;
190 if (l == 1) {
191 dev_err(cs->dev,
192 "invalid packet size (1)!\n");
193 error = 1;
194 gigaset_rcv_error(NULL,
195 cs, bcs);
196 }
197 }
198 if (likely(!(error ||
199 (inputstate &
200 INS_skip_frame)))) {
201 gigaset_rcv_skb(skb, cs, bcs);
202 }
203 } 242 }
204 }
205 243
206 if (unlikely(error)) 244 /* prepare reception of next frame */
207 if (skb) 245 inputstate &= ~INS_have_data;
208 dev_kfree_skb(skb); 246 new_rcv_skb(bcs);
209 247 skb = bcs->skb;
210 fcs = PPP_INITFCS;
211 inputstate &= ~(INS_have_data | INS_skip_frame);
212 if (unlikely(bcs->ignore)) {
213 inputstate |= INS_skip_frame;
214 skb = NULL;
215 } else if (likely((skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)) {
216 skb_reserve(skb, HW_HDR_LEN);
217 } else { 248 } else {
218 dev_warn(cs->dev, 249 /* empty frame (7E 7E) */
219 "could not allocate new skb\n"); 250#ifdef CONFIG_GIGASET_DEBUG
220 inputstate |= INS_skip_frame; 251 ++bcs->emptycount;
252#endif
253 if (!skb) {
254 /* skipped (?) */
255 gigaset_isdn_rcv_err(bcs);
256 new_rcv_skb(bcs);
257 skb = bcs->skb;
258 }
221 } 259 }
222 260
223 break; 261 fcs = PPP_INITFCS;
224 } else if (unlikely(muststuff(c))) { 262 continue;
263#ifdef CONFIG_GIGASET_DEBUG
264 } else if (muststuff(c)) {
225 /* Should not happen. Possible after ZDLE=1<CR><LF>. */ 265 /* Should not happen. Possible after ZDLE=1<CR><LF>. */
226 gig_dbg(DEBUG_HDLC, "not byte stuffed: 0x%02x", c); 266 gig_dbg(DEBUG_HDLC, "not byte stuffed: 0x%02x", c);
267#endif
227 } 268 }
228 269
229 /* add character */ 270 /* regular data byte, append to skb */
230
231#ifdef CONFIG_GIGASET_DEBUG 271#ifdef CONFIG_GIGASET_DEBUG
232 if (unlikely(!(inputstate & INS_have_data))) { 272 if (!(inputstate & INS_have_data)) {
233 gig_dbg(DEBUG_HDLC, "7e (%d x) ================", 273 gig_dbg(DEBUG_HDLC, "7e (%d x) ================",
234 bcs->emptycount); 274 bcs->emptycount);
235 bcs->emptycount = 0; 275 bcs->emptycount = 0;
236 } 276 }
237#endif 277#endif
238
239 inputstate |= INS_have_data; 278 inputstate |= INS_have_data;
240 279 if (skb) {
241 if (likely(!(inputstate & INS_skip_frame))) { 280 if (skb->len == SBUFSIZE) {
242 if (unlikely(skb->len == SBUFSIZE)) {
243 dev_warn(cs->dev, "received packet too long\n"); 281 dev_warn(cs->dev, "received packet too long\n");
244 dev_kfree_skb_any(skb); 282 dev_kfree_skb_any(skb);
245 skb = NULL; 283 /* skip remainder of packet */
246 inputstate |= INS_skip_frame; 284 bcs->skb = skb = NULL;
247 break; 285 } else {
286 *__skb_put(skb, 1) = c;
287 fcs = crc_ccitt_byte(fcs, c);
248 } 288 }
249 *__skb_put(skb, 1) = c;
250 fcs = crc_ccitt_byte(fcs, c);
251 }
252
253 if (unlikely(!numbytes))
254 break;
255 c = *src++;
256 --numbytes;
257 if (unlikely(c == DLE_FLAG &&
258 (cs->dle ||
259 inbuf->inputstate & INS_DLE_command))) {
260 inbuf->inputstate |= INS_DLE_char;
261 break;
262 } 289 }
263 } 290 }
291
264 bcs->inputstate = inputstate; 292 bcs->inputstate = inputstate;
265 bcs->fcs = fcs; 293 bcs->fcs = fcs;
266 bcs->skb = skb; 294 return procbytes;
267 return startbytes - numbytes;
268} 295}
269 296
270/* process a block of received bytes in transparent data mode 297/* process a block of received bytes in transparent data mode
298 * (mstate != MS_LOCKED && !(inputstate & INS_command) && proto2 != L2_HDLC)
271 * Invert bytes, undoing byte stuffing and watching for DLE escapes. 299 * Invert bytes, undoing byte stuffing and watching for DLE escapes.
272 * If DLE is encountered, return immediately to let the caller handle it. 300 * If DLE is encountered, return immediately to let the caller handle it.
273 * Return value: 301 * Return value:
274 * number of processed bytes 302 * number of processed bytes
275 * numbytes (all bytes processed) on error --FIXME
276 */ 303 */
277static inline int iraw_loop(unsigned char c, unsigned char *src, int numbytes, 304static unsigned iraw_loop(unsigned numbytes, struct inbuf_t *inbuf)
278 struct inbuf_t *inbuf)
279{ 305{
280 struct cardstate *cs = inbuf->cs; 306 struct cardstate *cs = inbuf->cs;
281 struct bc_state *bcs = inbuf->bcs; 307 struct bc_state *bcs = cs->bcs;
282 int inputstate = bcs->inputstate; 308 int inputstate = bcs->inputstate;
283 struct sk_buff *skb = bcs->skb; 309 struct sk_buff *skb = bcs->skb;
284 int startbytes = numbytes; 310 unsigned char *src = inbuf->data + inbuf->head;
311 unsigned procbytes = 0;
312 unsigned char c;
285 313
286 for (;;) { 314 if (!skb) {
287 /* add character */ 315 /* skip this block */
288 inputstate |= INS_have_data; 316 new_rcv_skb(bcs);
317 return numbytes;
318 }
289 319
290 if (likely(!(inputstate & INS_skip_frame))) { 320 while (procbytes < numbytes && skb->len < SBUFSIZE) {
291 if (unlikely(skb->len == SBUFSIZE)) { 321 c = *src++;
292 //FIXME just pass skb up and allocate a new one 322 procbytes++;
293 dev_warn(cs->dev, "received packet too long\n"); 323
294 dev_kfree_skb_any(skb); 324 if (c == DLE_FLAG) {
295 skb = NULL; 325 if (inputstate & INS_DLE_char) {
296 inputstate |= INS_skip_frame; 326 /* quoted DLE: clear quote flag */
327 inputstate &= ~INS_DLE_char;
328 } else if (cs->dle || (inputstate & INS_DLE_command)) {
329 /* DLE escape, pass up for handling */
330 inputstate |= INS_DLE_char;
297 break; 331 break;
298 } 332 }
299 *__skb_put(skb, 1) = bitrev8(c);
300 } 333 }
301 334
302 if (unlikely(!numbytes)) 335 /* regular data byte: append to current skb */
303 break; 336 inputstate |= INS_have_data;
304 c = *src++; 337 *__skb_put(skb, 1) = bitrev8(c);
305 --numbytes;
306 if (unlikely(c == DLE_FLAG &&
307 (cs->dle ||
308 inbuf->inputstate & INS_DLE_command))) {
309 inbuf->inputstate |= INS_DLE_char;
310 break;
311 }
312 } 338 }
313 339
314 /* pass data up */ 340 /* pass data up */
315 if (likely(inputstate & INS_have_data)) { 341 if (inputstate & INS_have_data) {
316 if (likely(!(inputstate & INS_skip_frame))) { 342 gigaset_skb_rcvd(bcs, skb);
317 gigaset_rcv_skb(skb, cs, bcs); 343 inputstate &= ~INS_have_data;
318 } 344 new_rcv_skb(bcs);
319 inputstate &= ~(INS_have_data | INS_skip_frame); 345 }
320 if (unlikely(bcs->ignore)) { 346
321 inputstate |= INS_skip_frame; 347 bcs->inputstate = inputstate;
322 skb = NULL; 348 return procbytes;
323 } else if (likely((skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) 349}
324 != NULL)) { 350
325 skb_reserve(skb, HW_HDR_LEN); 351/* process DLE escapes
352 * Called whenever a DLE sequence might be encountered in the input stream.
353 * Either processes the entire DLE sequence or, if that isn't possible,
354 * notes the fact that an initial DLE has been received in the INS_DLE_char
355 * inputstate flag and resumes processing of the sequence on the next call.
356 */
357static void handle_dle(struct inbuf_t *inbuf)
358{
359 struct cardstate *cs = inbuf->cs;
360
361 if (cs->mstate == MS_LOCKED)
362 return; /* no DLE processing in lock mode */
363
364 if (!(inbuf->inputstate & INS_DLE_char)) {
365 /* no DLE pending */
366 if (inbuf->data[inbuf->head] == DLE_FLAG &&
367 (cs->dle || inbuf->inputstate & INS_DLE_command)) {
368 /* start of DLE sequence */
369 inbuf->head++;
370 if (inbuf->head == inbuf->tail ||
371 inbuf->head == RBUFSIZE) {
372 /* end of buffer, save for later processing */
373 inbuf->inputstate |= INS_DLE_char;
374 return;
375 }
326 } else { 376 } else {
327 dev_warn(cs->dev, "could not allocate new skb\n"); 377 /* regular data byte */
328 inputstate |= INS_skip_frame; 378 return;
329 } 379 }
330 } 380 }
331 381
332 bcs->inputstate = inputstate; 382 /* consume pending DLE */
333 bcs->skb = skb; 383 inbuf->inputstate &= ~INS_DLE_char;
334 return startbytes - numbytes; 384
385 switch (inbuf->data[inbuf->head]) {
386 case 'X': /* begin of event message */
387 if (inbuf->inputstate & INS_command)
388 dev_notice(cs->dev,
389 "received <DLE>X in command mode\n");
390 inbuf->inputstate |= INS_command | INS_DLE_command;
391 inbuf->head++; /* byte consumed */
392 break;
393 case '.': /* end of event message */
394 if (!(inbuf->inputstate & INS_DLE_command))
395 dev_notice(cs->dev,
396 "received <DLE>. without <DLE>X\n");
397 inbuf->inputstate &= ~INS_DLE_command;
398 /* return to data mode if in DLE mode */
399 if (cs->dle)
400 inbuf->inputstate &= ~INS_command;
401 inbuf->head++; /* byte consumed */
402 break;
403 case DLE_FLAG: /* DLE in data stream */
404 /* mark as quoted */
405 inbuf->inputstate |= INS_DLE_char;
406 if (!(cs->dle || inbuf->inputstate & INS_DLE_command))
407 dev_notice(cs->dev,
408 "received <DLE><DLE> not in DLE mode\n");
409 break; /* quoted byte left in buffer */
410 default:
411 dev_notice(cs->dev, "received <DLE><%02x>\n",
412 inbuf->data[inbuf->head]);
413 /* quoted byte left in buffer */
414 }
335} 415}
336 416
337/** 417/**
@@ -345,94 +425,39 @@ static inline int iraw_loop(unsigned char c, unsigned char *src, int numbytes,
345 */ 425 */
346void gigaset_m10x_input(struct inbuf_t *inbuf) 426void gigaset_m10x_input(struct inbuf_t *inbuf)
347{ 427{
348 struct cardstate *cs; 428 struct cardstate *cs = inbuf->cs;
349 unsigned tail, head, numbytes; 429 unsigned numbytes, procbytes;
350 unsigned char *src, c;
351 int procbytes;
352
353 head = inbuf->head;
354 tail = inbuf->tail;
355 gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", head, tail);
356
357 if (head != tail) {
358 cs = inbuf->cs;
359 src = inbuf->data + head;
360 numbytes = (head > tail ? RBUFSIZE : tail) - head;
361 gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes);
362 430
363 while (numbytes) { 431 gig_dbg(DEBUG_INTR, "buffer state: %u -> %u", inbuf->head, inbuf->tail);
364 if (cs->mstate == MS_LOCKED) {
365 procbytes = lock_loop(src, numbytes, inbuf);
366 src += procbytes;
367 numbytes -= procbytes;
368 } else {
369 c = *src++;
370 --numbytes;
371 if (c == DLE_FLAG && (cs->dle ||
372 inbuf->inputstate & INS_DLE_command)) {
373 if (!(inbuf->inputstate & INS_DLE_char)) {
374 inbuf->inputstate |= INS_DLE_char;
375 goto nextbyte;
376 }
377 /* <DLE> <DLE> => <DLE> in data stream */
378 inbuf->inputstate &= ~INS_DLE_char;
379 }
380 432
381 if (!(inbuf->inputstate & INS_DLE_char)) { 433 while (inbuf->head != inbuf->tail) {
382 434 /* check for DLE escape */
383 /* FIXME use function pointers? */ 435 handle_dle(inbuf);
384 if (inbuf->inputstate & INS_command)
385 procbytes = cmd_loop(c, src, numbytes, inbuf);
386 else if (inbuf->bcs->proto2 == ISDN_PROTO_L2_HDLC)
387 procbytes = hdlc_loop(c, src, numbytes, inbuf);
388 else
389 procbytes = iraw_loop(c, src, numbytes, inbuf);
390
391 src += procbytes;
392 numbytes -= procbytes;
393 } else { /* DLE char */
394 inbuf->inputstate &= ~INS_DLE_char;
395 switch (c) {
396 case 'X': /*begin of command*/
397 if (inbuf->inputstate & INS_command)
398 dev_warn(cs->dev,
399 "received <DLE> 'X' in command mode\n");
400 inbuf->inputstate |=
401 INS_command | INS_DLE_command;
402 break;
403 case '.': /*end of command*/
404 if (!(inbuf->inputstate & INS_command))
405 dev_warn(cs->dev,
406 "received <DLE> '.' in hdlc mode\n");
407 inbuf->inputstate &= cs->dle ?
408 ~(INS_DLE_command|INS_command)
409 : ~INS_DLE_command;
410 break;
411 //case DLE_FLAG: /*DLE_FLAG in data stream*/ /* schon oben behandelt! */
412 default:
413 dev_err(cs->dev,
414 "received 0x10 0x%02x!\n",
415 (int) c);
416 /* FIXME: reset driver?? */
417 }
418 }
419 }
420nextbyte:
421 if (!numbytes) {
422 /* end of buffer, check for wrap */
423 if (head > tail) {
424 head = 0;
425 src = inbuf->data;
426 numbytes = tail;
427 } else {
428 head = tail;
429 break;
430 }
431 }
432 }
433 436
434 gig_dbg(DEBUG_INTR, "setting head to %u", head); 437 /* process a contiguous block of bytes */
435 inbuf->head = head; 438 numbytes = (inbuf->head > inbuf->tail ?
439 RBUFSIZE : inbuf->tail) - inbuf->head;
440 gig_dbg(DEBUG_INTR, "processing %u bytes", numbytes);
441 /*
442 * numbytes may be 0 if handle_dle() ate the last byte.
443 * This does no harm, *_loop() will just return 0 immediately.
444 */
445
446 if (cs->mstate == MS_LOCKED)
447 procbytes = lock_loop(numbytes, inbuf);
448 else if (inbuf->inputstate & INS_command)
449 procbytes = cmd_loop(numbytes, inbuf);
450 else if (cs->bcs->proto2 == L2_HDLC)
451 procbytes = hdlc_loop(numbytes, inbuf);
452 else
453 procbytes = iraw_loop(numbytes, inbuf);
454 inbuf->head += procbytes;
455
456 /* check for buffer wraparound */
457 if (inbuf->head >= RBUFSIZE)
458 inbuf->head = 0;
459
460 gig_dbg(DEBUG_INTR, "head set to %u", inbuf->head);
436 } 461 }
437} 462}
438EXPORT_SYMBOL_GPL(gigaset_m10x_input); 463EXPORT_SYMBOL_GPL(gigaset_m10x_input);
@@ -440,16 +465,16 @@ EXPORT_SYMBOL_GPL(gigaset_m10x_input);
440 465
441/* == data output ========================================================== */ 466/* == data output ========================================================== */
442 467
443/* Encoding of a PPP packet into an octet stuffed HDLC frame 468/*
444 * with FCS, opening and closing flags. 469 * Encode a data packet into an octet stuffed HDLC frame with FCS,
470 * opening and closing flags, preserving headroom data.
445 * parameters: 471 * parameters:
446 * skb skb containing original packet (freed upon return) 472 * skb skb containing original packet (freed upon return)
447 * head number of headroom bytes to allocate in result skb
448 * tail number of tailroom bytes to allocate in result skb
449 * Return value: 473 * Return value:
450 * pointer to newly allocated skb containing the result frame 474 * pointer to newly allocated skb containing the result frame
475 * and the original link layer header, NULL on error
451 */ 476 */
452static struct sk_buff *HDLC_Encode(struct sk_buff *skb, int head, int tail) 477static struct sk_buff *HDLC_Encode(struct sk_buff *skb)
453{ 478{
454 struct sk_buff *hdlc_skb; 479 struct sk_buff *hdlc_skb;
455 __u16 fcs; 480 __u16 fcs;
@@ -471,16 +496,19 @@ static struct sk_buff *HDLC_Encode(struct sk_buff *skb, int head, int tail)
471 496
472 /* size of new buffer: original size + number of stuffing bytes 497 /* size of new buffer: original size + number of stuffing bytes
473 * + 2 bytes FCS + 2 stuffing bytes for FCS (if needed) + 2 flag bytes 498 * + 2 bytes FCS + 2 stuffing bytes for FCS (if needed) + 2 flag bytes
499 * + room for link layer header
474 */ 500 */
475 hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + tail + head); 501 hdlc_skb = dev_alloc_skb(skb->len + stuf_cnt + 6 + skb->mac_len);
476 if (!hdlc_skb) { 502 if (!hdlc_skb) {
477 dev_kfree_skb(skb); 503 dev_kfree_skb_any(skb);
478 return NULL; 504 return NULL;
479 } 505 }
480 skb_reserve(hdlc_skb, head);
481 506
482 /* Copy acknowledge request into new skb */ 507 /* Copy link layer header into new skb */
483 memcpy(hdlc_skb->head, skb->head, 2); 508 skb_reset_mac_header(hdlc_skb);
509 skb_reserve(hdlc_skb, skb->mac_len);
510 memcpy(skb_mac_header(hdlc_skb), skb_mac_header(skb), skb->mac_len);
511 hdlc_skb->mac_len = skb->mac_len;
484 512
485 /* Add flag sequence in front of everything.. */ 513 /* Add flag sequence in front of everything.. */
486 *(skb_put(hdlc_skb, 1)) = PPP_FLAG; 514 *(skb_put(hdlc_skb, 1)) = PPP_FLAG;
@@ -511,33 +539,42 @@ static struct sk_buff *HDLC_Encode(struct sk_buff *skb, int head, int tail)
511 539
512 *(skb_put(hdlc_skb, 1)) = PPP_FLAG; 540 *(skb_put(hdlc_skb, 1)) = PPP_FLAG;
513 541
514 dev_kfree_skb(skb); 542 dev_kfree_skb_any(skb);
515 return hdlc_skb; 543 return hdlc_skb;
516} 544}
517 545
518/* Encoding of a raw packet into an octet stuffed bit inverted frame 546/*
547 * Encode a data packet into an octet stuffed raw bit inverted frame,
548 * preserving headroom data.
519 * parameters: 549 * parameters:
520 * skb skb containing original packet (freed upon return) 550 * skb skb containing original packet (freed upon return)
521 * head number of headroom bytes to allocate in result skb
522 * tail number of tailroom bytes to allocate in result skb
523 * Return value: 551 * Return value:
524 * pointer to newly allocated skb containing the result frame 552 * pointer to newly allocated skb containing the result frame
553 * and the original link layer header, NULL on error
525 */ 554 */
526static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail) 555static struct sk_buff *iraw_encode(struct sk_buff *skb)
527{ 556{
528 struct sk_buff *iraw_skb; 557 struct sk_buff *iraw_skb;
529 unsigned char c; 558 unsigned char c;
530 unsigned char *cp; 559 unsigned char *cp;
531 int len; 560 int len;
532 561
533 /* worst case: every byte must be stuffed */ 562 /* size of new buffer (worst case = every byte must be stuffed):
534 iraw_skb = dev_alloc_skb(2*skb->len + tail + head); 563 * 2 * original size + room for link layer header
564 */
565 iraw_skb = dev_alloc_skb(2*skb->len + skb->mac_len);
535 if (!iraw_skb) { 566 if (!iraw_skb) {
536 dev_kfree_skb(skb); 567 dev_kfree_skb_any(skb);
537 return NULL; 568 return NULL;
538 } 569 }
539 skb_reserve(iraw_skb, head);
540 570
571 /* copy link layer header into new skb */
572 skb_reset_mac_header(iraw_skb);
573 skb_reserve(iraw_skb, skb->mac_len);
574 memcpy(skb_mac_header(iraw_skb), skb_mac_header(skb), skb->mac_len);
575 iraw_skb->mac_len = skb->mac_len;
576
577 /* copy and stuff data */
541 cp = skb->data; 578 cp = skb->data;
542 len = skb->len; 579 len = skb->len;
543 while (len--) { 580 while (len--) {
@@ -546,7 +583,7 @@ static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail)
546 *(skb_put(iraw_skb, 1)) = c; 583 *(skb_put(iraw_skb, 1)) = c;
547 *(skb_put(iraw_skb, 1)) = c; 584 *(skb_put(iraw_skb, 1)) = c;
548 } 585 }
549 dev_kfree_skb(skb); 586 dev_kfree_skb_any(skb);
550 return iraw_skb; 587 return iraw_skb;
551} 588}
552 589
@@ -555,8 +592,10 @@ static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail)
555 * @bcs: B channel descriptor structure. 592 * @bcs: B channel descriptor structure.
556 * @skb: data to send. 593 * @skb: data to send.
557 * 594 *
558 * Called by i4l.c to encode and queue an skb for sending, and start 595 * Called by LL to encode and queue an skb for sending, and start
559 * transmission if necessary. 596 * transmission if necessary.
597 * Once the payload data has been transmitted completely, gigaset_skb_sent()
598 * will be called with the skb's link layer header preserved.
560 * 599 *
561 * Return value: 600 * Return value:
562 * number of bytes accepted for sending (skb->len) if ok, 601 * number of bytes accepted for sending (skb->len) if ok,
@@ -564,24 +603,25 @@ static struct sk_buff *iraw_encode(struct sk_buff *skb, int head, int tail)
564 */ 603 */
565int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb) 604int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb)
566{ 605{
606 struct cardstate *cs = bcs->cs;
567 unsigned len = skb->len; 607 unsigned len = skb->len;
568 unsigned long flags; 608 unsigned long flags;
569 609
570 if (bcs->proto2 == ISDN_PROTO_L2_HDLC) 610 if (bcs->proto2 == L2_HDLC)
571 skb = HDLC_Encode(skb, HW_HDR_LEN, 0); 611 skb = HDLC_Encode(skb);
572 else 612 else
573 skb = iraw_encode(skb, HW_HDR_LEN, 0); 613 skb = iraw_encode(skb);
574 if (!skb) { 614 if (!skb) {
575 dev_err(bcs->cs->dev, 615 dev_err(cs->dev,
576 "unable to allocate memory for encoding!\n"); 616 "unable to allocate memory for encoding!\n");
577 return -ENOMEM; 617 return -ENOMEM;
578 } 618 }
579 619
580 skb_queue_tail(&bcs->squeue, skb); 620 skb_queue_tail(&bcs->squeue, skb);
581 spin_lock_irqsave(&bcs->cs->lock, flags); 621 spin_lock_irqsave(&cs->lock, flags);
582 if (bcs->cs->connected) 622 if (cs->connected)
583 tasklet_schedule(&bcs->cs->write_tasklet); 623 tasklet_schedule(&cs->write_tasklet);
584 spin_unlock_irqrestore(&bcs->cs->lock, flags); 624 spin_unlock_irqrestore(&cs->lock, flags);
585 625
586 return len; /* ok so far */ 626 return len; /* ok so far */
587} 627}
diff --git a/drivers/isdn/gigaset/bas-gigaset.c b/drivers/isdn/gigaset/bas-gigaset.c
index 5ed1d99eb9f3..9fd19db045fb 100644
--- a/drivers/isdn/gigaset/bas-gigaset.c
+++ b/drivers/isdn/gigaset/bas-gigaset.c
@@ -57,7 +57,7 @@ MODULE_PARM_DESC(cidmode, "Call-ID mode");
57#define USB_SX353_PRODUCT_ID 0x0022 57#define USB_SX353_PRODUCT_ID 0x0022
58 58
59/* table of devices that work with this driver */ 59/* table of devices that work with this driver */
60static const struct usb_device_id gigaset_table [] = { 60static const struct usb_device_id gigaset_table[] = {
61 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3070_PRODUCT_ID) }, 61 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3070_PRODUCT_ID) },
62 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3075_PRODUCT_ID) }, 62 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_3075_PRODUCT_ID) },
63 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX303_PRODUCT_ID) }, 63 { USB_DEVICE(USB_GIGA_VENDOR_ID, USB_SX303_PRODUCT_ID) },
@@ -137,7 +137,7 @@ struct bas_cardstate {
137#define BS_RESETTING 0x200 /* waiting for HD_RESET_INTERRUPT_PIPE_ACK */ 137#define BS_RESETTING 0x200 /* waiting for HD_RESET_INTERRUPT_PIPE_ACK */
138 138
139 139
140static struct gigaset_driver *driver = NULL; 140static struct gigaset_driver *driver;
141 141
142/* usb specific object needed to register this driver with the usb subsystem */ 142/* usb specific object needed to register this driver with the usb subsystem */
143static struct usb_driver gigaset_usb_driver = { 143static struct usb_driver gigaset_usb_driver = {
@@ -601,11 +601,12 @@ static int atread_submit(struct cardstate *cs, int timeout)
601 ucs->dr_cmd_in.wLength = cpu_to_le16(ucs->rcvbuf_size); 601 ucs->dr_cmd_in.wLength = cpu_to_le16(ucs->rcvbuf_size);
602 usb_fill_control_urb(ucs->urb_cmd_in, ucs->udev, 602 usb_fill_control_urb(ucs->urb_cmd_in, ucs->udev,
603 usb_rcvctrlpipe(ucs->udev, 0), 603 usb_rcvctrlpipe(ucs->udev, 0),
604 (unsigned char*) & ucs->dr_cmd_in, 604 (unsigned char *) &ucs->dr_cmd_in,
605 ucs->rcvbuf, ucs->rcvbuf_size, 605 ucs->rcvbuf, ucs->rcvbuf_size,
606 read_ctrl_callback, cs->inbuf); 606 read_ctrl_callback, cs->inbuf);
607 607
608 if ((ret = usb_submit_urb(ucs->urb_cmd_in, GFP_ATOMIC)) != 0) { 608 ret = usb_submit_urb(ucs->urb_cmd_in, GFP_ATOMIC);
609 if (ret != 0) {
609 update_basstate(ucs, 0, BS_ATRDPEND); 610 update_basstate(ucs, 0, BS_ATRDPEND);
610 dev_err(cs->dev, "could not submit HD_READ_ATMESSAGE: %s\n", 611 dev_err(cs->dev, "could not submit HD_READ_ATMESSAGE: %s\n",
611 get_usb_rcmsg(ret)); 612 get_usb_rcmsg(ret));
@@ -652,13 +653,11 @@ static void read_int_callback(struct urb *urb)
652 return; 653 return;
653 case -ENODEV: /* device removed */ 654 case -ENODEV: /* device removed */
654 case -ESHUTDOWN: /* device shut down */ 655 case -ESHUTDOWN: /* device shut down */
655 //FIXME use this as disconnect indicator?
656 gig_dbg(DEBUG_USBREQ, "%s: device disconnected", __func__); 656 gig_dbg(DEBUG_USBREQ, "%s: device disconnected", __func__);
657 return; 657 return;
658 default: /* severe trouble */ 658 default: /* severe trouble */
659 dev_warn(cs->dev, "interrupt read: %s\n", 659 dev_warn(cs->dev, "interrupt read: %s\n",
660 get_usb_statmsg(status)); 660 get_usb_statmsg(status));
661 //FIXME corrective action? resubmission always ok?
662 goto resubmit; 661 goto resubmit;
663 } 662 }
664 663
@@ -742,7 +741,8 @@ static void read_int_callback(struct urb *urb)
742 kfree(ucs->rcvbuf); 741 kfree(ucs->rcvbuf);
743 ucs->rcvbuf_size = 0; 742 ucs->rcvbuf_size = 0;
744 } 743 }
745 if ((ucs->rcvbuf = kmalloc(l, GFP_ATOMIC)) == NULL) { 744 ucs->rcvbuf = kmalloc(l, GFP_ATOMIC);
745 if (ucs->rcvbuf == NULL) {
746 spin_unlock_irqrestore(&cs->lock, flags); 746 spin_unlock_irqrestore(&cs->lock, flags);
747 dev_err(cs->dev, "out of memory receiving AT data\n"); 747 dev_err(cs->dev, "out of memory receiving AT data\n");
748 error_reset(cs); 748 error_reset(cs);
@@ -750,12 +750,12 @@ static void read_int_callback(struct urb *urb)
750 } 750 }
751 ucs->rcvbuf_size = l; 751 ucs->rcvbuf_size = l;
752 ucs->retry_cmd_in = 0; 752 ucs->retry_cmd_in = 0;
753 if ((rc = atread_submit(cs, BAS_TIMEOUT)) < 0) { 753 rc = atread_submit(cs, BAS_TIMEOUT);
754 if (rc < 0) {
754 kfree(ucs->rcvbuf); 755 kfree(ucs->rcvbuf);
755 ucs->rcvbuf = NULL; 756 ucs->rcvbuf = NULL;
756 ucs->rcvbuf_size = 0; 757 ucs->rcvbuf_size = 0;
757 if (rc != -ENODEV) { 758 if (rc != -ENODEV) {
758 //FIXME corrective action?
759 spin_unlock_irqrestore(&cs->lock, flags); 759 spin_unlock_irqrestore(&cs->lock, flags);
760 error_reset(cs); 760 error_reset(cs);
761 break; 761 break;
@@ -911,7 +911,7 @@ static int starturbs(struct bc_state *bcs)
911 int rc; 911 int rc;
912 912
913 /* initialize L2 reception */ 913 /* initialize L2 reception */
914 if (bcs->proto2 == ISDN_PROTO_L2_HDLC) 914 if (bcs->proto2 == L2_HDLC)
915 bcs->inputstate |= INS_flag_hunt; 915 bcs->inputstate |= INS_flag_hunt;
916 916
917 /* submit all isochronous input URBs */ 917 /* submit all isochronous input URBs */
@@ -940,7 +940,8 @@ static int starturbs(struct bc_state *bcs)
940 } 940 }
941 941
942 dump_urb(DEBUG_ISO, "Initial isoc read", urb); 942 dump_urb(DEBUG_ISO, "Initial isoc read", urb);
943 if ((rc = usb_submit_urb(urb, GFP_ATOMIC)) != 0) 943 rc = usb_submit_urb(urb, GFP_ATOMIC);
944 if (rc != 0)
944 goto error; 945 goto error;
945 } 946 }
946 947
@@ -1045,7 +1046,8 @@ static int submit_iso_write_urb(struct isow_urbctx_t *ucx)
1045 1046
1046 /* compute frame length according to flow control */ 1047 /* compute frame length according to flow control */
1047 ifd->length = BAS_NORMFRAME; 1048 ifd->length = BAS_NORMFRAME;
1048 if ((corrbytes = atomic_read(&ubc->corrbytes)) != 0) { 1049 corrbytes = atomic_read(&ubc->corrbytes);
1050 if (corrbytes != 0) {
1049 gig_dbg(DEBUG_ISO, "%s: corrbytes=%d", 1051 gig_dbg(DEBUG_ISO, "%s: corrbytes=%d",
1050 __func__, corrbytes); 1052 __func__, corrbytes);
1051 if (corrbytes > BAS_HIGHFRAME - BAS_NORMFRAME) 1053 if (corrbytes > BAS_HIGHFRAME - BAS_NORMFRAME)
@@ -1064,7 +1066,7 @@ static int submit_iso_write_urb(struct isow_urbctx_t *ucx)
1064 "%s: buffer busy at frame %d", 1066 "%s: buffer busy at frame %d",
1065 __func__, nframe); 1067 __func__, nframe);
1066 /* tasklet will be restarted from 1068 /* tasklet will be restarted from
1067 gigaset_send_skb() */ 1069 gigaset_isoc_send_skb() */
1068 } else { 1070 } else {
1069 dev_err(ucx->bcs->cs->dev, 1071 dev_err(ucx->bcs->cs->dev,
1070 "%s: buffer error %d at frame %d\n", 1072 "%s: buffer error %d at frame %d\n",
@@ -1284,7 +1286,8 @@ static void read_iso_tasklet(unsigned long data)
1284 for (;;) { 1286 for (;;) {
1285 /* retrieve URB */ 1287 /* retrieve URB */
1286 spin_lock_irqsave(&ubc->isoinlock, flags); 1288 spin_lock_irqsave(&ubc->isoinlock, flags);
1287 if (!(urb = ubc->isoindone)) { 1289 urb = ubc->isoindone;
1290 if (!urb) {
1288 spin_unlock_irqrestore(&ubc->isoinlock, flags); 1291 spin_unlock_irqrestore(&ubc->isoinlock, flags);
1289 return; 1292 return;
1290 } 1293 }
@@ -1371,7 +1374,7 @@ static void read_iso_tasklet(unsigned long data)
1371 "isochronous read: %d data bytes missing\n", 1374 "isochronous read: %d data bytes missing\n",
1372 totleft); 1375 totleft);
1373 1376
1374 error: 1377error:
1375 /* URB processed, resubmit */ 1378 /* URB processed, resubmit */
1376 for (frame = 0; frame < BAS_NUMFRAMES; frame++) { 1379 for (frame = 0; frame < BAS_NUMFRAMES; frame++) {
1377 urb->iso_frame_desc[frame].status = 0; 1380 urb->iso_frame_desc[frame].status = 0;
@@ -1568,7 +1571,7 @@ static int req_submit(struct bc_state *bcs, int req, int val, int timeout)
1568 ucs->dr_ctrl.wLength = 0; 1571 ucs->dr_ctrl.wLength = 0;
1569 usb_fill_control_urb(ucs->urb_ctrl, ucs->udev, 1572 usb_fill_control_urb(ucs->urb_ctrl, ucs->udev,
1570 usb_sndctrlpipe(ucs->udev, 0), 1573 usb_sndctrlpipe(ucs->udev, 0),
1571 (unsigned char*) &ucs->dr_ctrl, NULL, 0, 1574 (unsigned char *) &ucs->dr_ctrl, NULL, 0,
1572 write_ctrl_callback, ucs); 1575 write_ctrl_callback, ucs);
1573 ucs->retry_ctrl = 0; 1576 ucs->retry_ctrl = 0;
1574 ret = usb_submit_urb(ucs->urb_ctrl, GFP_ATOMIC); 1577 ret = usb_submit_urb(ucs->urb_ctrl, GFP_ATOMIC);
@@ -1621,7 +1624,8 @@ static int gigaset_init_bchannel(struct bc_state *bcs)
1621 return -EHOSTUNREACH; 1624 return -EHOSTUNREACH;
1622 } 1625 }
1623 1626
1624 if ((ret = starturbs(bcs)) < 0) { 1627 ret = starturbs(bcs);
1628 if (ret < 0) {
1625 dev_err(cs->dev, 1629 dev_err(cs->dev,
1626 "could not start isochronous I/O for channel B%d: %s\n", 1630 "could not start isochronous I/O for channel B%d: %s\n",
1627 bcs->channel + 1, 1631 bcs->channel + 1,
@@ -1633,7 +1637,8 @@ static int gigaset_init_bchannel(struct bc_state *bcs)
1633 } 1637 }
1634 1638
1635 req = bcs->channel ? HD_OPEN_B2CHANNEL : HD_OPEN_B1CHANNEL; 1639 req = bcs->channel ? HD_OPEN_B2CHANNEL : HD_OPEN_B1CHANNEL;
1636 if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0) { 1640 ret = req_submit(bcs, req, 0, BAS_TIMEOUT);
1641 if (ret < 0) {
1637 dev_err(cs->dev, "could not open channel B%d\n", 1642 dev_err(cs->dev, "could not open channel B%d\n",
1638 bcs->channel + 1); 1643 bcs->channel + 1);
1639 stopurbs(bcs->hw.bas); 1644 stopurbs(bcs->hw.bas);
@@ -1677,7 +1682,8 @@ static int gigaset_close_bchannel(struct bc_state *bcs)
1677 1682
1678 /* channel running: tell device to close it */ 1683 /* channel running: tell device to close it */
1679 req = bcs->channel ? HD_CLOSE_B2CHANNEL : HD_CLOSE_B1CHANNEL; 1684 req = bcs->channel ? HD_CLOSE_B2CHANNEL : HD_CLOSE_B1CHANNEL;
1680 if ((ret = req_submit(bcs, req, 0, BAS_TIMEOUT)) < 0) 1685 ret = req_submit(bcs, req, 0, BAS_TIMEOUT);
1686 if (ret < 0)
1681 dev_err(cs->dev, "closing channel B%d failed\n", 1687 dev_err(cs->dev, "closing channel B%d failed\n",
1682 bcs->channel + 1); 1688 bcs->channel + 1);
1683 1689
@@ -1703,10 +1709,12 @@ static void complete_cb(struct cardstate *cs)
1703 gig_dbg(DEBUG_TRANSCMD|DEBUG_LOCKCMD, 1709 gig_dbg(DEBUG_TRANSCMD|DEBUG_LOCKCMD,
1704 "write_command: sent %u bytes, %u left", 1710 "write_command: sent %u bytes, %u left",
1705 cs->curlen, cs->cmdbytes); 1711 cs->curlen, cs->cmdbytes);
1706 if ((cs->cmdbuf = cb->next) != NULL) { 1712 if (cb->next != NULL) {
1713 cs->cmdbuf = cb->next;
1707 cs->cmdbuf->prev = NULL; 1714 cs->cmdbuf->prev = NULL;
1708 cs->curlen = cs->cmdbuf->len; 1715 cs->curlen = cs->cmdbuf->len;
1709 } else { 1716 } else {
1717 cs->cmdbuf = NULL;
1710 cs->lastcmdbuf = NULL; 1718 cs->lastcmdbuf = NULL;
1711 cs->curlen = 0; 1719 cs->curlen = 0;
1712 } 1720 }
@@ -1833,7 +1841,7 @@ static int atwrite_submit(struct cardstate *cs, unsigned char *buf, int len)
1833 ucs->dr_cmd_out.wLength = cpu_to_le16(len); 1841 ucs->dr_cmd_out.wLength = cpu_to_le16(len);
1834 usb_fill_control_urb(ucs->urb_cmd_out, ucs->udev, 1842 usb_fill_control_urb(ucs->urb_cmd_out, ucs->udev,
1835 usb_sndctrlpipe(ucs->udev, 0), 1843 usb_sndctrlpipe(ucs->udev, 0),
1836 (unsigned char*) &ucs->dr_cmd_out, buf, len, 1844 (unsigned char *) &ucs->dr_cmd_out, buf, len,
1837 write_command_callback, cs); 1845 write_command_callback, cs);
1838 rc = usb_submit_urb(ucs->urb_cmd_out, GFP_ATOMIC); 1846 rc = usb_submit_urb(ucs->urb_cmd_out, GFP_ATOMIC);
1839 if (unlikely(rc)) { 1847 if (unlikely(rc)) {
@@ -1953,7 +1961,8 @@ static int gigaset_write_cmd(struct cardstate *cs,
1953 1961
1954 if (len > IF_WRITEBUF) 1962 if (len > IF_WRITEBUF)
1955 len = IF_WRITEBUF; 1963 len = IF_WRITEBUF;
1956 if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) { 1964 cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC);
1965 if (!cb) {
1957 dev_err(cs->dev, "%s: out of memory\n", __func__); 1966 dev_err(cs->dev, "%s: out of memory\n", __func__);
1958 rc = -ENOMEM; 1967 rc = -ENOMEM;
1959 goto notqueued; 1968 goto notqueued;
@@ -2100,7 +2109,8 @@ static int gigaset_initbcshw(struct bc_state *bcs)
2100 } 2109 }
2101 ubc->isooutdone = ubc->isooutfree = ubc->isooutovfl = NULL; 2110 ubc->isooutdone = ubc->isooutfree = ubc->isooutovfl = NULL;
2102 ubc->numsub = 0; 2111 ubc->numsub = 0;
2103 if (!(ubc->isooutbuf = kmalloc(sizeof(struct isowbuf_t), GFP_KERNEL))) { 2112 ubc->isooutbuf = kmalloc(sizeof(struct isowbuf_t), GFP_KERNEL);
2113 if (!ubc->isooutbuf) {
2104 pr_err("out of memory\n"); 2114 pr_err("out of memory\n");
2105 kfree(ubc); 2115 kfree(ubc);
2106 bcs->hw.bas = NULL; 2116 bcs->hw.bas = NULL;
@@ -2252,7 +2262,8 @@ static int gigaset_probe(struct usb_interface *interface,
2252 gig_dbg(DEBUG_ANY, 2262 gig_dbg(DEBUG_ANY,
2253 "%s: wrong alternate setting %d - trying to switch", 2263 "%s: wrong alternate setting %d - trying to switch",
2254 __func__, hostif->desc.bAlternateSetting); 2264 __func__, hostif->desc.bAlternateSetting);
2255 if (usb_set_interface(udev, hostif->desc.bInterfaceNumber, 3) < 0) { 2265 if (usb_set_interface(udev, hostif->desc.bInterfaceNumber, 3)
2266 < 0) {
2256 dev_warn(&udev->dev, "usb_set_interface failed, " 2267 dev_warn(&udev->dev, "usb_set_interface failed, "
2257 "device %d interface %d altsetting %d\n", 2268 "device %d interface %d altsetting %d\n",
2258 udev->devnum, hostif->desc.bInterfaceNumber, 2269 udev->devnum, hostif->desc.bInterfaceNumber,
@@ -2321,14 +2332,16 @@ static int gigaset_probe(struct usb_interface *interface,
2321 (endpoint->bEndpointAddress) & 0x0f), 2332 (endpoint->bEndpointAddress) & 0x0f),
2322 ucs->int_in_buf, IP_MSGSIZE, read_int_callback, cs, 2333 ucs->int_in_buf, IP_MSGSIZE, read_int_callback, cs,
2323 endpoint->bInterval); 2334 endpoint->bInterval);
2324 if ((rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL)) != 0) { 2335 rc = usb_submit_urb(ucs->urb_int_in, GFP_KERNEL);
2336 if (rc != 0) {
2325 dev_err(cs->dev, "could not submit interrupt URB: %s\n", 2337 dev_err(cs->dev, "could not submit interrupt URB: %s\n",
2326 get_usb_rcmsg(rc)); 2338 get_usb_rcmsg(rc));
2327 goto error; 2339 goto error;
2328 } 2340 }
2329 2341
2330 /* tell the device that the driver is ready */ 2342 /* tell the device that the driver is ready */
2331 if ((rc = req_submit(cs->bcs, HD_DEVICE_INIT_ACK, 0, 0)) != 0) 2343 rc = req_submit(cs->bcs, HD_DEVICE_INIT_ACK, 0, 0);
2344 if (rc != 0)
2332 goto error; 2345 goto error;
2333 2346
2334 /* tell common part that the device is ready */ 2347 /* tell common part that the device is ready */
@@ -2524,9 +2537,10 @@ static int __init bas_gigaset_init(void)
2524 int result; 2537 int result;
2525 2538
2526 /* allocate memory for our driver state and initialize it */ 2539 driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
2527 if ((driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, 2540 driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
2528 GIGASET_MODULENAME, GIGASET_DEVNAME, 2541 GIGASET_MODULENAME, GIGASET_DEVNAME,
2529 &gigops, THIS_MODULE)) == NULL) 2542 &gigops, THIS_MODULE);
2543 if (driver == NULL)
2530 goto error; 2544 goto error;
2531 2545
2532 /* register this driver with the USB subsystem */ 2546 /* register this driver with the USB subsystem */
diff --git a/drivers/isdn/gigaset/capi.c b/drivers/isdn/gigaset/capi.c
new file mode 100644
index 000000000000..3f5cd06af104
--- /dev/null
+++ b/drivers/isdn/gigaset/capi.c
@@ -0,0 +1,2292 @@
1/*
2 * Kernel CAPI interface for the Gigaset driver
3 *
4 * Copyright (c) 2009 by Tilman Schmidt <tilman@imap.cc>.
5 *
6 * =====================================================================
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of
10 * the License, or (at your option) any later version.
11 * =====================================================================
12 */
13
14#include "gigaset.h"
15#include <linux/ctype.h>
16#include <linux/isdn/capilli.h>
17#include <linux/isdn/capicmd.h>
18#include <linux/isdn/capiutil.h>
19
20/* missing from kernelcapi.h */
21#define CapiNcpiNotSupportedByProtocol 0x0001
22#define CapiFlagsNotSupportedByProtocol 0x0002
23#define CapiAlertAlreadySent 0x0003
24#define CapiFacilitySpecificFunctionNotSupported 0x3011
25
26/* missing from capicmd.h */
27#define CAPI_CONNECT_IND_BASELEN (CAPI_MSG_BASELEN+4+2+8*1)
28#define CAPI_CONNECT_ACTIVE_IND_BASELEN (CAPI_MSG_BASELEN+4+3*1)
29#define CAPI_CONNECT_B3_IND_BASELEN (CAPI_MSG_BASELEN+4+1)
30#define CAPI_CONNECT_B3_ACTIVE_IND_BASELEN (CAPI_MSG_BASELEN+4+1)
31#define CAPI_DATA_B3_REQ_LEN64 (CAPI_MSG_BASELEN+4+4+2+2+2+8)
32#define CAPI_DATA_B3_CONF_LEN (CAPI_MSG_BASELEN+4+2+2)
33#define CAPI_DISCONNECT_IND_LEN (CAPI_MSG_BASELEN+4+2)
34#define CAPI_DISCONNECT_B3_IND_BASELEN (CAPI_MSG_BASELEN+4+2+1)
35#define CAPI_FACILITY_CONF_BASELEN (CAPI_MSG_BASELEN+4+2+2+1)
36/* most _CONF messages contain only Controller/PLCI/NCCI and Info parameters */
37#define CAPI_STDCONF_LEN (CAPI_MSG_BASELEN+4+2)
38
39#define CAPI_FACILITY_HANDSET 0x0000
40#define CAPI_FACILITY_DTMF 0x0001
41#define CAPI_FACILITY_V42BIS 0x0002
42#define CAPI_FACILITY_SUPPSVC 0x0003
43#define CAPI_FACILITY_WAKEUP 0x0004
44#define CAPI_FACILITY_LI 0x0005
45
46#define CAPI_SUPPSVC_GETSUPPORTED 0x0000
47
48/* missing from capiutil.h */
49#define CAPIMSG_PLCI_PART(m) CAPIMSG_U8(m, 9)
50#define CAPIMSG_NCCI_PART(m) CAPIMSG_U16(m, 10)
51#define CAPIMSG_HANDLE_REQ(m) CAPIMSG_U16(m, 18) /* DATA_B3_REQ/_IND only! */
52#define CAPIMSG_FLAGS(m) CAPIMSG_U16(m, 20)
53#define CAPIMSG_SETCONTROLLER(m, contr) capimsg_setu8(m, 8, contr)
54#define CAPIMSG_SETPLCI_PART(m, plci) capimsg_setu8(m, 9, plci)
55#define CAPIMSG_SETNCCI_PART(m, ncci) capimsg_setu16(m, 10, ncci)
56#define CAPIMSG_SETFLAGS(m, flags) capimsg_setu16(m, 20, flags)
57
58/* parameters with differing location in DATA_B3_CONF/_RESP: */
59#define CAPIMSG_SETHANDLE_CONF(m, handle) capimsg_setu16(m, 12, handle)
60#define CAPIMSG_SETINFO_CONF(m, info) capimsg_setu16(m, 14, info)
61
62/* Flags (DATA_B3_REQ/_IND) */
63#define CAPI_FLAGS_DELIVERY_CONFIRMATION 0x04
64#define CAPI_FLAGS_RESERVED (~0x1f)
65
66/* buffer sizes */
67#define MAX_BC_OCTETS 11
68#define MAX_HLC_OCTETS 3
69#define MAX_NUMBER_DIGITS 20
70#define MAX_FMT_IE_LEN 20
71
72/* values for gigaset_capi_appl.connected */
73#define APCONN_NONE 0 /* inactive/listening */
74#define APCONN_SETUP 1 /* connecting */
75#define APCONN_ACTIVE 2 /* B channel up */
76
/* registered application data structure
 * (one instance per CAPI application registered on this controller) */
struct gigaset_capi_appl {
	struct list_head ctrlist;	/* link in the controller's appls list */
	struct gigaset_capi_appl *bcnext; /* next listener on the same B channel
					   * (chain built in gigaset_isdn_icall) */
	u16 id;				/* CAPI application ID */
	u16 nextMessageNumber;		/* message number for the next
					 * _IND/_CONF sent to this application */
	u32 listenInfoMask;		/* Info mask (presumably set by a
					 * LISTEN_REQ handler not visible in
					 * this chunk -- confirm) */
	u32 listenCIPmask;		/* CIP mask matched against incoming
					 * calls in gigaset_isdn_icall() */
	int connected;			/* connection state (APCONN_*) */
};
87
/* CAPI specific controller data structure
 * (wraps the Kernel CAPI controller plus driver-private state) */
struct gigaset_capi_ctr {
	struct capi_ctr ctr;		/* embedded Kernel CAPI controller */
	struct list_head appls;		/* registered applications */
	struct sk_buff_head sendqueue;	/* NOTE(review): presumably queues
					 * outbound messages; handling not
					 * visible in this chunk */
	atomic_t sendqlen;
	/* two _cmsg structures possibly used concurrently: */
	_cmsg hcmsg;	/* for message composition triggered from hardware */
	_cmsg acmsg;	/* for dissection of messages sent from application */
	/* scratch buffers for IEs passed in CONNECT_IND messages: */
	u8 bc_buf[MAX_BC_OCTETS+1];	/* Bearer Capability */
	u8 hlc_buf[MAX_HLC_OCTETS+1];	/* High Layer Compatibility */
	u8 cgpty_buf[MAX_NUMBER_DIGITS+3];	/* Calling Party Number */
	u8 cdpty_buf[MAX_NUMBER_DIGITS+2];	/* Called Party Number */
};
102
/* CIP Value table (from CAPI 2.0 standard, ch. 6.1)
 * maps each supported CIP value to the Gigaset hex string encodings of the
 * corresponding Bearer Capability (bc) and High Layer Compatibility (hlc)
 * IEs; entries not listed are zero-initialized ({ NULL, NULL }) */
static struct {
	u8 *bc;
	u8 *hlc;
} cip2bchlc[] = {
	[1] = { "8090A3", NULL },
		/* Speech (A-law) */
	[2] = { "8890", NULL },
		/* Unrestricted digital information */
	[3] = { "8990", NULL },
		/* Restricted digital information */
	[4] = { "9090A3", NULL },
		/* 3,1 kHz audio (A-law) */
	[5] = { "9190", NULL },
		/* 7 kHz audio */
	[6] = { "9890", NULL },
		/* Video */
	[7] = { "88C0C6E6", NULL },
		/* Packet mode */
	[8] = { "8890218F", NULL },
		/* 56 kbit/s rate adaptation */
	[9] = { "9190A5", NULL },
		/* Unrestricted digital information with tones/announcements */
	[16] = { "8090A3", "9181" },
		/* Telephony */
	[17] = { "9090A3", "9184" },
		/* Group 2/3 facsimile */
	[18] = { "8890", "91A1" },
		/* Group 4 facsimile Class 1 */
	[19] = { "8890", "91A4" },
		/* Teletex service basic and mixed mode
		   and Group 4 facsimile service Classes II and III */
	[20] = { "8890", "91A8" },
		/* Teletex service basic and processable mode */
	[21] = { "8890", "91B1" },
		/* Teletex service basic mode */
	[22] = { "8890", "91B2" },
		/* International interworking for Videotex */
	[23] = { "8890", "91B5" },
		/* Telex */
	[24] = { "8890", "91B8" },
		/* Message Handling Systems in accordance with X.400 */
	[25] = { "8890", "91C1" },
		/* OSI application in accordance with X.200 */
	[26] = { "9190A5", "9181" },
		/* 7 kHz telephony */
	[27] = { "9190A5", "916001" },
		/* Video telephony, first connection */
	[28] = { "8890", "916002" },
		/* Video telephony, second connection */
};
154
155/*
156 * helper functions
157 * ================
158 */
159
160/*
161 * emit unsupported parameter warning
162 */
163static inline void ignore_cstruct_param(struct cardstate *cs, _cstruct param,
164 char *msgname, char *paramname)
165{
166 if (param && *param)
167 dev_warn(cs->dev, "%s: ignoring unsupported parameter: %s\n",
168 msgname, paramname);
169}
170
/*
 * check for legal hex digit (0-9, A-F, a-f)
 * returns 1 for a hex digit, 0 for anything else
 */
static inline int ishexdigit(char c)
{
	return (c >= '0' && c <= '9') ||
	       (c >= 'A' && c <= 'F') ||
	       (c >= 'a' && c <= 'f');
}
184
185/*
186 * convert hex to binary
187 */
188static inline u8 hex2bin(char c)
189{
190 int result = c & 0x0f;
191 if (c & 0x40)
192 result += 9;
193 return result;
194}
195
196/*
197 * convert an IE from Gigaset hex string to ETSI binary representation
198 * including length byte
199 * return value: result length, -1 on error
200 */
201static int encode_ie(char *in, u8 *out, int maxlen)
202{
203 int l = 0;
204 while (*in) {
205 if (!ishexdigit(in[0]) || !ishexdigit(in[1]) || l >= maxlen)
206 return -1;
207 out[++l] = (hex2bin(in[0]) << 4) + hex2bin(in[1]);
208 in += 2;
209 }
210 out[0] = l;
211 return l;
212}
213
214/*
215 * convert an IE from ETSI binary representation including length byte
216 * to Gigaset hex string
217 */
218static void decode_ie(u8 *in, char *out)
219{
220 int i = *in;
221 while (i-- > 0) {
222 /* ToDo: conversion to upper case necessary? */
223 *out++ = toupper(hex_asc_hi(*++in));
224 *out++ = toupper(hex_asc_lo(*in));
225 }
226}
227
/*
 * retrieve application data structure for an application ID
 * Returns NULL if no application with that ID is registered on this
 * controller.
 */
static inline struct gigaset_capi_appl *
get_appl(struct gigaset_capi_ctr *iif, u16 appl)
{
	struct gigaset_capi_appl *ap;

	/* linear scan of the controller's registered application list */
	list_for_each_entry(ap, &iif->appls, ctrlist)
		if (ap->id == appl)
			return ap;
	return NULL;
}
241
/*
 * dump CAPI message to kernel messages for debugging
 * (compiled out unless CONFIG_GIGASET_DEBUG is set; no-op unless the
 * given debug level is enabled in gigaset_debuglevel)
 */
static inline void dump_cmsg(enum debuglevel level, const char *tag, _cmsg *p)
{
#ifdef CONFIG_GIGASET_DEBUG
	_cdebbuf *cdb;

	if (!(gigaset_debuglevel & level))
		return;

	cdb = capi_cmsg2str(p);
	if (cdb) {
		gig_dbg(level, "%s: [%d] %s", tag, p->ApplId, cdb->buf);
		cdebbuf_free(cdb);
	} else {
		/* capi_cmsg2str() failed: fall back to command name only */
		gig_dbg(level, "%s: [%d] %s", tag, p->ApplId,
			capi_cmd2str(p->Command, p->Subcommand));
	}
#endif
}
263
264static inline void dump_rawmsg(enum debuglevel level, const char *tag,
265 unsigned char *data)
266{
267#ifdef CONFIG_GIGASET_DEBUG
268 char *dbgline;
269 int i, l;
270
271 if (!(gigaset_debuglevel & level))
272 return;
273
274 l = CAPIMSG_LEN(data);
275 if (l < 12) {
276 gig_dbg(level, "%s: ??? LEN=%04d", tag, l);
277 return;
278 }
279 gig_dbg(level, "%s: 0x%02x:0x%02x: ID=%03d #0x%04x LEN=%04d NCCI=0x%x",
280 tag, CAPIMSG_COMMAND(data), CAPIMSG_SUBCOMMAND(data),
281 CAPIMSG_APPID(data), CAPIMSG_MSGID(data), l,
282 CAPIMSG_CONTROL(data));
283 l -= 12;
284 dbgline = kmalloc(3*l, GFP_ATOMIC);
285 if (!dbgline)
286 return;
287 for (i = 0; i < l; i++) {
288 dbgline[3*i] = hex_asc_hi(data[12+i]);
289 dbgline[3*i+1] = hex_asc_lo(data[12+i]);
290 dbgline[3*i+2] = ' ';
291 }
292 dbgline[3*l-1] = '\0';
293 gig_dbg(level, " %s", dbgline);
294 kfree(dbgline);
295 if (CAPIMSG_COMMAND(data) == CAPI_DATA_B3 &&
296 (CAPIMSG_SUBCOMMAND(data) == CAPI_REQ ||
297 CAPIMSG_SUBCOMMAND(data) == CAPI_IND) &&
298 CAPIMSG_DATALEN(data) > 0) {
299 l = CAPIMSG_DATALEN(data);
300 dbgline = kmalloc(3*l, GFP_ATOMIC);
301 if (!dbgline)
302 return;
303 data += CAPIMSG_LEN(data);
304 for (i = 0; i < l; i++) {
305 dbgline[3*i] = hex_asc_hi(data[i]);
306 dbgline[3*i+1] = hex_asc_lo(data[i]);
307 dbgline[3*i+2] = ' ';
308 }
309 dbgline[3*l-1] = '\0';
310 gig_dbg(level, " %s", dbgline);
311 kfree(dbgline);
312 }
313#endif
314}
315
316/*
317 * format CAPI IE as string
318 */
319
320static const char *format_ie(const char *ie)
321{
322 static char result[3*MAX_FMT_IE_LEN];
323 int len, count;
324 char *pout = result;
325
326 if (!ie)
327 return "NULL";
328
329 count = len = ie[0];
330 if (count > MAX_FMT_IE_LEN)
331 count = MAX_FMT_IE_LEN-1;
332 while (count--) {
333 *pout++ = hex_asc_hi(*++ie);
334 *pout++ = hex_asc_lo(*ie);
335 *pout++ = ' ';
336 }
337 if (len > MAX_FMT_IE_LEN) {
338 *pout++ = '.';
339 *pout++ = '.';
340 *pout++ = '.';
341 }
342 *--pout = 0;
343 return result;
344}
345
346
347/*
348 * driver interface functions
349 * ==========================
350 */
351
/**
 * gigaset_skb_sent() - acknowledge transmission of outgoing skb
 * @bcs: B channel descriptor structure.
 * @dskb: sent data.
 *
 * Called by hardware module {bas,ser,usb}_gigaset when the data in a
 * skb has been successfully sent, for signalling completion to the LL.
 * Builds a DATA_B3_CONF message from the saved request header and passes
 * it to the application owning the B channel.
 */
void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *dskb)
{
	struct cardstate *cs = bcs->cs;
	struct gigaset_capi_ctr *iif = cs->iif;
	struct gigaset_capi_appl *ap = bcs->ap;
	/* NOTE(review): assumes the original DATA_B3_REQ header was stashed
	 * at the mac header offset by the send path -- confirm there */
	unsigned char *req = skb_mac_header(dskb);
	struct sk_buff *cskb;
	u16 flags;

	/* update statistics */
	++bcs->trans_up;

	if (!ap) {
		dev_err(cs->dev, "%s: no application\n", __func__);
		return;
	}

	/* don't send further B3 messages if disconnected */
	if (ap->connected < APCONN_ACTIVE) {
		gig_dbg(DEBUG_LLDATA, "disconnected, discarding ack");
		return;
	}

	/* ToDo: honor unset "delivery confirmation" bit */
	flags = CAPIMSG_FLAGS(req);

	/* build DATA_B3_CONF message */
	cskb = alloc_skb(CAPI_DATA_B3_CONF_LEN, GFP_ATOMIC);
	if (!cskb) {
		dev_err(cs->dev, "%s: out of memory\n", __func__);
		return;
	}
	/* frequent message, avoid _cmsg overhead */
	CAPIMSG_SETLEN(cskb->data, CAPI_DATA_B3_CONF_LEN);
	CAPIMSG_SETAPPID(cskb->data, ap->id);
	CAPIMSG_SETCOMMAND(cskb->data, CAPI_DATA_B3);
	CAPIMSG_SETSUBCOMMAND(cskb->data,  CAPI_CONF);
	CAPIMSG_SETMSGID(cskb->data, CAPIMSG_MSGID(req));
	CAPIMSG_SETCONTROLLER(cskb->data, iif->ctr.cnr);
	CAPIMSG_SETPLCI_PART(cskb->data, bcs->channel + 1);
	CAPIMSG_SETNCCI_PART(cskb->data, 1);
	/* echo the request's data handle back in the confirmation */
	CAPIMSG_SETHANDLE_CONF(cskb->data, CAPIMSG_HANDLE_REQ(req));
	/* reject any flags other than "delivery confirmation" */
	if (flags & ~CAPI_FLAGS_DELIVERY_CONFIRMATION)
		CAPIMSG_SETINFO_CONF(cskb->data,
				     CapiFlagsNotSupportedByProtocol);
	else
		CAPIMSG_SETINFO_CONF(cskb->data, CAPI_NOERROR);

	/* emit message */
	dump_rawmsg(DEBUG_LLDATA, "DATA_B3_CONF", cskb->data);
	capi_ctr_handle_message(&iif->ctr, ap->id, cskb);
}
EXPORT_SYMBOL_GPL(gigaset_skb_sent);
413
/**
 * gigaset_skb_rcvd() - pass received skb to LL
 * @bcs: B channel descriptor structure.
 * @skb: received data.
 *
 * Called by hardware module {bas,ser,usb}_gigaset when user data has
 * been successfully received, for passing to the LL.
 * Warning: skb must not be accessed anymore!
 */
void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
{
	struct cardstate *cs = bcs->cs;
	struct gigaset_capi_ctr *iif = cs->iif;
	struct gigaset_capi_appl *ap = bcs->ap;
	int len = skb->len;	/* payload length, saved before skb_push() */

	/* update statistics */
	bcs->trans_down++;

	if (!ap) {
		dev_err(cs->dev, "%s: no application\n", __func__);
		return;
	}

	/* don't send further B3 messages if disconnected */
	if (ap->connected < APCONN_ACTIVE) {
		gig_dbg(DEBUG_LLDATA, "disconnected, discarding data");
		dev_kfree_skb_any(skb);
		return;
	}

	/*
	 * prepend DATA_B3_IND message to payload
	 * Parameters: NCCI = 1, all others 0/unused
	 * frequent message, avoid _cmsg overhead
	 * NOTE(review): assumes the hardware module reserved sufficient
	 * skb headroom for the CAPI header -- confirm in hw modules
	 */
	skb_push(skb, CAPI_DATA_B3_REQ_LEN);
	CAPIMSG_SETLEN(skb->data, CAPI_DATA_B3_REQ_LEN);
	CAPIMSG_SETAPPID(skb->data, ap->id);
	CAPIMSG_SETCOMMAND(skb->data, CAPI_DATA_B3);
	CAPIMSG_SETSUBCOMMAND(skb->data,  CAPI_IND);
	CAPIMSG_SETMSGID(skb->data, ap->nextMessageNumber++);
	CAPIMSG_SETCONTROLLER(skb->data, iif->ctr.cnr);
	CAPIMSG_SETPLCI_PART(skb->data, bcs->channel + 1);
	CAPIMSG_SETNCCI_PART(skb->data, 1);
	/* Data parameter not used */
	CAPIMSG_SETDATALEN(skb->data, len);
	/* Data handle parameter not used */
	CAPIMSG_SETFLAGS(skb->data, 0);
	/* Data64 parameter not present */

	/* emit message */
	dump_rawmsg(DEBUG_LLDATA, "DATA_B3_IND", skb->data);
	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
}
EXPORT_SYMBOL_GPL(gigaset_skb_rcvd);
470
471/**
472 * gigaset_isdn_rcv_err() - signal receive error
473 * @bcs: B channel descriptor structure.
474 *
475 * Called by hardware module {bas,ser,usb}_gigaset when a receive error
476 * has occurred, for signalling to the LL.
477 */
478void gigaset_isdn_rcv_err(struct bc_state *bcs)
479{
480 /* if currently ignoring packets, just count down */
481 if (bcs->ignore) {
482 bcs->ignore--;
483 return;
484 }
485
486 /* update statistics */
487 bcs->corrupted++;
488
489 /* ToDo: signal error -> LL */
490}
491EXPORT_SYMBOL_GPL(gigaset_isdn_rcv_err);
492
493/**
494 * gigaset_isdn_icall() - signal incoming call
495 * @at_state: connection state structure.
496 *
497 * Called by main module at tasklet level to notify the LL that an incoming
498 * call has been received. @at_state contains the parameters of the call.
499 *
500 * Return value: call disposition (ICALL_*)
501 */
502int gigaset_isdn_icall(struct at_state_t *at_state)
503{
504 struct cardstate *cs = at_state->cs;
505 struct bc_state *bcs = at_state->bcs;
506 struct gigaset_capi_ctr *iif = cs->iif;
507 struct gigaset_capi_appl *ap;
508 u32 actCIPmask;
509 struct sk_buff *skb;
510 unsigned int msgsize;
511 int i;
512
513 /*
514 * ToDo: signal calls without a free B channel, too
515 * (requires a u8 handle for the at_state structure that can
516 * be stored in the PLCI and used in the CONNECT_RESP message
517 * handler to retrieve it)
518 */
519 if (!bcs)
520 return ICALL_IGNORE;
521
522 /* prepare CONNECT_IND message, using B channel number as PLCI */
523 capi_cmsg_header(&iif->hcmsg, 0, CAPI_CONNECT, CAPI_IND, 0,
524 iif->ctr.cnr | ((bcs->channel + 1) << 8));
525
526 /* minimum size, all structs empty */
527 msgsize = CAPI_CONNECT_IND_BASELEN;
528
529 /* Bearer Capability (mandatory) */
530 if (at_state->str_var[STR_ZBC]) {
531 /* pass on BC from Gigaset */
532 if (encode_ie(at_state->str_var[STR_ZBC], iif->bc_buf,
533 MAX_BC_OCTETS) < 0) {
534 dev_warn(cs->dev, "RING ignored - bad BC %s\n",
535 at_state->str_var[STR_ZBC]);
536 return ICALL_IGNORE;
537 }
538
539 /* look up corresponding CIP value */
540 iif->hcmsg.CIPValue = 0; /* default if nothing found */
541 for (i = 0; i < ARRAY_SIZE(cip2bchlc); i++)
542 if (cip2bchlc[i].bc != NULL &&
543 cip2bchlc[i].hlc == NULL &&
544 !strcmp(cip2bchlc[i].bc,
545 at_state->str_var[STR_ZBC])) {
546 iif->hcmsg.CIPValue = i;
547 break;
548 }
549 } else {
550 /* no BC (internal call): assume CIP 1 (speech, A-law) */
551 iif->hcmsg.CIPValue = 1;
552 encode_ie(cip2bchlc[1].bc, iif->bc_buf, MAX_BC_OCTETS);
553 }
554 iif->hcmsg.BC = iif->bc_buf;
555 msgsize += iif->hcmsg.BC[0];
556
557 /* High Layer Compatibility (optional) */
558 if (at_state->str_var[STR_ZHLC]) {
559 /* pass on HLC from Gigaset */
560 if (encode_ie(at_state->str_var[STR_ZHLC], iif->hlc_buf,
561 MAX_HLC_OCTETS) < 0) {
562 dev_warn(cs->dev, "RING ignored - bad HLC %s\n",
563 at_state->str_var[STR_ZHLC]);
564 return ICALL_IGNORE;
565 }
566 iif->hcmsg.HLC = iif->hlc_buf;
567 msgsize += iif->hcmsg.HLC[0];
568
569 /* look up corresponding CIP value */
570 /* keep BC based CIP value if none found */
571 if (at_state->str_var[STR_ZBC])
572 for (i = 0; i < ARRAY_SIZE(cip2bchlc); i++)
573 if (cip2bchlc[i].hlc != NULL &&
574 !strcmp(cip2bchlc[i].hlc,
575 at_state->str_var[STR_ZHLC]) &&
576 !strcmp(cip2bchlc[i].bc,
577 at_state->str_var[STR_ZBC])) {
578 iif->hcmsg.CIPValue = i;
579 break;
580 }
581 }
582
583 /* Called Party Number (optional) */
584 if (at_state->str_var[STR_ZCPN]) {
585 i = strlen(at_state->str_var[STR_ZCPN]);
586 if (i > MAX_NUMBER_DIGITS) {
587 dev_warn(cs->dev, "RING ignored - bad number %s\n",
588 at_state->str_var[STR_ZBC]);
589 return ICALL_IGNORE;
590 }
591 iif->cdpty_buf[0] = i + 1;
592 iif->cdpty_buf[1] = 0x80; /* type / numbering plan unknown */
593 memcpy(iif->cdpty_buf+2, at_state->str_var[STR_ZCPN], i);
594 iif->hcmsg.CalledPartyNumber = iif->cdpty_buf;
595 msgsize += iif->hcmsg.CalledPartyNumber[0];
596 }
597
598 /* Calling Party Number (optional) */
599 if (at_state->str_var[STR_NMBR]) {
600 i = strlen(at_state->str_var[STR_NMBR]);
601 if (i > MAX_NUMBER_DIGITS) {
602 dev_warn(cs->dev, "RING ignored - bad number %s\n",
603 at_state->str_var[STR_ZBC]);
604 return ICALL_IGNORE;
605 }
606 iif->cgpty_buf[0] = i + 2;
607 iif->cgpty_buf[1] = 0x00; /* type / numbering plan unknown */
608 iif->cgpty_buf[2] = 0x80; /* pres. allowed, not screened */
609 memcpy(iif->cgpty_buf+3, at_state->str_var[STR_NMBR], i);
610 iif->hcmsg.CallingPartyNumber = iif->cgpty_buf;
611 msgsize += iif->hcmsg.CallingPartyNumber[0];
612 }
613
614 /* remaining parameters (not supported, always left NULL):
615 * - CalledPartySubaddress
616 * - CallingPartySubaddress
617 * - AdditionalInfo
618 * - BChannelinformation
619 * - Keypadfacility
620 * - Useruserdata
621 * - Facilitydataarray
622 */
623
624 gig_dbg(DEBUG_CMD, "icall: PLCI %x CIP %d BC %s",
625 iif->hcmsg.adr.adrPLCI, iif->hcmsg.CIPValue,
626 format_ie(iif->hcmsg.BC));
627 gig_dbg(DEBUG_CMD, "icall: HLC %s",
628 format_ie(iif->hcmsg.HLC));
629 gig_dbg(DEBUG_CMD, "icall: CgPty %s",
630 format_ie(iif->hcmsg.CallingPartyNumber));
631 gig_dbg(DEBUG_CMD, "icall: CdPty %s",
632 format_ie(iif->hcmsg.CalledPartyNumber));
633
634 /* scan application list for matching listeners */
635 bcs->ap = NULL;
636 actCIPmask = 1 | (1 << iif->hcmsg.CIPValue);
637 list_for_each_entry(ap, &iif->appls, ctrlist)
638 if (actCIPmask & ap->listenCIPmask) {
639 /* build CONNECT_IND message for this application */
640 iif->hcmsg.ApplId = ap->id;
641 iif->hcmsg.Messagenumber = ap->nextMessageNumber++;
642
643 skb = alloc_skb(msgsize, GFP_ATOMIC);
644 if (!skb) {
645 dev_err(cs->dev, "%s: out of memory\n",
646 __func__);
647 break;
648 }
649 capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize));
650 dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
651
652 /* add to listeners on this B channel, update state */
653 ap->bcnext = bcs->ap;
654 bcs->ap = ap;
655 bcs->chstate |= CHS_NOTIFY_LL;
656 ap->connected = APCONN_SETUP;
657
658 /* emit message */
659 capi_ctr_handle_message(&iif->ctr, ap->id, skb);
660 }
661
662 /*
663 * Return "accept" if any listeners.
664 * Gigaset will send ALERTING.
665 * There doesn't seem to be a way to avoid this.
666 */
667 return bcs->ap ? ICALL_ACCEPT : ICALL_IGNORE;
668}
669
/*
 * send a DISCONNECT_IND message to an application
 * @reason: CAPI Reason parameter for the indication
 * does not sleep, clobbers the controller's hcmsg structure
 * No-op if the application has no connection; resets the application's
 * connection state to APCONN_NONE before delivering the message.
 */
static void send_disconnect_ind(struct bc_state *bcs,
				struct gigaset_capi_appl *ap, u16 reason)
{
	struct cardstate *cs = bcs->cs;
	struct gigaset_capi_ctr *iif = cs->iif;
	struct sk_buff *skb;

	if (ap->connected == APCONN_NONE)
		return;

	/* address by controller number and PLCI (= B channel number + 1) */
	capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_DISCONNECT, CAPI_IND,
			 ap->nextMessageNumber++,
			 iif->ctr.cnr | ((bcs->channel + 1) << 8));
	iif->hcmsg.Reason = reason;
	skb = alloc_skb(CAPI_DISCONNECT_IND_LEN, GFP_ATOMIC);
	if (!skb) {
		dev_err(cs->dev, "%s: out of memory\n", __func__);
		return;
	}
	capi_cmsg2message(&iif->hcmsg, __skb_put(skb, CAPI_DISCONNECT_IND_LEN));
	dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
	/* mark disconnected before handing the message to Kernel CAPI */
	ap->connected = APCONN_NONE;
	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
}
698
/*
 * send a DISCONNECT_B3_IND message to an application
 * Parameters: NCCI = 1, NCPI empty, Reason_B3 = 0
 * does not sleep, clobbers the controller's hcmsg structure
 * No-op unless a logical (B3) connection is active; drops the
 * application's state back to APCONN_SETUP.
 */
static void send_disconnect_b3_ind(struct bc_state *bcs,
				   struct gigaset_capi_appl *ap)
{
	struct cardstate *cs = bcs->cs;
	struct gigaset_capi_ctr *iif = cs->iif;
	struct sk_buff *skb;

	/* nothing to do if no logical connection active */
	if (ap->connected < APCONN_ACTIVE)
		return;
	ap->connected = APCONN_SETUP;

	/* address: controller | PLCI (channel+1) | NCCI 1 */
	capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_DISCONNECT_B3, CAPI_IND,
			 ap->nextMessageNumber++,
			 iif->ctr.cnr | ((bcs->channel + 1) << 8) | (1 << 16));
	skb = alloc_skb(CAPI_DISCONNECT_B3_IND_BASELEN, GFP_ATOMIC);
	if (!skb) {
		dev_err(cs->dev, "%s: out of memory\n", __func__);
		return;
	}
	capi_cmsg2message(&iif->hcmsg,
			  __skb_put(skb, CAPI_DISCONNECT_B3_IND_BASELEN));
	dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
}
729
/**
 * gigaset_isdn_connD() - signal D channel connect
 * @bcs: B channel descriptor structure.
 *
 * Called by main module at tasklet level to notify the LL that the D channel
 * connection has been established.
 * Disconnects any extra listeners still attached to the B channel and emits
 * a CONNECT_ACTIVE_IND to the remaining (winning) application.
 */
void gigaset_isdn_connD(struct bc_state *bcs)
{
	struct cardstate *cs = bcs->cs;
	struct gigaset_capi_ctr *iif = cs->iif;
	struct gigaset_capi_appl *ap = bcs->ap;
	struct sk_buff *skb;
	unsigned int msgsize;

	if (!ap) {
		dev_err(cs->dev, "%s: no application\n", __func__);
		return;
	}
	while (ap->bcnext) {
		/* this should never happen */
		dev_warn(cs->dev, "%s: dropping extra application %u\n",
			 __func__, ap->bcnext->id);
		send_disconnect_ind(bcs, ap->bcnext,
				    CapiCallGivenToOtherApplication);
		ap->bcnext = ap->bcnext->bcnext;
	}
	if (ap->connected == APCONN_NONE) {
		dev_warn(cs->dev, "%s: application %u not connected\n",
			 __func__, ap->id);
		return;
	}

	/* prepare CONNECT_ACTIVE_IND message
	 * Note: LLC not supported by device
	 */
	capi_cmsg_header(&iif->hcmsg, ap->id, CAPI_CONNECT_ACTIVE, CAPI_IND,
			 ap->nextMessageNumber++,
			 iif->ctr.cnr | ((bcs->channel + 1) << 8));

	/* minimum size, all structs empty */
	msgsize = CAPI_CONNECT_ACTIVE_IND_BASELEN;

	/* ToDo: set parameter: Connected number
	 * (requires ev-layer state machine extension to collect
	 * ZCON device reply)
	 */

	/* build and emit CONNECT_ACTIVE_IND message */
	skb = alloc_skb(msgsize, GFP_ATOMIC);
	if (!skb) {
		dev_err(cs->dev, "%s: out of memory\n", __func__);
		return;
	}
	capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize));
	dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
	capi_ctr_handle_message(&iif->ctr, ap->id, skb);
}
788
789/**
790 * gigaset_isdn_hupD() - signal D channel hangup
791 * @bcs: B channel descriptor structure.
792 *
793 * Called by main module at tasklet level to notify the LL that the D channel
794 * connection has been shut down.
795 */
796void gigaset_isdn_hupD(struct bc_state *bcs)
797{
798 struct gigaset_capi_appl *ap;
799
800 /*
801 * ToDo: pass on reason code reported by device
802 * (requires ev-layer state machine extension to collect
803 * ZCAU device reply)
804 */
805 for (ap = bcs->ap; ap != NULL; ap = ap->bcnext) {
806 send_disconnect_b3_ind(bcs, ap);
807 send_disconnect_ind(bcs, ap, 0);
808 }
809 bcs->ap = NULL;
810}
811
812/**
813 * gigaset_isdn_connB() - signal B channel connect
814 * @bcs: B channel descriptor structure.
815 *
816 * Called by main module at tasklet level to notify the LL that the B channel
817 * connection has been established.
818 */
819void gigaset_isdn_connB(struct bc_state *bcs)
820{
821 struct cardstate *cs = bcs->cs;
822 struct gigaset_capi_ctr *iif = cs->iif;
823 struct gigaset_capi_appl *ap = bcs->ap;
824 struct sk_buff *skb;
825 unsigned int msgsize;
826 u8 command;
827
828 if (!ap) {
829 dev_err(cs->dev, "%s: no application\n", __func__);
830 return;
831 }
832 while (ap->bcnext) {
833 /* this should never happen */
834 dev_warn(cs->dev, "%s: dropping extra application %u\n",
835 __func__, ap->bcnext->id);
836 send_disconnect_ind(bcs, ap->bcnext,
837 CapiCallGivenToOtherApplication);
838 ap->bcnext = ap->bcnext->bcnext;
839 }
840 if (!ap->connected) {
841 dev_warn(cs->dev, "%s: application %u not connected\n",
842 __func__, ap->id);
843 return;
844 }
845
846 /*
847 * emit CONNECT_B3_ACTIVE_IND if we already got CONNECT_B3_REQ;
848 * otherwise we have to emit CONNECT_B3_IND first, and follow up with
849 * CONNECT_B3_ACTIVE_IND in reply to CONNECT_B3_RESP
850 * Parameters in both cases always: NCCI = 1, NCPI empty
851 */
852 if (ap->connected >= APCONN_ACTIVE) {
853 command = CAPI_CONNECT_B3_ACTIVE;
854 msgsize = CAPI_CONNECT_B3_ACTIVE_IND_BASELEN;
855 } else {
856 command = CAPI_CONNECT_B3;
857 msgsize = CAPI_CONNECT_B3_IND_BASELEN;
858 }
859 capi_cmsg_header(&iif->hcmsg, ap->id, command, CAPI_IND,
860 ap->nextMessageNumber++,
861 iif->ctr.cnr | ((bcs->channel + 1) << 8) | (1 << 16));
862 skb = alloc_skb(msgsize, GFP_ATOMIC);
863 if (!skb) {
864 dev_err(cs->dev, "%s: out of memory\n", __func__);
865 return;
866 }
867 capi_cmsg2message(&iif->hcmsg, __skb_put(skb, msgsize));
868 dump_cmsg(DEBUG_CMD, __func__, &iif->hcmsg);
869 ap->connected = APCONN_ACTIVE;
870 capi_ctr_handle_message(&iif->ctr, ap->id, skb);
871}
872
873/**
874 * gigaset_isdn_hupB() - signal B channel hangup
875 * @bcs: B channel descriptor structure.
876 *
877 * Called by main module to notify the LL that the B channel connection has
878 * been shut down.
879 */
880void gigaset_isdn_hupB(struct bc_state *bcs)
881{
882 struct cardstate *cs = bcs->cs;
883 struct gigaset_capi_appl *ap = bcs->ap;
884
885 /* ToDo: assure order of DISCONNECT_B3_IND and DISCONNECT_IND ? */
886
887 if (!ap) {
888 dev_err(cs->dev, "%s: no application\n", __func__);
889 return;
890 }
891
892 send_disconnect_b3_ind(bcs, ap);
893}
894
895/**
896 * gigaset_isdn_start() - signal device availability
897 * @cs: device descriptor structure.
898 *
899 * Called by main module to notify the LL that the device is available for
900 * use.
901 */
902void gigaset_isdn_start(struct cardstate *cs)
903{
904 struct gigaset_capi_ctr *iif = cs->iif;
905
906 /* fill profile data: manufacturer name */
907 strcpy(iif->ctr.manu, "Siemens");
908 /* CAPI and device version */
909 iif->ctr.version.majorversion = 2; /* CAPI 2.0 */
910 iif->ctr.version.minorversion = 0;
911 /* ToDo: check/assert cs->gotfwver? */
912 iif->ctr.version.majormanuversion = cs->fwver[0];
913 iif->ctr.version.minormanuversion = cs->fwver[1];
914 /* number of B channels supported */
915 iif->ctr.profile.nbchannel = cs->channels;
916 /* global options: internal controller, supplementary services */
917 iif->ctr.profile.goptions = 0x11;
918 /* B1 protocols: 64 kbit/s HDLC or transparent */
919 iif->ctr.profile.support1 = 0x03;
920 /* B2 protocols: transparent only */
921 /* ToDo: X.75 SLP ? */
922 iif->ctr.profile.support2 = 0x02;
923 /* B3 protocols: transparent only */
924 iif->ctr.profile.support3 = 0x01;
925 /* no serial number */
926 strcpy(iif->ctr.serial, "0");
927 capi_ctr_ready(&iif->ctr);
928}
929
930/**
931 * gigaset_isdn_stop() - signal device unavailability
932 * @cs: device descriptor structure.
933 *
934 * Called by main module to notify the LL that the device is no longer
935 * available for use.
936 */
937void gigaset_isdn_stop(struct cardstate *cs)
938{
939 struct gigaset_capi_ctr *iif = cs->iif;
940 capi_ctr_down(&iif->ctr);
941}
942
943/*
944 * kernel CAPI callback methods
945 * ============================
946 */
947
948/*
949 * load firmware
950 */
951static int gigaset_load_firmware(struct capi_ctr *ctr, capiloaddata *data)
952{
953 struct cardstate *cs = ctr->driverdata;
954
955 /* AVM specific operation, not needed for Gigaset -- ignore */
956 dev_notice(cs->dev, "load_firmware ignored\n");
957
958 return 0;
959}
960
961/*
962 * reset (deactivate) controller
963 */
964static void gigaset_reset_ctr(struct capi_ctr *ctr)
965{
966 struct cardstate *cs = ctr->driverdata;
967
968 /* AVM specific operation, not needed for Gigaset -- ignore */
969 dev_notice(cs->dev, "reset_ctr ignored\n");
970}
971
972/*
973 * register CAPI application
974 */
975static void gigaset_register_appl(struct capi_ctr *ctr, u16 appl,
976 capi_register_params *rp)
977{
978 struct gigaset_capi_ctr *iif
979 = container_of(ctr, struct gigaset_capi_ctr, ctr);
980 struct cardstate *cs = ctr->driverdata;
981 struct gigaset_capi_appl *ap;
982
983 list_for_each_entry(ap, &iif->appls, ctrlist)
984 if (ap->id == appl) {
985 dev_notice(cs->dev,
986 "application %u already registered\n", appl);
987 return;
988 }
989
990 ap = kzalloc(sizeof(*ap), GFP_KERNEL);
991 if (!ap) {
992 dev_err(cs->dev, "%s: out of memory\n", __func__);
993 return;
994 }
995 ap->id = appl;
996
997 list_add(&ap->ctrlist, &iif->appls);
998}
999
1000/*
1001 * release CAPI application
1002 */
1003static void gigaset_release_appl(struct capi_ctr *ctr, u16 appl)
1004{
1005 struct gigaset_capi_ctr *iif
1006 = container_of(ctr, struct gigaset_capi_ctr, ctr);
1007 struct cardstate *cs = iif->ctr.driverdata;
1008 struct gigaset_capi_appl *ap, *tmp;
1009
1010 list_for_each_entry_safe(ap, tmp, &iif->appls, ctrlist)
1011 if (ap->id == appl) {
1012 if (ap->connected != APCONN_NONE) {
1013 dev_err(cs->dev,
1014 "%s: application %u still connected\n",
1015 __func__, ap->id);
1016 /* ToDo: clear active connection */
1017 }
1018 list_del(&ap->ctrlist);
1019 kfree(ap);
1020 }
1021
1022}
1023
1024/*
1025 * =====================================================================
1026 * outgoing CAPI message handler
1027 * =====================================================================
1028 */
1029
1030/*
1031 * helper function: emit reply message with given Info value
1032 */
1033static void send_conf(struct gigaset_capi_ctr *iif,
1034 struct gigaset_capi_appl *ap,
1035 struct sk_buff *skb,
1036 u16 info)
1037{
1038 /*
1039 * _CONF replies always only have NCCI and Info parameters
1040 * so they'll fit into the _REQ message skb
1041 */
1042 capi_cmsg_answer(&iif->acmsg);
1043 iif->acmsg.Info = info;
1044 capi_cmsg2message(&iif->acmsg, skb->data);
1045 __skb_trim(skb, CAPI_STDCONF_LEN);
1046 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
1047 capi_ctr_handle_message(&iif->ctr, ap->id, skb);
1048}
1049
1050/*
1051 * process FACILITY_REQ message
1052 */
1053static void do_facility_req(struct gigaset_capi_ctr *iif,
1054 struct gigaset_capi_appl *ap,
1055 struct sk_buff *skb)
1056{
1057 struct cardstate *cs = iif->ctr.driverdata;
1058 _cmsg *cmsg = &iif->acmsg;
1059 struct sk_buff *cskb;
1060 u8 *pparam;
1061 unsigned int msgsize = CAPI_FACILITY_CONF_BASELEN;
1062 u16 function, info;
1063 static u8 confparam[10]; /* max. 9 octets + length byte */
1064
1065 /* decode message */
1066 capi_message2cmsg(cmsg, skb->data);
1067 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1068
1069 /*
1070 * Facility Request Parameter is not decoded by capi_message2cmsg()
1071 * encoding depends on Facility Selector
1072 */
1073 switch (cmsg->FacilitySelector) {
1074 case CAPI_FACILITY_DTMF: /* ToDo */
1075 info = CapiFacilityNotSupported;
1076 confparam[0] = 2; /* length */
1077 /* DTMF information: Unknown DTMF request */
1078 capimsg_setu16(confparam, 1, 2);
1079 break;
1080
1081 case CAPI_FACILITY_V42BIS: /* not supported */
1082 info = CapiFacilityNotSupported;
1083 confparam[0] = 2; /* length */
1084 /* V.42 bis information: not available */
1085 capimsg_setu16(confparam, 1, 1);
1086 break;
1087
1088 case CAPI_FACILITY_SUPPSVC:
1089 /* decode Function parameter */
1090 pparam = cmsg->FacilityRequestParameter;
1091 if (pparam == NULL || *pparam < 2) {
1092 dev_notice(cs->dev, "%s: %s missing\n", "FACILITY_REQ",
1093 "Facility Request Parameter");
1094 send_conf(iif, ap, skb, CapiIllMessageParmCoding);
1095 return;
1096 }
1097 function = CAPIMSG_U16(pparam, 1);
1098 switch (function) {
1099 case CAPI_SUPPSVC_GETSUPPORTED:
1100 info = CapiSuccess;
1101 /* Supplementary Service specific parameter */
1102 confparam[3] = 6; /* length */
1103 /* Supplementary services info: Success */
1104 capimsg_setu16(confparam, 4, CapiSuccess);
1105 /* Supported Services: none */
1106 capimsg_setu32(confparam, 6, 0);
1107 break;
1108 /* ToDo: add supported services */
1109 default:
1110 info = CapiFacilitySpecificFunctionNotSupported;
1111 /* Supplementary Service specific parameter */
1112 confparam[3] = 2; /* length */
1113 /* Supplementary services info: not supported */
1114 capimsg_setu16(confparam, 4,
1115 CapiSupplementaryServiceNotSupported);
1116 }
1117
1118 /* Facility confirmation parameter */
1119 confparam[0] = confparam[3] + 3; /* total length */
1120 /* Function: copy from _REQ message */
1121 capimsg_setu16(confparam, 1, function);
1122 /* Supplementary Service specific parameter already set above */
1123 break;
1124
1125 case CAPI_FACILITY_WAKEUP: /* ToDo */
1126 info = CapiFacilityNotSupported;
1127 confparam[0] = 2; /* length */
1128 /* Number of accepted awake request parameters: 0 */
1129 capimsg_setu16(confparam, 1, 0);
1130 break;
1131
1132 default:
1133 info = CapiFacilityNotSupported;
1134 confparam[0] = 0; /* empty struct */
1135 }
1136
1137 /* send FACILITY_CONF with given Info and confirmation parameter */
1138 capi_cmsg_answer(cmsg);
1139 cmsg->Info = info;
1140 cmsg->FacilityConfirmationParameter = confparam;
1141 msgsize += confparam[0]; /* length */
1142 cskb = alloc_skb(msgsize, GFP_ATOMIC);
1143 if (!cskb) {
1144 dev_err(cs->dev, "%s: out of memory\n", __func__);
1145 return;
1146 }
1147 capi_cmsg2message(cmsg, __skb_put(cskb, msgsize));
1148 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1149 capi_ctr_handle_message(&iif->ctr, ap->id, cskb);
1150}
1151
1152
1153/*
1154 * process LISTEN_REQ message
1155 * just store the masks in the application data structure
1156 */
1157static void do_listen_req(struct gigaset_capi_ctr *iif,
1158 struct gigaset_capi_appl *ap,
1159 struct sk_buff *skb)
1160{
1161 /* decode message */
1162 capi_message2cmsg(&iif->acmsg, skb->data);
1163 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
1164
1165 /* store listening parameters */
1166 ap->listenInfoMask = iif->acmsg.InfoMask;
1167 ap->listenCIPmask = iif->acmsg.CIPmask;
1168 send_conf(iif, ap, skb, CapiSuccess);
1169}
1170
1171/*
1172 * process ALERT_REQ message
1173 * nothing to do, Gigaset always alerts anyway
1174 */
1175static void do_alert_req(struct gigaset_capi_ctr *iif,
1176 struct gigaset_capi_appl *ap,
1177 struct sk_buff *skb)
1178{
1179 /* decode message */
1180 capi_message2cmsg(&iif->acmsg, skb->data);
1181 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
1182 send_conf(iif, ap, skb, CapiAlertAlreadySent);
1183}
1184
1185/*
1186 * process CONNECT_REQ message
1187 * allocate a B channel, prepare dial commands, queue a DIAL event,
1188 * emit CONNECT_CONF reply
1189 */
1190static void do_connect_req(struct gigaset_capi_ctr *iif,
1191 struct gigaset_capi_appl *ap,
1192 struct sk_buff *skb)
1193{
1194 struct cardstate *cs = iif->ctr.driverdata;
1195 _cmsg *cmsg = &iif->acmsg;
1196 struct bc_state *bcs;
1197 char **commands;
1198 char *s;
1199 u8 *pp;
1200 int i, l;
1201 u16 info;
1202
1203 /* decode message */
1204 capi_message2cmsg(cmsg, skb->data);
1205 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1206
1207 /* get free B channel & construct PLCI */
1208 bcs = gigaset_get_free_channel(cs);
1209 if (!bcs) {
1210 dev_notice(cs->dev, "%s: no B channel available\n",
1211 "CONNECT_REQ");
1212 send_conf(iif, ap, skb, CapiNoPlciAvailable);
1213 return;
1214 }
1215 ap->bcnext = NULL;
1216 bcs->ap = ap;
1217 cmsg->adr.adrPLCI |= (bcs->channel + 1) << 8;
1218
1219 /* build command table */
1220 commands = kzalloc(AT_NUM*(sizeof *commands), GFP_KERNEL);
1221 if (!commands)
1222 goto oom;
1223
1224 /* encode parameter: Called party number */
1225 pp = cmsg->CalledPartyNumber;
1226 if (pp == NULL || *pp == 0) {
1227 dev_notice(cs->dev, "%s: %s missing\n",
1228 "CONNECT_REQ", "Called party number");
1229 info = CapiIllMessageParmCoding;
1230 goto error;
1231 }
1232 l = *pp++;
1233 /* check type of number/numbering plan byte */
1234 switch (*pp) {
1235 case 0x80: /* unknown type / unknown numbering plan */
1236 case 0x81: /* unknown type / ISDN/Telephony numbering plan */
1237 break;
1238 default: /* others: warn about potential misinterpretation */
1239 dev_notice(cs->dev, "%s: %s type/plan 0x%02x unsupported\n",
1240 "CONNECT_REQ", "Called party number", *pp);
1241 }
1242 pp++;
1243 l--;
1244 /* translate "**" internal call prefix to CTP value */
1245 if (l >= 2 && pp[0] == '*' && pp[1] == '*') {
1246 s = "^SCTP=0\r";
1247 pp += 2;
1248 l -= 2;
1249 } else {
1250 s = "^SCTP=1\r";
1251 }
1252 commands[AT_TYPE] = kstrdup(s, GFP_KERNEL);
1253 if (!commands[AT_TYPE])
1254 goto oom;
1255 commands[AT_DIAL] = kmalloc(l+3, GFP_KERNEL);
1256 if (!commands[AT_DIAL])
1257 goto oom;
1258 snprintf(commands[AT_DIAL], l+3, "D%.*s\r", l, pp);
1259
1260 /* encode parameter: Calling party number */
1261 pp = cmsg->CallingPartyNumber;
1262 if (pp != NULL && *pp > 0) {
1263 l = *pp++;
1264
1265 /* check type of number/numbering plan byte */
1266 /* ToDo: allow for/handle Ext=1? */
1267 switch (*pp) {
1268 case 0x00: /* unknown type / unknown numbering plan */
1269 case 0x01: /* unknown type / ISDN/Telephony num. plan */
1270 break;
1271 default:
1272 dev_notice(cs->dev,
1273 "%s: %s type/plan 0x%02x unsupported\n",
1274 "CONNECT_REQ", "Calling party number", *pp);
1275 }
1276 pp++;
1277 l--;
1278
1279 /* check presentation indicator */
1280 if (!l) {
1281 dev_notice(cs->dev, "%s: %s IE truncated\n",
1282 "CONNECT_REQ", "Calling party number");
1283 info = CapiIllMessageParmCoding;
1284 goto error;
1285 }
1286 switch (*pp & 0xfc) { /* ignore Screening indicator */
1287 case 0x80: /* Presentation allowed */
1288 s = "^SCLIP=1\r";
1289 break;
1290 case 0xa0: /* Presentation restricted */
1291 s = "^SCLIP=0\r";
1292 break;
1293 default:
1294 dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
1295 "CONNECT_REQ",
1296 "Presentation/Screening indicator",
1297 *pp);
1298 s = "^SCLIP=1\r";
1299 }
1300 commands[AT_CLIP] = kstrdup(s, GFP_KERNEL);
1301 if (!commands[AT_CLIP])
1302 goto oom;
1303 pp++;
1304 l--;
1305
1306 if (l) {
1307 /* number */
1308 commands[AT_MSN] = kmalloc(l+8, GFP_KERNEL);
1309 if (!commands[AT_MSN])
1310 goto oom;
1311 snprintf(commands[AT_MSN], l+8, "^SMSN=%*s\r", l, pp);
1312 }
1313 }
1314
1315 /* check parameter: CIP Value */
1316 if (cmsg->CIPValue > ARRAY_SIZE(cip2bchlc) ||
1317 (cmsg->CIPValue > 0 && cip2bchlc[cmsg->CIPValue].bc == NULL)) {
1318 dev_notice(cs->dev, "%s: unknown CIP value %d\n",
1319 "CONNECT_REQ", cmsg->CIPValue);
1320 info = CapiCipValueUnknown;
1321 goto error;
1322 }
1323
1324 /* check/encode parameter: BC */
1325 if (cmsg->BC && cmsg->BC[0]) {
1326 /* explicit BC overrides CIP */
1327 l = 2*cmsg->BC[0] + 7;
1328 commands[AT_BC] = kmalloc(l, GFP_KERNEL);
1329 if (!commands[AT_BC])
1330 goto oom;
1331 strcpy(commands[AT_BC], "^SBC=");
1332 decode_ie(cmsg->BC, commands[AT_BC]+5);
1333 strcpy(commands[AT_BC] + l - 2, "\r");
1334 } else if (cip2bchlc[cmsg->CIPValue].bc) {
1335 l = strlen(cip2bchlc[cmsg->CIPValue].bc) + 7;
1336 commands[AT_BC] = kmalloc(l, GFP_KERNEL);
1337 if (!commands[AT_BC])
1338 goto oom;
1339 snprintf(commands[AT_BC], l, "^SBC=%s\r",
1340 cip2bchlc[cmsg->CIPValue].bc);
1341 }
1342
1343 /* check/encode parameter: HLC */
1344 if (cmsg->HLC && cmsg->HLC[0]) {
1345 /* explicit HLC overrides CIP */
1346 l = 2*cmsg->HLC[0] + 7;
1347 commands[AT_HLC] = kmalloc(l, GFP_KERNEL);
1348 if (!commands[AT_HLC])
1349 goto oom;
1350 strcpy(commands[AT_HLC], "^SHLC=");
1351 decode_ie(cmsg->HLC, commands[AT_HLC]+5);
1352 strcpy(commands[AT_HLC] + l - 2, "\r");
1353 } else if (cip2bchlc[cmsg->CIPValue].hlc) {
1354 l = strlen(cip2bchlc[cmsg->CIPValue].hlc) + 7;
1355 commands[AT_HLC] = kmalloc(l, GFP_KERNEL);
1356 if (!commands[AT_HLC])
1357 goto oom;
1358 snprintf(commands[AT_HLC], l, "^SHLC=%s\r",
1359 cip2bchlc[cmsg->CIPValue].hlc);
1360 }
1361
1362 /* check/encode parameter: B Protocol */
1363 if (cmsg->BProtocol == CAPI_DEFAULT) {
1364 bcs->proto2 = L2_HDLC;
1365 dev_warn(cs->dev,
1366 "B2 Protocol X.75 SLP unsupported, using Transparent\n");
1367 } else {
1368 switch (cmsg->B1protocol) {
1369 case 0:
1370 bcs->proto2 = L2_HDLC;
1371 break;
1372 case 1:
1373 bcs->proto2 = L2_BITSYNC;
1374 break;
1375 default:
1376 dev_warn(cs->dev,
1377 "B1 Protocol %u unsupported, using Transparent\n",
1378 cmsg->B1protocol);
1379 bcs->proto2 = L2_BITSYNC;
1380 }
1381 if (cmsg->B2protocol != 1)
1382 dev_warn(cs->dev,
1383 "B2 Protocol %u unsupported, using Transparent\n",
1384 cmsg->B2protocol);
1385 if (cmsg->B3protocol != 0)
1386 dev_warn(cs->dev,
1387 "B3 Protocol %u unsupported, using Transparent\n",
1388 cmsg->B3protocol);
1389 ignore_cstruct_param(cs, cmsg->B1configuration,
1390 "CONNECT_REQ", "B1 Configuration");
1391 ignore_cstruct_param(cs, cmsg->B2configuration,
1392 "CONNECT_REQ", "B2 Configuration");
1393 ignore_cstruct_param(cs, cmsg->B3configuration,
1394 "CONNECT_REQ", "B3 Configuration");
1395 }
1396 commands[AT_PROTO] = kmalloc(9, GFP_KERNEL);
1397 if (!commands[AT_PROTO])
1398 goto oom;
1399 snprintf(commands[AT_PROTO], 9, "^SBPR=%u\r", bcs->proto2);
1400
1401 /* ToDo: check/encode remaining parameters */
1402 ignore_cstruct_param(cs, cmsg->CalledPartySubaddress,
1403 "CONNECT_REQ", "Called pty subaddr");
1404 ignore_cstruct_param(cs, cmsg->CallingPartySubaddress,
1405 "CONNECT_REQ", "Calling pty subaddr");
1406 ignore_cstruct_param(cs, cmsg->LLC,
1407 "CONNECT_REQ", "LLC");
1408 if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
1409 ignore_cstruct_param(cs, cmsg->BChannelinformation,
1410 "CONNECT_REQ", "B Channel Information");
1411 ignore_cstruct_param(cs, cmsg->Keypadfacility,
1412 "CONNECT_REQ", "Keypad Facility");
1413 ignore_cstruct_param(cs, cmsg->Useruserdata,
1414 "CONNECT_REQ", "User-User Data");
1415 ignore_cstruct_param(cs, cmsg->Facilitydataarray,
1416 "CONNECT_REQ", "Facility Data Array");
1417 }
1418
1419 /* encode parameter: B channel to use */
1420 commands[AT_ISO] = kmalloc(9, GFP_KERNEL);
1421 if (!commands[AT_ISO])
1422 goto oom;
1423 snprintf(commands[AT_ISO], 9, "^SISO=%u\r",
1424 (unsigned) bcs->channel + 1);
1425
1426 /* queue & schedule EV_DIAL event */
1427 if (!gigaset_add_event(cs, &bcs->at_state, EV_DIAL, commands,
1428 bcs->at_state.seq_index, NULL))
1429 goto oom;
1430 gig_dbg(DEBUG_CMD, "scheduling DIAL");
1431 gigaset_schedule_event(cs);
1432 ap->connected = APCONN_SETUP;
1433 send_conf(iif, ap, skb, CapiSuccess);
1434 return;
1435
1436oom:
1437 dev_err(cs->dev, "%s: out of memory\n", __func__);
1438 info = CAPI_MSGOSRESOURCEERR;
1439error:
1440 if (commands)
1441 for (i = 0; i < AT_NUM; i++)
1442 kfree(commands[i]);
1443 kfree(commands);
1444 gigaset_free_channel(bcs);
1445 send_conf(iif, ap, skb, info);
1446}
1447
1448/*
1449 * process CONNECT_RESP message
1450 * checks protocol parameters and queues an ACCEPT or HUP event
1451 */
1452static void do_connect_resp(struct gigaset_capi_ctr *iif,
1453 struct gigaset_capi_appl *ap,
1454 struct sk_buff *skb)
1455{
1456 struct cardstate *cs = iif->ctr.driverdata;
1457 _cmsg *cmsg = &iif->acmsg;
1458 struct bc_state *bcs;
1459 struct gigaset_capi_appl *oap;
1460 int channel;
1461
1462 /* decode message */
1463 capi_message2cmsg(cmsg, skb->data);
1464 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1465 dev_kfree_skb_any(skb);
1466
1467 /* extract and check channel number from PLCI */
1468 channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
1469 if (!channel || channel > cs->channels) {
1470 dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
1471 "CONNECT_RESP", "PLCI", cmsg->adr.adrPLCI);
1472 return;
1473 }
1474 bcs = cs->bcs + channel - 1;
1475
1476 switch (cmsg->Reject) {
1477 case 0: /* Accept */
1478 /* drop all competing applications, keep only this one */
1479 for (oap = bcs->ap; oap != NULL; oap = oap->bcnext)
1480 if (oap != ap)
1481 send_disconnect_ind(bcs, oap,
1482 CapiCallGivenToOtherApplication);
1483 ap->bcnext = NULL;
1484 bcs->ap = ap;
1485 bcs->chstate |= CHS_NOTIFY_LL;
1486
1487 /* check/encode B channel protocol */
1488 if (cmsg->BProtocol == CAPI_DEFAULT) {
1489 bcs->proto2 = L2_HDLC;
1490 dev_warn(cs->dev,
1491 "B2 Protocol X.75 SLP unsupported, using Transparent\n");
1492 } else {
1493 switch (cmsg->B1protocol) {
1494 case 0:
1495 bcs->proto2 = L2_HDLC;
1496 break;
1497 case 1:
1498 bcs->proto2 = L2_BITSYNC;
1499 break;
1500 default:
1501 dev_warn(cs->dev,
1502 "B1 Protocol %u unsupported, using Transparent\n",
1503 cmsg->B1protocol);
1504 bcs->proto2 = L2_BITSYNC;
1505 }
1506 if (cmsg->B2protocol != 1)
1507 dev_warn(cs->dev,
1508 "B2 Protocol %u unsupported, using Transparent\n",
1509 cmsg->B2protocol);
1510 if (cmsg->B3protocol != 0)
1511 dev_warn(cs->dev,
1512 "B3 Protocol %u unsupported, using Transparent\n",
1513 cmsg->B3protocol);
1514 ignore_cstruct_param(cs, cmsg->B1configuration,
1515 "CONNECT_RESP", "B1 Configuration");
1516 ignore_cstruct_param(cs, cmsg->B2configuration,
1517 "CONNECT_RESP", "B2 Configuration");
1518 ignore_cstruct_param(cs, cmsg->B3configuration,
1519 "CONNECT_RESP", "B3 Configuration");
1520 }
1521
1522 /* ToDo: check/encode remaining parameters */
1523 ignore_cstruct_param(cs, cmsg->ConnectedNumber,
1524 "CONNECT_RESP", "Connected Number");
1525 ignore_cstruct_param(cs, cmsg->ConnectedSubaddress,
1526 "CONNECT_RESP", "Connected Subaddress");
1527 ignore_cstruct_param(cs, cmsg->LLC,
1528 "CONNECT_RESP", "LLC");
1529 if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
1530 ignore_cstruct_param(cs, cmsg->BChannelinformation,
1531 "CONNECT_RESP", "BChannel Information");
1532 ignore_cstruct_param(cs, cmsg->Keypadfacility,
1533 "CONNECT_RESP", "Keypad Facility");
1534 ignore_cstruct_param(cs, cmsg->Useruserdata,
1535 "CONNECT_RESP", "User-User Data");
1536 ignore_cstruct_param(cs, cmsg->Facilitydataarray,
1537 "CONNECT_RESP", "Facility Data Array");
1538 }
1539
1540 /* Accept call */
1541 if (!gigaset_add_event(cs, &cs->bcs[channel-1].at_state,
1542 EV_ACCEPT, NULL, 0, NULL))
1543 return;
1544 gig_dbg(DEBUG_CMD, "scheduling ACCEPT");
1545 gigaset_schedule_event(cs);
1546 return;
1547
1548 case 1: /* Ignore */
1549 /* send DISCONNECT_IND to this application */
1550 send_disconnect_ind(bcs, ap, 0);
1551
1552 /* remove it from the list of listening apps */
1553 if (bcs->ap == ap) {
1554 bcs->ap = ap->bcnext;
1555 if (bcs->ap == NULL)
1556 /* last one: stop ev-layer hupD notifications */
1557 bcs->chstate &= ~CHS_NOTIFY_LL;
1558 return;
1559 }
1560 for (oap = bcs->ap; oap != NULL; oap = oap->bcnext) {
1561 if (oap->bcnext == ap) {
1562 oap->bcnext = oap->bcnext->bcnext;
1563 return;
1564 }
1565 }
1566 dev_err(cs->dev, "%s: application %u not found\n",
1567 __func__, ap->id);
1568 return;
1569
1570 default: /* Reject */
1571 /* drop all competing applications, keep only this one */
1572 for (oap = bcs->ap; oap != NULL; oap = oap->bcnext)
1573 if (oap != ap)
1574 send_disconnect_ind(bcs, oap,
1575 CapiCallGivenToOtherApplication);
1576 ap->bcnext = NULL;
1577 bcs->ap = ap;
1578
1579 /* reject call - will trigger DISCONNECT_IND for this app */
1580 dev_info(cs->dev, "%s: Reject=%x\n",
1581 "CONNECT_RESP", cmsg->Reject);
1582 if (!gigaset_add_event(cs, &cs->bcs[channel-1].at_state,
1583 EV_HUP, NULL, 0, NULL))
1584 return;
1585 gig_dbg(DEBUG_CMD, "scheduling HUP");
1586 gigaset_schedule_event(cs);
1587 return;
1588 }
1589}
1590
1591/*
1592 * process CONNECT_B3_REQ message
1593 * build NCCI and emit CONNECT_B3_CONF reply
1594 */
1595static void do_connect_b3_req(struct gigaset_capi_ctr *iif,
1596 struct gigaset_capi_appl *ap,
1597 struct sk_buff *skb)
1598{
1599 struct cardstate *cs = iif->ctr.driverdata;
1600 _cmsg *cmsg = &iif->acmsg;
1601 int channel;
1602
1603 /* decode message */
1604 capi_message2cmsg(cmsg, skb->data);
1605 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1606
1607 /* extract and check channel number from PLCI */
1608 channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
1609 if (!channel || channel > cs->channels) {
1610 dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
1611 "CONNECT_B3_REQ", "PLCI", cmsg->adr.adrPLCI);
1612 send_conf(iif, ap, skb, CapiIllContrPlciNcci);
1613 return;
1614 }
1615
1616 /* mark logical connection active */
1617 ap->connected = APCONN_ACTIVE;
1618
1619 /* build NCCI: always 1 (one B3 connection only) */
1620 cmsg->adr.adrNCCI |= 1 << 16;
1621
1622 /* NCPI parameter: not applicable for B3 Transparent */
1623 ignore_cstruct_param(cs, cmsg->NCPI, "CONNECT_B3_REQ", "NCPI");
1624 send_conf(iif, ap, skb, (cmsg->NCPI && cmsg->NCPI[0]) ?
1625 CapiNcpiNotSupportedByProtocol : CapiSuccess);
1626}
1627
1628/*
1629 * process CONNECT_B3_RESP message
1630 * Depending on the Reject parameter, either emit CONNECT_B3_ACTIVE_IND
1631 * or queue EV_HUP and emit DISCONNECT_B3_IND.
1632 * The emitted message is always shorter than the received one,
1633 * allowing to reuse the skb.
1634 */
1635static void do_connect_b3_resp(struct gigaset_capi_ctr *iif,
1636 struct gigaset_capi_appl *ap,
1637 struct sk_buff *skb)
1638{
1639 struct cardstate *cs = iif->ctr.driverdata;
1640 _cmsg *cmsg = &iif->acmsg;
1641 struct bc_state *bcs;
1642 int channel;
1643 unsigned int msgsize;
1644 u8 command;
1645
1646 /* decode message */
1647 capi_message2cmsg(cmsg, skb->data);
1648 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1649
1650 /* extract and check channel number and NCCI */
1651 channel = (cmsg->adr.adrNCCI >> 8) & 0xff;
1652 if (!channel || channel > cs->channels ||
1653 ((cmsg->adr.adrNCCI >> 16) & 0xffff) != 1) {
1654 dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
1655 "CONNECT_B3_RESP", "NCCI", cmsg->adr.adrNCCI);
1656 dev_kfree_skb_any(skb);
1657 return;
1658 }
1659 bcs = &cs->bcs[channel-1];
1660
1661 if (cmsg->Reject) {
1662 /* Reject: clear B3 connect received flag */
1663 ap->connected = APCONN_SETUP;
1664
1665 /* trigger hangup, causing eventual DISCONNECT_IND */
1666 if (!gigaset_add_event(cs, &bcs->at_state,
1667 EV_HUP, NULL, 0, NULL)) {
1668 dev_err(cs->dev, "%s: out of memory\n", __func__);
1669 dev_kfree_skb_any(skb);
1670 return;
1671 }
1672 gig_dbg(DEBUG_CMD, "scheduling HUP");
1673 gigaset_schedule_event(cs);
1674
1675 /* emit DISCONNECT_B3_IND */
1676 command = CAPI_DISCONNECT_B3;
1677 msgsize = CAPI_DISCONNECT_B3_IND_BASELEN;
1678 } else {
1679 /*
1680 * Accept: emit CONNECT_B3_ACTIVE_IND immediately, as
1681 * we only send CONNECT_B3_IND if the B channel is up
1682 */
1683 command = CAPI_CONNECT_B3_ACTIVE;
1684 msgsize = CAPI_CONNECT_B3_ACTIVE_IND_BASELEN;
1685 }
1686 capi_cmsg_header(cmsg, ap->id, command, CAPI_IND,
1687 ap->nextMessageNumber++, cmsg->adr.adrNCCI);
1688 __skb_trim(skb, msgsize);
1689 capi_cmsg2message(cmsg, skb->data);
1690 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1691 capi_ctr_handle_message(&iif->ctr, ap->id, skb);
1692}
1693
1694/*
1695 * process DISCONNECT_REQ message
1696 * schedule EV_HUP and emit DISCONNECT_B3_IND if necessary,
1697 * emit DISCONNECT_CONF reply
1698 */
1699static void do_disconnect_req(struct gigaset_capi_ctr *iif,
1700 struct gigaset_capi_appl *ap,
1701 struct sk_buff *skb)
1702{
1703 struct cardstate *cs = iif->ctr.driverdata;
1704 _cmsg *cmsg = &iif->acmsg;
1705 struct bc_state *bcs;
1706 _cmsg *b3cmsg;
1707 struct sk_buff *b3skb;
1708 int channel;
1709
1710 /* decode message */
1711 capi_message2cmsg(cmsg, skb->data);
1712 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1713
1714 /* extract and check channel number from PLCI */
1715 channel = (cmsg->adr.adrPLCI >> 8) & 0xff;
1716 if (!channel || channel > cs->channels) {
1717 dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
1718 "DISCONNECT_REQ", "PLCI", cmsg->adr.adrPLCI);
1719 send_conf(iif, ap, skb, CapiIllContrPlciNcci);
1720 return;
1721 }
1722 bcs = cs->bcs + channel - 1;
1723
1724 /* ToDo: process parameter: Additional info */
1725 if (cmsg->AdditionalInfo != CAPI_DEFAULT) {
1726 ignore_cstruct_param(cs, cmsg->BChannelinformation,
1727 "DISCONNECT_REQ", "B Channel Information");
1728 ignore_cstruct_param(cs, cmsg->Keypadfacility,
1729 "DISCONNECT_REQ", "Keypad Facility");
1730 ignore_cstruct_param(cs, cmsg->Useruserdata,
1731 "DISCONNECT_REQ", "User-User Data");
1732 ignore_cstruct_param(cs, cmsg->Facilitydataarray,
1733 "DISCONNECT_REQ", "Facility Data Array");
1734 }
1735
1736 /* skip if DISCONNECT_IND already sent */
1737 if (!ap->connected)
1738 return;
1739
1740 /* check for active logical connection */
1741 if (ap->connected >= APCONN_ACTIVE) {
1742 /*
1743 * emit DISCONNECT_B3_IND with cause 0x3301
1744 * use separate cmsg structure, as the content of iif->acmsg
1745 * is still needed for creating the _CONF message
1746 */
1747 b3cmsg = kmalloc(sizeof(*b3cmsg), GFP_KERNEL);
1748 if (!b3cmsg) {
1749 dev_err(cs->dev, "%s: out of memory\n", __func__);
1750 send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
1751 return;
1752 }
1753 capi_cmsg_header(b3cmsg, ap->id, CAPI_DISCONNECT_B3, CAPI_IND,
1754 ap->nextMessageNumber++,
1755 cmsg->adr.adrPLCI | (1 << 16));
1756 b3cmsg->Reason_B3 = CapiProtocolErrorLayer1;
1757 b3skb = alloc_skb(CAPI_DISCONNECT_B3_IND_BASELEN, GFP_KERNEL);
1758 if (b3skb == NULL) {
1759 dev_err(cs->dev, "%s: out of memory\n", __func__);
1760 send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
1761 return;
1762 }
1763 capi_cmsg2message(b3cmsg,
1764 __skb_put(b3skb, CAPI_DISCONNECT_B3_IND_BASELEN));
1765 kfree(b3cmsg);
1766 capi_ctr_handle_message(&iif->ctr, ap->id, b3skb);
1767 }
1768
1769 /* trigger hangup, causing eventual DISCONNECT_IND */
1770 if (!gigaset_add_event(cs, &bcs->at_state, EV_HUP, NULL, 0, NULL)) {
1771 dev_err(cs->dev, "%s: out of memory\n", __func__);
1772 send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
1773 return;
1774 }
1775 gig_dbg(DEBUG_CMD, "scheduling HUP");
1776 gigaset_schedule_event(cs);
1777
1778 /* emit reply */
1779 send_conf(iif, ap, skb, CapiSuccess);
1780}
1781
1782/*
1783 * process DISCONNECT_B3_REQ message
1784 * schedule EV_HUP and emit DISCONNECT_B3_CONF reply
1785 */
1786static void do_disconnect_b3_req(struct gigaset_capi_ctr *iif,
1787 struct gigaset_capi_appl *ap,
1788 struct sk_buff *skb)
1789{
1790 struct cardstate *cs = iif->ctr.driverdata;
1791 _cmsg *cmsg = &iif->acmsg;
1792 int channel;
1793
1794 /* decode message */
1795 capi_message2cmsg(cmsg, skb->data);
1796 dump_cmsg(DEBUG_CMD, __func__, cmsg);
1797
1798 /* extract and check channel number and NCCI */
1799 channel = (cmsg->adr.adrNCCI >> 8) & 0xff;
1800 if (!channel || channel > cs->channels ||
1801 ((cmsg->adr.adrNCCI >> 16) & 0xffff) != 1) {
1802 dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
1803 "DISCONNECT_B3_REQ", "NCCI", cmsg->adr.adrNCCI);
1804 send_conf(iif, ap, skb, CapiIllContrPlciNcci);
1805 return;
1806 }
1807
1808 /* reject if logical connection not active */
1809 if (ap->connected < APCONN_ACTIVE) {
1810 send_conf(iif, ap, skb,
1811 CapiMessageNotSupportedInCurrentState);
1812 return;
1813 }
1814
1815 /* trigger hangup, causing eventual DISCONNECT_B3_IND */
1816 if (!gigaset_add_event(cs, &cs->bcs[channel-1].at_state,
1817 EV_HUP, NULL, 0, NULL)) {
1818 dev_err(cs->dev, "%s: out of memory\n", __func__);
1819 send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
1820 return;
1821 }
1822 gig_dbg(DEBUG_CMD, "scheduling HUP");
1823 gigaset_schedule_event(cs);
1824
1825 /* NCPI parameter: not applicable for B3 Transparent */
1826 ignore_cstruct_param(cs, cmsg->NCPI,
1827 "DISCONNECT_B3_REQ", "NCPI");
1828 send_conf(iif, ap, skb, (cmsg->NCPI && cmsg->NCPI[0]) ?
1829 CapiNcpiNotSupportedByProtocol : CapiSuccess);
1830}
1831
1832/*
1833 * process DATA_B3_REQ message
1834 */
1835static void do_data_b3_req(struct gigaset_capi_ctr *iif,
1836 struct gigaset_capi_appl *ap,
1837 struct sk_buff *skb)
1838{
1839 struct cardstate *cs = iif->ctr.driverdata;
1840 int channel = CAPIMSG_PLCI_PART(skb->data);
1841 u16 ncci = CAPIMSG_NCCI_PART(skb->data);
1842 u16 msglen = CAPIMSG_LEN(skb->data);
1843 u16 datalen = CAPIMSG_DATALEN(skb->data);
1844 u16 flags = CAPIMSG_FLAGS(skb->data);
1845
1846 /* frequent message, avoid _cmsg overhead */
1847 dump_rawmsg(DEBUG_LLDATA, "DATA_B3_REQ", skb->data);
1848
1849 gig_dbg(DEBUG_LLDATA,
1850 "Receiving data from LL (ch: %d, flg: %x, sz: %d|%d)",
1851 channel, flags, msglen, datalen);
1852
1853 /* check parameters */
1854 if (channel == 0 || channel > cs->channels || ncci != 1) {
1855 dev_notice(cs->dev, "%s: invalid %s 0x%02x\n",
1856 "DATA_B3_REQ", "NCCI", CAPIMSG_NCCI(skb->data));
1857 send_conf(iif, ap, skb, CapiIllContrPlciNcci);
1858 return;
1859 }
1860 if (msglen != CAPI_DATA_B3_REQ_LEN && msglen != CAPI_DATA_B3_REQ_LEN64)
1861 dev_notice(cs->dev, "%s: unexpected length %d\n",
1862 "DATA_B3_REQ", msglen);
1863 if (msglen + datalen != skb->len)
1864 dev_notice(cs->dev, "%s: length mismatch (%d+%d!=%d)\n",
1865 "DATA_B3_REQ", msglen, datalen, skb->len);
1866 if (msglen + datalen > skb->len) {
1867 /* message too short for announced data length */
1868 send_conf(iif, ap, skb, CapiIllMessageParmCoding); /* ? */
1869 return;
1870 }
1871 if (flags & CAPI_FLAGS_RESERVED) {
1872 dev_notice(cs->dev, "%s: reserved flags set (%x)\n",
1873 "DATA_B3_REQ", flags);
1874 send_conf(iif, ap, skb, CapiIllMessageParmCoding);
1875 return;
1876 }
1877
1878 /* reject if logical connection not active */
1879 if (ap->connected < APCONN_ACTIVE) {
1880 send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
1881 return;
1882 }
1883
1884 /* pull CAPI message into link layer header */
1885 skb_reset_mac_header(skb);
1886 skb->mac_len = msglen;
1887 skb_pull(skb, msglen);
1888
1889 /* pass to device-specific module */
1890 if (cs->ops->send_skb(&cs->bcs[channel-1], skb) < 0) {
1891 send_conf(iif, ap, skb, CAPI_MSGOSRESOURCEERR);
1892 return;
1893 }
1894
1895 /* DATA_B3_CONF reply will be sent by gigaset_skb_sent() */
1896
1897 /*
1898 * ToDo: honor unset "delivery confirmation" bit
1899 * (send DATA_B3_CONF immediately?)
1900 */
1901}
1902
1903/*
1904 * process RESET_B3_REQ message
1905 * just always reply "not supported by current protocol"
1906 */
1907static void do_reset_b3_req(struct gigaset_capi_ctr *iif,
1908 struct gigaset_capi_appl *ap,
1909 struct sk_buff *skb)
1910{
1911 /* decode message */
1912 capi_message2cmsg(&iif->acmsg, skb->data);
1913 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
1914 send_conf(iif, ap, skb,
1915 CapiResetProcedureNotSupportedByCurrentProtocol);
1916}
1917
1918/*
1919 * dump unsupported/ignored messages at most twice per minute,
1920 * some apps send those very frequently
1921 */
1922static unsigned long ignored_msg_dump_time;
1923
1924/*
1925 * unsupported CAPI message handler
1926 */
1927static void do_unsupported(struct gigaset_capi_ctr *iif,
1928 struct gigaset_capi_appl *ap,
1929 struct sk_buff *skb)
1930{
1931 /* decode message */
1932 capi_message2cmsg(&iif->acmsg, skb->data);
1933 if (printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000))
1934 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
1935 send_conf(iif, ap, skb, CapiMessageNotSupportedInCurrentState);
1936}
1937
1938/*
1939 * CAPI message handler: no-op
1940 */
1941static void do_nothing(struct gigaset_capi_ctr *iif,
1942 struct gigaset_capi_appl *ap,
1943 struct sk_buff *skb)
1944{
1945 if (printk_timed_ratelimit(&ignored_msg_dump_time, 30 * 1000)) {
1946 /* decode message */
1947 capi_message2cmsg(&iif->acmsg, skb->data);
1948 dump_cmsg(DEBUG_CMD, __func__, &iif->acmsg);
1949 }
1950 dev_kfree_skb_any(skb);
1951}
1952
1953static void do_data_b3_resp(struct gigaset_capi_ctr *iif,
1954 struct gigaset_capi_appl *ap,
1955 struct sk_buff *skb)
1956{
1957 dump_rawmsg(DEBUG_LLDATA, __func__, skb->data);
1958 dev_kfree_skb_any(skb);
1959}
1960
/* table of outgoing CAPI message handlers with lookup function */

/* signature shared by all outgoing-message handlers above */
typedef void (*capi_send_handler_t)(struct gigaset_capi_ctr *,
				    struct gigaset_capi_appl *,
				    struct sk_buff *);

/* command -> handler mapping, scanned linearly by
 * lookup_capi_send_handler(); keep the hottest entries first */
static struct {
	u16 cmd;
	capi_send_handler_t handler;
} capi_send_handler_table[] = {
	/* most frequent messages first for faster lookup */
	{ CAPI_DATA_B3_REQ, do_data_b3_req },
	{ CAPI_DATA_B3_RESP, do_data_b3_resp },

	{ CAPI_ALERT_REQ, do_alert_req },
	{ CAPI_CONNECT_ACTIVE_RESP, do_nothing },
	{ CAPI_CONNECT_B3_ACTIVE_RESP, do_nothing },
	{ CAPI_CONNECT_B3_REQ, do_connect_b3_req },
	{ CAPI_CONNECT_B3_RESP, do_connect_b3_resp },
	{ CAPI_CONNECT_B3_T90_ACTIVE_RESP, do_nothing },
	{ CAPI_CONNECT_REQ, do_connect_req },
	{ CAPI_CONNECT_RESP, do_connect_resp },
	{ CAPI_DISCONNECT_B3_REQ, do_disconnect_b3_req },
	{ CAPI_DISCONNECT_B3_RESP, do_nothing },
	{ CAPI_DISCONNECT_REQ, do_disconnect_req },
	{ CAPI_DISCONNECT_RESP, do_nothing },
	{ CAPI_FACILITY_REQ, do_facility_req },
	{ CAPI_FACILITY_RESP, do_nothing },
	{ CAPI_LISTEN_REQ, do_listen_req },
	{ CAPI_SELECT_B_PROTOCOL_REQ, do_unsupported },
	{ CAPI_RESET_B3_REQ, do_reset_b3_req },
	{ CAPI_RESET_B3_RESP, do_nothing },

	/*
	 * ToDo: support overlap sending (requires ev-layer state
	 * machine extension to generate additional ATD commands)
	 */
	{ CAPI_INFO_REQ, do_unsupported },
	{ CAPI_INFO_RESP, do_nothing },

	/*
	 * ToDo: what's the proper response for these?
	 */
	{ CAPI_MANUFACTURER_REQ, do_nothing },
	{ CAPI_MANUFACTURER_RESP, do_nothing },
};
2006
2007/* look up handler */
2008static inline capi_send_handler_t lookup_capi_send_handler(const u16 cmd)
2009{
2010 size_t i;
2011
2012 for (i = 0; i < ARRAY_SIZE(capi_send_handler_table); i++)
2013 if (capi_send_handler_table[i].cmd == cmd)
2014 return capi_send_handler_table[i].handler;
2015 return NULL;
2016}
2017
2018
2019/**
2020 * gigaset_send_message() - accept a CAPI message from an application
2021 * @ctr: controller descriptor structure.
2022 * @skb: CAPI message.
2023 *
2024 * Return value: CAPI error code
2025 * Note: capidrv (and probably others, too) only uses the return value to
2026 * decide whether it has to free the skb (only if result != CAPI_NOERROR (0))
2027 */
2028static u16 gigaset_send_message(struct capi_ctr *ctr, struct sk_buff *skb)
2029{
2030 struct gigaset_capi_ctr *iif
2031 = container_of(ctr, struct gigaset_capi_ctr, ctr);
2032 struct cardstate *cs = ctr->driverdata;
2033 struct gigaset_capi_appl *ap;
2034 capi_send_handler_t handler;
2035
2036 /* can only handle linear sk_buffs */
2037 if (skb_linearize(skb) < 0) {
2038 dev_warn(cs->dev, "%s: skb_linearize failed\n", __func__);
2039 return CAPI_MSGOSRESOURCEERR;
2040 }
2041
2042 /* retrieve application data structure */
2043 ap = get_appl(iif, CAPIMSG_APPID(skb->data));
2044 if (!ap) {
2045 dev_notice(cs->dev, "%s: application %u not registered\n",
2046 __func__, CAPIMSG_APPID(skb->data));
2047 return CAPI_ILLAPPNR;
2048 }
2049
2050 /* look up command */
2051 handler = lookup_capi_send_handler(CAPIMSG_CMD(skb->data));
2052 if (!handler) {
2053 /* unknown/unsupported message type */
2054 if (printk_ratelimit())
2055 dev_notice(cs->dev, "%s: unsupported message %u\n",
2056 __func__, CAPIMSG_CMD(skb->data));
2057 return CAPI_ILLCMDORSUBCMDORMSGTOSMALL;
2058 }
2059
2060 /* serialize */
2061 if (atomic_add_return(1, &iif->sendqlen) > 1) {
2062 /* queue behind other messages */
2063 skb_queue_tail(&iif->sendqueue, skb);
2064 return CAPI_NOERROR;
2065 }
2066
2067 /* process message */
2068 handler(iif, ap, skb);
2069
2070 /* process other messages arrived in the meantime */
2071 while (atomic_sub_return(1, &iif->sendqlen) > 0) {
2072 skb = skb_dequeue(&iif->sendqueue);
2073 if (!skb) {
2074 /* should never happen */
2075 dev_err(cs->dev, "%s: send queue empty\n", __func__);
2076 continue;
2077 }
2078 ap = get_appl(iif, CAPIMSG_APPID(skb->data));
2079 if (!ap) {
2080 /* could that happen? */
2081 dev_warn(cs->dev, "%s: application %u vanished\n",
2082 __func__, CAPIMSG_APPID(skb->data));
2083 continue;
2084 }
2085 handler = lookup_capi_send_handler(CAPIMSG_CMD(skb->data));
2086 if (!handler) {
2087 /* should never happen */
2088 dev_err(cs->dev, "%s: handler %x vanished\n",
2089 __func__, CAPIMSG_CMD(skb->data));
2090 continue;
2091 }
2092 handler(iif, ap, skb);
2093 }
2094
2095 return CAPI_NOERROR;
2096}
2097
2098/**
2099 * gigaset_procinfo() - build single line description for controller
2100 * @ctr: controller descriptor structure.
2101 *
2102 * Return value: pointer to generated string (null terminated)
2103 */
2104static char *gigaset_procinfo(struct capi_ctr *ctr)
2105{
2106 return ctr->name; /* ToDo: more? */
2107}
2108
2109/**
2110 * gigaset_ctr_read_proc() - build controller proc file entry
2111 * @page: buffer of PAGE_SIZE bytes for receiving the entry.
2112 * @start: unused.
2113 * @off: unused.
2114 * @count: unused.
2115 * @eof: unused.
2116 * @ctr: controller descriptor structure.
2117 *
2118 * Return value: length of generated entry
2119 */
2120static int gigaset_ctr_read_proc(char *page, char **start, off_t off,
2121 int count, int *eof, struct capi_ctr *ctr)
2122{
2123 struct cardstate *cs = ctr->driverdata;
2124 char *s;
2125 int i;
2126 int len = 0;
2127 len += sprintf(page+len, "%-16s %s\n", "name", ctr->name);
2128 len += sprintf(page+len, "%-16s %s %s\n", "dev",
2129 dev_driver_string(cs->dev), dev_name(cs->dev));
2130 len += sprintf(page+len, "%-16s %d\n", "id", cs->myid);
2131 if (cs->gotfwver)
2132 len += sprintf(page+len, "%-16s %d.%d.%d.%d\n", "firmware",
2133 cs->fwver[0], cs->fwver[1], cs->fwver[2], cs->fwver[3]);
2134 len += sprintf(page+len, "%-16s %d\n", "channels",
2135 cs->channels);
2136 len += sprintf(page+len, "%-16s %s\n", "onechannel",
2137 cs->onechannel ? "yes" : "no");
2138
2139 switch (cs->mode) {
2140 case M_UNKNOWN:
2141 s = "unknown";
2142 break;
2143 case M_CONFIG:
2144 s = "config";
2145 break;
2146 case M_UNIMODEM:
2147 s = "Unimodem";
2148 break;
2149 case M_CID:
2150 s = "CID";
2151 break;
2152 default:
2153 s = "??";
2154 }
2155 len += sprintf(page+len, "%-16s %s\n", "mode", s);
2156
2157 switch (cs->mstate) {
2158 case MS_UNINITIALIZED:
2159 s = "uninitialized";
2160 break;
2161 case MS_INIT:
2162 s = "init";
2163 break;
2164 case MS_LOCKED:
2165 s = "locked";
2166 break;
2167 case MS_SHUTDOWN:
2168 s = "shutdown";
2169 break;
2170 case MS_RECOVER:
2171 s = "recover";
2172 break;
2173 case MS_READY:
2174 s = "ready";
2175 break;
2176 default:
2177 s = "??";
2178 }
2179 len += sprintf(page+len, "%-16s %s\n", "mstate", s);
2180
2181 len += sprintf(page+len, "%-16s %s\n", "running",
2182 cs->running ? "yes" : "no");
2183 len += sprintf(page+len, "%-16s %s\n", "connected",
2184 cs->connected ? "yes" : "no");
2185 len += sprintf(page+len, "%-16s %s\n", "isdn_up",
2186 cs->isdn_up ? "yes" : "no");
2187 len += sprintf(page+len, "%-16s %s\n", "cidmode",
2188 cs->cidmode ? "yes" : "no");
2189
2190 for (i = 0; i < cs->channels; i++) {
2191 len += sprintf(page+len, "[%d]%-13s %d\n", i, "corrupted",
2192 cs->bcs[i].corrupted);
2193 len += sprintf(page+len, "[%d]%-13s %d\n", i, "trans_down",
2194 cs->bcs[i].trans_down);
2195 len += sprintf(page+len, "[%d]%-13s %d\n", i, "trans_up",
2196 cs->bcs[i].trans_up);
2197 len += sprintf(page+len, "[%d]%-13s %d\n", i, "chstate",
2198 cs->bcs[i].chstate);
2199 switch (cs->bcs[i].proto2) {
2200 case L2_BITSYNC:
2201 s = "bitsync";
2202 break;
2203 case L2_HDLC:
2204 s = "HDLC";
2205 break;
2206 case L2_VOICE:
2207 s = "voice";
2208 break;
2209 default:
2210 s = "??";
2211 }
2212 len += sprintf(page+len, "[%d]%-13s %s\n", i, "proto2", s);
2213 }
2214 return len;
2215}
2216
2217
/* driver descriptor registered with the kernel CAPI subsystem in
 * gigaset_isdn_register() and removed in gigaset_isdn_unregister() */
static struct capi_driver capi_driver_gigaset = {
	.name		= "gigaset",
	.revision	= "1.0",
};
2222
2223/**
2224 * gigaset_isdn_register() - register to LL
2225 * @cs: device descriptor structure.
2226 * @isdnid: device name.
2227 *
2228 * Called by main module to register the device with the LL.
2229 *
2230 * Return value: 1 for success, 0 for failure
2231 */
2232int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
2233{
2234 struct gigaset_capi_ctr *iif;
2235 int rc;
2236
2237 pr_info("Kernel CAPI interface\n");
2238
2239 iif = kmalloc(sizeof(*iif), GFP_KERNEL);
2240 if (!iif) {
2241 pr_err("%s: out of memory\n", __func__);
2242 return 0;
2243 }
2244
2245 /* register driver with CAPI (ToDo: what for?) */
2246 register_capi_driver(&capi_driver_gigaset);
2247
2248 /* prepare controller structure */
2249 iif->ctr.owner = THIS_MODULE;
2250 iif->ctr.driverdata = cs;
2251 strncpy(iif->ctr.name, isdnid, sizeof(iif->ctr.name));
2252 iif->ctr.driver_name = "gigaset";
2253 iif->ctr.load_firmware = gigaset_load_firmware;
2254 iif->ctr.reset_ctr = gigaset_reset_ctr;
2255 iif->ctr.register_appl = gigaset_register_appl;
2256 iif->ctr.release_appl = gigaset_release_appl;
2257 iif->ctr.send_message = gigaset_send_message;
2258 iif->ctr.procinfo = gigaset_procinfo;
2259 iif->ctr.ctr_read_proc = gigaset_ctr_read_proc;
2260 INIT_LIST_HEAD(&iif->appls);
2261 skb_queue_head_init(&iif->sendqueue);
2262 atomic_set(&iif->sendqlen, 0);
2263
2264 /* register controller with CAPI */
2265 rc = attach_capi_ctr(&iif->ctr);
2266 if (rc) {
2267 pr_err("attach_capi_ctr failed (%d)\n", rc);
2268 unregister_capi_driver(&capi_driver_gigaset);
2269 kfree(iif);
2270 return 0;
2271 }
2272
2273 cs->iif = iif;
2274 cs->hw_hdr_len = CAPI_DATA_B3_REQ_LEN;
2275 return 1;
2276}
2277
2278/**
2279 * gigaset_isdn_unregister() - unregister from LL
2280 * @cs: device descriptor structure.
2281 *
2282 * Called by main module to unregister the device from the LL.
2283 */
2284void gigaset_isdn_unregister(struct cardstate *cs)
2285{
2286 struct gigaset_capi_ctr *iif = cs->iif;
2287
2288 detach_capi_ctr(&iif->ctr);
2289 kfree(iif);
2290 cs->iif = NULL;
2291 unregister_capi_driver(&capi_driver_gigaset);
2292}
diff --git a/drivers/isdn/gigaset/common.c b/drivers/isdn/gigaset/common.c
index 33dcd8d72b7c..c438cfcb7c6d 100644
--- a/drivers/isdn/gigaset/common.c
+++ b/drivers/isdn/gigaset/common.c
@@ -108,7 +108,7 @@ int gigaset_enterconfigmode(struct cardstate *cs)
108{ 108{
109 int i, r; 109 int i, r;
110 110
111 cs->control_state = TIOCM_RTS; //FIXME 111 cs->control_state = TIOCM_RTS;
112 112
113 r = setflags(cs, TIOCM_DTR, 200); 113 r = setflags(cs, TIOCM_DTR, 200);
114 if (r < 0) 114 if (r < 0)
@@ -132,10 +132,10 @@ int gigaset_enterconfigmode(struct cardstate *cs)
132 132
133error: 133error:
134 dev_err(cs->dev, "error %d on setuartbits\n", -r); 134 dev_err(cs->dev, "error %d on setuartbits\n", -r);
135 cs->control_state = TIOCM_RTS|TIOCM_DTR; // FIXME is this a good value? 135 cs->control_state = TIOCM_RTS|TIOCM_DTR;
136 cs->ops->set_modem_ctrl(cs, 0, TIOCM_RTS|TIOCM_DTR); 136 cs->ops->set_modem_ctrl(cs, 0, TIOCM_RTS|TIOCM_DTR);
137 137
138 return -1; //r 138 return -1;
139} 139}
140 140
141static int test_timeout(struct at_state_t *at_state) 141static int test_timeout(struct at_state_t *at_state)
@@ -150,10 +150,9 @@ static int test_timeout(struct at_state_t *at_state)
150 } 150 }
151 151
152 if (!gigaset_add_event(at_state->cs, at_state, EV_TIMEOUT, NULL, 152 if (!gigaset_add_event(at_state->cs, at_state, EV_TIMEOUT, NULL,
153 at_state->timer_index, NULL)) { 153 at_state->timer_index, NULL))
154 //FIXME what should we do? 154 dev_err(at_state->cs->dev, "%s: out of memory\n",
155 } 155 __func__);
156
157 return 1; 156 return 1;
158} 157}
159 158
@@ -207,6 +206,32 @@ int gigaset_get_channel(struct bc_state *bcs)
207 return 1; 206 return 1;
208} 207}
209 208
209struct bc_state *gigaset_get_free_channel(struct cardstate *cs)
210{
211 unsigned long flags;
212 int i;
213
214 spin_lock_irqsave(&cs->lock, flags);
215 if (!try_module_get(cs->driver->owner)) {
216 gig_dbg(DEBUG_ANY,
217 "could not get module for allocating channel");
218 spin_unlock_irqrestore(&cs->lock, flags);
219 return NULL;
220 }
221 for (i = 0; i < cs->channels; ++i)
222 if (!cs->bcs[i].use_count) {
223 ++cs->bcs[i].use_count;
224 cs->bcs[i].busy = 1;
225 spin_unlock_irqrestore(&cs->lock, flags);
226 gig_dbg(DEBUG_ANY, "allocated channel %d", i);
227 return cs->bcs + i;
228 }
229 module_put(cs->driver->owner);
230 spin_unlock_irqrestore(&cs->lock, flags);
231 gig_dbg(DEBUG_ANY, "no free channel");
232 return NULL;
233}
234
210void gigaset_free_channel(struct bc_state *bcs) 235void gigaset_free_channel(struct bc_state *bcs)
211{ 236{
212 unsigned long flags; 237 unsigned long flags;
@@ -367,16 +392,15 @@ static void gigaset_freebcs(struct bc_state *bcs)
367 int i; 392 int i;
368 393
369 gig_dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel); 394 gig_dbg(DEBUG_INIT, "freeing bcs[%d]->hw", bcs->channel);
370 if (!bcs->cs->ops->freebcshw(bcs)) { 395 if (!bcs->cs->ops->freebcshw(bcs))
371 gig_dbg(DEBUG_INIT, "failed"); 396 gig_dbg(DEBUG_INIT, "failed");
372 }
373 397
374 gig_dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel); 398 gig_dbg(DEBUG_INIT, "clearing bcs[%d]->at_state", bcs->channel);
375 clear_at_state(&bcs->at_state); 399 clear_at_state(&bcs->at_state);
376 gig_dbg(DEBUG_INIT, "freeing bcs[%d]->skb", bcs->channel); 400 gig_dbg(DEBUG_INIT, "freeing bcs[%d]->skb", bcs->channel);
401 dev_kfree_skb(bcs->skb);
402 bcs->skb = NULL;
377 403
378 if (bcs->skb)
379 dev_kfree_skb(bcs->skb);
380 for (i = 0; i < AT_NUM; ++i) { 404 for (i = 0; i < AT_NUM; ++i) {
381 kfree(bcs->commands[i]); 405 kfree(bcs->commands[i]);
382 bcs->commands[i] = NULL; 406 bcs->commands[i] = NULL;
@@ -463,6 +487,12 @@ void gigaset_freecs(struct cardstate *cs)
463 487
464 switch (cs->cs_init) { 488 switch (cs->cs_init) {
465 default: 489 default:
490 /* clear B channel structures */
491 for (i = 0; i < cs->channels; ++i) {
492 gig_dbg(DEBUG_INIT, "clearing bcs[%d]", i);
493 gigaset_freebcs(cs->bcs + i);
494 }
495
466 /* clear device sysfs */ 496 /* clear device sysfs */
467 gigaset_free_dev_sysfs(cs); 497 gigaset_free_dev_sysfs(cs);
468 498
@@ -471,28 +501,20 @@ void gigaset_freecs(struct cardstate *cs)
471 gig_dbg(DEBUG_INIT, "clearing hw"); 501 gig_dbg(DEBUG_INIT, "clearing hw");
472 cs->ops->freecshw(cs); 502 cs->ops->freecshw(cs);
473 503
474 //FIXME cmdbuf
475
476 /* fall through */ 504 /* fall through */
477 case 2: /* error in initcshw */ 505 case 2: /* error in initcshw */
478 /* Deregister from LL */ 506 /* Deregister from LL */
479 make_invalid(cs, VALID_ID); 507 make_invalid(cs, VALID_ID);
480 gig_dbg(DEBUG_INIT, "clearing iif"); 508 gigaset_isdn_unregister(cs);
481 gigaset_i4l_cmd(cs, ISDN_STAT_UNLOAD);
482 509
483 /* fall through */ 510 /* fall through */
484 case 1: /* error when regestering to LL */ 511 case 1: /* error when registering to LL */
485 gig_dbg(DEBUG_INIT, "clearing at_state"); 512 gig_dbg(DEBUG_INIT, "clearing at_state");
486 clear_at_state(&cs->at_state); 513 clear_at_state(&cs->at_state);
487 dealloc_at_states(cs); 514 dealloc_at_states(cs);
488 515
489 /* fall through */ 516 /* fall through */
490 case 0: /* error in one call to initbcs */ 517 case 0: /* error in basic setup */
491 for (i = 0; i < cs->channels; ++i) {
492 gig_dbg(DEBUG_INIT, "clearing bcs[%d]", i);
493 gigaset_freebcs(cs->bcs + i);
494 }
495
496 clear_events(cs); 518 clear_events(cs);
497 gig_dbg(DEBUG_INIT, "freeing inbuf"); 519 gig_dbg(DEBUG_INIT, "freeing inbuf");
498 kfree(cs->inbuf); 520 kfree(cs->inbuf);
@@ -534,16 +556,13 @@ void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs,
534} 556}
535 557
536 558
537static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct bc_state *bcs, 559static void gigaset_inbuf_init(struct inbuf_t *inbuf, struct cardstate *cs)
538 struct cardstate *cs, int inputstate)
539/* inbuf->read must be allocated before! */ 560/* inbuf->read must be allocated before! */
540{ 561{
541 inbuf->head = 0; 562 inbuf->head = 0;
542 inbuf->tail = 0; 563 inbuf->tail = 0;
543 inbuf->cs = cs; 564 inbuf->cs = cs;
544 inbuf->bcs = bcs; /*base driver: NULL*/ 565 inbuf->inputstate = INS_command;
545 inbuf->rcvbuf = NULL;
546 inbuf->inputstate = inputstate;
547} 566}
548 567
549/** 568/**
@@ -599,7 +618,7 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
599{ 618{
600 int i; 619 int i;
601 620
602 bcs->tx_skb = NULL; //FIXME -> hw part 621 bcs->tx_skb = NULL;
603 622
604 skb_queue_head_init(&bcs->squeue); 623 skb_queue_head_init(&bcs->squeue);
605 624
@@ -618,13 +637,13 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
618 bcs->fcs = PPP_INITFCS; 637 bcs->fcs = PPP_INITFCS;
619 bcs->inputstate = 0; 638 bcs->inputstate = 0;
620 if (cs->ignoreframes) { 639 if (cs->ignoreframes) {
621 bcs->inputstate |= INS_skip_frame;
622 bcs->skb = NULL; 640 bcs->skb = NULL;
623 } else if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL) 641 } else {
624 skb_reserve(bcs->skb, HW_HDR_LEN); 642 bcs->skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len);
625 else { 643 if (bcs->skb != NULL)
626 pr_err("out of memory\n"); 644 skb_reserve(bcs->skb, cs->hw_hdr_len);
627 bcs->inputstate |= INS_skip_frame; 645 else
646 pr_err("out of memory\n");
628 } 647 }
629 648
630 bcs->channel = channel; 649 bcs->channel = channel;
@@ -645,8 +664,8 @@ static struct bc_state *gigaset_initbcs(struct bc_state *bcs,
645 gig_dbg(DEBUG_INIT, " failed"); 664 gig_dbg(DEBUG_INIT, " failed");
646 665
647 gig_dbg(DEBUG_INIT, " freeing bcs[%d]->skb", channel); 666 gig_dbg(DEBUG_INIT, " freeing bcs[%d]->skb", channel);
648 if (bcs->skb) 667 dev_kfree_skb(bcs->skb);
649 dev_kfree_skb(bcs->skb); 668 bcs->skb = NULL;
650 669
651 return NULL; 670 return NULL;
652} 671}
@@ -673,12 +692,13 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
673 int onechannel, int ignoreframes, 692 int onechannel, int ignoreframes,
674 int cidmode, const char *modulename) 693 int cidmode, const char *modulename)
675{ 694{
676 struct cardstate *cs = NULL; 695 struct cardstate *cs;
677 unsigned long flags; 696 unsigned long flags;
678 int i; 697 int i;
679 698
680 gig_dbg(DEBUG_INIT, "allocating cs"); 699 gig_dbg(DEBUG_INIT, "allocating cs");
681 if (!(cs = alloc_cs(drv))) { 700 cs = alloc_cs(drv);
701 if (!cs) {
682 pr_err("maximum number of devices exceeded\n"); 702 pr_err("maximum number of devices exceeded\n");
683 return NULL; 703 return NULL;
684 } 704 }
@@ -726,14 +746,6 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
726 cs->mode = M_UNKNOWN; 746 cs->mode = M_UNKNOWN;
727 cs->mstate = MS_UNINITIALIZED; 747 cs->mstate = MS_UNINITIALIZED;
728 748
729 for (i = 0; i < channels; ++i) {
730 gig_dbg(DEBUG_INIT, "setting up bcs[%d].read", i);
731 if (!gigaset_initbcs(cs->bcs + i, cs, i)) {
732 pr_err("could not allocate channel %d data\n", i);
733 goto error;
734 }
735 }
736
737 ++cs->cs_init; 749 ++cs->cs_init;
738 750
739 gig_dbg(DEBUG_INIT, "setting up at_state"); 751 gig_dbg(DEBUG_INIT, "setting up at_state");
@@ -743,10 +755,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
743 cs->cbytes = 0; 755 cs->cbytes = 0;
744 756
745 gig_dbg(DEBUG_INIT, "setting up inbuf"); 757 gig_dbg(DEBUG_INIT, "setting up inbuf");
746 if (onechannel) { //FIXME distinction necessary? 758 gigaset_inbuf_init(cs->inbuf, cs);
747 gigaset_inbuf_init(cs->inbuf, cs->bcs, cs, INS_command);
748 } else
749 gigaset_inbuf_init(cs->inbuf, NULL, cs, INS_command);
750 759
751 cs->connected = 0; 760 cs->connected = 0;
752 cs->isdn_up = 0; 761 cs->isdn_up = 0;
@@ -758,7 +767,7 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
758 cs->cmdbytes = 0; 767 cs->cmdbytes = 0;
759 768
760 gig_dbg(DEBUG_INIT, "setting up iif"); 769 gig_dbg(DEBUG_INIT, "setting up iif");
761 if (!gigaset_register_to_LL(cs, modulename)) { 770 if (!gigaset_isdn_register(cs, modulename)) {
762 pr_err("error registering ISDN device\n"); 771 pr_err("error registering ISDN device\n");
763 goto error; 772 goto error;
764 } 773 }
@@ -777,6 +786,15 @@ struct cardstate *gigaset_initcs(struct gigaset_driver *drv, int channels,
777 /* set up device sysfs */ 786 /* set up device sysfs */
778 gigaset_init_dev_sysfs(cs); 787 gigaset_init_dev_sysfs(cs);
779 788
789 /* set up channel data structures */
790 for (i = 0; i < channels; ++i) {
791 gig_dbg(DEBUG_INIT, "setting up bcs[%d]", i);
792 if (!gigaset_initbcs(cs->bcs + i, cs, i)) {
793 pr_err("could not allocate channel %d data\n", i);
794 goto error;
795 }
796 }
797
780 spin_lock_irqsave(&cs->lock, flags); 798 spin_lock_irqsave(&cs->lock, flags);
781 cs->running = 1; 799 cs->running = 1;
782 spin_unlock_irqrestore(&cs->lock, flags); 800 spin_unlock_irqrestore(&cs->lock, flags);
@@ -824,9 +842,10 @@ void gigaset_bcs_reinit(struct bc_state *bcs)
824 bcs->chstate = 0; 842 bcs->chstate = 0;
825 843
826 bcs->ignore = cs->ignoreframes; 844 bcs->ignore = cs->ignoreframes;
827 if (bcs->ignore) 845 if (bcs->ignore) {
828 bcs->inputstate |= INS_skip_frame; 846 dev_kfree_skb(bcs->skb);
829 847 bcs->skb = NULL;
848 }
830 849
831 cs->ops->reinitbcshw(bcs); 850 cs->ops->reinitbcshw(bcs);
832} 851}
@@ -847,8 +866,6 @@ static void cleanup_cs(struct cardstate *cs)
847 free_strings(&cs->at_state); 866 free_strings(&cs->at_state);
848 gigaset_at_init(&cs->at_state, NULL, cs, 0); 867 gigaset_at_init(&cs->at_state, NULL, cs, 0);
849 868
850 kfree(cs->inbuf->rcvbuf);
851 cs->inbuf->rcvbuf = NULL;
852 cs->inbuf->inputstate = INS_command; 869 cs->inbuf->inputstate = INS_command;
853 cs->inbuf->head = 0; 870 cs->inbuf->head = 0;
854 cs->inbuf->tail = 0; 871 cs->inbuf->tail = 0;
@@ -911,15 +928,13 @@ int gigaset_start(struct cardstate *cs)
911 cs->ops->baud_rate(cs, B115200); 928 cs->ops->baud_rate(cs, B115200);
912 cs->ops->set_line_ctrl(cs, CS8); 929 cs->ops->set_line_ctrl(cs, CS8);
913 cs->control_state = TIOCM_DTR|TIOCM_RTS; 930 cs->control_state = TIOCM_DTR|TIOCM_RTS;
914 } else {
915 //FIXME use some saved values?
916 } 931 }
917 932
918 cs->waiting = 1; 933 cs->waiting = 1;
919 934
920 if (!gigaset_add_event(cs, &cs->at_state, EV_START, NULL, 0, NULL)) { 935 if (!gigaset_add_event(cs, &cs->at_state, EV_START, NULL, 0, NULL)) {
921 cs->waiting = 0; 936 cs->waiting = 0;
922 //FIXME what should we do? 937 dev_err(cs->dev, "%s: out of memory\n", __func__);
923 goto error; 938 goto error;
924 } 939 }
925 940
@@ -959,7 +974,7 @@ int gigaset_shutdown(struct cardstate *cs)
959 cs->waiting = 1; 974 cs->waiting = 1;
960 975
961 if (!gigaset_add_event(cs, &cs->at_state, EV_SHUTDOWN, NULL, 0, NULL)) { 976 if (!gigaset_add_event(cs, &cs->at_state, EV_SHUTDOWN, NULL, 0, NULL)) {
962 //FIXME what should we do? 977 dev_err(cs->dev, "%s: out of memory\n", __func__);
963 goto exit; 978 goto exit;
964 } 979 }
965 980
@@ -990,7 +1005,7 @@ void gigaset_stop(struct cardstate *cs)
990 cs->waiting = 1; 1005 cs->waiting = 1;
991 1006
992 if (!gigaset_add_event(cs, &cs->at_state, EV_STOP, NULL, 0, NULL)) { 1007 if (!gigaset_add_event(cs, &cs->at_state, EV_STOP, NULL, 0, NULL)) {
993 //FIXME what should we do? 1008 dev_err(cs->dev, "%s: out of memory\n", __func__);
994 goto exit; 1009 goto exit;
995 } 1010 }
996 1011
diff --git a/drivers/isdn/gigaset/dummyll.c b/drivers/isdn/gigaset/dummyll.c
new file mode 100644
index 000000000000..5b27c996af6d
--- /dev/null
+++ b/drivers/isdn/gigaset/dummyll.c
@@ -0,0 +1,68 @@
1/*
2 * Dummy LL interface for the Gigaset driver
3 *
4 * Copyright (c) 2009 by Tilman Schmidt <tilman@imap.cc>.
5 *
6 * =====================================================================
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License as
9 * published by the Free Software Foundation; either version 2 of
10 * the License, or (at your option) any later version.
11 * =====================================================================
12 */
13
14#include "gigaset.h"
15
16void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
17{
18}
19EXPORT_SYMBOL_GPL(gigaset_skb_sent);
20
21void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
22{
23}
24EXPORT_SYMBOL_GPL(gigaset_skb_rcvd);
25
26void gigaset_isdn_rcv_err(struct bc_state *bcs)
27{
28}
29EXPORT_SYMBOL_GPL(gigaset_isdn_rcv_err);
30
31int gigaset_isdn_icall(struct at_state_t *at_state)
32{
33 return ICALL_IGNORE;
34}
35
36void gigaset_isdn_connD(struct bc_state *bcs)
37{
38}
39
40void gigaset_isdn_hupD(struct bc_state *bcs)
41{
42}
43
44void gigaset_isdn_connB(struct bc_state *bcs)
45{
46}
47
48void gigaset_isdn_hupB(struct bc_state *bcs)
49{
50}
51
52void gigaset_isdn_start(struct cardstate *cs)
53{
54}
55
56void gigaset_isdn_stop(struct cardstate *cs)
57{
58}
59
60int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
61{
62 pr_info("no ISDN subsystem interface\n");
63 return 1;
64}
65
66void gigaset_isdn_unregister(struct cardstate *cs)
67{
68}
diff --git a/drivers/isdn/gigaset/ev-layer.c b/drivers/isdn/gigaset/ev-layer.c
index cc768caa38f5..ddeb0456d202 100644
--- a/drivers/isdn/gigaset/ev-layer.c
+++ b/drivers/isdn/gigaset/ev-layer.c
@@ -40,8 +40,8 @@
40 40
41/* Possible ASCII responses */ 41/* Possible ASCII responses */
42#define RSP_OK 0 42#define RSP_OK 0
43//#define RSP_BUSY 1 43#define RSP_BUSY 1
44//#define RSP_CONNECT 2 44#define RSP_CONNECT 2
45#define RSP_ZGCI 3 45#define RSP_ZGCI 3
46#define RSP_RING 4 46#define RSP_RING 4
47#define RSP_ZAOC 5 47#define RSP_ZAOC 5
@@ -68,7 +68,6 @@
68#define RSP_ZHLC (RSP_STR + STR_ZHLC) 68#define RSP_ZHLC (RSP_STR + STR_ZHLC)
69#define RSP_ERROR -1 /* ERROR */ 69#define RSP_ERROR -1 /* ERROR */
70#define RSP_WRONG_CID -2 /* unknown cid in cmd */ 70#define RSP_WRONG_CID -2 /* unknown cid in cmd */
71//#define RSP_EMPTY -3
72#define RSP_UNKNOWN -4 /* unknown response */ 71#define RSP_UNKNOWN -4 /* unknown response */
73#define RSP_FAIL -5 /* internal error */ 72#define RSP_FAIL -5 /* internal error */
74#define RSP_INVAL -6 /* invalid response */ 73#define RSP_INVAL -6 /* invalid response */
@@ -76,9 +75,9 @@
76#define RSP_NONE -19 75#define RSP_NONE -19
77#define RSP_STRING -20 76#define RSP_STRING -20
78#define RSP_NULL -21 77#define RSP_NULL -21
79//#define RSP_RETRYFAIL -22 78#define RSP_RETRYFAIL -22
80//#define RSP_RETRY -23 79#define RSP_RETRY -23
81//#define RSP_SKIP -24 80#define RSP_SKIP -24
82#define RSP_INIT -27 81#define RSP_INIT -27
83#define RSP_ANY -26 82#define RSP_ANY -26
84#define RSP_LAST -28 83#define RSP_LAST -28
@@ -127,7 +126,6 @@
127#define ACT_NOTIFY_BC_UP 39 126#define ACT_NOTIFY_BC_UP 39
128#define ACT_DIAL 40 127#define ACT_DIAL 40
129#define ACT_ACCEPT 41 128#define ACT_ACCEPT 41
130#define ACT_PROTO_L2 42
131#define ACT_HUP 43 129#define ACT_HUP 43
132#define ACT_IF_LOCK 44 130#define ACT_IF_LOCK 44
133#define ACT_START 45 131#define ACT_START 45
@@ -159,229 +157,229 @@
159#define SEQ_UMMODE 11 157#define SEQ_UMMODE 11
160 158
161 159
162// 100: init, 200: dle0, 250:dle1, 300: get cid (dial), 350: "hup" (no cid), 400: hup, 500: reset, 600: dial, 700: ring 160/* 100: init, 200: dle0, 250:dle1, 300: get cid (dial), 350: "hup" (no cid),
161 * 400: hup, 500: reset, 600: dial, 700: ring */
163struct reply_t gigaset_tab_nocid[] = 162struct reply_t gigaset_tab_nocid[] =
164{ 163{
165 /* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */ 164/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout,
166 165 * action, command */
167 /* initialize device, set cid mode if possible */ 166
168 //{RSP_INIT, -1, -1,100, 900, 0, {ACT_TEST}}, 167/* initialize device, set cid mode if possible */
169 //{RSP_ERROR, 900,900, -1, 0, 0, {ACT_FAILINIT}}, 168{RSP_INIT, -1, -1, SEQ_INIT, 100, 1, {ACT_TIMEOUT} },
170 //{RSP_OK, 900,900, -1, 100, INIT_TIMEOUT, 169
171 // {ACT_TIMEOUT}}, 170{EV_TIMEOUT, 100, 100, -1, 101, 3, {0}, "Z\r"},
172 171{RSP_OK, 101, 103, -1, 120, 5, {ACT_GETSTRING},
173 {RSP_INIT, -1, -1,SEQ_INIT, 100, INIT_TIMEOUT, 172 "+GMR\r"},
174 {ACT_TIMEOUT}}, /* wait until device is ready */ 173
175 174{EV_TIMEOUT, 101, 101, -1, 102, 5, {0}, "Z\r"},
176 {EV_TIMEOUT, 100,100, -1, 101, 3, {0}, "Z\r"}, /* device in transparent mode? try to initialize it. */ 175{RSP_ERROR, 101, 101, -1, 102, 5, {0}, "Z\r"},
177 {RSP_OK, 101,103, -1, 120, 5, {ACT_GETSTRING}, "+GMR\r"}, /* get version */ 176
178 177{EV_TIMEOUT, 102, 102, -1, 108, 5, {ACT_SETDLE1},
179 {EV_TIMEOUT, 101,101, -1, 102, 5, {0}, "Z\r"}, /* timeout => try once again. */ 178 "^SDLE=0\r"},
180 {RSP_ERROR, 101,101, -1, 102, 5, {0}, "Z\r"}, /* error => try once again. */ 179{RSP_OK, 108, 108, -1, 104, -1},
181 180{RSP_ZDLE, 104, 104, 0, 103, 5, {0}, "Z\r"},
182 {EV_TIMEOUT, 102,102, -1, 108, 5, {ACT_SETDLE1}, "^SDLE=0\r"}, /* timeout => try again in DLE mode. */ 181{EV_TIMEOUT, 104, 104, -1, 0, 0, {ACT_FAILINIT} },
183 {RSP_OK, 108,108, -1, 104,-1}, 182{RSP_ERROR, 108, 108, -1, 0, 0, {ACT_FAILINIT} },
184 {RSP_ZDLE, 104,104, 0, 103, 5, {0}, "Z\r"}, 183
185 {EV_TIMEOUT, 104,104, -1, 0, 0, {ACT_FAILINIT}}, 184{EV_TIMEOUT, 108, 108, -1, 105, 2, {ACT_SETDLE0,
186 {RSP_ERROR, 108,108, -1, 0, 0, {ACT_FAILINIT}}, 185 ACT_HUPMODEM,
187 186 ACT_TIMEOUT} },
188 {EV_TIMEOUT, 108,108, -1, 105, 2, {ACT_SETDLE0, 187{EV_TIMEOUT, 105, 105, -1, 103, 5, {0}, "Z\r"},
189 ACT_HUPMODEM, 188
190 ACT_TIMEOUT}}, /* still timeout => connection in unimodem mode? */ 189{RSP_ERROR, 102, 102, -1, 107, 5, {0}, "^GETPRE\r"},
191 {EV_TIMEOUT, 105,105, -1, 103, 5, {0}, "Z\r"}, 190{RSP_OK, 107, 107, -1, 0, 0, {ACT_CONFIGMODE} },
192 191{RSP_ERROR, 107, 107, -1, 0, 0, {ACT_FAILINIT} },
193 {RSP_ERROR, 102,102, -1, 107, 5, {0}, "^GETPRE\r"}, /* ERROR on ATZ => maybe in config mode? */ 192{EV_TIMEOUT, 107, 107, -1, 0, 0, {ACT_FAILINIT} },
194 {RSP_OK, 107,107, -1, 0, 0, {ACT_CONFIGMODE}}, 193
195 {RSP_ERROR, 107,107, -1, 0, 0, {ACT_FAILINIT}}, 194{RSP_ERROR, 103, 103, -1, 0, 0, {ACT_FAILINIT} },
196 {EV_TIMEOUT, 107,107, -1, 0, 0, {ACT_FAILINIT}}, 195{EV_TIMEOUT, 103, 103, -1, 0, 0, {ACT_FAILINIT} },
197 196
198 {RSP_ERROR, 103,103, -1, 0, 0, {ACT_FAILINIT}}, 197{RSP_STRING, 120, 120, -1, 121, -1, {ACT_SETVER} },
199 {EV_TIMEOUT, 103,103, -1, 0, 0, {ACT_FAILINIT}}, 198
200 199{EV_TIMEOUT, 120, 121, -1, 0, 0, {ACT_FAILVER,
201 {RSP_STRING, 120,120, -1, 121,-1, {ACT_SETVER}}, 200 ACT_INIT} },
202 201{RSP_ERROR, 120, 121, -1, 0, 0, {ACT_FAILVER,
203 {EV_TIMEOUT, 120,121, -1, 0, 0, {ACT_FAILVER, ACT_INIT}}, 202 ACT_INIT} },
204 {RSP_ERROR, 120,121, -1, 0, 0, {ACT_FAILVER, ACT_INIT}}, 203{RSP_OK, 121, 121, -1, 0, 0, {ACT_GOTVER,
205 {RSP_OK, 121,121, -1, 0, 0, {ACT_GOTVER, ACT_INIT}}, 204 ACT_INIT} },
206 205
207 /* leave dle mode */ 206/* leave dle mode */
208 {RSP_INIT, 0, 0,SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"}, 207{RSP_INIT, 0, 0, SEQ_DLE0, 201, 5, {0}, "^SDLE=0\r"},
209 {RSP_OK, 201,201, -1, 202,-1}, 208{RSP_OK, 201, 201, -1, 202, -1},
210 {RSP_ZDLE, 202,202, 0, 0, 0, {ACT_DLE0}}, 209{RSP_ZDLE, 202, 202, 0, 0, 0, {ACT_DLE0} },
211 {RSP_NODEV, 200,249, -1, 0, 0, {ACT_FAKEDLE0}}, 210{RSP_NODEV, 200, 249, -1, 0, 0, {ACT_FAKEDLE0} },
212 {RSP_ERROR, 200,249, -1, 0, 0, {ACT_FAILDLE0}}, 211{RSP_ERROR, 200, 249, -1, 0, 0, {ACT_FAILDLE0} },
213 {EV_TIMEOUT, 200,249, -1, 0, 0, {ACT_FAILDLE0}}, 212{EV_TIMEOUT, 200, 249, -1, 0, 0, {ACT_FAILDLE0} },
214 213
215 /* enter dle mode */ 214/* enter dle mode */
216 {RSP_INIT, 0, 0,SEQ_DLE1, 251, 5, {0}, "^SDLE=1\r"}, 215{RSP_INIT, 0, 0, SEQ_DLE1, 251, 5, {0}, "^SDLE=1\r"},
217 {RSP_OK, 251,251, -1, 252,-1}, 216{RSP_OK, 251, 251, -1, 252, -1},
218 {RSP_ZDLE, 252,252, 1, 0, 0, {ACT_DLE1}}, 217{RSP_ZDLE, 252, 252, 1, 0, 0, {ACT_DLE1} },
219 {RSP_ERROR, 250,299, -1, 0, 0, {ACT_FAILDLE1}}, 218{RSP_ERROR, 250, 299, -1, 0, 0, {ACT_FAILDLE1} },
220 {EV_TIMEOUT, 250,299, -1, 0, 0, {ACT_FAILDLE1}}, 219{EV_TIMEOUT, 250, 299, -1, 0, 0, {ACT_FAILDLE1} },
221 220
222 /* incoming call */ 221/* incoming call */
223 {RSP_RING, -1, -1, -1, -1,-1, {ACT_RING}}, 222{RSP_RING, -1, -1, -1, -1, -1, {ACT_RING} },
224 223
225 /* get cid */ 224/* get cid */
226 //{RSP_INIT, 0, 0,300, 901, 0, {ACT_TEST}}, 225{RSP_INIT, 0, 0, SEQ_CID, 301, 5, {0}, "^SGCI?\r"},
227 //{RSP_ERROR, 901,901, -1, 0, 0, {ACT_FAILCID}}, 226{RSP_OK, 301, 301, -1, 302, -1},
228 //{RSP_OK, 901,901, -1, 301, 5, {0}, "^SGCI?\r"}, 227{RSP_ZGCI, 302, 302, -1, 0, 0, {ACT_CID} },
229 228{RSP_ERROR, 301, 349, -1, 0, 0, {ACT_FAILCID} },
230 {RSP_INIT, 0, 0,SEQ_CID, 301, 5, {0}, "^SGCI?\r"}, 229{EV_TIMEOUT, 301, 349, -1, 0, 0, {ACT_FAILCID} },
231 {RSP_OK, 301,301, -1, 302,-1}, 230
232 {RSP_ZGCI, 302,302, -1, 0, 0, {ACT_CID}}, 231/* enter cid mode */
233 {RSP_ERROR, 301,349, -1, 0, 0, {ACT_FAILCID}}, 232{RSP_INIT, 0, 0, SEQ_CIDMODE, 150, 5, {0}, "^SGCI=1\r"},
234 {EV_TIMEOUT, 301,349, -1, 0, 0, {ACT_FAILCID}}, 233{RSP_OK, 150, 150, -1, 0, 0, {ACT_CMODESET} },
235 234{RSP_ERROR, 150, 150, -1, 0, 0, {ACT_FAILCMODE} },
236 /* enter cid mode */ 235{EV_TIMEOUT, 150, 150, -1, 0, 0, {ACT_FAILCMODE} },
237 {RSP_INIT, 0, 0,SEQ_CIDMODE, 150, 5, {0}, "^SGCI=1\r"}, 236
238 {RSP_OK, 150,150, -1, 0, 0, {ACT_CMODESET}}, 237/* leave cid mode */
239 {RSP_ERROR, 150,150, -1, 0, 0, {ACT_FAILCMODE}}, 238{RSP_INIT, 0, 0, SEQ_UMMODE, 160, 5, {0}, "Z\r"},
240 {EV_TIMEOUT, 150,150, -1, 0, 0, {ACT_FAILCMODE}}, 239{RSP_OK, 160, 160, -1, 0, 0, {ACT_UMODESET} },
241 240{RSP_ERROR, 160, 160, -1, 0, 0, {ACT_FAILUMODE} },
242 /* leave cid mode */ 241{EV_TIMEOUT, 160, 160, -1, 0, 0, {ACT_FAILUMODE} },
243 //{RSP_INIT, 0, 0,SEQ_UMMODE, 160, 5, {0}, "^SGCI=0\r"}, 242
244 {RSP_INIT, 0, 0,SEQ_UMMODE, 160, 5, {0}, "Z\r"}, 243/* abort getting cid */
245 {RSP_OK, 160,160, -1, 0, 0, {ACT_UMODESET}}, 244{RSP_INIT, 0, 0, SEQ_NOCID, 0, 0, {ACT_ABORTCID} },
246 {RSP_ERROR, 160,160, -1, 0, 0, {ACT_FAILUMODE}}, 245
247 {EV_TIMEOUT, 160,160, -1, 0, 0, {ACT_FAILUMODE}}, 246/* reset */
248 247{RSP_INIT, 0, 0, SEQ_SHUTDOWN, 504, 5, {0}, "Z\r"},
249 /* abort getting cid */ 248{RSP_OK, 504, 504, -1, 0, 0, {ACT_SDOWN} },
250 {RSP_INIT, 0, 0,SEQ_NOCID, 0, 0, {ACT_ABORTCID}}, 249{RSP_ERROR, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} },
251 250{EV_TIMEOUT, 501, 599, -1, 0, 0, {ACT_FAILSDOWN} },
252 /* reset */ 251{RSP_NODEV, 501, 599, -1, 0, 0, {ACT_FAKESDOWN} },
253 {RSP_INIT, 0, 0,SEQ_SHUTDOWN, 504, 5, {0}, "Z\r"}, 252
254 {RSP_OK, 504,504, -1, 0, 0, {ACT_SDOWN}}, 253{EV_PROC_CIDMODE, -1, -1, -1, -1, -1, {ACT_PROC_CIDMODE} },
255 {RSP_ERROR, 501,599, -1, 0, 0, {ACT_FAILSDOWN}}, 254{EV_IF_LOCK, -1, -1, -1, -1, -1, {ACT_IF_LOCK} },
256 {EV_TIMEOUT, 501,599, -1, 0, 0, {ACT_FAILSDOWN}}, 255{EV_IF_VER, -1, -1, -1, -1, -1, {ACT_IF_VER} },
257 {RSP_NODEV, 501,599, -1, 0, 0, {ACT_FAKESDOWN}}, 256{EV_START, -1, -1, -1, -1, -1, {ACT_START} },
258 257{EV_STOP, -1, -1, -1, -1, -1, {ACT_STOP} },
259 {EV_PROC_CIDMODE,-1, -1, -1, -1,-1, {ACT_PROC_CIDMODE}}, //FIXME 258{EV_SHUTDOWN, -1, -1, -1, -1, -1, {ACT_SHUTDOWN} },
260 {EV_IF_LOCK, -1, -1, -1, -1,-1, {ACT_IF_LOCK}}, //FIXME 259
261 {EV_IF_VER, -1, -1, -1, -1,-1, {ACT_IF_VER}}, //FIXME 260/* misc. */
262 {EV_START, -1, -1, -1, -1,-1, {ACT_START}}, //FIXME 261{RSP_ERROR, -1, -1, -1, -1, -1, {ACT_ERROR} },
263 {EV_STOP, -1, -1, -1, -1,-1, {ACT_STOP}}, //FIXME 262{RSP_ZCFGT, -1, -1, -1, -1, -1, {ACT_DEBUG} },
264 {EV_SHUTDOWN, -1, -1, -1, -1,-1, {ACT_SHUTDOWN}}, //FIXME 263{RSP_ZCFG, -1, -1, -1, -1, -1, {ACT_DEBUG} },
265 264{RSP_ZLOG, -1, -1, -1, -1, -1, {ACT_DEBUG} },
266 /* misc. */ 265{RSP_ZMWI, -1, -1, -1, -1, -1, {ACT_DEBUG} },
267 {RSP_ERROR, -1, -1, -1, -1, -1, {ACT_ERROR} }, 266{RSP_ZABINFO, -1, -1, -1, -1, -1, {ACT_DEBUG} },
268 {RSP_EMPTY, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 267{RSP_ZSMLSTCHG, -1, -1, -1, -1, -1, {ACT_DEBUG} },
269 {RSP_ZCFGT, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 268
270 {RSP_ZCFG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 269{RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} },
271 {RSP_ZLOG, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 270{RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} },
272 {RSP_ZMWI, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 271{RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} },
273 {RSP_ZABINFO, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 272{RSP_LAST}
274 {RSP_ZSMLSTCHG,-1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME
275
276 {RSP_ZCAU, -1, -1, -1, -1,-1, {ACT_ZCAU}},
277 {RSP_NONE, -1, -1, -1, -1,-1, {ACT_DEBUG}},
278 {RSP_ANY, -1, -1, -1, -1,-1, {ACT_WARN}},
279 {RSP_LAST}
280}; 273};
281 274
282// 600: start dialing, 650: dial in progress, 800: connection is up, 700: ring, 400: hup, 750: accepted icall 275/* 600: start dialing, 650: dial in progress, 800: connection is up, 700: ring,
276 * 400: hup, 750: accepted icall */
283struct reply_t gigaset_tab_cid[] = 277struct reply_t gigaset_tab_cid[] =
284{ 278{
285 /* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout, action, command */ 279/* resp_code, min_ConState, max_ConState, parameter, new_ConState, timeout,
286 280 * action, command */
287 /* dial */ 281
288 {EV_DIAL, -1, -1, -1, -1,-1, {ACT_DIAL}}, //FIXME 282/* dial */
289 {RSP_INIT, 0, 0,SEQ_DIAL, 601, 5, {ACT_CMD+AT_BC}}, 283{EV_DIAL, -1, -1, -1, -1, -1, {ACT_DIAL} },
290 {RSP_OK, 601,601, -1, 602, 5, {ACT_CMD+AT_HLC}}, 284{RSP_INIT, 0, 0, SEQ_DIAL, 601, 5, {ACT_CMD+AT_BC} },
291 {RSP_NULL, 602,602, -1, 603, 5, {ACT_CMD+AT_PROTO}}, 285{RSP_OK, 601, 601, -1, 602, 5, {ACT_CMD+AT_HLC} },
292 {RSP_OK, 602,602, -1, 603, 5, {ACT_CMD+AT_PROTO}}, 286{RSP_NULL, 602, 602, -1, 603, 5, {ACT_CMD+AT_PROTO} },
293 {RSP_OK, 603,603, -1, 604, 5, {ACT_CMD+AT_TYPE}}, 287{RSP_OK, 602, 602, -1, 603, 5, {ACT_CMD+AT_PROTO} },
294 {RSP_OK, 604,604, -1, 605, 5, {ACT_CMD+AT_MSN}}, 288{RSP_OK, 603, 603, -1, 604, 5, {ACT_CMD+AT_TYPE} },
295 {RSP_OK, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}}, 289{RSP_OK, 604, 604, -1, 605, 5, {ACT_CMD+AT_MSN} },
296 {RSP_NULL, 605,605, -1, 606, 5, {ACT_CMD+AT_ISO}}, 290{RSP_NULL, 605, 605, -1, 606, 5, {ACT_CMD+AT_CLIP} },
297 {RSP_OK, 606,606, -1, 607, 5, {0}, "+VLS=17\r"}, 291{RSP_OK, 605, 605, -1, 606, 5, {ACT_CMD+AT_CLIP} },
298 {RSP_OK, 607,607, -1, 608,-1}, 292{RSP_NULL, 606, 606, -1, 607, 5, {ACT_CMD+AT_ISO} },
299 {RSP_ZSAU, 608,608,ZSAU_PROCEEDING, 609, 5, {ACT_CMD+AT_DIAL}}, 293{RSP_OK, 606, 606, -1, 607, 5, {ACT_CMD+AT_ISO} },
300 {RSP_OK, 609,609, -1, 650, 0, {ACT_DIALING}}, 294{RSP_OK, 607, 607, -1, 608, 5, {0}, "+VLS=17\r"},
301 295{RSP_OK, 608, 608, -1, 609, -1},
302 {RSP_ERROR, 601,609, -1, 0, 0, {ACT_ABORTDIAL}}, 296{RSP_ZSAU, 609, 609, ZSAU_PROCEEDING, 610, 5, {ACT_CMD+AT_DIAL} },
303 {EV_TIMEOUT, 601,609, -1, 0, 0, {ACT_ABORTDIAL}}, 297{RSP_OK, 610, 610, -1, 650, 0, {ACT_DIALING} },
304 298
305 /* optional dialing responses */ 299{RSP_ERROR, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} },
306 {EV_BC_OPEN, 650,650, -1, 651,-1}, 300{EV_TIMEOUT, 601, 610, -1, 0, 0, {ACT_ABORTDIAL} },
307 {RSP_ZVLS, 608,651, 17, -1,-1, {ACT_DEBUG}}, 301
308 {RSP_ZCTP, 609,651, -1, -1,-1, {ACT_DEBUG}}, 302/* optional dialing responses */
309 {RSP_ZCPN, 609,651, -1, -1,-1, {ACT_DEBUG}}, 303{EV_BC_OPEN, 650, 650, -1, 651, -1},
310 {RSP_ZSAU, 650,651,ZSAU_CALL_DELIVERED, -1,-1, {ACT_DEBUG}}, 304{RSP_ZVLS, 609, 651, 17, -1, -1, {ACT_DEBUG} },
311 305{RSP_ZCTP, 610, 651, -1, -1, -1, {ACT_DEBUG} },
312 /* connect */ 306{RSP_ZCPN, 610, 651, -1, -1, -1, {ACT_DEBUG} },
313 {RSP_ZSAU, 650,650,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, 307{RSP_ZSAU, 650, 651, ZSAU_CALL_DELIVERED, -1, -1, {ACT_DEBUG} },
314 {RSP_ZSAU, 651,651,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT, 308
315 ACT_NOTIFY_BC_UP}}, 309/* connect */
316 {RSP_ZSAU, 750,750,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT}}, 310{RSP_ZSAU, 650, 650, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} },
317 {RSP_ZSAU, 751,751,ZSAU_ACTIVE, 800,-1, {ACT_CONNECT, 311{RSP_ZSAU, 651, 651, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT,
318 ACT_NOTIFY_BC_UP}}, 312 ACT_NOTIFY_BC_UP} },
319 {EV_BC_OPEN, 800,800, -1, 800,-1, {ACT_NOTIFY_BC_UP}}, 313{RSP_ZSAU, 750, 750, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT} },
320 314{RSP_ZSAU, 751, 751, ZSAU_ACTIVE, 800, -1, {ACT_CONNECT,
321 /* remote hangup */ 315 ACT_NOTIFY_BC_UP} },
322 {RSP_ZSAU, 650,651,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT}}, 316{EV_BC_OPEN, 800, 800, -1, 800, -1, {ACT_NOTIFY_BC_UP} },
323 {RSP_ZSAU, 750,751,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}}, 317
324 {RSP_ZSAU, 800,800,ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP}}, 318/* remote hangup */
325 319{RSP_ZSAU, 650, 651, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEREJECT} },
326 /* hangup */ 320{RSP_ZSAU, 750, 751, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} },
327 {EV_HUP, -1, -1, -1, -1,-1, {ACT_HUP}}, //FIXME 321{RSP_ZSAU, 800, 800, ZSAU_DISCONNECT_IND, 0, 0, {ACT_REMOTEHUP} },
328 {RSP_INIT, -1, -1,SEQ_HUP, 401, 5, {0}, "+VLS=0\r"}, /* hang up */ //-1,-1? 322
329 {RSP_OK, 401,401, -1, 402, 5}, 323/* hangup */
330 {RSP_ZVLS, 402,402, 0, 403, 5}, 324{EV_HUP, -1, -1, -1, -1, -1, {ACT_HUP} },
331 {RSP_ZSAU, 403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} }, 325{RSP_INIT, -1, -1, SEQ_HUP, 401, 5, {0}, "+VLS=0\r"},
332 {RSP_ZSAU, 403, 403, ZSAU_NULL, 0, 0, {ACT_DISCONNECT} }, 326{RSP_OK, 401, 401, -1, 402, 5},
333 {RSP_NODEV, 401, 403, -1, 0, 0, {ACT_FAKEHUP} }, 327{RSP_ZVLS, 402, 402, 0, 403, 5},
334 {RSP_ERROR, 401,401, -1, 0, 0, {ACT_ABORTHUP}}, 328{RSP_ZSAU, 403, 403, ZSAU_DISCONNECT_REQ, -1, -1, {ACT_DEBUG} },
335 {EV_TIMEOUT, 401,403, -1, 0, 0, {ACT_ABORTHUP}}, 329{RSP_ZSAU, 403, 403, ZSAU_NULL, 0, 0, {ACT_DISCONNECT} },
336 330{RSP_NODEV, 401, 403, -1, 0, 0, {ACT_FAKEHUP} },
337 {EV_BC_CLOSED, 0, 0, -1, 0,-1, {ACT_NOTIFY_BC_DOWN}}, //FIXME new constate + timeout 331{RSP_ERROR, 401, 401, -1, 0, 0, {ACT_ABORTHUP} },
338 332{EV_TIMEOUT, 401, 403, -1, 0, 0, {ACT_ABORTHUP} },
339 /* ring */ 333
340 {RSP_ZBC, 700,700, -1, -1,-1, {0}}, 334{EV_BC_CLOSED, 0, 0, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} },
341 {RSP_ZHLC, 700,700, -1, -1,-1, {0}}, 335
342 {RSP_NMBR, 700,700, -1, -1,-1, {0}}, 336/* ring */
343 {RSP_ZCPN, 700,700, -1, -1,-1, {0}}, 337{RSP_ZBC, 700, 700, -1, -1, -1, {0} },
344 {RSP_ZCTP, 700,700, -1, -1,-1, {0}}, 338{RSP_ZHLC, 700, 700, -1, -1, -1, {0} },
345 {EV_TIMEOUT, 700,700, -1, 720,720, {ACT_ICALL}}, 339{RSP_NMBR, 700, 700, -1, -1, -1, {0} },
346 {EV_BC_CLOSED,720,720, -1, 0,-1, {ACT_NOTIFY_BC_DOWN}}, 340{RSP_ZCPN, 700, 700, -1, -1, -1, {0} },
347 341{RSP_ZCTP, 700, 700, -1, -1, -1, {0} },
348 /*accept icall*/ 342{EV_TIMEOUT, 700, 700, -1, 720, 720, {ACT_ICALL} },
349 {EV_ACCEPT, -1, -1, -1, -1,-1, {ACT_ACCEPT}}, //FIXME 343{EV_BC_CLOSED, 720, 720, -1, 0, -1, {ACT_NOTIFY_BC_DOWN} },
350 {RSP_INIT, 720,720,SEQ_ACCEPT, 721, 5, {ACT_CMD+AT_PROTO}}, 344
351 {RSP_OK, 721,721, -1, 722, 5, {ACT_CMD+AT_ISO}}, 345/*accept icall*/
352 {RSP_OK, 722,722, -1, 723, 5, {0}, "+VLS=17\r"}, /* set "Endgeraetemodus" */ 346{EV_ACCEPT, -1, -1, -1, -1, -1, {ACT_ACCEPT} },
353 {RSP_OK, 723,723, -1, 724, 5, {0}}, 347{RSP_INIT, 720, 720, SEQ_ACCEPT, 721, 5, {ACT_CMD+AT_PROTO} },
354 {RSP_ZVLS, 724,724, 17, 750,50, {ACT_ACCEPTED}}, 348{RSP_OK, 721, 721, -1, 722, 5, {ACT_CMD+AT_ISO} },
355 {RSP_ERROR, 721,729, -1, 0, 0, {ACT_ABORTACCEPT}}, 349{RSP_OK, 722, 722, -1, 723, 5, {0}, "+VLS=17\r"},
356 {EV_TIMEOUT, 721,729, -1, 0, 0, {ACT_ABORTACCEPT}}, 350{RSP_OK, 723, 723, -1, 724, 5, {0} },
357 {RSP_ZSAU, 700,729,ZSAU_NULL, 0, 0, {ACT_ABORTACCEPT}}, 351{RSP_ZVLS, 724, 724, 17, 750, 50, {ACT_ACCEPTED} },
358 {RSP_ZSAU, 700,729,ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT}}, 352{RSP_ERROR, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} },
359 {RSP_ZSAU, 700,729,ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT}}, 353{EV_TIMEOUT, 721, 729, -1, 0, 0, {ACT_ABORTACCEPT} },
360 354{RSP_ZSAU, 700, 729, ZSAU_NULL, 0, 0, {ACT_ABORTACCEPT} },
361 {EV_BC_OPEN, 750,750, -1, 751,-1}, 355{RSP_ZSAU, 700, 729, ZSAU_ACTIVE, 0, 0, {ACT_ABORTACCEPT} },
362 {EV_TIMEOUT, 750,751, -1, 0, 0, {ACT_CONNTIMEOUT}}, 356{RSP_ZSAU, 700, 729, ZSAU_DISCONNECT_IND, 0, 0, {ACT_ABORTACCEPT} },
363 357
364 /* B channel closed (general case) */ 358{EV_BC_OPEN, 750, 750, -1, 751, -1},
365 {EV_BC_CLOSED, -1, -1, -1, -1,-1, {ACT_NOTIFY_BC_DOWN}}, //FIXME 359{EV_TIMEOUT, 750, 751, -1, 0, 0, {ACT_CONNTIMEOUT} },
366 360
367 /* misc. */ 361/* B channel closed (general case) */
368 {EV_PROTO_L2, -1, -1, -1, -1,-1, {ACT_PROTO_L2}}, //FIXME 362{EV_BC_CLOSED, -1, -1, -1, -1, -1, {ACT_NOTIFY_BC_DOWN} },
369 363
370 {RSP_ZCON, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 364/* misc. */
371 {RSP_ZCCR, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 365{RSP_ZCON, -1, -1, -1, -1, -1, {ACT_DEBUG} },
372 {RSP_ZAOC, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 366{RSP_ZCCR, -1, -1, -1, -1, -1, {ACT_DEBUG} },
373 {RSP_ZCSTR, -1, -1, -1, -1,-1, {ACT_DEBUG}}, //FIXME 367{RSP_ZAOC, -1, -1, -1, -1, -1, {ACT_DEBUG} },
374 368{RSP_ZCSTR, -1, -1, -1, -1, -1, {ACT_DEBUG} },
375 {RSP_ZCAU, -1, -1, -1, -1,-1, {ACT_ZCAU}}, 369
376 {RSP_NONE, -1, -1, -1, -1,-1, {ACT_DEBUG}}, 370{RSP_ZCAU, -1, -1, -1, -1, -1, {ACT_ZCAU} },
377 {RSP_ANY, -1, -1, -1, -1,-1, {ACT_WARN}}, 371{RSP_NONE, -1, -1, -1, -1, -1, {ACT_DEBUG} },
378 {RSP_LAST} 372{RSP_ANY, -1, -1, -1, -1, -1, {ACT_WARN} },
373{RSP_LAST}
379}; 374};
380 375
381 376
382static const struct resp_type_t resp_type[] = 377static const struct resp_type_t {
378 unsigned char *response;
379 int resp_code;
380 int type;
381} resp_type[] =
383{ 382{
384 /*{"", RSP_EMPTY, RT_NOTHING},*/
385 {"OK", RSP_OK, RT_NOTHING}, 383 {"OK", RSP_OK, RT_NOTHING},
386 {"ERROR", RSP_ERROR, RT_NOTHING}, 384 {"ERROR", RSP_ERROR, RT_NOTHING},
387 {"ZSAU", RSP_ZSAU, RT_ZSAU}, 385 {"ZSAU", RSP_ZSAU, RT_ZSAU},
@@ -405,7 +403,21 @@ static const struct resp_type_t resp_type[] =
405 {"ZLOG", RSP_ZLOG, RT_NOTHING}, 403 {"ZLOG", RSP_ZLOG, RT_NOTHING},
406 {"ZABINFO", RSP_ZABINFO, RT_NOTHING}, 404 {"ZABINFO", RSP_ZABINFO, RT_NOTHING},
407 {"ZSMLSTCHG", RSP_ZSMLSTCHG, RT_NOTHING}, 405 {"ZSMLSTCHG", RSP_ZSMLSTCHG, RT_NOTHING},
408 {NULL,0,0} 406 {NULL, 0, 0}
407};
408
409static const struct zsau_resp_t {
410 unsigned char *str;
411 int code;
412} zsau_resp[] =
413{
414 {"OUTGOING_CALL_PROCEEDING", ZSAU_OUTGOING_CALL_PROCEEDING},
415 {"CALL_DELIVERED", ZSAU_CALL_DELIVERED},
416 {"ACTIVE", ZSAU_ACTIVE},
417 {"DISCONNECT_IND", ZSAU_DISCONNECT_IND},
418 {"NULL", ZSAU_NULL},
419 {"DISCONNECT_REQ", ZSAU_DISCONNECT_REQ},
420 {NULL, ZSAU_UNKNOWN}
409}; 421};
410 422
411/* 423/*
@@ -470,7 +482,6 @@ static int cid_of_response(char *s)
470 if (cid < 1 || cid > 65535) 482 if (cid < 1 || cid > 65535)
471 return -1; /* CID out of range */ 483 return -1; /* CID out of range */
472 return cid; 484 return cid;
473 //FIXME is ;<digit>+ at end of non-CID response really impossible?
474} 485}
475 486
476/** 487/**
@@ -487,6 +498,7 @@ void gigaset_handle_modem_response(struct cardstate *cs)
487 int params; 498 int params;
488 int i, j; 499 int i, j;
489 const struct resp_type_t *rt; 500 const struct resp_type_t *rt;
501 const struct zsau_resp_t *zr;
490 int curarg; 502 int curarg;
491 unsigned long flags; 503 unsigned long flags;
492 unsigned next, tail, head; 504 unsigned next, tail, head;
@@ -613,24 +625,14 @@ void gigaset_handle_modem_response(struct cardstate *cs)
613 event->parameter = ZSAU_NONE; 625 event->parameter = ZSAU_NONE;
614 break; 626 break;
615 } 627 }
616 if (!strcmp(argv[curarg], "OUTGOING_CALL_PROCEEDING")) 628 for (zr = zsau_resp; zr->str; ++zr)
617 event->parameter = ZSAU_OUTGOING_CALL_PROCEEDING; 629 if (!strcmp(argv[curarg], zr->str))
618 else if (!strcmp(argv[curarg], "CALL_DELIVERED")) 630 break;
619 event->parameter = ZSAU_CALL_DELIVERED; 631 event->parameter = zr->code;
620 else if (!strcmp(argv[curarg], "ACTIVE")) 632 if (!zr->str)
621 event->parameter = ZSAU_ACTIVE;
622 else if (!strcmp(argv[curarg], "DISCONNECT_IND"))
623 event->parameter = ZSAU_DISCONNECT_IND;
624 else if (!strcmp(argv[curarg], "NULL"))
625 event->parameter = ZSAU_NULL;
626 else if (!strcmp(argv[curarg], "DISCONNECT_REQ"))
627 event->parameter = ZSAU_DISCONNECT_REQ;
628 else {
629 event->parameter = ZSAU_UNKNOWN;
630 dev_warn(cs->dev, 633 dev_warn(cs->dev,
631 "%s: unknown parameter %s after ZSAU\n", 634 "%s: unknown parameter %s after ZSAU\n",
632 __func__, argv[curarg]); 635 __func__, argv[curarg]);
633 }
634 ++curarg; 636 ++curarg;
635 break; 637 break;
636 case RT_STRING: 638 case RT_STRING:
@@ -714,7 +716,7 @@ static void disconnect(struct at_state_t **at_state_p)
714 /* notify LL */ 716 /* notify LL */
715 if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) { 717 if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
716 bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL); 718 bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
717 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DHUP); 719 gigaset_isdn_hupD(bcs);
718 } 720 }
719 } else { 721 } else {
720 /* no B channel assigned: just deallocate */ 722 /* no B channel assigned: just deallocate */
@@ -872,12 +874,12 @@ static void bchannel_down(struct bc_state *bcs)
872{ 874{
873 if (bcs->chstate & CHS_B_UP) { 875 if (bcs->chstate & CHS_B_UP) {
874 bcs->chstate &= ~CHS_B_UP; 876 bcs->chstate &= ~CHS_B_UP;
875 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BHUP); 877 gigaset_isdn_hupB(bcs);
876 } 878 }
877 879
878 if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) { 880 if (bcs->chstate & (CHS_D_UP | CHS_NOTIFY_LL)) {
879 bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL); 881 bcs->chstate &= ~(CHS_D_UP | CHS_NOTIFY_LL);
880 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DHUP); 882 gigaset_isdn_hupD(bcs);
881 } 883 }
882 884
883 gigaset_free_channel(bcs); 885 gigaset_free_channel(bcs);
@@ -894,15 +896,17 @@ static void bchannel_up(struct bc_state *bcs)
894 } 896 }
895 897
896 bcs->chstate |= CHS_B_UP; 898 bcs->chstate |= CHS_B_UP;
897 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BCONN); 899 gigaset_isdn_connB(bcs);
898} 900}
899 901
900static void start_dial(struct at_state_t *at_state, void *data, unsigned seq_index) 902static void start_dial(struct at_state_t *at_state, void *data,
903 unsigned seq_index)
901{ 904{
902 struct bc_state *bcs = at_state->bcs; 905 struct bc_state *bcs = at_state->bcs;
903 struct cardstate *cs = at_state->cs; 906 struct cardstate *cs = at_state->cs;
904 int retval; 907 char **commands = data;
905 unsigned long flags; 908 unsigned long flags;
909 int i;
906 910
907 bcs->chstate |= CHS_NOTIFY_LL; 911 bcs->chstate |= CHS_NOTIFY_LL;
908 912
@@ -913,10 +917,10 @@ static void start_dial(struct at_state_t *at_state, void *data, unsigned seq_ind
913 } 917 }
914 spin_unlock_irqrestore(&cs->lock, flags); 918 spin_unlock_irqrestore(&cs->lock, flags);
915 919
916 retval = gigaset_isdn_setup_dial(at_state, data); 920 for (i = 0; i < AT_NUM; ++i) {
917 if (retval != 0) 921 kfree(bcs->commands[i]);
918 goto error; 922 bcs->commands[i] = commands[i];
919 923 }
920 924
921 at_state->pending_commands |= PC_CID; 925 at_state->pending_commands |= PC_CID;
922 gig_dbg(DEBUG_CMD, "Scheduling PC_CID"); 926 gig_dbg(DEBUG_CMD, "Scheduling PC_CID");
@@ -924,6 +928,10 @@ static void start_dial(struct at_state_t *at_state, void *data, unsigned seq_ind
924 return; 928 return;
925 929
926error: 930error:
931 for (i = 0; i < AT_NUM; ++i) {
932 kfree(commands[i]);
933 commands[i] = NULL;
934 }
927 at_state->pending_commands |= PC_NOCID; 935 at_state->pending_commands |= PC_NOCID;
928 gig_dbg(DEBUG_CMD, "Scheduling PC_NOCID"); 936 gig_dbg(DEBUG_CMD, "Scheduling PC_NOCID");
929 cs->commands_pending = 1; 937 cs->commands_pending = 1;
@@ -933,20 +941,31 @@ error:
933static void start_accept(struct at_state_t *at_state) 941static void start_accept(struct at_state_t *at_state)
934{ 942{
935 struct cardstate *cs = at_state->cs; 943 struct cardstate *cs = at_state->cs;
936 int retval; 944 struct bc_state *bcs = at_state->bcs;
945 int i;
937 946
938 retval = gigaset_isdn_setup_accept(at_state); 947 for (i = 0; i < AT_NUM; ++i) {
948 kfree(bcs->commands[i]);
949 bcs->commands[i] = NULL;
950 }
939 951
940 if (retval == 0) { 952 bcs->commands[AT_PROTO] = kmalloc(9, GFP_ATOMIC);
941 at_state->pending_commands |= PC_ACCEPT; 953 bcs->commands[AT_ISO] = kmalloc(9, GFP_ATOMIC);
942 gig_dbg(DEBUG_CMD, "Scheduling PC_ACCEPT"); 954 if (!bcs->commands[AT_PROTO] || !bcs->commands[AT_ISO]) {
943 cs->commands_pending = 1; 955 dev_err(at_state->cs->dev, "out of memory\n");
944 } else {
945 /* error reset */ 956 /* error reset */
946 at_state->pending_commands |= PC_HUP; 957 at_state->pending_commands |= PC_HUP;
947 gig_dbg(DEBUG_CMD, "Scheduling PC_HUP"); 958 gig_dbg(DEBUG_CMD, "Scheduling PC_HUP");
948 cs->commands_pending = 1; 959 cs->commands_pending = 1;
960 return;
949 } 961 }
962
963 snprintf(bcs->commands[AT_PROTO], 9, "^SBPR=%u\r", bcs->proto2);
964 snprintf(bcs->commands[AT_ISO], 9, "^SISO=%u\r", bcs->channel + 1);
965
966 at_state->pending_commands |= PC_ACCEPT;
967 gig_dbg(DEBUG_CMD, "Scheduling PC_ACCEPT");
968 cs->commands_pending = 1;
950} 969}
951 970
952static void do_start(struct cardstate *cs) 971static void do_start(struct cardstate *cs)
@@ -957,9 +976,7 @@ static void do_start(struct cardstate *cs)
957 schedule_init(cs, MS_INIT); 976 schedule_init(cs, MS_INIT);
958 977
959 cs->isdn_up = 1; 978 cs->isdn_up = 1;
960 gigaset_i4l_cmd(cs, ISDN_STAT_RUN); 979 gigaset_isdn_start(cs);
961 // FIXME: not in locked mode
962 // FIXME 2: only after init sequence
963 980
964 cs->waiting = 0; 981 cs->waiting = 0;
965 wake_up(&cs->waitqueue); 982 wake_up(&cs->waitqueue);
@@ -975,7 +992,7 @@ static void finish_shutdown(struct cardstate *cs)
975 /* Tell the LL that the device is not available .. */ 992 /* Tell the LL that the device is not available .. */
976 if (cs->isdn_up) { 993 if (cs->isdn_up) {
977 cs->isdn_up = 0; 994 cs->isdn_up = 0;
978 gigaset_i4l_cmd(cs, ISDN_STAT_STOP); 995 gigaset_isdn_stop(cs);
979 } 996 }
980 997
981 /* The rest is done by cleanup_cs () in user mode. */ 998 /* The rest is done by cleanup_cs () in user mode. */
@@ -1113,7 +1130,6 @@ static int do_lock(struct cardstate *cs)
1113 1130
1114 break; 1131 break;
1115 case MS_LOCKED: 1132 case MS_LOCKED:
1116 //retval = -EACCES;
1117 break; 1133 break;
1118 default: 1134 default:
1119 return -EBUSY; 1135 return -EBUSY;
@@ -1276,7 +1292,7 @@ static void do_action(int action, struct cardstate *cs,
1276 break; 1292 break;
1277 } 1293 }
1278 bcs->chstate |= CHS_D_UP; 1294 bcs->chstate |= CHS_D_UP;
1279 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN); 1295 gigaset_isdn_connD(bcs);
1280 cs->ops->init_bchannel(bcs); 1296 cs->ops->init_bchannel(bcs);
1281 break; 1297 break;
1282 case ACT_DLE1: 1298 case ACT_DLE1:
@@ -1284,7 +1300,7 @@ static void do_action(int action, struct cardstate *cs,
1284 bcs = cs->bcs + cs->curchannel; 1300 bcs = cs->bcs + cs->curchannel;
1285 1301
1286 bcs->chstate |= CHS_D_UP; 1302 bcs->chstate |= CHS_D_UP;
1287 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN); 1303 gigaset_isdn_connD(bcs);
1288 cs->ops->init_bchannel(bcs); 1304 cs->ops->init_bchannel(bcs);
1289 break; 1305 break;
1290 case ACT_FAKEHUP: 1306 case ACT_FAKEHUP:
@@ -1369,7 +1385,7 @@ static void do_action(int action, struct cardstate *cs,
1369 cs->cur_at_seq = SEQ_NONE; 1385 cs->cur_at_seq = SEQ_NONE;
1370 break; 1386 break;
1371 1387
1372 case ACT_ABORTACCEPT: /* hangup/error/timeout during ICALL processing */ 1388 case ACT_ABORTACCEPT: /* hangup/error/timeout during ICALL procssng */
1373 disconnect(p_at_state); 1389 disconnect(p_at_state);
1374 break; 1390 break;
1375 1391
@@ -1443,17 +1459,6 @@ static void do_action(int action, struct cardstate *cs,
1443 __func__, at_state->ConState); 1459 __func__, at_state->ConState);
1444 cs->cur_at_seq = SEQ_NONE; 1460 cs->cur_at_seq = SEQ_NONE;
1445 break; 1461 break;
1446#ifdef CONFIG_GIGASET_DEBUG
1447 case ACT_TEST:
1448 {
1449 static int count = 3; //2; //1;
1450 *p_genresp = 1;
1451 *p_resp_code = count ? RSP_ERROR : RSP_OK;
1452 if (count > 0)
1453 --count;
1454 }
1455 break;
1456#endif
1457 case ACT_DEBUG: 1462 case ACT_DEBUG:
1458 gig_dbg(DEBUG_ANY, "%s: resp_code %d in ConState %d", 1463 gig_dbg(DEBUG_ANY, "%s: resp_code %d in ConState %d",
1459 __func__, ev->type, at_state->ConState); 1464 __func__, ev->type, at_state->ConState);
@@ -1474,11 +1479,6 @@ static void do_action(int action, struct cardstate *cs,
1474 case ACT_ACCEPT: 1479 case ACT_ACCEPT:
1475 start_accept(at_state); 1480 start_accept(at_state);
1476 break; 1481 break;
1477 case ACT_PROTO_L2:
1478 gig_dbg(DEBUG_CMD, "set protocol to %u",
1479 (unsigned) ev->parameter);
1480 at_state->bcs->proto2 = ev->parameter;
1481 break;
1482 case ACT_HUP: 1482 case ACT_HUP:
1483 at_state->pending_commands |= PC_HUP; 1483 at_state->pending_commands |= PC_HUP;
1484 cs->commands_pending = 1; 1484 cs->commands_pending = 1;
@@ -1493,7 +1493,7 @@ static void do_action(int action, struct cardstate *cs,
1493 do_start(cs); 1493 do_start(cs);
1494 break; 1494 break;
1495 1495
1496 /* events from the interface */ // FIXME without ACT_xxxx? 1496 /* events from the interface */
1497 case ACT_IF_LOCK: 1497 case ACT_IF_LOCK:
1498 cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs); 1498 cs->cmd_result = ev->parameter ? do_lock(cs) : do_unlock(cs);
1499 cs->waiting = 0; 1499 cs->waiting = 0;
@@ -1512,7 +1512,7 @@ static void do_action(int action, struct cardstate *cs,
1512 wake_up(&cs->waitqueue); 1512 wake_up(&cs->waitqueue);
1513 break; 1513 break;
1514 1514
1515 /* events from the proc file system */ // FIXME without ACT_xxxx? 1515 /* events from the proc file system */
1516 case ACT_PROC_CIDMODE: 1516 case ACT_PROC_CIDMODE:
1517 spin_lock_irqsave(&cs->lock, flags); 1517 spin_lock_irqsave(&cs->lock, flags);
1518 if (ev->parameter != cs->cidmode) { 1518 if (ev->parameter != cs->cidmode) {
@@ -1649,7 +1649,8 @@ static void process_event(struct cardstate *cs, struct event_t *ev)
1649 for (curact = 0; curact < MAXACT; ++curact) { 1649 for (curact = 0; curact < MAXACT; ++curact) {
1650 /* The row tells us what we should do .. 1650 /* The row tells us what we should do ..
1651 */ 1651 */
1652 do_action(rep->action[curact], cs, bcs, &at_state, &p_command, &genresp, &resp_code, ev); 1652 do_action(rep->action[curact], cs, bcs, &at_state, &p_command,
1653 &genresp, &resp_code, ev);
1653 if (!at_state) 1654 if (!at_state)
1654 break; /* may be freed after disconnect */ 1655 break; /* may be freed after disconnect */
1655 } 1656 }
@@ -1661,13 +1662,14 @@ static void process_event(struct cardstate *cs, struct event_t *ev)
1661 1662
1662 if (genresp) { 1663 if (genresp) {
1663 spin_lock_irqsave(&cs->lock, flags); 1664 spin_lock_irqsave(&cs->lock, flags);
1664 at_state->timer_expires = 0; //FIXME 1665 at_state->timer_expires = 0;
1665 at_state->timer_active = 0; //FIXME 1666 at_state->timer_active = 0;
1666 spin_unlock_irqrestore(&cs->lock, flags); 1667 spin_unlock_irqrestore(&cs->lock, flags);
1667 gigaset_add_event(cs, at_state, resp_code, NULL, 0, NULL); 1668 gigaset_add_event(cs, at_state, resp_code,
1669 NULL, 0, NULL);
1668 } else { 1670 } else {
1669 /* Send command to modem if not NULL... */ 1671 /* Send command to modem if not NULL... */
1670 if (p_command/*rep->command*/) { 1672 if (p_command) {
1671 if (cs->connected) 1673 if (cs->connected)
1672 send_command(cs, p_command, 1674 send_command(cs, p_command,
1673 sendcid, cs->dle, 1675 sendcid, cs->dle,
@@ -1754,7 +1756,8 @@ static void process_command_flags(struct cardstate *cs)
1754 } 1756 }
1755 } 1757 }
1756 1758
1757 /* only switch back to unimodem mode, if no commands are pending and no channels are up */ 1759 /* only switch back to unimodem mode if no commands are pending and
1760 * no channels are up */
1758 spin_lock_irqsave(&cs->lock, flags); 1761 spin_lock_irqsave(&cs->lock, flags);
1759 if (cs->at_state.pending_commands == PC_UMMODE 1762 if (cs->at_state.pending_commands == PC_UMMODE
1760 && !cs->cidmode 1763 && !cs->cidmode
@@ -1813,9 +1816,8 @@ static void process_command_flags(struct cardstate *cs)
1813 1816
1814 if (cs->at_state.pending_commands & PC_INIT) { 1817 if (cs->at_state.pending_commands & PC_INIT) {
1815 cs->at_state.pending_commands &= ~PC_INIT; 1818 cs->at_state.pending_commands &= ~PC_INIT;
1816 cs->dle = 0; //FIXME 1819 cs->dle = 0;
1817 cs->inbuf->inputstate = INS_command; 1820 cs->inbuf->inputstate = INS_command;
1818 //FIXME reset card state (or -> LOCK0)?
1819 schedule_sequence(cs, &cs->at_state, SEQ_INIT); 1821 schedule_sequence(cs, &cs->at_state, SEQ_INIT);
1820 return; 1822 return;
1821 } 1823 }
diff --git a/drivers/isdn/gigaset/gigaset.h b/drivers/isdn/gigaset/gigaset.h
index a2f6125739eb..e963a6c2e86d 100644
--- a/drivers/isdn/gigaset/gigaset.h
+++ b/drivers/isdn/gigaset/gigaset.h
@@ -23,7 +23,6 @@
23#include <linux/compiler.h> 23#include <linux/compiler.h>
24#include <linux/types.h> 24#include <linux/types.h>
25#include <linux/spinlock.h> 25#include <linux/spinlock.h>
26#include <linux/isdnif.h>
27#include <linux/usb.h> 26#include <linux/usb.h>
28#include <linux/skbuff.h> 27#include <linux/skbuff.h>
29#include <linux/netdevice.h> 28#include <linux/netdevice.h>
@@ -35,12 +34,11 @@
35#include <linux/list.h> 34#include <linux/list.h>
36#include <asm/atomic.h> 35#include <asm/atomic.h>
37 36
38#define GIG_VERSION {0,5,0,0} 37#define GIG_VERSION {0, 5, 0, 0}
39#define GIG_COMPAT {0,4,0,0} 38#define GIG_COMPAT {0, 4, 0, 0}
40 39
41#define MAX_REC_PARAMS 10 /* Max. number of params in response string */ 40#define MAX_REC_PARAMS 10 /* Max. number of params in response string */
42#define MAX_RESP_SIZE 512 /* Max. size of a response string */ 41#define MAX_RESP_SIZE 512 /* Max. size of a response string */
43#define HW_HDR_LEN 2 /* Header size used to store ack info */
44 42
45#define MAX_EVENTS 64 /* size of event queue */ 43#define MAX_EVENTS 64 /* size of event queue */
46 44
@@ -135,35 +133,32 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
135#define OUT_VENDOR_REQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT) 133#define OUT_VENDOR_REQ (USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
136#define IN_VENDOR_REQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT) 134#define IN_VENDOR_REQ (USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_ENDPOINT)
137 135
138/* int-in-events 3070 */ 136/* interrupt pipe messages */
139#define HD_B1_FLOW_CONTROL 0x80 137#define HD_B1_FLOW_CONTROL 0x80
140#define HD_B2_FLOW_CONTROL 0x81 138#define HD_B2_FLOW_CONTROL 0x81
141#define HD_RECEIVEATDATA_ACK (0x35) // 3070 139#define HD_RECEIVEATDATA_ACK (0x35) /* 3070 */
142 // att: HD_RECEIVE>>AT<<DATA_ACK 140#define HD_READY_SEND_ATDATA (0x36) /* 3070 */
143#define HD_READY_SEND_ATDATA (0x36) // 3070 141#define HD_OPEN_ATCHANNEL_ACK (0x37) /* 3070 */
144#define HD_OPEN_ATCHANNEL_ACK (0x37) // 3070 142#define HD_CLOSE_ATCHANNEL_ACK (0x38) /* 3070 */
145#define HD_CLOSE_ATCHANNEL_ACK (0x38) // 3070 143#define HD_DEVICE_INIT_OK (0x11) /* ISurf USB + 3070 */
146#define HD_DEVICE_INIT_OK (0x11) // ISurf USB + 3070 144#define HD_OPEN_B1CHANNEL_ACK (0x51) /* ISurf USB + 3070 */
147#define HD_OPEN_B1CHANNEL_ACK (0x51) // ISurf USB + 3070 145#define HD_OPEN_B2CHANNEL_ACK (0x52) /* ISurf USB + 3070 */
148#define HD_OPEN_B2CHANNEL_ACK (0x52) // ISurf USB + 3070 146#define HD_CLOSE_B1CHANNEL_ACK (0x53) /* ISurf USB + 3070 */
149#define HD_CLOSE_B1CHANNEL_ACK (0x53) // ISurf USB + 3070 147#define HD_CLOSE_B2CHANNEL_ACK (0x54) /* ISurf USB + 3070 */
150#define HD_CLOSE_B2CHANNEL_ACK (0x54) // ISurf USB + 3070 148#define HD_SUSPEND_END (0x61) /* ISurf USB */
151// Powermangment 149#define HD_RESET_INTERRUPT_PIPE_ACK (0xFF) /* ISurf USB + 3070 */
152#define HD_SUSPEND_END (0x61) // ISurf USB 150
153// Configuration 151/* control requests */
154#define HD_RESET_INTERRUPT_PIPE_ACK (0xFF) // ISurf USB + 3070 152#define HD_OPEN_B1CHANNEL (0x23) /* ISurf USB + 3070 */
155 153#define HD_CLOSE_B1CHANNEL (0x24) /* ISurf USB + 3070 */
156/* control requests 3070 */ 154#define HD_OPEN_B2CHANNEL (0x25) /* ISurf USB + 3070 */
157#define HD_OPEN_B1CHANNEL (0x23) // ISurf USB + 3070 155#define HD_CLOSE_B2CHANNEL (0x26) /* ISurf USB + 3070 */
158#define HD_CLOSE_B1CHANNEL (0x24) // ISurf USB + 3070 156#define HD_RESET_INTERRUPT_PIPE (0x27) /* ISurf USB + 3070 */
159#define HD_OPEN_B2CHANNEL (0x25) // ISurf USB + 3070 157#define HD_DEVICE_INIT_ACK (0x34) /* ISurf USB + 3070 */
160#define HD_CLOSE_B2CHANNEL (0x26) // ISurf USB + 3070 158#define HD_WRITE_ATMESSAGE (0x12) /* 3070 */
161#define HD_RESET_INTERRUPT_PIPE (0x27) // ISurf USB + 3070 159#define HD_READ_ATMESSAGE (0x13) /* 3070 */
162#define HD_DEVICE_INIT_ACK (0x34) // ISurf USB + 3070 160#define HD_OPEN_ATCHANNEL (0x28) /* 3070 */
163#define HD_WRITE_ATMESSAGE (0x12) // 3070 161#define HD_CLOSE_ATCHANNEL (0x29) /* 3070 */
164#define HD_READ_ATMESSAGE (0x13) // 3070
165#define HD_OPEN_ATCHANNEL (0x28) // 3070
166#define HD_CLOSE_ATCHANNEL (0x29) // 3070
167 162
168/* number of B channels supported by base driver */ 163/* number of B channels supported by base driver */
169#define BAS_CHANNELS 2 164#define BAS_CHANNELS 2
@@ -193,7 +188,9 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
193#define AT_PROTO 4 188#define AT_PROTO 4
194#define AT_TYPE 5 189#define AT_TYPE 5
195#define AT_HLC 6 190#define AT_HLC 6
196#define AT_NUM 7 191#define AT_CLIP 7
192/* total number */
193#define AT_NUM 8
197 194
198/* variables in struct at_state_t */ 195/* variables in struct at_state_t */
199#define VAR_ZSAU 0 196#define VAR_ZSAU 0
@@ -216,7 +213,6 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
216#define EV_START -110 213#define EV_START -110
217#define EV_STOP -111 214#define EV_STOP -111
218#define EV_IF_LOCK -112 215#define EV_IF_LOCK -112
219#define EV_PROTO_L2 -113
220#define EV_ACCEPT -114 216#define EV_ACCEPT -114
221#define EV_DIAL -115 217#define EV_DIAL -115
222#define EV_HUP -116 218#define EV_HUP -116
@@ -224,12 +220,11 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
224#define EV_BC_CLOSED -118 220#define EV_BC_CLOSED -118
225 221
226/* input state */ 222/* input state */
227#define INS_command 0x0001 223#define INS_command 0x0001 /* receiving messages (not payload data) */
228#define INS_DLE_char 0x0002 224#define INS_DLE_char 0x0002 /* DLE flag received (in DLE mode) */
229#define INS_byte_stuff 0x0004 225#define INS_byte_stuff 0x0004
230#define INS_have_data 0x0008 226#define INS_have_data 0x0008
231#define INS_skip_frame 0x0010 227#define INS_DLE_command 0x0020 /* DLE message start (<DLE> X) received */
232#define INS_DLE_command 0x0020
233#define INS_flag_hunt 0x0040 228#define INS_flag_hunt 0x0040
234 229
235/* channel state */ 230/* channel state */
@@ -259,6 +254,11 @@ void gigaset_dbg_buffer(enum debuglevel level, const unsigned char *msg,
259#define SM_LOCKED 0 254#define SM_LOCKED 0
260#define SM_ISDN 1 /* default */ 255#define SM_ISDN 1 /* default */
261 256
257/* layer 2 protocols (AT^SBPR=...) */
258#define L2_BITSYNC 0
259#define L2_HDLC 1
260#define L2_VOICE 2
261
262struct gigaset_ops; 262struct gigaset_ops;
263struct gigaset_driver; 263struct gigaset_driver;
264 264
@@ -286,8 +286,6 @@ extern struct reply_t gigaset_tab_cid[];
286extern struct reply_t gigaset_tab_nocid[]; 286extern struct reply_t gigaset_tab_nocid[];
287 287
288struct inbuf_t { 288struct inbuf_t {
289 unsigned char *rcvbuf; /* usb-gigaset receive buffer */
290 struct bc_state *bcs;
291 struct cardstate *cs; 289 struct cardstate *cs;
292 int inputstate; 290 int inputstate;
293 int head, tail; 291 int head, tail;
@@ -359,12 +357,6 @@ struct at_state_t {
359 struct bc_state *bcs; 357 struct bc_state *bcs;
360}; 358};
361 359
362struct resp_type_t {
363 unsigned char *response;
364 int resp_code; /* RSP_XXXX */
365 int type; /* RT_XXXX */
366};
367
368struct event_t { 360struct event_t {
369 int type; 361 int type;
370 void *ptr, *arg; 362 void *ptr, *arg;
@@ -395,7 +387,7 @@ struct bc_state {
395 387
396 unsigned chstate; /* bitmap (CHS_*) */ 388 unsigned chstate; /* bitmap (CHS_*) */
397 int ignore; 389 int ignore;
398 unsigned proto2; /* Layer 2 protocol (ISDN_PROTO_L2_*) */ 390 unsigned proto2; /* layer 2 protocol (L2_*) */
399 char *commands[AT_NUM]; /* see AT_XXXX */ 391 char *commands[AT_NUM]; /* see AT_XXXX */
400 392
401#ifdef CONFIG_GIGASET_DEBUG 393#ifdef CONFIG_GIGASET_DEBUG
@@ -410,6 +402,8 @@ struct bc_state {
410 struct usb_bc_state *usb; /* usb hardware driver (m105) */ 402 struct usb_bc_state *usb; /* usb hardware driver (m105) */
411 struct bas_bc_state *bas; /* usb hardware driver (base) */ 403 struct bas_bc_state *bas; /* usb hardware driver (base) */
412 } hw; 404 } hw;
405
406 void *ap; /* LL application structure */
413}; 407};
414 408
415struct cardstate { 409struct cardstate {
@@ -456,12 +450,13 @@ struct cardstate {
456 450
457 unsigned running; /* !=0 if events are handled */ 451 unsigned running; /* !=0 if events are handled */
458 unsigned connected; /* !=0 if hardware is connected */ 452 unsigned connected; /* !=0 if hardware is connected */
459 unsigned isdn_up; /* !=0 after ISDN_STAT_RUN */ 453 unsigned isdn_up; /* !=0 after gigaset_isdn_start() */
460 454
461 unsigned cidmode; 455 unsigned cidmode;
462 456
463 int myid; /* id for communication with LL */ 457 int myid; /* id for communication with LL */
464 isdn_if iif; 458 void *iif; /* LL interface structure */
459 unsigned short hw_hdr_len; /* headroom needed in data skbs */
465 460
466 struct reply_t *tabnocid; 461 struct reply_t *tabnocid;
467 struct reply_t *tabcid; 462 struct reply_t *tabcid;
@@ -476,8 +471,8 @@ struct cardstate {
476 471
477 struct timer_list timer; 472 struct timer_list timer;
478 int retry_count; 473 int retry_count;
479 int dle; /* !=0 if modem commands/responses are 474 int dle; /* !=0 if DLE mode is active
480 dle encoded */ 475 (ZDLE=1 received -- M10x only) */
481 int cur_at_seq; /* sequence of AT commands being 476 int cur_at_seq; /* sequence of AT commands being
482 processed */ 477 processed */
483 int curchannel; /* channel those commands are meant 478 int curchannel; /* channel those commands are meant
@@ -616,7 +611,9 @@ struct gigaset_ops {
616 int (*baud_rate)(struct cardstate *cs, unsigned cflag); 611 int (*baud_rate)(struct cardstate *cs, unsigned cflag);
617 int (*set_line_ctrl)(struct cardstate *cs, unsigned cflag); 612 int (*set_line_ctrl)(struct cardstate *cs, unsigned cflag);
618 613
619 /* Called from i4l.c to put an skb into the send-queue. */ 614 /* Called from LL interface to put an skb into the send-queue.
615 * After sending is completed, gigaset_skb_sent() must be called
616 * with the skb's link layer header preserved. */
620 int (*send_skb)(struct bc_state *bcs, struct sk_buff *skb); 617 int (*send_skb)(struct bc_state *bcs, struct sk_buff *skb);
621 618
622 /* Called from ev-layer.c to process a block of data 619 /* Called from ev-layer.c to process a block of data
@@ -625,7 +622,8 @@ struct gigaset_ops {
625 622
626}; 623};
627 624
628/* = Common structures and definitions ======================================= */ 625/* = Common structures and definitions =======================================
626 */
629 627
630/* Parser states for DLE-Event: 628/* Parser states for DLE-Event:
631 * <DLE-EVENT>: <DLE_FLAG> "X" <EVENT> <DLE_FLAG> "." 629 * <DLE-EVENT>: <DLE_FLAG> "X" <EVENT> <DLE_FLAG> "."
@@ -638,8 +636,7 @@ struct gigaset_ops {
638 * Functions implemented in asyncdata.c 636 * Functions implemented in asyncdata.c
639 */ 637 */
640 638
641/* Called from i4l.c to put an skb into the send-queue. 639/* Called from LL interface to put an skb into the send queue. */
642 * After sending gigaset_skb_sent() should be called. */
643int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb); 640int gigaset_m10x_send_skb(struct bc_state *bcs, struct sk_buff *skb);
644 641
645/* Called from ev-layer.c to process a block of data 642/* Called from ev-layer.c to process a block of data
@@ -650,8 +647,7 @@ void gigaset_m10x_input(struct inbuf_t *inbuf);
650 * Functions implemented in isocdata.c 647 * Functions implemented in isocdata.c
651 */ 648 */
652 649
653/* Called from i4l.c to put an skb into the send-queue. 650/* Called from LL interface to put an skb into the send queue. */
654 * After sending gigaset_skb_sent() should be called. */
655int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb); 651int gigaset_isoc_send_skb(struct bc_state *bcs, struct sk_buff *skb);
656 652
657/* Called from ev-layer.c to process a block of data 653/* Called from ev-layer.c to process a block of data
@@ -674,36 +670,26 @@ void gigaset_isowbuf_init(struct isowbuf_t *iwb, unsigned char idle);
674int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size); 670int gigaset_isowbuf_getbytes(struct isowbuf_t *iwb, int size);
675 671
676/* =========================================================================== 672/* ===========================================================================
677 * Functions implemented in i4l.c/gigaset.h 673 * Functions implemented in LL interface
678 */ 674 */
679 675
680/* Called by gigaset_initcs() for setting up with the isdn4linux subsystem */ 676/* Called from common.c for setting up/shutting down with the ISDN subsystem */
681int gigaset_register_to_LL(struct cardstate *cs, const char *isdnid); 677int gigaset_isdn_register(struct cardstate *cs, const char *isdnid);
678void gigaset_isdn_unregister(struct cardstate *cs);
682 679
683/* Called from xxx-gigaset.c to indicate completion of sending an skb */ 680/* Called from hardware module to indicate completion of an skb */
684void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb); 681void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb);
682void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb);
683void gigaset_isdn_rcv_err(struct bc_state *bcs);
685 684
686/* Called from common.c/ev-layer.c to indicate events relevant to the LL */ 685/* Called from common.c/ev-layer.c to indicate events relevant to the LL */
686void gigaset_isdn_start(struct cardstate *cs);
687void gigaset_isdn_stop(struct cardstate *cs);
687int gigaset_isdn_icall(struct at_state_t *at_state); 688int gigaset_isdn_icall(struct at_state_t *at_state);
688int gigaset_isdn_setup_accept(struct at_state_t *at_state); 689void gigaset_isdn_connD(struct bc_state *bcs);
689int gigaset_isdn_setup_dial(struct at_state_t *at_state, void *data); 690void gigaset_isdn_hupD(struct bc_state *bcs);
690 691void gigaset_isdn_connB(struct bc_state *bcs);
691void gigaset_i4l_cmd(struct cardstate *cs, int cmd); 692void gigaset_isdn_hupB(struct bc_state *bcs);
692void gigaset_i4l_channel_cmd(struct bc_state *bcs, int cmd);
693
694
695static inline void gigaset_isdn_rcv_err(struct bc_state *bcs)
696{
697 isdn_ctrl response;
698
699 /* error -> LL */
700 gig_dbg(DEBUG_CMD, "sending L1ERR");
701 response.driver = bcs->cs->myid;
702 response.command = ISDN_STAT_L1ERR;
703 response.arg = bcs->channel;
704 response.parm.errcode = ISDN_STAT_L1ERR_RECV;
705 bcs->cs->iif.statcallb(&response);
706}
707 693
708/* =========================================================================== 694/* ===========================================================================
709 * Functions implemented in ev-layer.c 695 * Functions implemented in ev-layer.c
@@ -732,6 +718,7 @@ void gigaset_bcs_reinit(struct bc_state *bcs);
732void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs, 718void gigaset_at_init(struct at_state_t *at_state, struct bc_state *bcs,
733 struct cardstate *cs, int cid); 719 struct cardstate *cs, int cid);
734int gigaset_get_channel(struct bc_state *bcs); 720int gigaset_get_channel(struct bc_state *bcs);
721struct bc_state *gigaset_get_free_channel(struct cardstate *cs);
735void gigaset_free_channel(struct bc_state *bcs); 722void gigaset_free_channel(struct bc_state *bcs);
736int gigaset_get_channels(struct cardstate *cs); 723int gigaset_get_channels(struct cardstate *cs);
737void gigaset_free_channels(struct cardstate *cs); 724void gigaset_free_channels(struct cardstate *cs);
@@ -781,7 +768,7 @@ struct event_t *gigaset_add_event(struct cardstate *cs,
781 void *ptr, int parameter, void *arg); 768 void *ptr, int parameter, void *arg);
782 769
783/* Called on CONFIG1 command from frontend. */ 770/* Called on CONFIG1 command from frontend. */
784int gigaset_enterconfigmode(struct cardstate *cs); //0: success <0: errorcode 771int gigaset_enterconfigmode(struct cardstate *cs);
785 772
786/* cs->lock must not be locked */ 773/* cs->lock must not be locked */
787static inline void gigaset_schedule_event(struct cardstate *cs) 774static inline void gigaset_schedule_event(struct cardstate *cs)
@@ -816,35 +803,6 @@ static inline void gigaset_bchannel_up(struct bc_state *bcs)
816/* handling routines for sk_buff */ 803/* handling routines for sk_buff */
817/* ============================= */ 804/* ============================= */
818 805
819/* pass received skb to LL
820 * Warning: skb must not be accessed anymore!
821 */
822static inline void gigaset_rcv_skb(struct sk_buff *skb,
823 struct cardstate *cs,
824 struct bc_state *bcs)
825{
826 cs->iif.rcvcallb_skb(cs->myid, bcs->channel, skb);
827 bcs->trans_down++;
828}
829
830/* handle reception of corrupted skb
831 * Warning: skb must not be accessed anymore!
832 */
833static inline void gigaset_rcv_error(struct sk_buff *procskb,
834 struct cardstate *cs,
835 struct bc_state *bcs)
836{
837 if (procskb)
838 dev_kfree_skb(procskb);
839
840 if (bcs->ignore)
841 --bcs->ignore;
842 else {
843 ++bcs->corrupted;
844 gigaset_isdn_rcv_err(bcs);
845 }
846}
847
848/* append received bytes to inbuf */ 806/* append received bytes to inbuf */
849int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src, 807int gigaset_fill_inbuf(struct inbuf_t *inbuf, const unsigned char *src,
850 unsigned numbytes); 808 unsigned numbytes);
diff --git a/drivers/isdn/gigaset/i4l.c b/drivers/isdn/gigaset/i4l.c
index 654489d836cd..c129ee47a8fb 100644
--- a/drivers/isdn/gigaset/i4l.c
+++ b/drivers/isdn/gigaset/i4l.c
@@ -14,6 +14,9 @@
14 */ 14 */
15 15
16#include "gigaset.h" 16#include "gigaset.h"
17#include <linux/isdnif.h>
18
19#define HW_HDR_LEN 2 /* Header size used to store ack info */
17 20
18/* == Handling of I4L IO =====================================================*/ 21/* == Handling of I4L IO =====================================================*/
19 22
@@ -36,12 +39,12 @@
36static int writebuf_from_LL(int driverID, int channel, int ack, 39static int writebuf_from_LL(int driverID, int channel, int ack,
37 struct sk_buff *skb) 40 struct sk_buff *skb)
38{ 41{
39 struct cardstate *cs; 42 struct cardstate *cs = gigaset_get_cs_by_id(driverID);
40 struct bc_state *bcs; 43 struct bc_state *bcs;
44 unsigned char *ack_header;
41 unsigned len; 45 unsigned len;
42 unsigned skblen;
43 46
44 if (!(cs = gigaset_get_cs_by_id(driverID))) { 47 if (!cs) {
45 pr_err("%s: invalid driver ID (%d)\n", __func__, driverID); 48 pr_err("%s: invalid driver ID (%d)\n", __func__, driverID);
46 return -ENODEV; 49 return -ENODEV;
47 } 50 }
@@ -75,11 +78,23 @@ static int writebuf_from_LL(int driverID, int channel, int ack,
75 return -EINVAL; 78 return -EINVAL;
76 } 79 }
77 80
78 skblen = ack ? len : 0; 81 /* set up acknowledgement header */
79 skb->head[0] = skblen & 0xff; 82 if (skb_headroom(skb) < HW_HDR_LEN) {
80 skb->head[1] = skblen >> 8; 83 /* should never happen */
81 gig_dbg(DEBUG_MCMD, "skb: len=%u, skblen=%u: %02x %02x", 84 dev_err(cs->dev, "%s: insufficient skb headroom\n", __func__);
82 len, skblen, (unsigned) skb->head[0], (unsigned) skb->head[1]); 85 return -ENOMEM;
86 }
87 skb_set_mac_header(skb, -HW_HDR_LEN);
88 skb->mac_len = HW_HDR_LEN;
89 ack_header = skb_mac_header(skb);
90 if (ack) {
91 ack_header[0] = len & 0xff;
92 ack_header[1] = len >> 8;
93 } else {
94 ack_header[0] = ack_header[1] = 0;
95 }
96 gig_dbg(DEBUG_MCMD, "skb: len=%u, ack=%d: %02x %02x",
97 len, ack, ack_header[0], ack_header[1]);
83 98
84 /* pass to device-specific module */ 99 /* pass to device-specific module */
85 return cs->ops->send_skb(bcs, skb); 100 return cs->ops->send_skb(bcs, skb);
@@ -95,6 +110,8 @@ static int writebuf_from_LL(int driverID, int channel, int ack,
95 */ 110 */
96void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb) 111void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
97{ 112{
113 isdn_if *iif = bcs->cs->iif;
114 unsigned char *ack_header = skb_mac_header(skb);
98 unsigned len; 115 unsigned len;
99 isdn_ctrl response; 116 isdn_ctrl response;
100 117
@@ -104,8 +121,7 @@ void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
104 dev_warn(bcs->cs->dev, "%s: skb->len==%d\n", 121 dev_warn(bcs->cs->dev, "%s: skb->len==%d\n",
105 __func__, skb->len); 122 __func__, skb->len);
106 123
107 len = (unsigned char) skb->head[0] | 124 len = ack_header[0] + ((unsigned) ack_header[1] << 8);
108 (unsigned) (unsigned char) skb->head[1] << 8;
109 if (len) { 125 if (len) {
110 gig_dbg(DEBUG_MCMD, "ACKing to LL (id: %d, ch: %d, sz: %u)", 126 gig_dbg(DEBUG_MCMD, "ACKing to LL (id: %d, ch: %d, sz: %u)",
111 bcs->cs->myid, bcs->channel, len); 127 bcs->cs->myid, bcs->channel, len);
@@ -114,71 +130,177 @@ void gigaset_skb_sent(struct bc_state *bcs, struct sk_buff *skb)
114 response.command = ISDN_STAT_BSENT; 130 response.command = ISDN_STAT_BSENT;
115 response.arg = bcs->channel; 131 response.arg = bcs->channel;
116 response.parm.length = len; 132 response.parm.length = len;
117 bcs->cs->iif.statcallb(&response); 133 iif->statcallb(&response);
118 } 134 }
119} 135}
120EXPORT_SYMBOL_GPL(gigaset_skb_sent); 136EXPORT_SYMBOL_GPL(gigaset_skb_sent);
121 137
138/**
139 * gigaset_skb_rcvd() - pass received skb to LL
140 * @bcs: B channel descriptor structure.
141 * @skb: received data.
142 *
143 * Called by hardware module {bas,ser,usb}_gigaset when user data has
144 * been successfully received, for passing to the LL.
145 * Warning: skb must not be accessed anymore!
146 */
147void gigaset_skb_rcvd(struct bc_state *bcs, struct sk_buff *skb)
148{
149 isdn_if *iif = bcs->cs->iif;
150
151 iif->rcvcallb_skb(bcs->cs->myid, bcs->channel, skb);
152 bcs->trans_down++;
153}
154EXPORT_SYMBOL_GPL(gigaset_skb_rcvd);
155
156/**
157 * gigaset_isdn_rcv_err() - signal receive error
158 * @bcs: B channel descriptor structure.
159 *
160 * Called by hardware module {bas,ser,usb}_gigaset when a receive error
161 * has occurred, for signalling to the LL.
162 */
163void gigaset_isdn_rcv_err(struct bc_state *bcs)
164{
165 isdn_if *iif = bcs->cs->iif;
166 isdn_ctrl response;
167
168 /* if currently ignoring packets, just count down */
169 if (bcs->ignore) {
170 bcs->ignore--;
171 return;
172 }
173
174 /* update statistics */
175 bcs->corrupted++;
176
177 /* error -> LL */
178 gig_dbg(DEBUG_CMD, "sending L1ERR");
179 response.driver = bcs->cs->myid;
180 response.command = ISDN_STAT_L1ERR;
181 response.arg = bcs->channel;
182 response.parm.errcode = ISDN_STAT_L1ERR_RECV;
183 iif->statcallb(&response);
184}
185EXPORT_SYMBOL_GPL(gigaset_isdn_rcv_err);
186
122/* This function will be called by LL to send commands 187/* This function will be called by LL to send commands
123 * NOTE: LL ignores the returned value, for commands other than ISDN_CMD_IOCTL, 188 * NOTE: LL ignores the returned value, for commands other than ISDN_CMD_IOCTL,
124 * so don't put too much effort into it. 189 * so don't put too much effort into it.
125 */ 190 */
126static int command_from_LL(isdn_ctrl *cntrl) 191static int command_from_LL(isdn_ctrl *cntrl)
127{ 192{
128 struct cardstate *cs = gigaset_get_cs_by_id(cntrl->driver); 193 struct cardstate *cs;
129 struct bc_state *bcs; 194 struct bc_state *bcs;
130 int retval = 0; 195 int retval = 0;
131 struct setup_parm *sp; 196 char **commands;
197 int ch;
198 int i;
199 size_t l;
132 200
133 gigaset_debugdrivers(); 201 gigaset_debugdrivers();
134 202
135 if (!cs) { 203 gig_dbg(DEBUG_CMD, "driver: %d, command: %d, arg: 0x%lx",
204 cntrl->driver, cntrl->command, cntrl->arg);
205
206 cs = gigaset_get_cs_by_id(cntrl->driver);
207 if (cs == NULL) {
136 pr_err("%s: invalid driver ID (%d)\n", __func__, cntrl->driver); 208 pr_err("%s: invalid driver ID (%d)\n", __func__, cntrl->driver);
137 return -ENODEV; 209 return -ENODEV;
138 } 210 }
211 ch = cntrl->arg & 0xff;
139 212
140 switch (cntrl->command) { 213 switch (cntrl->command) {
141 case ISDN_CMD_IOCTL: 214 case ISDN_CMD_IOCTL:
142 gig_dbg(DEBUG_ANY, "ISDN_CMD_IOCTL (driver: %d, arg: %ld)",
143 cntrl->driver, cntrl->arg);
144
145 dev_warn(cs->dev, "ISDN_CMD_IOCTL not supported\n"); 215 dev_warn(cs->dev, "ISDN_CMD_IOCTL not supported\n");
146 return -EINVAL; 216 return -EINVAL;
147 217
148 case ISDN_CMD_DIAL: 218 case ISDN_CMD_DIAL:
149 gig_dbg(DEBUG_ANY, 219 gig_dbg(DEBUG_ANY,
150 "ISDN_CMD_DIAL (driver: %d, ch: %ld, " 220 "ISDN_CMD_DIAL (phone: %s, msn: %s, si1: %d, si2: %d)",
151 "phone: %s, ownmsn: %s, si1: %d, si2: %d)",
152 cntrl->driver, cntrl->arg,
153 cntrl->parm.setup.phone, cntrl->parm.setup.eazmsn, 221 cntrl->parm.setup.phone, cntrl->parm.setup.eazmsn,
154 cntrl->parm.setup.si1, cntrl->parm.setup.si2); 222 cntrl->parm.setup.si1, cntrl->parm.setup.si2);
155 223
156 if (cntrl->arg >= cs->channels) { 224 if (ch >= cs->channels) {
157 dev_err(cs->dev, 225 dev_err(cs->dev,
158 "ISDN_CMD_DIAL: invalid channel (%d)\n", 226 "ISDN_CMD_DIAL: invalid channel (%d)\n", ch);
159 (int) cntrl->arg);
160 return -EINVAL; 227 return -EINVAL;
161 } 228 }
162 229 bcs = cs->bcs + ch;
163 bcs = cs->bcs + cntrl->arg;
164
165 if (!gigaset_get_channel(bcs)) { 230 if (!gigaset_get_channel(bcs)) {
166 dev_err(cs->dev, "ISDN_CMD_DIAL: channel not free\n"); 231 dev_err(cs->dev, "ISDN_CMD_DIAL: channel not free\n");
167 return -EBUSY; 232 return -EBUSY;
168 } 233 }
169 234
170 sp = kmalloc(sizeof *sp, GFP_ATOMIC); 235 commands = kzalloc(AT_NUM*(sizeof *commands), GFP_ATOMIC);
171 if (!sp) { 236 if (!commands) {
172 gigaset_free_channel(bcs); 237 gigaset_free_channel(bcs);
173 dev_err(cs->dev, "ISDN_CMD_DIAL: out of memory\n"); 238 dev_err(cs->dev, "ISDN_CMD_DIAL: out of memory\n");
174 return -ENOMEM; 239 return -ENOMEM;
175 } 240 }
176 *sp = cntrl->parm.setup;
177 241
178 if (!gigaset_add_event(cs, &bcs->at_state, EV_DIAL, sp, 242 l = 3 + strlen(cntrl->parm.setup.phone);
243 commands[AT_DIAL] = kmalloc(l, GFP_ATOMIC);
244 if (!commands[AT_DIAL])
245 goto oom;
246 if (cntrl->parm.setup.phone[0] == '*' &&
247 cntrl->parm.setup.phone[1] == '*') {
248 /* internal call: translate ** prefix to CTP value */
249 commands[AT_TYPE] = kstrdup("^SCTP=0\r", GFP_ATOMIC);
250 if (!commands[AT_TYPE])
251 goto oom;
252 snprintf(commands[AT_DIAL], l,
253 "D%s\r", cntrl->parm.setup.phone+2);
254 } else {
255 commands[AT_TYPE] = kstrdup("^SCTP=1\r", GFP_ATOMIC);
256 if (!commands[AT_TYPE])
257 goto oom;
258 snprintf(commands[AT_DIAL], l,
259 "D%s\r", cntrl->parm.setup.phone);
260 }
261
262 l = strlen(cntrl->parm.setup.eazmsn);
263 if (l) {
264 l += 8;
265 commands[AT_MSN] = kmalloc(l, GFP_ATOMIC);
266 if (!commands[AT_MSN])
267 goto oom;
268 snprintf(commands[AT_MSN], l, "^SMSN=%s\r",
269 cntrl->parm.setup.eazmsn);
270 }
271
272 switch (cntrl->parm.setup.si1) {
273 case 1: /* audio */
274 /* BC = 9090A3: 3.1 kHz audio, A-law */
275 commands[AT_BC] = kstrdup("^SBC=9090A3\r", GFP_ATOMIC);
276 if (!commands[AT_BC])
277 goto oom;
278 break;
279 case 7: /* data */
280 default: /* hope the app knows what it is doing */
281 /* BC = 8890: unrestricted digital information */
282 commands[AT_BC] = kstrdup("^SBC=8890\r", GFP_ATOMIC);
283 if (!commands[AT_BC])
284 goto oom;
285 }
286 /* ToDo: other si1 values, inspect si2, set HLC/LLC */
287
288 commands[AT_PROTO] = kmalloc(9, GFP_ATOMIC);
289 if (!commands[AT_PROTO])
290 goto oom;
291 snprintf(commands[AT_PROTO], 9, "^SBPR=%u\r", bcs->proto2);
292
293 commands[AT_ISO] = kmalloc(9, GFP_ATOMIC);
294 if (!commands[AT_ISO])
295 goto oom;
296 snprintf(commands[AT_ISO], 9, "^SISO=%u\r",
297 (unsigned) bcs->channel + 1);
298
299 if (!gigaset_add_event(cs, &bcs->at_state, EV_DIAL, commands,
179 bcs->at_state.seq_index, NULL)) { 300 bcs->at_state.seq_index, NULL)) {
180 //FIXME what should we do? 301 for (i = 0; i < AT_NUM; ++i)
181 kfree(sp); 302 kfree(commands[i]);
303 kfree(commands);
182 gigaset_free_channel(bcs); 304 gigaset_free_channel(bcs);
183 return -ENOMEM; 305 return -ENOMEM;
184 } 306 }
@@ -186,115 +308,102 @@ static int command_from_LL(isdn_ctrl *cntrl)
186 gig_dbg(DEBUG_CMD, "scheduling DIAL"); 308 gig_dbg(DEBUG_CMD, "scheduling DIAL");
187 gigaset_schedule_event(cs); 309 gigaset_schedule_event(cs);
188 break; 310 break;
189 case ISDN_CMD_ACCEPTD: //FIXME 311 case ISDN_CMD_ACCEPTD:
190 gig_dbg(DEBUG_ANY, "ISDN_CMD_ACCEPTD"); 312 if (ch >= cs->channels) {
191
192 if (cntrl->arg >= cs->channels) {
193 dev_err(cs->dev, 313 dev_err(cs->dev,
194 "ISDN_CMD_ACCEPTD: invalid channel (%d)\n", 314 "ISDN_CMD_ACCEPTD: invalid channel (%d)\n", ch);
195 (int) cntrl->arg);
196 return -EINVAL; 315 return -EINVAL;
197 } 316 }
198 317 bcs = cs->bcs + ch;
199 if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg].at_state, 318 if (!gigaset_add_event(cs, &bcs->at_state,
200 EV_ACCEPT, NULL, 0, NULL)) { 319 EV_ACCEPT, NULL, 0, NULL))
201 //FIXME what should we do?
202 return -ENOMEM; 320 return -ENOMEM;
203 }
204 321
205 gig_dbg(DEBUG_CMD, "scheduling ACCEPT"); 322 gig_dbg(DEBUG_CMD, "scheduling ACCEPT");
206 gigaset_schedule_event(cs); 323 gigaset_schedule_event(cs);
207 324
208 break; 325 break;
209 case ISDN_CMD_ACCEPTB: 326 case ISDN_CMD_ACCEPTB:
210 gig_dbg(DEBUG_ANY, "ISDN_CMD_ACCEPTB");
211 break; 327 break;
212 case ISDN_CMD_HANGUP: 328 case ISDN_CMD_HANGUP:
213 gig_dbg(DEBUG_ANY, "ISDN_CMD_HANGUP (ch: %d)", 329 if (ch >= cs->channels) {
214 (int) cntrl->arg);
215
216 if (cntrl->arg >= cs->channels) {
217 dev_err(cs->dev, 330 dev_err(cs->dev,
218 "ISDN_CMD_HANGUP: invalid channel (%d)\n", 331 "ISDN_CMD_HANGUP: invalid channel (%d)\n", ch);
219 (int) cntrl->arg);
220 return -EINVAL; 332 return -EINVAL;
221 } 333 }
222 334 bcs = cs->bcs + ch;
223 if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg].at_state, 335 if (!gigaset_add_event(cs, &bcs->at_state,
224 EV_HUP, NULL, 0, NULL)) { 336 EV_HUP, NULL, 0, NULL))
225 //FIXME what should we do?
226 return -ENOMEM; 337 return -ENOMEM;
227 }
228 338
229 gig_dbg(DEBUG_CMD, "scheduling HUP"); 339 gig_dbg(DEBUG_CMD, "scheduling HUP");
230 gigaset_schedule_event(cs); 340 gigaset_schedule_event(cs);
231 341
232 break; 342 break;
233 case ISDN_CMD_CLREAZ: /* Do not signal incoming signals */ //FIXME 343 case ISDN_CMD_CLREAZ: /* Do not signal incoming signals */
234 gig_dbg(DEBUG_ANY, "ISDN_CMD_CLREAZ"); 344 dev_info(cs->dev, "ignoring ISDN_CMD_CLREAZ\n");
235 break; 345 break;
236 case ISDN_CMD_SETEAZ: /* Signal incoming calls for given MSN */ //FIXME 346 case ISDN_CMD_SETEAZ: /* Signal incoming calls for given MSN */
237 gig_dbg(DEBUG_ANY, 347 dev_info(cs->dev, "ignoring ISDN_CMD_SETEAZ (%s)\n",
238 "ISDN_CMD_SETEAZ (id: %d, ch: %ld, number: %s)", 348 cntrl->parm.num);
239 cntrl->driver, cntrl->arg, cntrl->parm.num);
240 break; 349 break;
241 case ISDN_CMD_SETL2: /* Set L2 to given protocol */ 350 case ISDN_CMD_SETL2: /* Set L2 to given protocol */
242 gig_dbg(DEBUG_ANY, "ISDN_CMD_SETL2 (ch: %ld, proto: %lx)", 351 if (ch >= cs->channels) {
243 cntrl->arg & 0xff, (cntrl->arg >> 8));
244
245 if ((cntrl->arg & 0xff) >= cs->channels) {
246 dev_err(cs->dev, 352 dev_err(cs->dev,
247 "ISDN_CMD_SETL2: invalid channel (%d)\n", 353 "ISDN_CMD_SETL2: invalid channel (%d)\n", ch);
248 (int) cntrl->arg & 0xff);
249 return -EINVAL; 354 return -EINVAL;
250 } 355 }
251 356 bcs = cs->bcs + ch;
252 if (!gigaset_add_event(cs, &cs->bcs[cntrl->arg & 0xff].at_state, 357 if (bcs->chstate & CHS_D_UP) {
253 EV_PROTO_L2, NULL, cntrl->arg >> 8, 358 dev_err(cs->dev,
254 NULL)) { 359 "ISDN_CMD_SETL2: channel active (%d)\n", ch);
255 //FIXME what should we do? 360 return -EINVAL;
256 return -ENOMEM; 361 }
362 switch (cntrl->arg >> 8) {
363 case ISDN_PROTO_L2_HDLC:
364 gig_dbg(DEBUG_CMD, "ISDN_CMD_SETL2: setting L2_HDLC");
365 bcs->proto2 = L2_HDLC;
366 break;
367 case ISDN_PROTO_L2_TRANS:
368 gig_dbg(DEBUG_CMD, "ISDN_CMD_SETL2: setting L2_VOICE");
369 bcs->proto2 = L2_VOICE;
370 break;
371 default:
372 dev_err(cs->dev,
373 "ISDN_CMD_SETL2: unsupported protocol (%lu)\n",
374 cntrl->arg >> 8);
375 return -EINVAL;
257 } 376 }
258
259 gig_dbg(DEBUG_CMD, "scheduling PROTO_L2");
260 gigaset_schedule_event(cs);
261 break; 377 break;
262 case ISDN_CMD_SETL3: /* Set L3 to given protocol */ 378 case ISDN_CMD_SETL3: /* Set L3 to given protocol */
263 gig_dbg(DEBUG_ANY, "ISDN_CMD_SETL3 (ch: %ld, proto: %lx)", 379 if (ch >= cs->channels) {
264 cntrl->arg & 0xff, (cntrl->arg >> 8));
265
266 if ((cntrl->arg & 0xff) >= cs->channels) {
267 dev_err(cs->dev, 380 dev_err(cs->dev,
268 "ISDN_CMD_SETL3: invalid channel (%d)\n", 381 "ISDN_CMD_SETL3: invalid channel (%d)\n", ch);
269 (int) cntrl->arg & 0xff);
270 return -EINVAL; 382 return -EINVAL;
271 } 383 }
272 384
273 if (cntrl->arg >> 8 != ISDN_PROTO_L3_TRANS) { 385 if (cntrl->arg >> 8 != ISDN_PROTO_L3_TRANS) {
274 dev_err(cs->dev, 386 dev_err(cs->dev,
275 "ISDN_CMD_SETL3: invalid protocol %lu\n", 387 "ISDN_CMD_SETL3: unsupported protocol (%lu)\n",
276 cntrl->arg >> 8); 388 cntrl->arg >> 8);
277 return -EINVAL; 389 return -EINVAL;
278 } 390 }
279 391
280 break; 392 break;
281 case ISDN_CMD_PROCEED: 393 case ISDN_CMD_PROCEED:
282 gig_dbg(DEBUG_ANY, "ISDN_CMD_PROCEED"); //FIXME 394 gig_dbg(DEBUG_ANY, "ISDN_CMD_PROCEED");
283 break; 395 break;
284 case ISDN_CMD_ALERT: 396 case ISDN_CMD_ALERT:
285 gig_dbg(DEBUG_ANY, "ISDN_CMD_ALERT"); //FIXME 397 gig_dbg(DEBUG_ANY, "ISDN_CMD_ALERT");
286 if (cntrl->arg >= cs->channels) { 398 if (cntrl->arg >= cs->channels) {
287 dev_err(cs->dev, 399 dev_err(cs->dev,
288 "ISDN_CMD_ALERT: invalid channel (%d)\n", 400 "ISDN_CMD_ALERT: invalid channel (%d)\n",
289 (int) cntrl->arg); 401 (int) cntrl->arg);
290 return -EINVAL; 402 return -EINVAL;
291 } 403 }
292 //bcs = cs->bcs + cntrl->arg;
293 //bcs->proto2 = -1;
294 // FIXME
295 break; 404 break;
296 case ISDN_CMD_REDIR: 405 case ISDN_CMD_REDIR:
297 gig_dbg(DEBUG_ANY, "ISDN_CMD_REDIR"); //FIXME 406 gig_dbg(DEBUG_ANY, "ISDN_CMD_REDIR");
298 break; 407 break;
299 case ISDN_CMD_PROT_IO: 408 case ISDN_CMD_PROT_IO:
300 gig_dbg(DEBUG_ANY, "ISDN_CMD_PROT_IO"); 409 gig_dbg(DEBUG_ANY, "ISDN_CMD_PROT_IO");
@@ -324,149 +433,34 @@ static int command_from_LL(isdn_ctrl *cntrl)
324 } 433 }
325 434
326 return retval; 435 return retval;
436
437oom:
438 dev_err(bcs->cs->dev, "out of memory\n");
439 for (i = 0; i < AT_NUM; ++i)
440 kfree(commands[i]);
441 return -ENOMEM;
327} 442}
328 443
329void gigaset_i4l_cmd(struct cardstate *cs, int cmd) 444static void gigaset_i4l_cmd(struct cardstate *cs, int cmd)
330{ 445{
446 isdn_if *iif = cs->iif;
331 isdn_ctrl command; 447 isdn_ctrl command;
332 448
333 command.driver = cs->myid; 449 command.driver = cs->myid;
334 command.command = cmd; 450 command.command = cmd;
335 command.arg = 0; 451 command.arg = 0;
336 cs->iif.statcallb(&command); 452 iif->statcallb(&command);
337} 453}
338 454
339void gigaset_i4l_channel_cmd(struct bc_state *bcs, int cmd) 455static void gigaset_i4l_channel_cmd(struct bc_state *bcs, int cmd)
340{ 456{
457 isdn_if *iif = bcs->cs->iif;
341 isdn_ctrl command; 458 isdn_ctrl command;
342 459
343 command.driver = bcs->cs->myid; 460 command.driver = bcs->cs->myid;
344 command.command = cmd; 461 command.command = cmd;
345 command.arg = bcs->channel; 462 command.arg = bcs->channel;
346 bcs->cs->iif.statcallb(&command); 463 iif->statcallb(&command);
347}
348
349int gigaset_isdn_setup_dial(struct at_state_t *at_state, void *data)
350{
351 struct bc_state *bcs = at_state->bcs;
352 unsigned proto;
353 const char *bc;
354 size_t length[AT_NUM];
355 size_t l;
356 int i;
357 struct setup_parm *sp = data;
358
359 switch (bcs->proto2) {
360 case ISDN_PROTO_L2_HDLC:
361 proto = 1; /* 0: Bitsynchron, 1: HDLC, 2: voice */
362 break;
363 case ISDN_PROTO_L2_TRANS:
364 proto = 2; /* 0: Bitsynchron, 1: HDLC, 2: voice */
365 break;
366 default:
367 dev_err(bcs->cs->dev, "%s: invalid L2 protocol: %u\n",
368 __func__, bcs->proto2);
369 return -EINVAL;
370 }
371
372 switch (sp->si1) {
373 case 1: /* audio */
374 bc = "9090A3"; /* 3.1 kHz audio, A-law */
375 break;
376 case 7: /* data */
377 default: /* hope the app knows what it is doing */
378 bc = "8890"; /* unrestricted digital information */
379 }
380 //FIXME add missing si1 values from 1TR6, inspect si2, set HLC/LLC
381
382 length[AT_DIAL ] = 1 + strlen(sp->phone) + 1 + 1;
383 l = strlen(sp->eazmsn);
384 length[AT_MSN ] = l ? 6 + l + 1 + 1 : 0;
385 length[AT_BC ] = 5 + strlen(bc) + 1 + 1;
386 length[AT_PROTO] = 6 + 1 + 1 + 1; /* proto: 1 character */
387 length[AT_ISO ] = 6 + 1 + 1 + 1; /* channel: 1 character */
388 length[AT_TYPE ] = 6 + 1 + 1 + 1; /* call type: 1 character */
389 length[AT_HLC ] = 0;
390
391 for (i = 0; i < AT_NUM; ++i) {
392 kfree(bcs->commands[i]);
393 bcs->commands[i] = NULL;
394 if (length[i] &&
395 !(bcs->commands[i] = kmalloc(length[i], GFP_ATOMIC))) {
396 dev_err(bcs->cs->dev, "out of memory\n");
397 return -ENOMEM;
398 }
399 }
400
401 /* type = 1: extern, 0: intern, 2: recall, 3: door, 4: centrex */
402 if (sp->phone[0] == '*' && sp->phone[1] == '*') {
403 /* internal call: translate ** prefix to CTP value */
404 snprintf(bcs->commands[AT_DIAL], length[AT_DIAL],
405 "D%s\r", sp->phone+2);
406 strncpy(bcs->commands[AT_TYPE], "^SCTP=0\r", length[AT_TYPE]);
407 } else {
408 snprintf(bcs->commands[AT_DIAL], length[AT_DIAL],
409 "D%s\r", sp->phone);
410 strncpy(bcs->commands[AT_TYPE], "^SCTP=1\r", length[AT_TYPE]);
411 }
412
413 if (bcs->commands[AT_MSN])
414 snprintf(bcs->commands[AT_MSN], length[AT_MSN],
415 "^SMSN=%s\r", sp->eazmsn);
416 snprintf(bcs->commands[AT_BC ], length[AT_BC ],
417 "^SBC=%s\r", bc);
418 snprintf(bcs->commands[AT_PROTO], length[AT_PROTO],
419 "^SBPR=%u\r", proto);
420 snprintf(bcs->commands[AT_ISO ], length[AT_ISO ],
421 "^SISO=%u\r", (unsigned)bcs->channel + 1);
422
423 return 0;
424}
425
426int gigaset_isdn_setup_accept(struct at_state_t *at_state)
427{
428 unsigned proto;
429 size_t length[AT_NUM];
430 int i;
431 struct bc_state *bcs = at_state->bcs;
432
433 switch (bcs->proto2) {
434 case ISDN_PROTO_L2_HDLC:
435 proto = 1; /* 0: Bitsynchron, 1: HDLC, 2: voice */
436 break;
437 case ISDN_PROTO_L2_TRANS:
438 proto = 2; /* 0: Bitsynchron, 1: HDLC, 2: voice */
439 break;
440 default:
441 dev_err(at_state->cs->dev, "%s: invalid protocol: %u\n",
442 __func__, bcs->proto2);
443 return -EINVAL;
444 }
445
446 length[AT_DIAL ] = 0;
447 length[AT_MSN ] = 0;
448 length[AT_BC ] = 0;
449 length[AT_PROTO] = 6 + 1 + 1 + 1; /* proto: 1 character */
450 length[AT_ISO ] = 6 + 1 + 1 + 1; /* channel: 1 character */
451 length[AT_TYPE ] = 0;
452 length[AT_HLC ] = 0;
453
454 for (i = 0; i < AT_NUM; ++i) {
455 kfree(bcs->commands[i]);
456 bcs->commands[i] = NULL;
457 if (length[i] &&
458 !(bcs->commands[i] = kmalloc(length[i], GFP_ATOMIC))) {
459 dev_err(at_state->cs->dev, "out of memory\n");
460 return -ENOMEM;
461 }
462 }
463
464 snprintf(bcs->commands[AT_PROTO], length[AT_PROTO],
465 "^SBPR=%u\r", proto);
466 snprintf(bcs->commands[AT_ISO ], length[AT_ISO ],
467 "^SISO=%u\r", (unsigned) bcs->channel + 1);
468
469 return 0;
470} 464}
471 465
472/** 466/**
@@ -482,13 +476,14 @@ int gigaset_isdn_icall(struct at_state_t *at_state)
482{ 476{
483 struct cardstate *cs = at_state->cs; 477 struct cardstate *cs = at_state->cs;
484 struct bc_state *bcs = at_state->bcs; 478 struct bc_state *bcs = at_state->bcs;
479 isdn_if *iif = cs->iif;
485 isdn_ctrl response; 480 isdn_ctrl response;
486 int retval; 481 int retval;
487 482
488 /* fill ICALL structure */ 483 /* fill ICALL structure */
489 response.parm.setup.si1 = 0; /* default: unknown */ 484 response.parm.setup.si1 = 0; /* default: unknown */
490 response.parm.setup.si2 = 0; 485 response.parm.setup.si2 = 0;
491 response.parm.setup.screen = 0; //FIXME how to set these? 486 response.parm.setup.screen = 0;
492 response.parm.setup.plan = 0; 487 response.parm.setup.plan = 0;
493 if (!at_state->str_var[STR_ZBC]) { 488 if (!at_state->str_var[STR_ZBC]) {
494 /* no BC (internal call): assume speech, A-law */ 489 /* no BC (internal call): assume speech, A-law */
@@ -509,29 +504,27 @@ int gigaset_isdn_icall(struct at_state_t *at_state)
509 return ICALL_IGNORE; 504 return ICALL_IGNORE;
510 } 505 }
511 if (at_state->str_var[STR_NMBR]) { 506 if (at_state->str_var[STR_NMBR]) {
512 strncpy(response.parm.setup.phone, at_state->str_var[STR_NMBR], 507 strlcpy(response.parm.setup.phone, at_state->str_var[STR_NMBR],
513 sizeof response.parm.setup.phone - 1); 508 sizeof response.parm.setup.phone);
514 response.parm.setup.phone[sizeof response.parm.setup.phone - 1] = 0;
515 } else 509 } else
516 response.parm.setup.phone[0] = 0; 510 response.parm.setup.phone[0] = 0;
517 if (at_state->str_var[STR_ZCPN]) { 511 if (at_state->str_var[STR_ZCPN]) {
518 strncpy(response.parm.setup.eazmsn, at_state->str_var[STR_ZCPN], 512 strlcpy(response.parm.setup.eazmsn, at_state->str_var[STR_ZCPN],
519 sizeof response.parm.setup.eazmsn - 1); 513 sizeof response.parm.setup.eazmsn);
520 response.parm.setup.eazmsn[sizeof response.parm.setup.eazmsn - 1] = 0;
521 } else 514 } else
522 response.parm.setup.eazmsn[0] = 0; 515 response.parm.setup.eazmsn[0] = 0;
523 516
524 if (!bcs) { 517 if (!bcs) {
525 dev_notice(cs->dev, "no channel for incoming call\n"); 518 dev_notice(cs->dev, "no channel for incoming call\n");
526 response.command = ISDN_STAT_ICALLW; 519 response.command = ISDN_STAT_ICALLW;
527 response.arg = 0; //FIXME 520 response.arg = 0;
528 } else { 521 } else {
529 gig_dbg(DEBUG_CMD, "Sending ICALL"); 522 gig_dbg(DEBUG_CMD, "Sending ICALL");
530 response.command = ISDN_STAT_ICALL; 523 response.command = ISDN_STAT_ICALL;
531 response.arg = bcs->channel; //FIXME 524 response.arg = bcs->channel;
532 } 525 }
533 response.driver = cs->myid; 526 response.driver = cs->myid;
534 retval = cs->iif.statcallb(&response); 527 retval = iif->statcallb(&response);
535 gig_dbg(DEBUG_CMD, "Response: %d", retval); 528 gig_dbg(DEBUG_CMD, "Response: %d", retval);
536 switch (retval) { 529 switch (retval) {
537 case 0: /* no takers */ 530 case 0: /* no takers */
@@ -560,16 +553,109 @@ int gigaset_isdn_icall(struct at_state_t *at_state)
560 } 553 }
561} 554}
562 555
563/* Set Callback function pointer */ 556/**
564int gigaset_register_to_LL(struct cardstate *cs, const char *isdnid) 557 * gigaset_isdn_connD() - signal D channel connect
558 * @bcs: B channel descriptor structure.
559 *
560 * Called by main module to notify the LL that the D channel connection has
561 * been established.
562 */
563void gigaset_isdn_connD(struct bc_state *bcs)
565{ 564{
566 isdn_if *iif = &cs->iif; 565 gig_dbg(DEBUG_CMD, "sending DCONN");
566 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DCONN);
567}
567 568
568 gig_dbg(DEBUG_ANY, "Register driver capabilities to LL"); 569/**
570 * gigaset_isdn_hupD() - signal D channel hangup
571 * @bcs: B channel descriptor structure.
572 *
573 * Called by main module to notify the LL that the D channel connection has
574 * been shut down.
575 */
576void gigaset_isdn_hupD(struct bc_state *bcs)
577{
578 gig_dbg(DEBUG_CMD, "sending DHUP");
579 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_DHUP);
580}
581
582/**
583 * gigaset_isdn_connB() - signal B channel connect
584 * @bcs: B channel descriptor structure.
585 *
586 * Called by main module to notify the LL that the B channel connection has
587 * been established.
588 */
589void gigaset_isdn_connB(struct bc_state *bcs)
590{
591 gig_dbg(DEBUG_CMD, "sending BCONN");
592 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BCONN);
593}
594
595/**
596 * gigaset_isdn_hupB() - signal B channel hangup
597 * @bcs: B channel descriptor structure.
598 *
599 * Called by main module to notify the LL that the B channel connection has
600 * been shut down.
601 */
602void gigaset_isdn_hupB(struct bc_state *bcs)
603{
604 gig_dbg(DEBUG_CMD, "sending BHUP");
605 gigaset_i4l_channel_cmd(bcs, ISDN_STAT_BHUP);
606}
607
608/**
609 * gigaset_isdn_start() - signal device availability
610 * @cs: device descriptor structure.
611 *
612 * Called by main module to notify the LL that the device is available for
613 * use.
614 */
615void gigaset_isdn_start(struct cardstate *cs)
616{
617 gig_dbg(DEBUG_CMD, "sending RUN");
618 gigaset_i4l_cmd(cs, ISDN_STAT_RUN);
619}
620
621/**
622 * gigaset_isdn_stop() - signal device unavailability
623 * @cs: device descriptor structure.
624 *
625 * Called by main module to notify the LL that the device is no longer
626 * available for use.
627 */
628void gigaset_isdn_stop(struct cardstate *cs)
629{
630 gig_dbg(DEBUG_CMD, "sending STOP");
631 gigaset_i4l_cmd(cs, ISDN_STAT_STOP);
632}
633
634/**
635 * gigaset_isdn_register() - register to LL
636 * @cs: device descriptor structure.
637 * @isdnid: device name.
638 *
639 * Called by main module to register the device with the LL.
640 *
641 * Return value: 1 for success, 0 for failure
642 */
643int gigaset_isdn_register(struct cardstate *cs, const char *isdnid)
644{
645 isdn_if *iif;
646
647 pr_info("ISDN4Linux interface\n");
648
649 iif = kmalloc(sizeof *iif, GFP_KERNEL);
650 if (!iif) {
651 pr_err("out of memory\n");
652 return 0;
653 }
569 654
570 if (snprintf(iif->id, sizeof iif->id, "%s_%u", isdnid, cs->minor_index) 655 if (snprintf(iif->id, sizeof iif->id, "%s_%u", isdnid, cs->minor_index)
571 >= sizeof iif->id) { 656 >= sizeof iif->id) {
572 pr_err("ID too long: %s\n", isdnid); 657 pr_err("ID too long: %s\n", isdnid);
658 kfree(iif);
573 return 0; 659 return 0;
574 } 660 }
575 661
@@ -593,9 +679,26 @@ int gigaset_register_to_LL(struct cardstate *cs, const char *isdnid)
593 679
594 if (!register_isdn(iif)) { 680 if (!register_isdn(iif)) {
595 pr_err("register_isdn failed\n"); 681 pr_err("register_isdn failed\n");
682 kfree(iif);
596 return 0; 683 return 0;
597 } 684 }
598 685
686 cs->iif = iif;
599 cs->myid = iif->channels; /* Set my device id */ 687 cs->myid = iif->channels; /* Set my device id */
688 cs->hw_hdr_len = HW_HDR_LEN;
600 return 1; 689 return 1;
601} 690}
691
692/**
693 * gigaset_isdn_unregister() - unregister from LL
694 * @cs: device descriptor structure.
695 *
696 * Called by main module to unregister the device from the LL.
697 */
698void gigaset_isdn_unregister(struct cardstate *cs)
699{
700 gig_dbg(DEBUG_CMD, "sending UNLOAD");
701 gigaset_i4l_cmd(cs, ISDN_STAT_UNLOAD);
702 kfree(cs->iif);
703 cs->iif = NULL;
704}
diff --git a/drivers/isdn/gigaset/interface.c b/drivers/isdn/gigaset/interface.c
index 6a8e1384e7bd..577809c03aed 100644
--- a/drivers/isdn/gigaset/interface.c
+++ b/drivers/isdn/gigaset/interface.c
@@ -162,7 +162,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
162 return -ENODEV; 162 return -ENODEV;
163 163
164 if (mutex_lock_interruptible(&cs->mutex)) 164 if (mutex_lock_interruptible(&cs->mutex))
165 return -ERESTARTSYS; // FIXME -EINTR? 165 return -ERESTARTSYS;
166 tty->driver_data = cs; 166 tty->driver_data = cs;
167 167
168 ++cs->open_count; 168 ++cs->open_count;
@@ -171,7 +171,7 @@ static int if_open(struct tty_struct *tty, struct file *filp)
171 spin_lock_irqsave(&cs->lock, flags); 171 spin_lock_irqsave(&cs->lock, flags);
172 cs->tty = tty; 172 cs->tty = tty;
173 spin_unlock_irqrestore(&cs->lock, flags); 173 spin_unlock_irqrestore(&cs->lock, flags);
174 tty->low_latency = 1; //FIXME test 174 tty->low_latency = 1;
175 } 175 }
176 176
177 mutex_unlock(&cs->mutex); 177 mutex_unlock(&cs->mutex);
@@ -228,7 +228,7 @@ static int if_ioctl(struct tty_struct *tty, struct file *file,
228 gig_dbg(DEBUG_IF, "%u: %s(0x%x)", cs->minor_index, __func__, cmd); 228 gig_dbg(DEBUG_IF, "%u: %s(0x%x)", cs->minor_index, __func__, cmd);
229 229
230 if (mutex_lock_interruptible(&cs->mutex)) 230 if (mutex_lock_interruptible(&cs->mutex))
231 return -ERESTARTSYS; // FIXME -EINTR? 231 return -ERESTARTSYS;
232 232
233 if (!cs->connected) { 233 if (!cs->connected) {
234 gig_dbg(DEBUG_IF, "not connected"); 234 gig_dbg(DEBUG_IF, "not connected");
@@ -299,9 +299,8 @@ static int if_tiocmget(struct tty_struct *tty, struct file *file)
299 gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__); 299 gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
300 300
301 if (mutex_lock_interruptible(&cs->mutex)) 301 if (mutex_lock_interruptible(&cs->mutex))
302 return -ERESTARTSYS; // FIXME -EINTR? 302 return -ERESTARTSYS;
303 303
304 // FIXME read from device?
305 retval = cs->control_state & (TIOCM_RTS|TIOCM_DTR); 304 retval = cs->control_state & (TIOCM_RTS|TIOCM_DTR);
306 305
307 mutex_unlock(&cs->mutex); 306 mutex_unlock(&cs->mutex);
@@ -326,7 +325,7 @@ static int if_tiocmset(struct tty_struct *tty, struct file *file,
326 cs->minor_index, __func__, set, clear); 325 cs->minor_index, __func__, set, clear);
327 326
328 if (mutex_lock_interruptible(&cs->mutex)) 327 if (mutex_lock_interruptible(&cs->mutex))
329 return -ERESTARTSYS; // FIXME -EINTR? 328 return -ERESTARTSYS;
330 329
331 if (!cs->connected) { 330 if (!cs->connected) {
332 gig_dbg(DEBUG_IF, "not connected"); 331 gig_dbg(DEBUG_IF, "not connected");
@@ -356,7 +355,7 @@ static int if_write(struct tty_struct *tty, const unsigned char *buf, int count)
356 gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__); 355 gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
357 356
358 if (mutex_lock_interruptible(&cs->mutex)) 357 if (mutex_lock_interruptible(&cs->mutex))
359 return -ERESTARTSYS; // FIXME -EINTR? 358 return -ERESTARTSYS;
360 359
361 if (!cs->connected) { 360 if (!cs->connected) {
362 gig_dbg(DEBUG_IF, "not connected"); 361 gig_dbg(DEBUG_IF, "not connected");
@@ -390,7 +389,7 @@ static int if_write_room(struct tty_struct *tty)
390 gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__); 389 gig_dbg(DEBUG_IF, "%u: %s()", cs->minor_index, __func__);
391 390
392 if (mutex_lock_interruptible(&cs->mutex)) 391 if (mutex_lock_interruptible(&cs->mutex))
393 return -ERESTARTSYS; // FIXME -EINTR? 392 return -ERESTARTSYS;
394 393
395 if (!cs->connected) { 394 if (!cs->connected) {
396 gig_dbg(DEBUG_IF, "not connected"); 395 gig_dbg(DEBUG_IF, "not connected");
@@ -455,9 +454,8 @@ static void if_throttle(struct tty_struct *tty)
455 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */ 454 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
456 else if (!cs->open_count) 455 else if (!cs->open_count)
457 dev_warn(cs->dev, "%s: device not opened\n", __func__); 456 dev_warn(cs->dev, "%s: device not opened\n", __func__);
458 else { 457 else
459 //FIXME 458 gig_dbg(DEBUG_ANY, "%s: not implemented\n", __func__);
460 }
461 459
462 mutex_unlock(&cs->mutex); 460 mutex_unlock(&cs->mutex);
463} 461}
@@ -480,9 +478,8 @@ static void if_unthrottle(struct tty_struct *tty)
480 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */ 478 gig_dbg(DEBUG_IF, "not connected"); /* nothing to do */
481 else if (!cs->open_count) 479 else if (!cs->open_count)
482 dev_warn(cs->dev, "%s: device not opened\n", __func__); 480 dev_warn(cs->dev, "%s: device not opened\n", __func__);
483 else { 481 else
484 //FIXME 482 gig_dbg(DEBUG_ANY, "%s: not implemented\n", __func__);
485 }
486 483
487 mutex_unlock(&cs->mutex); 484 mutex_unlock(&cs->mutex);
488} 485}
@@ -515,10 +512,9 @@ static void if_set_termios(struct tty_struct *tty, struct ktermios *old)
515 goto out; 512 goto out;
516 } 513 }
517 514
518 // stolen from mct_u232.c
519 iflag = tty->termios->c_iflag; 515 iflag = tty->termios->c_iflag;
520 cflag = tty->termios->c_cflag; 516 cflag = tty->termios->c_cflag;
521 old_cflag = old ? old->c_cflag : cflag; //FIXME? 517 old_cflag = old ? old->c_cflag : cflag;
522 gig_dbg(DEBUG_IF, "%u: iflag %x cflag %x old %x", 518 gig_dbg(DEBUG_IF, "%u: iflag %x cflag %x old %x",
523 cs->minor_index, iflag, cflag, old_cflag); 519 cs->minor_index, iflag, cflag, old_cflag);
524 520
@@ -632,7 +628,8 @@ void gigaset_if_receive(struct cardstate *cs,
632 struct tty_struct *tty; 628 struct tty_struct *tty;
633 629
634 spin_lock_irqsave(&cs->lock, flags); 630 spin_lock_irqsave(&cs->lock, flags);
635 if ((tty = cs->tty) == NULL) 631 tty = cs->tty;
632 if (tty == NULL)
636 gig_dbg(DEBUG_ANY, "receive on closed device"); 633 gig_dbg(DEBUG_ANY, "receive on closed device");
637 else { 634 else {
638 tty_buffer_request_room(tty, len); 635 tty_buffer_request_room(tty, len);
@@ -659,9 +656,9 @@ void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname,
659 656
660 drv->have_tty = 0; 657 drv->have_tty = 0;
661 658
662 if ((drv->tty = alloc_tty_driver(minors)) == NULL) 659 drv->tty = tty = alloc_tty_driver(minors);
660 if (tty == NULL)
663 goto enomem; 661 goto enomem;
664 tty = drv->tty;
665 662
666 tty->magic = TTY_DRIVER_MAGIC, 663 tty->magic = TTY_DRIVER_MAGIC,
667 tty->major = GIG_MAJOR, 664 tty->major = GIG_MAJOR,
@@ -676,8 +673,8 @@ void gigaset_if_initdriver(struct gigaset_driver *drv, const char *procname,
676 673
677 tty->owner = THIS_MODULE; 674 tty->owner = THIS_MODULE;
678 675
679 tty->init_termios = tty_std_termios; //FIXME 676 tty->init_termios = tty_std_termios;
680 tty->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL; //FIXME 677 tty->init_termios.c_cflag = B9600 | CS8 | CREAD | HUPCL | CLOCAL;
681 tty_set_operations(tty, &if_ops); 678 tty_set_operations(tty, &if_ops);
682 679
683 ret = tty_register_driver(tty); 680 ret = tty_register_driver(tty);
diff --git a/drivers/isdn/gigaset/isocdata.c b/drivers/isdn/gigaset/isocdata.c
index 9f3ef7b4248c..85394a6ebae8 100644
--- a/drivers/isdn/gigaset/isocdata.c
+++ b/drivers/isdn/gigaset/isocdata.c
@@ -41,7 +41,8 @@ static inline int isowbuf_freebytes(struct isowbuf_t *iwb)
41 41
42 read = iwb->read; 42 read = iwb->read;
43 write = iwb->write; 43 write = iwb->write;
44 if ((freebytes = read - write) > 0) { 44 freebytes = read - write;
45 if (freebytes > 0) {
45 /* no wraparound: need padding space within regular area */ 46 /* no wraparound: need padding space within regular area */
46 return freebytes - BAS_OUTBUFPAD; 47 return freebytes - BAS_OUTBUFPAD;
47 } else if (read < BAS_OUTBUFPAD) { 48 } else if (read < BAS_OUTBUFPAD) {
@@ -53,29 +54,6 @@ static inline int isowbuf_freebytes(struct isowbuf_t *iwb)
53 } 54 }
54} 55}
55 56
56/* compare two offsets within the buffer
57 * The buffer is seen as circular, with the read position as start
58 * returns -1/0/1 if position a </=/> position b without crossing 'read'
59 */
60static inline int isowbuf_poscmp(struct isowbuf_t *iwb, int a, int b)
61{
62 int read;
63 if (a == b)
64 return 0;
65 read = iwb->read;
66 if (a < b) {
67 if (a < read && read <= b)
68 return +1;
69 else
70 return -1;
71 } else {
72 if (b < read && read <= a)
73 return -1;
74 else
75 return +1;
76 }
77}
78
79/* start writing 57/* start writing
80 * acquire the write semaphore 58 * acquire the write semaphore
81 * return true if acquired, false if busy 59 * return true if acquired, false if busy
@@ -271,7 +249,7 @@ static inline void dump_bytes(enum debuglevel level, const char *tag,
271 * bit 14..13 = number of bits added by stuffing 249 * bit 14..13 = number of bits added by stuffing
272 */ 250 */
273static const u16 stufftab[5 * 256] = { 251static const u16 stufftab[5 * 256] = {
274// previous 1s = 0: 252/* previous 1s = 0: */
275 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f, 253 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x000f,
276 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x201f, 254 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x201f,
277 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f, 255 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x002f,
@@ -289,7 +267,7 @@ static const u16 stufftab[5 * 256] = {
289 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x0cef, 267 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x0cef,
290 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x2ddf, 268 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x2ddf,
291 269
292// previous 1s = 1: 270/* previous 1s = 1: */
293 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x200f, 271 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x0007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x200f,
294 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x202f, 272 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x0017, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x202f,
295 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x204f, 273 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x0027, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x204f,
@@ -307,7 +285,7 @@ static const u16 stufftab[5 * 256] = {
307 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dcf, 285 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x0ce7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dcf,
308 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x31ef, 286 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x10f7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x31ef,
309 287
310// previous 1s = 2: 288/* previous 1s = 2: */
311 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x2007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x2017, 289 0x0000, 0x0001, 0x0002, 0x0003, 0x0004, 0x0005, 0x0006, 0x2007, 0x0008, 0x0009, 0x000a, 0x000b, 0x000c, 0x000d, 0x000e, 0x2017,
312 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x2027, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x2037, 290 0x0010, 0x0011, 0x0012, 0x0013, 0x0014, 0x0015, 0x0016, 0x2027, 0x0018, 0x0019, 0x001a, 0x001b, 0x001c, 0x001d, 0x001e, 0x2037,
313 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x2047, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x2057, 291 0x0020, 0x0021, 0x0022, 0x0023, 0x0024, 0x0025, 0x0026, 0x2047, 0x0028, 0x0029, 0x002a, 0x002b, 0x002c, 0x002d, 0x002e, 0x2057,
@@ -325,7 +303,7 @@ static const u16 stufftab[5 * 256] = {
325 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dc7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dd7, 303 0x0ce0, 0x0ce1, 0x0ce2, 0x0ce3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dc7, 0x0ce8, 0x0ce9, 0x0cea, 0x0ceb, 0x0cec, 0x0ced, 0x0cee, 0x2dd7,
326 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x31e7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x41f7, 304 0x10f0, 0x10f1, 0x10f2, 0x10f3, 0x10f4, 0x10f5, 0x10f6, 0x31e7, 0x20f8, 0x20f9, 0x20fa, 0x20fb, 0x257c, 0x257d, 0x29be, 0x41f7,
327 305
328// previous 1s = 3: 306/* previous 1s = 3: */
329 0x0000, 0x0001, 0x0002, 0x2003, 0x0004, 0x0005, 0x0006, 0x200b, 0x0008, 0x0009, 0x000a, 0x2013, 0x000c, 0x000d, 0x000e, 0x201b, 307 0x0000, 0x0001, 0x0002, 0x2003, 0x0004, 0x0005, 0x0006, 0x200b, 0x0008, 0x0009, 0x000a, 0x2013, 0x000c, 0x000d, 0x000e, 0x201b,
330 0x0010, 0x0011, 0x0012, 0x2023, 0x0014, 0x0015, 0x0016, 0x202b, 0x0018, 0x0019, 0x001a, 0x2033, 0x001c, 0x001d, 0x001e, 0x203b, 308 0x0010, 0x0011, 0x0012, 0x2023, 0x0014, 0x0015, 0x0016, 0x202b, 0x0018, 0x0019, 0x001a, 0x2033, 0x001c, 0x001d, 0x001e, 0x203b,
331 0x0020, 0x0021, 0x0022, 0x2043, 0x0024, 0x0025, 0x0026, 0x204b, 0x0028, 0x0029, 0x002a, 0x2053, 0x002c, 0x002d, 0x002e, 0x205b, 309 0x0020, 0x0021, 0x0022, 0x2043, 0x0024, 0x0025, 0x0026, 0x204b, 0x0028, 0x0029, 0x002a, 0x2053, 0x002c, 0x002d, 0x002e, 0x205b,
@@ -343,7 +321,7 @@ static const u16 stufftab[5 * 256] = {
343 0x0ce0, 0x0ce1, 0x0ce2, 0x2dc3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dcb, 0x0ce8, 0x0ce9, 0x0cea, 0x2dd3, 0x0cec, 0x0ced, 0x0cee, 0x2ddb, 321 0x0ce0, 0x0ce1, 0x0ce2, 0x2dc3, 0x0ce4, 0x0ce5, 0x0ce6, 0x2dcb, 0x0ce8, 0x0ce9, 0x0cea, 0x2dd3, 0x0cec, 0x0ced, 0x0cee, 0x2ddb,
344 0x10f0, 0x10f1, 0x10f2, 0x31e3, 0x10f4, 0x10f5, 0x10f6, 0x31eb, 0x20f8, 0x20f9, 0x20fa, 0x41f3, 0x257c, 0x257d, 0x29be, 0x46fb, 322 0x10f0, 0x10f1, 0x10f2, 0x31e3, 0x10f4, 0x10f5, 0x10f6, 0x31eb, 0x20f8, 0x20f9, 0x20fa, 0x41f3, 0x257c, 0x257d, 0x29be, 0x46fb,
345 323
346// previous 1s = 4: 324/* previous 1s = 4: */
347 0x0000, 0x2001, 0x0002, 0x2005, 0x0004, 0x2009, 0x0006, 0x200d, 0x0008, 0x2011, 0x000a, 0x2015, 0x000c, 0x2019, 0x000e, 0x201d, 325 0x0000, 0x2001, 0x0002, 0x2005, 0x0004, 0x2009, 0x0006, 0x200d, 0x0008, 0x2011, 0x000a, 0x2015, 0x000c, 0x2019, 0x000e, 0x201d,
348 0x0010, 0x2021, 0x0012, 0x2025, 0x0014, 0x2029, 0x0016, 0x202d, 0x0018, 0x2031, 0x001a, 0x2035, 0x001c, 0x2039, 0x001e, 0x203d, 326 0x0010, 0x2021, 0x0012, 0x2025, 0x0014, 0x2029, 0x0016, 0x202d, 0x0018, 0x2031, 0x001a, 0x2035, 0x001c, 0x2039, 0x001e, 0x203d,
349 0x0020, 0x2041, 0x0022, 0x2045, 0x0024, 0x2049, 0x0026, 0x204d, 0x0028, 0x2051, 0x002a, 0x2055, 0x002c, 0x2059, 0x002e, 0x205d, 327 0x0020, 0x2041, 0x0022, 0x2045, 0x0024, 0x2049, 0x0026, 0x204d, 0x0028, 0x2051, 0x002a, 0x2055, 0x002c, 0x2059, 0x002e, 0x205d,
@@ -367,7 +345,8 @@ static const u16 stufftab[5 * 256] = {
367 * parameters: 345 * parameters:
368 * cin input byte 346 * cin input byte
369 * ones number of trailing '1' bits in result before this step 347 * ones number of trailing '1' bits in result before this step
370 * iwb pointer to output buffer structure (write semaphore must be held) 348 * iwb pointer to output buffer structure
349 * (write semaphore must be held)
371 * return value: 350 * return value:
372 * number of trailing '1' bits in result after this step 351 * number of trailing '1' bits in result after this step
373 */ 352 */
@@ -408,7 +387,8 @@ static inline int hdlc_bitstuff_byte(struct isowbuf_t *iwb, unsigned char cin,
408 * parameters: 387 * parameters:
409 * in input buffer 388 * in input buffer
410 * count number of bytes in input buffer 389 * count number of bytes in input buffer
411 * iwb pointer to output buffer structure (write semaphore must be held) 390 * iwb pointer to output buffer structure
391 * (write semaphore must be held)
412 * return value: 392 * return value:
413 * position of end of packet in output buffer on success, 393 * position of end of packet in output buffer on success,
414 * -EAGAIN if write semaphore busy or buffer full 394 * -EAGAIN if write semaphore busy or buffer full
@@ -440,7 +420,8 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb,
440 fcs = crc_ccitt_byte(fcs, c); 420 fcs = crc_ccitt_byte(fcs, c);
441 } 421 }
442 422
443 /* bitstuff and append FCS (complemented, least significant byte first) */ 423 /* bitstuff and append FCS
424 * (complemented, least significant byte first) */
444 fcs ^= 0xffff; 425 fcs ^= 0xffff;
445 ones = hdlc_bitstuff_byte(iwb, fcs & 0x00ff, ones); 426 ones = hdlc_bitstuff_byte(iwb, fcs & 0x00ff, ones);
446 ones = hdlc_bitstuff_byte(iwb, (fcs >> 8) & 0x00ff, ones); 427 ones = hdlc_bitstuff_byte(iwb, (fcs >> 8) & 0x00ff, ones);
@@ -459,7 +440,8 @@ static inline int hdlc_buildframe(struct isowbuf_t *iwb,
459 * parameters: 440 * parameters:
460 * in input buffer 441 * in input buffer
461 * count number of bytes in input buffer 442 * count number of bytes in input buffer
462 * iwb pointer to output buffer structure (write semaphore must be held) 443 * iwb pointer to output buffer structure
444 * (write semaphore must be held)
463 * return value: 445 * return value:
464 * position of end of packet in output buffer on success, 446 * position of end of packet in output buffer on success,
465 * -EAGAIN if write semaphore busy or buffer full 447 * -EAGAIN if write semaphore busy or buffer full
@@ -500,7 +482,7 @@ int gigaset_isoc_buildframe(struct bc_state *bcs, unsigned char *in, int len)
500 int result; 482 int result;
501 483
502 switch (bcs->proto2) { 484 switch (bcs->proto2) {
503 case ISDN_PROTO_L2_HDLC: 485 case L2_HDLC:
504 result = hdlc_buildframe(bcs->hw.bas->isooutbuf, in, len); 486 result = hdlc_buildframe(bcs->hw.bas->isooutbuf, in, len);
505 gig_dbg(DEBUG_ISO, "%s: %d bytes HDLC -> %d", 487 gig_dbg(DEBUG_ISO, "%s: %d bytes HDLC -> %d",
506 __func__, len, result); 488 __func__, len, result);
@@ -542,8 +524,9 @@ static inline void hdlc_flush(struct bc_state *bcs)
542 if (likely(bcs->skb != NULL)) 524 if (likely(bcs->skb != NULL))
543 skb_trim(bcs->skb, 0); 525 skb_trim(bcs->skb, 0);
544 else if (!bcs->ignore) { 526 else if (!bcs->ignore) {
545 if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL) 527 bcs->skb = dev_alloc_skb(SBUFSIZE + bcs->cs->hw_hdr_len);
546 skb_reserve(bcs->skb, HW_HDR_LEN); 528 if (bcs->skb)
529 skb_reserve(bcs->skb, bcs->cs->hw_hdr_len);
547 else 530 else
548 dev_err(bcs->cs->dev, "could not allocate skb\n"); 531 dev_err(bcs->cs->dev, "could not allocate skb\n");
549 } 532 }
@@ -557,43 +540,46 @@ static inline void hdlc_flush(struct bc_state *bcs)
557 */ 540 */
558static inline void hdlc_done(struct bc_state *bcs) 541static inline void hdlc_done(struct bc_state *bcs)
559{ 542{
543 struct cardstate *cs = bcs->cs;
560 struct sk_buff *procskb; 544 struct sk_buff *procskb;
545 unsigned int len;
561 546
562 if (unlikely(bcs->ignore)) { 547 if (unlikely(bcs->ignore)) {
563 bcs->ignore--; 548 bcs->ignore--;
564 hdlc_flush(bcs); 549 hdlc_flush(bcs);
565 return; 550 return;
566 } 551 }
567 552 procskb = bcs->skb;
568 if ((procskb = bcs->skb) == NULL) { 553 if (procskb == NULL) {
569 /* previous error */ 554 /* previous error */
570 gig_dbg(DEBUG_ISO, "%s: skb=NULL", __func__); 555 gig_dbg(DEBUG_ISO, "%s: skb=NULL", __func__);
571 gigaset_rcv_error(NULL, bcs->cs, bcs); 556 gigaset_isdn_rcv_err(bcs);
572 } else if (procskb->len < 2) { 557 } else if (procskb->len < 2) {
573 dev_notice(bcs->cs->dev, "received short frame (%d octets)\n", 558 dev_notice(cs->dev, "received short frame (%d octets)\n",
574 procskb->len); 559 procskb->len);
575 bcs->hw.bas->runts++; 560 bcs->hw.bas->runts++;
576 gigaset_rcv_error(procskb, bcs->cs, bcs); 561 dev_kfree_skb_any(procskb);
562 gigaset_isdn_rcv_err(bcs);
577 } else if (bcs->fcs != PPP_GOODFCS) { 563 } else if (bcs->fcs != PPP_GOODFCS) {
578 dev_notice(bcs->cs->dev, "frame check error (0x%04x)\n", 564 dev_notice(cs->dev, "frame check error (0x%04x)\n", bcs->fcs);
579 bcs->fcs);
580 bcs->hw.bas->fcserrs++; 565 bcs->hw.bas->fcserrs++;
581 gigaset_rcv_error(procskb, bcs->cs, bcs); 566 dev_kfree_skb_any(procskb);
567 gigaset_isdn_rcv_err(bcs);
582 } else { 568 } else {
583 procskb->len -= 2; /* subtract FCS */ 569 len = procskb->len;
584 procskb->tail -= 2; 570 __skb_trim(procskb, len -= 2); /* subtract FCS */
585 gig_dbg(DEBUG_ISO, "%s: good frame (%d octets)", 571 gig_dbg(DEBUG_ISO, "%s: good frame (%d octets)", __func__, len);
586 __func__, procskb->len);
587 dump_bytes(DEBUG_STREAM_DUMP, 572 dump_bytes(DEBUG_STREAM_DUMP,
588 "rcv data", procskb->data, procskb->len); 573 "rcv data", procskb->data, len);
589 bcs->hw.bas->goodbytes += procskb->len; 574 bcs->hw.bas->goodbytes += len;
590 gigaset_rcv_skb(procskb, bcs->cs, bcs); 575 gigaset_skb_rcvd(bcs, procskb);
591 } 576 }
592 577
593 if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL) 578 bcs->skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len);
594 skb_reserve(bcs->skb, HW_HDR_LEN); 579 if (bcs->skb)
580 skb_reserve(bcs->skb, cs->hw_hdr_len);
595 else 581 else
596 dev_err(bcs->cs->dev, "could not allocate skb\n"); 582 dev_err(cs->dev, "could not allocate skb\n");
597 bcs->fcs = PPP_INITFCS; 583 bcs->fcs = PPP_INITFCS;
598} 584}
599 585
@@ -610,12 +596,8 @@ static inline void hdlc_frag(struct bc_state *bcs, unsigned inbits)
610 596
611 dev_notice(bcs->cs->dev, "received partial byte (%d bits)\n", inbits); 597 dev_notice(bcs->cs->dev, "received partial byte (%d bits)\n", inbits);
612 bcs->hw.bas->alignerrs++; 598 bcs->hw.bas->alignerrs++;
613 gigaset_rcv_error(bcs->skb, bcs->cs, bcs); 599 gigaset_isdn_rcv_err(bcs);
614 600 __skb_trim(bcs->skb, 0);
615 if ((bcs->skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN)) != NULL)
616 skb_reserve(bcs->skb, HW_HDR_LEN);
617 else
618 dev_err(bcs->cs->dev, "could not allocate skb\n");
619 bcs->fcs = PPP_INITFCS; 601 bcs->fcs = PPP_INITFCS;
620} 602}
621 603
@@ -646,10 +628,10 @@ static const unsigned char bitcounts[256] = {
646}; 628};
647 629
648/* hdlc_unpack 630/* hdlc_unpack
649 * perform HDLC frame processing (bit unstuffing, flag detection, FCS calculation) 631 * perform HDLC frame processing (bit unstuffing, flag detection, FCS
650 * on a sequence of received data bytes (8 bits each, LSB first) 632 * calculation) on a sequence of received data bytes (8 bits each, LSB first)
651 * pass on successfully received, complete frames as SKBs via gigaset_rcv_skb 633 * pass on successfully received, complete frames as SKBs via gigaset_skb_rcvd
652 * notify of errors via gigaset_rcv_error 634 * notify of errors via gigaset_isdn_rcv_err
653 * tally frames, errors etc. in BC structure counters 635 * tally frames, errors etc. in BC structure counters
654 * parameters: 636 * parameters:
655 * src received data 637 * src received data
@@ -665,9 +647,12 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
665 647
666 /* load previous state: 648 /* load previous state:
667 * inputstate = set of flag bits: 649 * inputstate = set of flag bits:
668 * - INS_flag_hunt: no complete opening flag received since connection setup or last abort 650 * - INS_flag_hunt: no complete opening flag received since connection
669 * - INS_have_data: at least one complete data byte received since last flag 651 * setup or last abort
670 * seqlen = number of consecutive '1' bits in last 7 input stream bits (0..7) 652 * - INS_have_data: at least one complete data byte received since last
653 * flag
654 * seqlen = number of consecutive '1' bits in last 7 input stream bits
655 * (0..7)
671 * inbyte = accumulated partial data byte (if !INS_flag_hunt) 656 * inbyte = accumulated partial data byte (if !INS_flag_hunt)
672 * inbits = number of valid bits in inbyte, starting at LSB (0..6) 657 * inbits = number of valid bits in inbyte, starting at LSB (0..6)
673 */ 658 */
@@ -701,9 +686,11 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
701 inbyte = c >> (lead1 + 1); 686 inbyte = c >> (lead1 + 1);
702 inbits = 7 - lead1; 687 inbits = 7 - lead1;
703 if (trail1 >= 8) { 688 if (trail1 >= 8) {
704 /* interior stuffing: omitting the MSB handles most cases */ 689 /* interior stuffing:
690 * omitting the MSB handles most cases,
691 * correct the incorrectly handled
692 * cases individually */
705 inbits--; 693 inbits--;
706 /* correct the incorrectly handled cases individually */
707 switch (c) { 694 switch (c) {
708 case 0xbe: 695 case 0xbe:
709 inbyte = 0x3f; 696 inbyte = 0x3f;
@@ -729,13 +716,14 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
729 hdlc_flush(bcs); 716 hdlc_flush(bcs);
730 inputstate |= INS_flag_hunt; 717 inputstate |= INS_flag_hunt;
731 } else if (seqlen == 6) { 718 } else if (seqlen == 6) {
732 /* closing flag, including (6 - lead1) '1's and one '0' from inbits */ 719 /* closing flag, including (6 - lead1) '1's
720 * and one '0' from inbits */
733 if (inbits > 7 - lead1) { 721 if (inbits > 7 - lead1) {
734 hdlc_frag(bcs, inbits + lead1 - 7); 722 hdlc_frag(bcs, inbits + lead1 - 7);
735 inputstate &= ~INS_have_data; 723 inputstate &= ~INS_have_data;
736 } else { 724 } else {
737 if (inbits < 7 - lead1) 725 if (inbits < 7 - lead1)
738 ubc->stolen0s ++; 726 ubc->stolen0s++;
739 if (inputstate & INS_have_data) { 727 if (inputstate & INS_have_data) {
740 hdlc_done(bcs); 728 hdlc_done(bcs);
741 inputstate &= ~INS_have_data; 729 inputstate &= ~INS_have_data;
@@ -744,7 +732,7 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
744 732
745 if (c == PPP_FLAG) { 733 if (c == PPP_FLAG) {
746 /* complete flag, LSB overlaps preceding flag */ 734 /* complete flag, LSB overlaps preceding flag */
747 ubc->shared0s ++; 735 ubc->shared0s++;
748 inbits = 0; 736 inbits = 0;
749 inbyte = 0; 737 inbyte = 0;
750 } else if (trail1 != 7) { 738 } else if (trail1 != 7) {
@@ -752,9 +740,11 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
752 inbyte = c >> (lead1 + 1); 740 inbyte = c >> (lead1 + 1);
753 inbits = 7 - lead1; 741 inbits = 7 - lead1;
754 if (trail1 >= 8) { 742 if (trail1 >= 8) {
755 /* interior stuffing: omitting the MSB handles most cases */ 743 /* interior stuffing:
744 * omitting the MSB handles most cases,
745 * correct the incorrectly handled
746 * cases individually */
756 inbits--; 747 inbits--;
757 /* correct the incorrectly handled cases individually */
758 switch (c) { 748 switch (c) {
759 case 0xbe: 749 case 0xbe:
760 inbyte = 0x3f; 750 inbyte = 0x3f;
@@ -762,7 +752,8 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
762 } 752 }
763 } 753 }
764 } else { 754 } else {
765 /* abort sequence follows, skb already empty anyway */ 755 /* abort sequence follows,
756 * skb already empty anyway */
766 ubc->aborts++; 757 ubc->aborts++;
767 inputstate |= INS_flag_hunt; 758 inputstate |= INS_flag_hunt;
768 } 759 }
@@ -787,14 +778,17 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
787 } else { 778 } else {
788 /* stuffed data */ 779 /* stuffed data */
789 if (trail1 < 7) { /* => seqlen == 5 */ 780 if (trail1 < 7) { /* => seqlen == 5 */
790 /* stuff bit at position lead1, no interior stuffing */ 781 /* stuff bit at position lead1,
782 * no interior stuffing */
791 unsigned char mask = (1 << lead1) - 1; 783 unsigned char mask = (1 << lead1) - 1;
792 c = (c & mask) | ((c & ~mask) >> 1); 784 c = (c & mask) | ((c & ~mask) >> 1);
793 inbyte |= c << inbits; 785 inbyte |= c << inbits;
794 inbits += 7; 786 inbits += 7;
795 } else if (seqlen < 5) { /* trail1 >= 8 */ 787 } else if (seqlen < 5) { /* trail1 >= 8 */
796 /* interior stuffing: omitting the MSB handles most cases */ 788 /* interior stuffing:
797 /* correct the incorrectly handled cases individually */ 789 * omitting the MSB handles most cases,
790 * correct the incorrectly handled
791 * cases individually */
798 switch (c) { 792 switch (c) {
799 case 0xbe: 793 case 0xbe:
800 c = 0x7e; 794 c = 0x7e;
@@ -804,8 +798,9 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
804 inbits += 7; 798 inbits += 7;
805 } else { /* seqlen == 5 && trail1 >= 8 */ 799 } else { /* seqlen == 5 && trail1 >= 8 */
806 800
807 /* stuff bit at lead1 *and* interior stuffing */ 801 /* stuff bit at lead1 *and* interior
808 switch (c) { /* unstuff individually */ 802 * stuffing -- unstuff individually */
803 switch (c) {
809 case 0x7d: 804 case 0x7d:
810 c = 0x3f; 805 c = 0x3f;
811 break; 806 break;
@@ -841,7 +836,7 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
841} 836}
842 837
843/* trans_receive 838/* trans_receive
844 * pass on received USB frame transparently as SKB via gigaset_rcv_skb 839 * pass on received USB frame transparently as SKB via gigaset_skb_rcvd
845 * invert bytes 840 * invert bytes
846 * tally frames, errors etc. in BC structure counters 841 * tally frames, errors etc. in BC structure counters
847 * parameters: 842 * parameters:
@@ -852,6 +847,7 @@ static inline void hdlc_unpack(unsigned char *src, unsigned count,
852static inline void trans_receive(unsigned char *src, unsigned count, 847static inline void trans_receive(unsigned char *src, unsigned count,
853 struct bc_state *bcs) 848 struct bc_state *bcs)
854{ 849{
850 struct cardstate *cs = bcs->cs;
855 struct sk_buff *skb; 851 struct sk_buff *skb;
856 int dobytes; 852 int dobytes;
857 unsigned char *dst; 853 unsigned char *dst;
@@ -861,13 +857,14 @@ static inline void trans_receive(unsigned char *src, unsigned count,
861 hdlc_flush(bcs); 857 hdlc_flush(bcs);
862 return; 858 return;
863 } 859 }
864 if (unlikely((skb = bcs->skb) == NULL)) { 860 skb = bcs->skb;
865 bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN); 861 if (unlikely(skb == NULL)) {
862 bcs->skb = skb = dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len);
866 if (!skb) { 863 if (!skb) {
867 dev_err(bcs->cs->dev, "could not allocate skb\n"); 864 dev_err(cs->dev, "could not allocate skb\n");
868 return; 865 return;
869 } 866 }
870 skb_reserve(skb, HW_HDR_LEN); 867 skb_reserve(skb, cs->hw_hdr_len);
871 } 868 }
872 bcs->hw.bas->goodbytes += skb->len; 869 bcs->hw.bas->goodbytes += skb->len;
873 dobytes = TRANSBUFSIZE - skb->len; 870 dobytes = TRANSBUFSIZE - skb->len;
@@ -881,23 +878,24 @@ static inline void trans_receive(unsigned char *src, unsigned count,
881 if (dobytes == 0) { 878 if (dobytes == 0) {
882 dump_bytes(DEBUG_STREAM_DUMP, 879 dump_bytes(DEBUG_STREAM_DUMP,
883 "rcv data", skb->data, skb->len); 880 "rcv data", skb->data, skb->len);
884 gigaset_rcv_skb(skb, bcs->cs, bcs); 881 gigaset_skb_rcvd(bcs, skb);
885 bcs->skb = skb = dev_alloc_skb(SBUFSIZE + HW_HDR_LEN); 882 bcs->skb = skb =
883 dev_alloc_skb(SBUFSIZE + cs->hw_hdr_len);
886 if (!skb) { 884 if (!skb) {
887 dev_err(bcs->cs->dev, 885 dev_err(cs->dev, "could not allocate skb\n");
888 "could not allocate skb\n");
889 return; 886 return;
890 } 887 }
891 skb_reserve(bcs->skb, HW_HDR_LEN); 888 skb_reserve(skb, cs->hw_hdr_len);
892 dobytes = TRANSBUFSIZE; 889 dobytes = TRANSBUFSIZE;
893 } 890 }
894 } 891 }
895} 892}
896 893
897void gigaset_isoc_receive(unsigned char *src, unsigned count, struct bc_state *bcs) 894void gigaset_isoc_receive(unsigned char *src, unsigned count,
895 struct bc_state *bcs)
898{ 896{
899 switch (bcs->proto2) { 897 switch (bcs->proto2) {
900 case ISDN_PROTO_L2_HDLC: 898 case L2_HDLC:
901 hdlc_unpack(src, count, bcs); 899 hdlc_unpack(src, count, bcs);
902 break; 900 break;
903 default: /* assume transparent */ 901 default: /* assume transparent */
@@ -981,8 +979,10 @@ void gigaset_isoc_input(struct inbuf_t *inbuf)
981 * @bcs: B channel descriptor structure. 979 * @bcs: B channel descriptor structure.
982 * @skb: data to send. 980 * @skb: data to send.
983 * 981 *
984 * Called by i4l.c to queue an skb for sending, and start transmission if 982 * Called by LL to queue an skb for sending, and start transmission if
985 * necessary. 983 * necessary.
984 * Once the payload data has been transmitted completely, gigaset_skb_sent()
985 * will be called with the skb's link layer header preserved.
986 * 986 *
987 * Return value: 987 * Return value:
988 * number of bytes accepted for sending (skb->len) if ok, 988 * number of bytes accepted for sending (skb->len) if ok,
diff --git a/drivers/isdn/gigaset/proc.c b/drivers/isdn/gigaset/proc.c
index 9715aad9c3f0..758a00c1d2e2 100644
--- a/drivers/isdn/gigaset/proc.c
+++ b/drivers/isdn/gigaset/proc.c
@@ -39,7 +39,7 @@ static ssize_t set_cidmode(struct device *dev, struct device_attribute *attr,
39 return -EINVAL; 39 return -EINVAL;
40 40
41 if (mutex_lock_interruptible(&cs->mutex)) 41 if (mutex_lock_interruptible(&cs->mutex))
42 return -ERESTARTSYS; // FIXME -EINTR? 42 return -ERESTARTSYS;
43 43
44 cs->waiting = 1; 44 cs->waiting = 1;
45 if (!gigaset_add_event(cs, &cs->at_state, EV_PROC_CIDMODE, 45 if (!gigaset_add_event(cs, &cs->at_state, EV_PROC_CIDMODE,
diff --git a/drivers/isdn/gigaset/ser-gigaset.c b/drivers/isdn/gigaset/ser-gigaset.c
index 3071a52467ed..ac3409ea5d99 100644
--- a/drivers/isdn/gigaset/ser-gigaset.c
+++ b/drivers/isdn/gigaset/ser-gigaset.c
@@ -164,9 +164,15 @@ static void gigaset_modem_fill(unsigned long data)
164{ 164{
165 struct cardstate *cs = (struct cardstate *) data; 165 struct cardstate *cs = (struct cardstate *) data;
166 struct bc_state *bcs; 166 struct bc_state *bcs;
167 struct sk_buff *nextskb;
167 int sent = 0; 168 int sent = 0;
168 169
169 if (!cs || !(bcs = cs->bcs)) { 170 if (!cs) {
171 gig_dbg(DEBUG_OUTPUT, "%s: no cardstate", __func__);
172 return;
173 }
174 bcs = cs->bcs;
175 if (!bcs) {
170 gig_dbg(DEBUG_OUTPUT, "%s: no cardstate", __func__); 176 gig_dbg(DEBUG_OUTPUT, "%s: no cardstate", __func__);
171 return; 177 return;
172 } 178 }
@@ -179,9 +185,11 @@ static void gigaset_modem_fill(unsigned long data)
179 return; 185 return;
180 186
181 /* no command to send; get skb */ 187 /* no command to send; get skb */
182 if (!(bcs->tx_skb = skb_dequeue(&bcs->squeue))) 188 nextskb = skb_dequeue(&bcs->squeue);
189 if (!nextskb)
183 /* no skb either, nothing to do */ 190 /* no skb either, nothing to do */
184 return; 191 return;
192 bcs->tx_skb = nextskb;
185 193
186 gig_dbg(DEBUG_INTR, "Dequeued skb (Adr: %lx)", 194 gig_dbg(DEBUG_INTR, "Dequeued skb (Adr: %lx)",
187 (unsigned long) bcs->tx_skb); 195 (unsigned long) bcs->tx_skb);
@@ -236,19 +244,20 @@ static void flush_send_queue(struct cardstate *cs)
236 * number of bytes queued, or error code < 0 244 * number of bytes queued, or error code < 0
237 */ 245 */
238static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf, 246static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf,
239 int len, struct tasklet_struct *wake_tasklet) 247 int len, struct tasklet_struct *wake_tasklet)
240{ 248{
241 struct cmdbuf_t *cb; 249 struct cmdbuf_t *cb;
242 unsigned long flags; 250 unsigned long flags;
243 251
244 gigaset_dbg_buffer(cs->mstate != MS_LOCKED ? 252 gigaset_dbg_buffer(cs->mstate != MS_LOCKED ?
245 DEBUG_TRANSCMD : DEBUG_LOCKCMD, 253 DEBUG_TRANSCMD : DEBUG_LOCKCMD,
246 "CMD Transmit", len, buf); 254 "CMD Transmit", len, buf);
247 255
248 if (len <= 0) 256 if (len <= 0)
249 return 0; 257 return 0;
250 258
251 if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) { 259 cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC);
260 if (!cb) {
252 dev_err(cs->dev, "%s: out of memory!\n", __func__); 261 dev_err(cs->dev, "%s: out of memory!\n", __func__);
253 return -ENOMEM; 262 return -ENOMEM;
254 } 263 }
@@ -392,7 +401,6 @@ static void gigaset_device_release(struct device *dev)
392 struct platform_device *pdev = to_platform_device(dev); 401 struct platform_device *pdev = to_platform_device(dev);
393 402
394 /* adapted from platform_device_release() in drivers/base/platform.c */ 403 /* adapted from platform_device_release() in drivers/base/platform.c */
395 //FIXME is this actually necessary?
396 kfree(dev->platform_data); 404 kfree(dev->platform_data);
397 kfree(pdev->resource); 405 kfree(pdev->resource);
398} 406}
@@ -404,16 +412,20 @@ static void gigaset_device_release(struct device *dev)
404static int gigaset_initcshw(struct cardstate *cs) 412static int gigaset_initcshw(struct cardstate *cs)
405{ 413{
406 int rc; 414 int rc;
415 struct ser_cardstate *scs;
407 416
408 if (!(cs->hw.ser = kzalloc(sizeof(struct ser_cardstate), GFP_KERNEL))) { 417 scs = kzalloc(sizeof(struct ser_cardstate), GFP_KERNEL);
418 if (!scs) {
409 pr_err("out of memory\n"); 419 pr_err("out of memory\n");
410 return 0; 420 return 0;
411 } 421 }
422 cs->hw.ser = scs;
412 423
413 cs->hw.ser->dev.name = GIGASET_MODULENAME; 424 cs->hw.ser->dev.name = GIGASET_MODULENAME;
414 cs->hw.ser->dev.id = cs->minor_index; 425 cs->hw.ser->dev.id = cs->minor_index;
415 cs->hw.ser->dev.dev.release = gigaset_device_release; 426 cs->hw.ser->dev.dev.release = gigaset_device_release;
416 if ((rc = platform_device_register(&cs->hw.ser->dev)) != 0) { 427 rc = platform_device_register(&cs->hw.ser->dev);
428 if (rc != 0) {
417 pr_err("error %d registering platform device\n", rc); 429 pr_err("error %d registering platform device\n", rc);
418 kfree(cs->hw.ser); 430 kfree(cs->hw.ser);
419 cs->hw.ser = NULL; 431 cs->hw.ser = NULL;
@@ -422,7 +434,7 @@ static int gigaset_initcshw(struct cardstate *cs)
422 dev_set_drvdata(&cs->hw.ser->dev.dev, cs); 434 dev_set_drvdata(&cs->hw.ser->dev.dev, cs);
423 435
424 tasklet_init(&cs->write_tasklet, 436 tasklet_init(&cs->write_tasklet,
425 &gigaset_modem_fill, (unsigned long) cs); 437 &gigaset_modem_fill, (unsigned long) cs);
426 return 1; 438 return 1;
427} 439}
428 440
@@ -434,7 +446,8 @@ static int gigaset_initcshw(struct cardstate *cs)
434 * Called by "gigaset_start" and "gigaset_enterconfigmode" in common.c 446 * Called by "gigaset_start" and "gigaset_enterconfigmode" in common.c
435 * and by "if_lock" and "if_termios" in interface.c 447 * and by "if_lock" and "if_termios" in interface.c
436 */ 448 */
437static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state, unsigned new_state) 449static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
450 unsigned new_state)
438{ 451{
439 struct tty_struct *tty = cs->hw.ser->tty; 452 struct tty_struct *tty = cs->hw.ser->tty;
440 unsigned int set, clear; 453 unsigned int set, clear;
@@ -520,8 +533,8 @@ gigaset_tty_open(struct tty_struct *tty)
520 } 533 }
521 534
522 /* allocate memory for our device state and intialize it */ 535 /* allocate memory for our device state and intialize it */
523 if (!(cs = gigaset_initcs(driver, 1, 1, 0, cidmode, 536 cs = gigaset_initcs(driver, 1, 1, 0, cidmode, GIGASET_MODULENAME);
524 GIGASET_MODULENAME))) 537 if (!cs)
525 goto error; 538 goto error;
526 539
527 cs->dev = &cs->hw.ser->dev.dev; 540 cs->dev = &cs->hw.ser->dev.dev;
@@ -690,7 +703,8 @@ gigaset_tty_receive(struct tty_struct *tty, const unsigned char *buf,
690 703
691 if (!cs) 704 if (!cs)
692 return; 705 return;
693 if (!(inbuf = cs->inbuf)) { 706 inbuf = cs->inbuf;
707 if (!inbuf) {
694 dev_err(cs->dev, "%s: no inbuf\n", __func__); 708 dev_err(cs->dev, "%s: no inbuf\n", __func__);
695 cs_put(cs); 709 cs_put(cs);
696 return; 710 return;
@@ -770,18 +784,21 @@ static int __init ser_gigaset_init(void)
770 int rc; 784 int rc;
771 785
772 gig_dbg(DEBUG_INIT, "%s", __func__); 786 gig_dbg(DEBUG_INIT, "%s", __func__);
773 if ((rc = platform_driver_register(&device_driver)) != 0) { 787 rc = platform_driver_register(&device_driver);
788 if (rc != 0) {
774 pr_err("error %d registering platform driver\n", rc); 789 pr_err("error %d registering platform driver\n", rc);
775 return rc; 790 return rc;
776 } 791 }
777 792
778 /* allocate memory for our driver state and intialize it */ 793 /* allocate memory for our driver state and intialize it */
779 if (!(driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, 794 driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
780 GIGASET_MODULENAME, GIGASET_DEVNAME, 795 GIGASET_MODULENAME, GIGASET_DEVNAME,
781 &ops, THIS_MODULE))) 796 &ops, THIS_MODULE);
797 if (!driver)
782 goto error; 798 goto error;
783 799
784 if ((rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc)) != 0) { 800 rc = tty_register_ldisc(N_GIGASET_M101, &gigaset_ldisc);
801 if (rc != 0) {
785 pr_err("error %d registering line discipline\n", rc); 802 pr_err("error %d registering line discipline\n", rc);
786 goto error; 803 goto error;
787 } 804 }
@@ -808,7 +825,8 @@ static void __exit ser_gigaset_exit(void)
808 driver = NULL; 825 driver = NULL;
809 } 826 }
810 827
811 if ((rc = tty_unregister_ldisc(N_GIGASET_M101)) != 0) 828 rc = tty_unregister_ldisc(N_GIGASET_M101);
829 if (rc != 0)
812 pr_err("error %d unregistering line discipline\n", rc); 830 pr_err("error %d unregistering line discipline\n", rc);
813 831
814 platform_driver_unregister(&device_driver); 832 platform_driver_unregister(&device_driver);
diff --git a/drivers/isdn/gigaset/usb-gigaset.c b/drivers/isdn/gigaset/usb-gigaset.c
index 4deb1ab0dbf8..f56b2a83793e 100644
--- a/drivers/isdn/gigaset/usb-gigaset.c
+++ b/drivers/isdn/gigaset/usb-gigaset.c
@@ -43,14 +43,14 @@ MODULE_PARM_DESC(cidmode, "Call-ID mode");
43#define GIGASET_MODULENAME "usb_gigaset" 43#define GIGASET_MODULENAME "usb_gigaset"
44#define GIGASET_DEVNAME "ttyGU" 44#define GIGASET_DEVNAME "ttyGU"
45 45
46#define IF_WRITEBUF 2000 //FIXME // WAKEUP_CHARS: 256 46#define IF_WRITEBUF 2000 /* arbitrary limit */
47 47
48/* Values for the Gigaset M105 Data */ 48/* Values for the Gigaset M105 Data */
49#define USB_M105_VENDOR_ID 0x0681 49#define USB_M105_VENDOR_ID 0x0681
50#define USB_M105_PRODUCT_ID 0x0009 50#define USB_M105_PRODUCT_ID 0x0009
51 51
52/* table of devices that work with this driver */ 52/* table of devices that work with this driver */
53static const struct usb_device_id gigaset_table [] = { 53static const struct usb_device_id gigaset_table[] = {
54 { USB_DEVICE(USB_M105_VENDOR_ID, USB_M105_PRODUCT_ID) }, 54 { USB_DEVICE(USB_M105_VENDOR_ID, USB_M105_PRODUCT_ID) },
55 { } /* Terminating entry */ 55 { } /* Terminating entry */
56}; 56};
@@ -97,8 +97,8 @@ MODULE_DEVICE_TABLE(usb, gigaset_table);
97 * 41 19 -- -- -- -- 06 00 00 00 00 xx 11 13 97 * 41 19 -- -- -- -- 06 00 00 00 00 xx 11 13
98 * Used after every "configuration sequence" (RQ 12, RQs 01/03/13). 98 * Used after every "configuration sequence" (RQ 12, RQs 01/03/13).
99 * xx is usually 0x00 but was 0x7e before starting data transfer 99 * xx is usually 0x00 but was 0x7e before starting data transfer
100 * in unimodem mode. So, this might be an array of characters that need 100 * in unimodem mode. So, this might be an array of characters that
101 * special treatment ("commit all bufferd data"?), 11=^Q, 13=^S. 101 * need special treatment ("commit all bufferd data"?), 11=^Q, 13=^S.
102 * 102 *
103 * Unimodem mode: use "modprobe ppp_async flag_time=0" as the device _needs_ two 103 * Unimodem mode: use "modprobe ppp_async flag_time=0" as the device _needs_ two
104 * flags per packet. 104 * flags per packet.
@@ -114,7 +114,7 @@ static int gigaset_suspend(struct usb_interface *intf, pm_message_t message);
114static int gigaset_resume(struct usb_interface *intf); 114static int gigaset_resume(struct usb_interface *intf);
115static int gigaset_pre_reset(struct usb_interface *intf); 115static int gigaset_pre_reset(struct usb_interface *intf);
116 116
117static struct gigaset_driver *driver = NULL; 117static struct gigaset_driver *driver;
118 118
119/* usb specific object needed to register this driver with the usb subsystem */ 119/* usb specific object needed to register this driver with the usb subsystem */
120static struct usb_driver gigaset_usb_driver = { 120static struct usb_driver gigaset_usb_driver = {
@@ -141,6 +141,7 @@ struct usb_cardstate {
141 struct urb *bulk_out_urb; 141 struct urb *bulk_out_urb;
142 142
143 /* Input buffer */ 143 /* Input buffer */
144 unsigned char *rcvbuf;
144 int rcvbuf_size; 145 int rcvbuf_size;
145 struct urb *read_urb; 146 struct urb *read_urb;
146 __u8 int_in_endpointAddr; 147 __u8 int_in_endpointAddr;
@@ -164,13 +165,11 @@ static int gigaset_set_modem_ctrl(struct cardstate *cs, unsigned old_state,
164 val = tiocm_to_gigaset(new_state); 165 val = tiocm_to_gigaset(new_state);
165 166
166 gig_dbg(DEBUG_USBREQ, "set flags 0x%02x with mask 0x%02x", val, mask); 167 gig_dbg(DEBUG_USBREQ, "set flags 0x%02x with mask 0x%02x", val, mask);
167 // don't use this in an interrupt/BH
168 r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 7, 0x41, 168 r = usb_control_msg(udev, usb_sndctrlpipe(udev, 0), 7, 0x41,
169 (val & 0xff) | ((mask & 0xff) << 8), 0, 169 (val & 0xff) | ((mask & 0xff) << 8), 0,
170 NULL, 0, 2000 /* timeout? */); 170 NULL, 0, 2000 /* timeout? */);
171 if (r < 0) 171 if (r < 0)
172 return r; 172 return r;
173 //..
174 return 0; 173 return 0;
175} 174}
176 175
@@ -220,7 +219,6 @@ static int gigaset_baud_rate(struct cardstate *cs, unsigned cflag)
220 cflag &= CBAUD; 219 cflag &= CBAUD;
221 220
222 switch (cflag) { 221 switch (cflag) {
223 //FIXME more values?
224 case B300: rate = 300; break; 222 case B300: rate = 300; break;
225 case B600: rate = 600; break; 223 case B600: rate = 600; break;
226 case B1200: rate = 1200; break; 224 case B1200: rate = 1200; break;
@@ -273,7 +271,7 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
273 /* set the number of stop bits */ 271 /* set the number of stop bits */
274 if (cflag & CSTOPB) { 272 if (cflag & CSTOPB) {
275 if ((cflag & CSIZE) == CS5) 273 if ((cflag & CSIZE) == CS5)
276 val |= 1; /* 1.5 stop bits */ //FIXME is this okay? 274 val |= 1; /* 1.5 stop bits */
277 else 275 else
278 val |= 2; /* 2 stop bits */ 276 val |= 2; /* 2 stop bits */
279 } 277 }
@@ -282,7 +280,7 @@ static int gigaset_set_line_ctrl(struct cardstate *cs, unsigned cflag)
282} 280}
283 281
284 282
285 /*================================================================================================================*/ 283/*============================================================================*/
286static int gigaset_init_bchannel(struct bc_state *bcs) 284static int gigaset_init_bchannel(struct bc_state *bcs)
287{ 285{
288 /* nothing to do for M10x */ 286 /* nothing to do for M10x */
@@ -344,7 +342,6 @@ static void gigaset_modem_fill(unsigned long data)
344 if (write_modem(cs) < 0) { 342 if (write_modem(cs) < 0) {
345 gig_dbg(DEBUG_OUTPUT, 343 gig_dbg(DEBUG_OUTPUT,
346 "modem_fill: write_modem failed"); 344 "modem_fill: write_modem failed");
347 // FIXME should we tell the LL?
348 again = 1; /* no callback will be called! */ 345 again = 1; /* no callback will be called! */
349 } 346 }
350 } 347 }
@@ -356,8 +353,8 @@ static void gigaset_modem_fill(unsigned long data)
356 */ 353 */
357static void gigaset_read_int_callback(struct urb *urb) 354static void gigaset_read_int_callback(struct urb *urb)
358{ 355{
359 struct inbuf_t *inbuf = urb->context; 356 struct cardstate *cs = urb->context;
360 struct cardstate *cs = inbuf->cs; 357 struct inbuf_t *inbuf = cs->inbuf;
361 int status = urb->status; 358 int status = urb->status;
362 int r; 359 int r;
363 unsigned numbytes; 360 unsigned numbytes;
@@ -368,7 +365,7 @@ static void gigaset_read_int_callback(struct urb *urb)
368 numbytes = urb->actual_length; 365 numbytes = urb->actual_length;
369 366
370 if (numbytes) { 367 if (numbytes) {
371 src = inbuf->rcvbuf; 368 src = cs->hw.usb->rcvbuf;
372 if (unlikely(*src)) 369 if (unlikely(*src))
373 dev_warn(cs->dev, 370 dev_warn(cs->dev,
374 "%s: There was no leading 0, but 0x%02x!\n", 371 "%s: There was no leading 0, but 0x%02x!\n",
@@ -440,7 +437,7 @@ static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb)
440 struct cmdbuf_t *tcb; 437 struct cmdbuf_t *tcb;
441 unsigned long flags; 438 unsigned long flags;
442 int count; 439 int count;
443 int status = -ENOENT; // FIXME 440 int status = -ENOENT;
444 struct usb_cardstate *ucs = cs->hw.usb; 441 struct usb_cardstate *ucs = cs->hw.usb;
445 442
446 do { 443 do {
@@ -480,7 +477,9 @@ static int send_cb(struct cardstate *cs, struct cmdbuf_t *cb)
480 ucs->busy = 1; 477 ucs->busy = 1;
481 478
482 spin_lock_irqsave(&cs->lock, flags); 479 spin_lock_irqsave(&cs->lock, flags);
483 status = cs->connected ? usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC) : -ENODEV; 480 status = cs->connected ?
481 usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC) :
482 -ENODEV;
484 spin_unlock_irqrestore(&cs->lock, flags); 483 spin_unlock_irqrestore(&cs->lock, flags);
485 484
486 if (status) { 485 if (status) {
@@ -510,8 +509,8 @@ static int gigaset_write_cmd(struct cardstate *cs, const unsigned char *buf,
510 509
511 if (len <= 0) 510 if (len <= 0)
512 return 0; 511 return 0;
513 512 cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC);
514 if (!(cb = kmalloc(sizeof(struct cmdbuf_t) + len, GFP_ATOMIC))) { 513 if (!cb) {
515 dev_err(cs->dev, "%s: out of memory\n", __func__); 514 dev_err(cs->dev, "%s: out of memory\n", __func__);
516 return -ENOMEM; 515 return -ENOMEM;
517 } 516 }
@@ -637,9 +636,7 @@ static int write_modem(struct cardstate *cs)
637 return -EINVAL; 636 return -EINVAL;
638 } 637 }
639 638
640 /* Copy data to bulk out buffer and // FIXME copying not necessary 639 /* Copy data to bulk out buffer and transmit data */
641 * transmit data
642 */
643 count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size); 640 count = min(bcs->tx_skb->len, (unsigned) ucs->bulk_out_size);
644 skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count); 641 skb_copy_from_linear_data(bcs->tx_skb, ucs->bulk_out_buffer, count);
645 skb_pull(bcs->tx_skb, count); 642 skb_pull(bcs->tx_skb, count);
@@ -650,7 +647,8 @@ static int write_modem(struct cardstate *cs)
650 if (cs->connected) { 647 if (cs->connected) {
651 usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev, 648 usb_fill_bulk_urb(ucs->bulk_out_urb, ucs->udev,
652 usb_sndbulkpipe(ucs->udev, 649 usb_sndbulkpipe(ucs->udev,
653 ucs->bulk_out_endpointAddr & 0x0f), 650 ucs->bulk_out_endpointAddr &
651 0x0f),
654 ucs->bulk_out_buffer, count, 652 ucs->bulk_out_buffer, count,
655 gigaset_write_bulk_callback, cs); 653 gigaset_write_bulk_callback, cs);
656 ret = usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC); 654 ret = usb_submit_urb(ucs->bulk_out_urb, GFP_ATOMIC);
@@ -666,7 +664,7 @@ static int write_modem(struct cardstate *cs)
666 664
667 if (!bcs->tx_skb->len) { 665 if (!bcs->tx_skb->len) {
668 /* skb sent completely */ 666 /* skb sent completely */
669 gigaset_skb_sent(bcs, bcs->tx_skb); //FIXME also, when ret<0? 667 gigaset_skb_sent(bcs, bcs->tx_skb);
670 668
671 gig_dbg(DEBUG_INTR, "kfree skb (Adr: %lx)!", 669 gig_dbg(DEBUG_INTR, "kfree skb (Adr: %lx)!",
672 (unsigned long) bcs->tx_skb); 670 (unsigned long) bcs->tx_skb);
@@ -763,8 +761,8 @@ static int gigaset_probe(struct usb_interface *interface,
763 buffer_size = le16_to_cpu(endpoint->wMaxPacketSize); 761 buffer_size = le16_to_cpu(endpoint->wMaxPacketSize);
764 ucs->rcvbuf_size = buffer_size; 762 ucs->rcvbuf_size = buffer_size;
765 ucs->int_in_endpointAddr = endpoint->bEndpointAddress; 763 ucs->int_in_endpointAddr = endpoint->bEndpointAddress;
766 cs->inbuf[0].rcvbuf = kmalloc(buffer_size, GFP_KERNEL); 764 ucs->rcvbuf = kmalloc(buffer_size, GFP_KERNEL);
767 if (!cs->inbuf[0].rcvbuf) { 765 if (!ucs->rcvbuf) {
768 dev_err(cs->dev, "Couldn't allocate rcvbuf\n"); 766 dev_err(cs->dev, "Couldn't allocate rcvbuf\n");
769 retval = -ENOMEM; 767 retval = -ENOMEM;
770 goto error; 768 goto error;
@@ -773,9 +771,9 @@ static int gigaset_probe(struct usb_interface *interface,
773 usb_fill_int_urb(ucs->read_urb, udev, 771 usb_fill_int_urb(ucs->read_urb, udev,
774 usb_rcvintpipe(udev, 772 usb_rcvintpipe(udev,
775 endpoint->bEndpointAddress & 0x0f), 773 endpoint->bEndpointAddress & 0x0f),
776 cs->inbuf[0].rcvbuf, buffer_size, 774 ucs->rcvbuf, buffer_size,
777 gigaset_read_int_callback, 775 gigaset_read_int_callback,
778 cs->inbuf + 0, endpoint->bInterval); 776 cs, endpoint->bInterval);
779 777
780 retval = usb_submit_urb(ucs->read_urb, GFP_KERNEL); 778 retval = usb_submit_urb(ucs->read_urb, GFP_KERNEL);
781 if (retval) { 779 if (retval) {
@@ -789,7 +787,7 @@ static int gigaset_probe(struct usb_interface *interface,
789 787
790 if (!gigaset_start(cs)) { 788 if (!gigaset_start(cs)) {
791 tasklet_kill(&cs->write_tasklet); 789 tasklet_kill(&cs->write_tasklet);
792 retval = -ENODEV; //FIXME 790 retval = -ENODEV;
793 goto error; 791 goto error;
794 } 792 }
795 return 0; 793 return 0;
@@ -798,11 +796,11 @@ error:
798 usb_kill_urb(ucs->read_urb); 796 usb_kill_urb(ucs->read_urb);
799 kfree(ucs->bulk_out_buffer); 797 kfree(ucs->bulk_out_buffer);
800 usb_free_urb(ucs->bulk_out_urb); 798 usb_free_urb(ucs->bulk_out_urb);
801 kfree(cs->inbuf[0].rcvbuf); 799 kfree(ucs->rcvbuf);
802 usb_free_urb(ucs->read_urb); 800 usb_free_urb(ucs->read_urb);
803 usb_set_intfdata(interface, NULL); 801 usb_set_intfdata(interface, NULL);
804 ucs->read_urb = ucs->bulk_out_urb = NULL; 802 ucs->read_urb = ucs->bulk_out_urb = NULL;
805 cs->inbuf[0].rcvbuf = ucs->bulk_out_buffer = NULL; 803 ucs->rcvbuf = ucs->bulk_out_buffer = NULL;
806 usb_put_dev(ucs->udev); 804 usb_put_dev(ucs->udev);
807 ucs->udev = NULL; 805 ucs->udev = NULL;
808 ucs->interface = NULL; 806 ucs->interface = NULL;
@@ -831,10 +829,10 @@ static void gigaset_disconnect(struct usb_interface *interface)
831 829
832 kfree(ucs->bulk_out_buffer); 830 kfree(ucs->bulk_out_buffer);
833 usb_free_urb(ucs->bulk_out_urb); 831 usb_free_urb(ucs->bulk_out_urb);
834 kfree(cs->inbuf[0].rcvbuf); 832 kfree(ucs->rcvbuf);
835 usb_free_urb(ucs->read_urb); 833 usb_free_urb(ucs->read_urb);
836 ucs->read_urb = ucs->bulk_out_urb = NULL; 834 ucs->read_urb = ucs->bulk_out_urb = NULL;
837 cs->inbuf[0].rcvbuf = ucs->bulk_out_buffer = NULL; 835 ucs->rcvbuf = ucs->bulk_out_buffer = NULL;
838 836
839 usb_put_dev(ucs->udev); 837 usb_put_dev(ucs->udev);
840 ucs->interface = NULL; 838 ucs->interface = NULL;
@@ -916,9 +914,10 @@ static int __init usb_gigaset_init(void)
916 int result; 914 int result;
917 915
918 /* allocate memory for our driver state and intialize it */ 916 /* allocate memory for our driver state and intialize it */
919 if ((driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS, 917 driver = gigaset_initdriver(GIGASET_MINOR, GIGASET_MINORS,
920 GIGASET_MODULENAME, GIGASET_DEVNAME, 918 GIGASET_MODULENAME, GIGASET_DEVNAME,
921 &ops, THIS_MODULE)) == NULL) 919 &ops, THIS_MODULE);
920 if (driver == NULL)
922 goto error; 921 goto error;
923 922
924 /* register this driver with the USB subsystem */ 923 /* register this driver with the USB subsystem */
diff --git a/drivers/isdn/hardware/mISDN/speedfax.c b/drivers/isdn/hardware/mISDN/speedfax.c
index ff3a4e290da3..7726afdbb40b 100644
--- a/drivers/isdn/hardware/mISDN/speedfax.c
+++ b/drivers/isdn/hardware/mISDN/speedfax.c
@@ -110,6 +110,7 @@ set_debug(const char *val, struct kernel_param *kp)
110MODULE_AUTHOR("Karsten Keil"); 110MODULE_AUTHOR("Karsten Keil");
111MODULE_LICENSE("GPL v2"); 111MODULE_LICENSE("GPL v2");
112MODULE_VERSION(SPEEDFAX_REV); 112MODULE_VERSION(SPEEDFAX_REV);
113MODULE_FIRMWARE("isdn/ISAR.BIN");
113module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR); 114module_param_call(debug, set_debug, param_get_uint, &debug, S_IRUGO | S_IWUSR);
114MODULE_PARM_DESC(debug, "Speedfax debug mask"); 115MODULE_PARM_DESC(debug, "Speedfax debug mask");
115module_param(irqloops, uint, S_IRUGO | S_IWUSR); 116module_param(irqloops, uint, S_IRUGO | S_IWUSR);
diff --git a/drivers/isdn/mISDN/socket.c b/drivers/isdn/mISDN/socket.c
index feb0fa45b664..fcfe17a19a61 100644
--- a/drivers/isdn/mISDN/socket.c
+++ b/drivers/isdn/mISDN/socket.c
@@ -779,7 +779,7 @@ base_sock_create(struct net *net, struct socket *sock, int protocol)
779} 779}
780 780
781static int 781static int
782mISDN_sock_create(struct net *net, struct socket *sock, int proto) 782mISDN_sock_create(struct net *net, struct socket *sock, int proto, int kern)
783{ 783{
784 int err = -EPROTONOSUPPORT; 784 int err = -EPROTONOSUPPORT;
785 785
@@ -808,8 +808,7 @@ mISDN_sock_create(struct net *net, struct socket *sock, int proto)
808 return err; 808 return err;
809} 809}
810 810
811static struct 811static const struct net_proto_family mISDN_sock_family_ops = {
812net_proto_family mISDN_sock_family_ops = {
813 .owner = THIS_MODULE, 812 .owner = THIS_MODULE,
814 .family = PF_ISDN, 813 .family = PF_ISDN,
815 .create = mISDN_sock_create, 814 .create = mISDN_sock_create,
diff --git a/drivers/leds/Kconfig b/drivers/leds/Kconfig
index 7c8e7122aaa9..e4f599f20e38 100644
--- a/drivers/leds/Kconfig
+++ b/drivers/leds/Kconfig
@@ -150,9 +150,9 @@ config LEDS_LP3944
150 tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip" 150 tristate "LED Support for N.S. LP3944 (Fun Light) I2C chip"
151 depends on LEDS_CLASS && I2C 151 depends on LEDS_CLASS && I2C
152 help 152 help
153 This option enables support for LEDs connected to the National 153 This option enables support for LEDs connected to the National
154 Semiconductor LP3944 Lighting Management Unit (LMU) also known as 154 Semiconductor LP3944 Lighting Management Unit (LMU) also known as
155 Fun Light Chip. 155 Fun Light Chip.
156 156
157 To compile this driver as a module, choose M here: the 157 To compile this driver as a module, choose M here: the
158 module will be called leds-lp3944. 158 module will be called leds-lp3944.
@@ -195,6 +195,13 @@ config LEDS_PCA955X
195 LED driver chips accessed via the I2C bus. Supported 195 LED driver chips accessed via the I2C bus. Supported
196 devices include PCA9550, PCA9551, PCA9552, and PCA9553. 196 devices include PCA9550, PCA9551, PCA9552, and PCA9553.
197 197
198config LEDS_WM831X_STATUS
199 tristate "LED support for status LEDs on WM831x PMICs"
200 depends on LEDS_CLASS && MFD_WM831X
201 help
202 This option enables support for the status LEDs of the WM831x
203 series of PMICs.
204
198config LEDS_WM8350 205config LEDS_WM8350
199 tristate "LED Support for WM8350 AudioPlus PMIC" 206 tristate "LED Support for WM8350 AudioPlus PMIC"
200 depends on LEDS_CLASS && MFD_WM8350 207 depends on LEDS_CLASS && MFD_WM8350
diff --git a/drivers/leds/Makefile b/drivers/leds/Makefile
index e8cdcf77a4c3..46d72704d606 100644
--- a/drivers/leds/Makefile
+++ b/drivers/leds/Makefile
@@ -26,6 +26,7 @@ obj-$(CONFIG_LEDS_HP6XX) += leds-hp6xx.o
26obj-$(CONFIG_LEDS_FSG) += leds-fsg.o 26obj-$(CONFIG_LEDS_FSG) += leds-fsg.o
27obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o 27obj-$(CONFIG_LEDS_PCA955X) += leds-pca955x.o
28obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o 28obj-$(CONFIG_LEDS_DA903X) += leds-da903x.o
29obj-$(CONFIG_LEDS_WM831X_STATUS) += leds-wm831x-status.o
29obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o 30obj-$(CONFIG_LEDS_WM8350) += leds-wm8350.o
30obj-$(CONFIG_LEDS_PWM) += leds-pwm.o 31obj-$(CONFIG_LEDS_PWM) += leds-pwm.o
31 32
diff --git a/drivers/leds/leds-clevo-mail.c b/drivers/leds/leds-clevo-mail.c
index f2242db54016..a498135a4e80 100644
--- a/drivers/leds/leds-clevo-mail.c
+++ b/drivers/leds/leds-clevo-mail.c
@@ -153,7 +153,7 @@ static struct led_classdev clevo_mail_led = {
153 .flags = LED_CORE_SUSPENDRESUME, 153 .flags = LED_CORE_SUSPENDRESUME,
154}; 154};
155 155
156static int __init clevo_mail_led_probe(struct platform_device *pdev) 156static int __devinit clevo_mail_led_probe(struct platform_device *pdev)
157{ 157{
158 return led_classdev_register(&pdev->dev, &clevo_mail_led); 158 return led_classdev_register(&pdev->dev, &clevo_mail_led);
159} 159}
diff --git a/drivers/leds/leds-cobalt-qube.c b/drivers/leds/leds-cobalt-qube.c
index 059aa2924b1c..8816806accd2 100644
--- a/drivers/leds/leds-cobalt-qube.c
+++ b/drivers/leds/leds-cobalt-qube.c
@@ -28,7 +28,7 @@ static void qube_front_led_set(struct led_classdev *led_cdev,
28} 28}
29 29
30static struct led_classdev qube_front_led = { 30static struct led_classdev qube_front_led = {
31 .name = "qube-front", 31 .name = "qube::front",
32 .brightness = LED_FULL, 32 .brightness = LED_FULL,
33 .brightness_set = qube_front_led_set, 33 .brightness_set = qube_front_led_set,
34 .default_trigger = "ide-disk", 34 .default_trigger = "ide-disk",
diff --git a/drivers/leds/leds-cobalt-raq.c b/drivers/leds/leds-cobalt-raq.c
index 5f1ce810815f..defc212105f3 100644
--- a/drivers/leds/leds-cobalt-raq.c
+++ b/drivers/leds/leds-cobalt-raq.c
@@ -49,7 +49,7 @@ static void raq_web_led_set(struct led_classdev *led_cdev,
49} 49}
50 50
51static struct led_classdev raq_web_led = { 51static struct led_classdev raq_web_led = {
52 .name = "raq-web", 52 .name = "raq::web",
53 .brightness_set = raq_web_led_set, 53 .brightness_set = raq_web_led_set,
54}; 54};
55 55
@@ -70,7 +70,7 @@ static void raq_power_off_led_set(struct led_classdev *led_cdev,
70} 70}
71 71
72static struct led_classdev raq_power_off_led = { 72static struct led_classdev raq_power_off_led = {
73 .name = "raq-power-off", 73 .name = "raq::power-off",
74 .brightness_set = raq_power_off_led_set, 74 .brightness_set = raq_power_off_led_set,
75 .default_trigger = "power-off", 75 .default_trigger = "power-off",
76}; 76};
diff --git a/drivers/leds/leds-gpio.c b/drivers/leds/leds-gpio.c
index 6b06638eb5b4..7467980b8cf9 100644
--- a/drivers/leds/leds-gpio.c
+++ b/drivers/leds/leds-gpio.c
@@ -80,7 +80,7 @@ static int __devinit create_gpio_led(const struct gpio_led *template,
80 80
81 /* skip leds that aren't available */ 81 /* skip leds that aren't available */
82 if (!gpio_is_valid(template->gpio)) { 82 if (!gpio_is_valid(template->gpio)) {
83 printk(KERN_INFO "Skipping unavilable LED gpio %d (%s)\n", 83 printk(KERN_INFO "Skipping unavailable LED gpio %d (%s)\n",
84 template->gpio, template->name); 84 template->gpio, template->name);
85 return 0; 85 return 0;
86 } 86 }
diff --git a/drivers/leds/leds-pca9532.c b/drivers/leds/leds-pca9532.c
index dba8921240f2..adc561eb59d2 100644
--- a/drivers/leds/leds-pca9532.c
+++ b/drivers/leds/leds-pca9532.c
@@ -19,9 +19,6 @@
19#include <linux/workqueue.h> 19#include <linux/workqueue.h>
20#include <linux/leds-pca9532.h> 20#include <linux/leds-pca9532.h>
21 21
22static const unsigned short normal_i2c[] = { /*0x60,*/ I2C_CLIENT_END};
23I2C_CLIENT_INSMOD_1(pca9532);
24
25#define PCA9532_REG_PSC(i) (0x2+(i)*2) 22#define PCA9532_REG_PSC(i) (0x2+(i)*2)
26#define PCA9532_REG_PWM(i) (0x3+(i)*2) 23#define PCA9532_REG_PWM(i) (0x3+(i)*2)
27#define PCA9532_REG_LS0 0x6 24#define PCA9532_REG_LS0 0x6
@@ -34,7 +31,7 @@ struct pca9532_data {
34 struct i2c_client *client; 31 struct i2c_client *client;
35 struct pca9532_led leds[16]; 32 struct pca9532_led leds[16];
36 struct mutex update_lock; 33 struct mutex update_lock;
37 struct input_dev *idev; 34 struct input_dev *idev;
38 struct work_struct work; 35 struct work_struct work;
39 u8 pwm[2]; 36 u8 pwm[2];
40 u8 psc[2]; 37 u8 psc[2];
@@ -53,9 +50,9 @@ MODULE_DEVICE_TABLE(i2c, pca9532_id);
53 50
54static struct i2c_driver pca9532_driver = { 51static struct i2c_driver pca9532_driver = {
55 .driver = { 52 .driver = {
56 .name = "pca9532", 53 .name = "pca9532",
57 }, 54 },
58 .probe = pca9532_probe, 55 .probe = pca9532_probe,
59 .remove = pca9532_remove, 56 .remove = pca9532_remove,
60 .id_table = pca9532_id, 57 .id_table = pca9532_id,
61}; 58};
@@ -149,7 +146,7 @@ static int pca9532_set_blink(struct led_classdev *led_cdev,
149 146
150 if (*delay_on == 0 && *delay_off == 0) { 147 if (*delay_on == 0 && *delay_off == 0) {
151 /* led subsystem ask us for a blink rate */ 148 /* led subsystem ask us for a blink rate */
152 *delay_on = 1000; 149 *delay_on = 1000;
153 *delay_off = 1000; 150 *delay_off = 1000;
154 } 151 }
155 if (*delay_on != *delay_off || *delay_on > 1690 || *delay_on < 6) 152 if (*delay_on != *delay_off || *delay_on > 1690 || *delay_on < 6)
@@ -227,7 +224,7 @@ static int pca9532_configure(struct i2c_client *client,
227 break; 224 break;
228 case PCA9532_TYPE_LED: 225 case PCA9532_TYPE_LED:
229 led->state = pled->state; 226 led->state = pled->state;
230 led->name = pled->name; 227 led->name = pled->name;
231 led->ldev.name = led->name; 228 led->ldev.name = led->name;
232 led->ldev.brightness = LED_OFF; 229 led->ldev.brightness = LED_OFF;
233 led->ldev.brightness_set = pca9532_set_brightness; 230 led->ldev.brightness_set = pca9532_set_brightness;
@@ -254,7 +251,7 @@ static int pca9532_configure(struct i2c_client *client,
254 data->idev->name = pled->name; 251 data->idev->name = pled->name;
255 data->idev->phys = "i2c/pca9532"; 252 data->idev->phys = "i2c/pca9532";
256 data->idev->id.bustype = BUS_HOST; 253 data->idev->id.bustype = BUS_HOST;
257 data->idev->id.vendor = 0x001f; 254 data->idev->id.vendor = 0x001f;
258 data->idev->id.product = 0x0001; 255 data->idev->id.product = 0x0001;
259 data->idev->id.version = 0x0100; 256 data->idev->id.version = 0x0100;
260 data->idev->evbit[0] = BIT_MASK(EV_SND); 257 data->idev->evbit[0] = BIT_MASK(EV_SND);
diff --git a/drivers/leds/leds-wm831x-status.c b/drivers/leds/leds-wm831x-status.c
new file mode 100644
index 000000000000..c586d05e336a
--- /dev/null
+++ b/drivers/leds/leds-wm831x-status.c
@@ -0,0 +1,341 @@
1/*
2 * LED driver for WM831x status LEDs
3 *
4 * Copyright(C) 2009 Wolfson Microelectronics PLC.
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 */
11
12#include <linux/kernel.h>
13#include <linux/init.h>
14#include <linux/platform_device.h>
15#include <linux/leds.h>
16#include <linux/err.h>
17#include <linux/mfd/wm831x/core.h>
18#include <linux/mfd/wm831x/pdata.h>
19#include <linux/mfd/wm831x/status.h>
20
21
22struct wm831x_status {
23 struct led_classdev cdev;
24 struct wm831x *wm831x;
25 struct work_struct work;
26 struct mutex mutex;
27
28 spinlock_t value_lock;
29 int reg; /* Control register */
30 int reg_val; /* Control register value */
31
32 int blink;
33 int blink_time;
34 int blink_cyc;
35 int src;
36 enum led_brightness brightness;
37};
38
39#define to_wm831x_status(led_cdev) \
40 container_of(led_cdev, struct wm831x_status, cdev)
41
42static void wm831x_status_work(struct work_struct *work)
43{
44 struct wm831x_status *led = container_of(work, struct wm831x_status,
45 work);
46 unsigned long flags;
47
48 mutex_lock(&led->mutex);
49
50 led->reg_val &= ~(WM831X_LED_SRC_MASK | WM831X_LED_MODE_MASK |
51 WM831X_LED_DUTY_CYC_MASK | WM831X_LED_DUR_MASK);
52
53 spin_lock_irqsave(&led->value_lock, flags);
54
55 led->reg_val |= led->src << WM831X_LED_SRC_SHIFT;
56 if (led->blink) {
57 led->reg_val |= 2 << WM831X_LED_MODE_SHIFT;
58 led->reg_val |= led->blink_time << WM831X_LED_DUR_SHIFT;
59 led->reg_val |= led->blink_cyc;
60 } else {
61 if (led->brightness != LED_OFF)
62 led->reg_val |= 1 << WM831X_LED_MODE_SHIFT;
63 }
64
65 spin_unlock_irqrestore(&led->value_lock, flags);
66
67 wm831x_reg_write(led->wm831x, led->reg, led->reg_val);
68
69 mutex_unlock(&led->mutex);
70}
71
72static void wm831x_status_set(struct led_classdev *led_cdev,
73 enum led_brightness value)
74{
75 struct wm831x_status *led = to_wm831x_status(led_cdev);
76 unsigned long flags;
77
78 spin_lock_irqsave(&led->value_lock, flags);
79 led->brightness = value;
80 if (value == LED_OFF)
81 led->blink = 0;
82 schedule_work(&led->work);
83 spin_unlock_irqrestore(&led->value_lock, flags);
84}
85
86static int wm831x_status_blink_set(struct led_classdev *led_cdev,
87 unsigned long *delay_on,
88 unsigned long *delay_off)
89{
90 struct wm831x_status *led = to_wm831x_status(led_cdev);
91 unsigned long flags;
92 int ret = 0;
93
94 /* Pick some defaults if we've not been given times */
95 if (*delay_on == 0 && *delay_off == 0) {
96 *delay_on = 250;
97 *delay_off = 250;
98 }
99
100 spin_lock_irqsave(&led->value_lock, flags);
101
102 /* We only have a limited selection of settings, see if we can
103 * support the configuration we're being given */
104 switch (*delay_on) {
105 case 1000:
106 led->blink_time = 0;
107 break;
108 case 250:
109 led->blink_time = 1;
110 break;
111 case 125:
112 led->blink_time = 2;
113 break;
114 case 62:
115 case 63:
116 /* Actually 62.5ms */
117 led->blink_time = 3;
118 break;
119 default:
120 ret = -EINVAL;
121 break;
122 }
123
124 if (ret == 0) {
125 switch (*delay_off / *delay_on) {
126 case 1:
127 led->blink_cyc = 0;
128 break;
129 case 3:
130 led->blink_cyc = 1;
131 break;
132 case 4:
133 led->blink_cyc = 2;
134 break;
135 case 8:
136 led->blink_cyc = 3;
137 break;
138 default:
139 ret = -EINVAL;
140 break;
141 }
142 }
143
144 if (ret == 0)
145 led->blink = 1;
146 else
147 led->blink = 0;
148
149 /* Always update; if we fail turn off blinking since we expect
150 * a software fallback. */
151 schedule_work(&led->work);
152
153 spin_unlock_irqrestore(&led->value_lock, flags);
154
155 return ret;
156}
157
158static const char *led_src_texts[] = {
159 "otp",
160 "power",
161 "charger",
162 "soft",
163};
164
165static ssize_t wm831x_status_src_show(struct device *dev,
166 struct device_attribute *attr, char *buf)
167{
168 struct led_classdev *led_cdev = dev_get_drvdata(dev);
169 struct wm831x_status *led = to_wm831x_status(led_cdev);
170 int i;
171 ssize_t ret = 0;
172
173 mutex_lock(&led->mutex);
174
175 for (i = 0; i < ARRAY_SIZE(led_src_texts); i++)
176 if (i == led->src)
177 ret += sprintf(&buf[ret], "[%s] ", led_src_texts[i]);
178 else
179 ret += sprintf(&buf[ret], "%s ", led_src_texts[i]);
180
181 mutex_unlock(&led->mutex);
182
183 ret += sprintf(&buf[ret], "\n");
184
185 return ret;
186}
187
188static ssize_t wm831x_status_src_store(struct device *dev,
189 struct device_attribute *attr,
190 const char *buf, size_t size)
191{
192 struct led_classdev *led_cdev = dev_get_drvdata(dev);
193 struct wm831x_status *led = to_wm831x_status(led_cdev);
194 char name[20];
195 int i;
196 size_t len;
197
198 name[sizeof(name) - 1] = '\0';
199 strncpy(name, buf, sizeof(name) - 1);
200 len = strlen(name);
201
202 if (len && name[len - 1] == '\n')
203 name[len - 1] = '\0';
204
205 for (i = 0; i < ARRAY_SIZE(led_src_texts); i++) {
206 if (!strcmp(name, led_src_texts[i])) {
207 mutex_lock(&led->mutex);
208
209 led->src = i;
210 schedule_work(&led->work);
211
212 mutex_unlock(&led->mutex);
213 }
214 }
215
216 return size;
217}
218
219static DEVICE_ATTR(src, 0644, wm831x_status_src_show, wm831x_status_src_store);
220
221static int wm831x_status_probe(struct platform_device *pdev)
222{
223 struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
224 struct wm831x_pdata *chip_pdata;
225 struct wm831x_status_pdata pdata;
226 struct wm831x_status *drvdata;
227 struct resource *res;
228 int id = pdev->id % ARRAY_SIZE(chip_pdata->status);
229 int ret;
230
231 res = platform_get_resource(pdev, IORESOURCE_IO, 0);
232 if (res == NULL) {
233 dev_err(&pdev->dev, "No I/O resource\n");
234 ret = -EINVAL;
235 goto err;
236 }
237
238 drvdata = kzalloc(sizeof(struct wm831x_status), GFP_KERNEL);
239 if (!drvdata)
240 return -ENOMEM;
241 dev_set_drvdata(&pdev->dev, drvdata);
242
243 drvdata->wm831x = wm831x;
244 drvdata->reg = res->start;
245
246 if (wm831x->dev->platform_data)
247 chip_pdata = wm831x->dev->platform_data;
248 else
249 chip_pdata = NULL;
250
251 memset(&pdata, 0, sizeof(pdata));
252 if (chip_pdata && chip_pdata->status[id])
253 memcpy(&pdata, chip_pdata->status[id], sizeof(pdata));
254 else
255 pdata.name = dev_name(&pdev->dev);
256
257 mutex_init(&drvdata->mutex);
258 INIT_WORK(&drvdata->work, wm831x_status_work);
259 spin_lock_init(&drvdata->value_lock);
260
261 /* We cache the configuration register and read startup values
262 * from it. */
263 drvdata->reg_val = wm831x_reg_read(wm831x, drvdata->reg);
264
265 if (drvdata->reg_val & WM831X_LED_MODE_MASK)
266 drvdata->brightness = LED_FULL;
267 else
268 drvdata->brightness = LED_OFF;
269
270 /* Set a default source if configured, otherwise leave the
271 * current hardware setting.
272 */
273 if (pdata.default_src == WM831X_STATUS_PRESERVE) {
274 drvdata->src = drvdata->reg_val;
275 drvdata->src &= WM831X_LED_SRC_MASK;
276 drvdata->src >>= WM831X_LED_SRC_SHIFT;
277 } else {
278 drvdata->src = pdata.default_src - 1;
279 }
280
281 drvdata->cdev.name = pdata.name;
282 drvdata->cdev.default_trigger = pdata.default_trigger;
283 drvdata->cdev.brightness_set = wm831x_status_set;
284 drvdata->cdev.blink_set = wm831x_status_blink_set;
285
286 ret = led_classdev_register(wm831x->dev, &drvdata->cdev);
287 if (ret < 0) {
288 dev_err(&pdev->dev, "Failed to register LED: %d\n", ret);
289 goto err_led;
290 }
291
292 ret = device_create_file(drvdata->cdev.dev, &dev_attr_src);
293 if (ret != 0)
294 dev_err(&pdev->dev,
295 "No source control for LED: %d\n", ret);
296
297 return 0;
298
299err_led:
300 led_classdev_unregister(&drvdata->cdev);
301 kfree(drvdata);
302err:
303 return ret;
304}
305
306static int wm831x_status_remove(struct platform_device *pdev)
307{
308 struct wm831x_status *drvdata = platform_get_drvdata(pdev);
309
310 device_remove_file(drvdata->cdev.dev, &dev_attr_src);
311 led_classdev_unregister(&drvdata->cdev);
312 kfree(drvdata);
313
314 return 0;
315}
316
317static struct platform_driver wm831x_status_driver = {
318 .driver = {
319 .name = "wm831x-status",
320 .owner = THIS_MODULE,
321 },
322 .probe = wm831x_status_probe,
323 .remove = wm831x_status_remove,
324};
325
326static int __devinit wm831x_status_init(void)
327{
328 return platform_driver_register(&wm831x_status_driver);
329}
330module_init(wm831x_status_init);
331
332static void wm831x_status_exit(void)
333{
334 platform_driver_unregister(&wm831x_status_driver);
335}
336module_exit(wm831x_status_exit);
337
338MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com>");
339MODULE_DESCRIPTION("WM831x status LED driver");
340MODULE_LICENSE("GPL");
341MODULE_ALIAS("platform:wm831x-status");
diff --git a/drivers/leds/ledtrig-gpio.c b/drivers/leds/ledtrig-gpio.c
index 1bc5db4ece0d..f5913372d691 100644
--- a/drivers/leds/ledtrig-gpio.c
+++ b/drivers/leds/ledtrig-gpio.c
@@ -44,22 +44,22 @@ static void gpio_trig_work(struct work_struct *work)
44 struct gpio_trig_data, work); 44 struct gpio_trig_data, work);
45 int tmp; 45 int tmp;
46 46
47 if (!gpio_data->gpio) 47 if (!gpio_data->gpio)
48 return; 48 return;
49 49
50 tmp = gpio_get_value(gpio_data->gpio); 50 tmp = gpio_get_value(gpio_data->gpio);
51 if (gpio_data->inverted) 51 if (gpio_data->inverted)
52 tmp = !tmp; 52 tmp = !tmp;
53 53
54 if (tmp) { 54 if (tmp) {
55 if (gpio_data->desired_brightness) 55 if (gpio_data->desired_brightness)
56 led_set_brightness(gpio_data->led, 56 led_set_brightness(gpio_data->led,
57 gpio_data->desired_brightness); 57 gpio_data->desired_brightness);
58 else 58 else
59 led_set_brightness(gpio_data->led, LED_FULL); 59 led_set_brightness(gpio_data->led, LED_FULL);
60 } else { 60 } else {
61 led_set_brightness(gpio_data->led, LED_OFF); 61 led_set_brightness(gpio_data->led, LED_OFF);
62 } 62 }
63} 63}
64 64
65static ssize_t gpio_trig_brightness_show(struct device *dev, 65static ssize_t gpio_trig_brightness_show(struct device *dev,
diff --git a/drivers/lguest/lguest_user.c b/drivers/lguest/lguest_user.c
index b4d3f7ca554f..bd1632388e4a 100644
--- a/drivers/lguest/lguest_user.c
+++ b/drivers/lguest/lguest_user.c
@@ -508,7 +508,7 @@ static int close(struct inode *inode, struct file *file)
508 * uses: reading and writing a character device called /dev/lguest. All the 508 * uses: reading and writing a character device called /dev/lguest. All the
509 * work happens in the read(), write() and close() routines: 509 * work happens in the read(), write() and close() routines:
510 */ 510 */
511static struct file_operations lguest_fops = { 511static const struct file_operations lguest_fops = {
512 .owner = THIS_MODULE, 512 .owner = THIS_MODULE,
513 .release = close, 513 .release = close,
514 .write = write, 514 .write = write,
diff --git a/drivers/macintosh/therm_adt746x.c b/drivers/macintosh/therm_adt746x.c
index fde377c60cca..556f0feaa4df 100644
--- a/drivers/macintosh/therm_adt746x.c
+++ b/drivers/macintosh/therm_adt746x.c
@@ -124,6 +124,8 @@ read_reg(struct thermostat* th, int reg)
124 return data; 124 return data;
125} 125}
126 126
127static struct i2c_driver thermostat_driver;
128
127static int 129static int
128attach_thermostat(struct i2c_adapter *adapter) 130attach_thermostat(struct i2c_adapter *adapter)
129{ 131{
@@ -148,7 +150,7 @@ attach_thermostat(struct i2c_adapter *adapter)
148 * Let i2c-core delete that device on driver removal. 150 * Let i2c-core delete that device on driver removal.
149 * This is safe because i2c-core holds the core_lock mutex for us. 151 * This is safe because i2c-core holds the core_lock mutex for us.
150 */ 152 */
151 list_add_tail(&client->detected, &client->driver->clients); 153 list_add_tail(&client->detected, &thermostat_driver.clients);
152 return 0; 154 return 0;
153} 155}
154 156
diff --git a/drivers/macintosh/therm_pm72.c b/drivers/macintosh/therm_pm72.c
index a028598af2d3..ea32c7e5a9af 100644
--- a/drivers/macintosh/therm_pm72.c
+++ b/drivers/macintosh/therm_pm72.c
@@ -286,6 +286,8 @@ struct fcu_fan_table fcu_fans[] = {
286 }, 286 },
287}; 287};
288 288
289static struct i2c_driver therm_pm72_driver;
290
289/* 291/*
290 * Utility function to create an i2c_client structure and 292 * Utility function to create an i2c_client structure and
291 * attach it to one of u3 adapters 293 * attach it to one of u3 adapters
@@ -318,7 +320,7 @@ static struct i2c_client *attach_i2c_chip(int id, const char *name)
318 * Let i2c-core delete that device on driver removal. 320 * Let i2c-core delete that device on driver removal.
319 * This is safe because i2c-core holds the core_lock mutex for us. 321 * This is safe because i2c-core holds the core_lock mutex for us.
320 */ 322 */
321 list_add_tail(&clt->detected, &clt->driver->clients); 323 list_add_tail(&clt->detected, &therm_pm72_driver.clients);
322 return clt; 324 return clt;
323} 325}
324 326
diff --git a/drivers/macintosh/via-pmu-led.c b/drivers/macintosh/via-pmu-led.c
index 55ad95671387..d242976bcfe7 100644
--- a/drivers/macintosh/via-pmu-led.c
+++ b/drivers/macintosh/via-pmu-led.c
@@ -72,7 +72,7 @@ static void pmu_led_set(struct led_classdev *led_cdev,
72} 72}
73 73
74static struct led_classdev pmu_led = { 74static struct led_classdev pmu_led = {
75 .name = "pmu-front-led", 75 .name = "pmu-led::front",
76#ifdef CONFIG_ADB_PMU_LED_IDE 76#ifdef CONFIG_ADB_PMU_LED_IDE
77 .default_trigger = "ide-disk", 77 .default_trigger = "ide-disk",
78#endif 78#endif
diff --git a/drivers/macintosh/windfarm_lm75_sensor.c b/drivers/macintosh/windfarm_lm75_sensor.c
index 529886c7a826..ed6426a10773 100644
--- a/drivers/macintosh/windfarm_lm75_sensor.c
+++ b/drivers/macintosh/windfarm_lm75_sensor.c
@@ -115,6 +115,8 @@ static int wf_lm75_probe(struct i2c_client *client,
115 return rc; 115 return rc;
116} 116}
117 117
118static struct i2c_driver wf_lm75_driver;
119
118static struct i2c_client *wf_lm75_create(struct i2c_adapter *adapter, 120static struct i2c_client *wf_lm75_create(struct i2c_adapter *adapter,
119 u8 addr, int ds1775, 121 u8 addr, int ds1775,
120 const char *loc) 122 const char *loc)
@@ -157,7 +159,7 @@ static struct i2c_client *wf_lm75_create(struct i2c_adapter *adapter,
157 * Let i2c-core delete that device on driver removal. 159 * Let i2c-core delete that device on driver removal.
158 * This is safe because i2c-core holds the core_lock mutex for us. 160 * This is safe because i2c-core holds the core_lock mutex for us.
159 */ 161 */
160 list_add_tail(&client->detected, &client->driver->clients); 162 list_add_tail(&client->detected, &wf_lm75_driver.clients);
161 return client; 163 return client;
162 fail: 164 fail:
163 return NULL; 165 return NULL;
diff --git a/drivers/macintosh/windfarm_max6690_sensor.c b/drivers/macintosh/windfarm_max6690_sensor.c
index e2a55ecda2b2..a67b349319e9 100644
--- a/drivers/macintosh/windfarm_max6690_sensor.c
+++ b/drivers/macintosh/windfarm_max6690_sensor.c
@@ -88,6 +88,8 @@ static int wf_max6690_probe(struct i2c_client *client,
88 return rc; 88 return rc;
89} 89}
90 90
91static struct i2c_driver wf_max6690_driver;
92
91static struct i2c_client *wf_max6690_create(struct i2c_adapter *adapter, 93static struct i2c_client *wf_max6690_create(struct i2c_adapter *adapter,
92 u8 addr, const char *loc) 94 u8 addr, const char *loc)
93{ 95{
@@ -119,7 +121,7 @@ static struct i2c_client *wf_max6690_create(struct i2c_adapter *adapter,
119 * Let i2c-core delete that device on driver removal. 121 * Let i2c-core delete that device on driver removal.
120 * This is safe because i2c-core holds the core_lock mutex for us. 122 * This is safe because i2c-core holds the core_lock mutex for us.
121 */ 123 */
122 list_add_tail(&client->detected, &client->driver->clients); 124 list_add_tail(&client->detected, &wf_max6690_driver.clients);
123 return client; 125 return client;
124 126
125 fail: 127 fail:
diff --git a/drivers/macintosh/windfarm_smu_sat.c b/drivers/macintosh/windfarm_smu_sat.c
index 5da729e58f99..e20330a28959 100644
--- a/drivers/macintosh/windfarm_smu_sat.c
+++ b/drivers/macintosh/windfarm_smu_sat.c
@@ -194,6 +194,8 @@ static struct wf_sensor_ops wf_sat_ops = {
194 .owner = THIS_MODULE, 194 .owner = THIS_MODULE,
195}; 195};
196 196
197static struct i2c_driver wf_sat_driver;
198
197static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev) 199static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev)
198{ 200{
199 struct i2c_board_info info; 201 struct i2c_board_info info;
@@ -222,7 +224,7 @@ static void wf_sat_create(struct i2c_adapter *adapter, struct device_node *dev)
222 * Let i2c-core delete that device on driver removal. 224 * Let i2c-core delete that device on driver removal.
223 * This is safe because i2c-core holds the core_lock mutex for us. 225 * This is safe because i2c-core holds the core_lock mutex for us.
224 */ 226 */
225 list_add_tail(&client->detected, &client->driver->clients); 227 list_add_tail(&client->detected, &wf_sat_driver.clients);
226} 228}
227 229
228static int wf_sat_probe(struct i2c_client *client, 230static int wf_sat_probe(struct i2c_client *client,
diff --git a/drivers/md/dm.c b/drivers/md/dm.c
index 376f1ab48a24..23e76fe0d359 100644
--- a/drivers/md/dm.c
+++ b/drivers/md/dm.c
@@ -130,7 +130,7 @@ struct mapped_device {
130 /* 130 /*
131 * A list of ios that arrived while we were suspended. 131 * A list of ios that arrived while we were suspended.
132 */ 132 */
133 atomic_t pending[2]; 133 atomic_t pending;
134 wait_queue_head_t wait; 134 wait_queue_head_t wait;
135 struct work_struct work; 135 struct work_struct work;
136 struct bio_list deferred; 136 struct bio_list deferred;
@@ -453,14 +453,13 @@ static void start_io_acct(struct dm_io *io)
453{ 453{
454 struct mapped_device *md = io->md; 454 struct mapped_device *md = io->md;
455 int cpu; 455 int cpu;
456 int rw = bio_data_dir(io->bio);
457 456
458 io->start_time = jiffies; 457 io->start_time = jiffies;
459 458
460 cpu = part_stat_lock(); 459 cpu = part_stat_lock();
461 part_round_stats(cpu, &dm_disk(md)->part0); 460 part_round_stats(cpu, &dm_disk(md)->part0);
462 part_stat_unlock(); 461 part_stat_unlock();
463 dm_disk(md)->part0.in_flight[rw] = atomic_inc_return(&md->pending[rw]); 462 dm_disk(md)->part0.in_flight = atomic_inc_return(&md->pending);
464} 463}
465 464
466static void end_io_acct(struct dm_io *io) 465static void end_io_acct(struct dm_io *io)
@@ -480,9 +479,8 @@ static void end_io_acct(struct dm_io *io)
480 * After this is decremented the bio must not be touched if it is 479 * After this is decremented the bio must not be touched if it is
481 * a barrier. 480 * a barrier.
482 */ 481 */
483 dm_disk(md)->part0.in_flight[rw] = pending = 482 dm_disk(md)->part0.in_flight = pending =
484 atomic_dec_return(&md->pending[rw]); 483 atomic_dec_return(&md->pending);
485 pending += atomic_read(&md->pending[rw^0x1]);
486 484
487 /* nudge anyone waiting on suspend queue */ 485 /* nudge anyone waiting on suspend queue */
488 if (!pending) 486 if (!pending)
@@ -1787,8 +1785,7 @@ static struct mapped_device *alloc_dev(int minor)
1787 if (!md->disk) 1785 if (!md->disk)
1788 goto bad_disk; 1786 goto bad_disk;
1789 1787
1790 atomic_set(&md->pending[0], 0); 1788 atomic_set(&md->pending, 0);
1791 atomic_set(&md->pending[1], 0);
1792 init_waitqueue_head(&md->wait); 1789 init_waitqueue_head(&md->wait);
1793 INIT_WORK(&md->work, dm_wq_work); 1790 INIT_WORK(&md->work, dm_wq_work);
1794 init_waitqueue_head(&md->eventq); 1791 init_waitqueue_head(&md->eventq);
@@ -2091,8 +2088,7 @@ static int dm_wait_for_completion(struct mapped_device *md, int interruptible)
2091 break; 2088 break;
2092 } 2089 }
2093 spin_unlock_irqrestore(q->queue_lock, flags); 2090 spin_unlock_irqrestore(q->queue_lock, flags);
2094 } else if (!atomic_read(&md->pending[0]) && 2091 } else if (!atomic_read(&md->pending))
2095 !atomic_read(&md->pending[1]))
2096 break; 2092 break;
2097 2093
2098 if (interruptible == TASK_INTERRUPTIBLE && 2094 if (interruptible == TASK_INTERRUPTIBLE &&
diff --git a/drivers/media/dvb/dvb-core/dmxdev.c b/drivers/media/dvb/dvb-core/dmxdev.c
index 3750ff48cba1..c37790ad92d0 100644
--- a/drivers/media/dvb/dvb-core/dmxdev.c
+++ b/drivers/media/dvb/dvb-core/dmxdev.c
@@ -20,6 +20,7 @@
20 * 20 *
21 */ 21 */
22 22
23#include <linux/sched.h>
23#include <linux/spinlock.h> 24#include <linux/spinlock.h>
24#include <linux/slab.h> 25#include <linux/slab.h>
25#include <linux/vmalloc.h> 26#include <linux/vmalloc.h>
@@ -1203,7 +1204,7 @@ static unsigned int dvb_dvr_poll(struct file *file, poll_table *wait)
1203 return mask; 1204 return mask;
1204} 1205}
1205 1206
1206static struct file_operations dvb_dvr_fops = { 1207static const struct file_operations dvb_dvr_fops = {
1207 .owner = THIS_MODULE, 1208 .owner = THIS_MODULE,
1208 .read = dvb_dvr_read, 1209 .read = dvb_dvr_read,
1209 .write = dvb_dvr_write, 1210 .write = dvb_dvr_write,
diff --git a/drivers/media/dvb/dvb-core/dvb_demux.c b/drivers/media/dvb/dvb-core/dvb_demux.c
index eef6d3616626..91c537bca8ad 100644
--- a/drivers/media/dvb/dvb-core/dvb_demux.c
+++ b/drivers/media/dvb/dvb-core/dvb_demux.c
@@ -21,6 +21,7 @@
21 * 21 *
22 */ 22 */
23 23
24#include <linux/sched.h>
24#include <linux/spinlock.h> 25#include <linux/spinlock.h>
25#include <linux/slab.h> 26#include <linux/slab.h>
26#include <linux/vmalloc.h> 27#include <linux/vmalloc.h>
diff --git a/drivers/media/dvb/firewire/firedtv-ci.c b/drivers/media/dvb/firewire/firedtv-ci.c
index eeb80d0ea3ff..853e04b7cb36 100644
--- a/drivers/media/dvb/firewire/firedtv-ci.c
+++ b/drivers/media/dvb/firewire/firedtv-ci.c
@@ -215,7 +215,7 @@ static unsigned int fdtv_ca_io_poll(struct file *file, poll_table *wait)
215 return POLLIN; 215 return POLLIN;
216} 216}
217 217
218static struct file_operations fdtv_ca_fops = { 218static const struct file_operations fdtv_ca_fops = {
219 .owner = THIS_MODULE, 219 .owner = THIS_MODULE,
220 .ioctl = dvb_generic_ioctl, 220 .ioctl = dvb_generic_ioctl,
221 .open = dvb_generic_open, 221 .open = dvb_generic_open,
diff --git a/drivers/media/radio/radio-cadet.c b/drivers/media/radio/radio-cadet.c
index 8b1440136c45..482d0f3be5ff 100644
--- a/drivers/media/radio/radio-cadet.c
+++ b/drivers/media/radio/radio-cadet.c
@@ -38,6 +38,7 @@
38#include <linux/videodev2.h> /* V4L2 API defs */ 38#include <linux/videodev2.h> /* V4L2 API defs */
39#include <linux/param.h> 39#include <linux/param.h>
40#include <linux/pnp.h> 40#include <linux/pnp.h>
41#include <linux/sched.h>
41#include <linux/io.h> /* outb, outb_p */ 42#include <linux/io.h> /* outb, outb_p */
42#include <media/v4l2-device.h> 43#include <media/v4l2-device.h>
43#include <media/v4l2-ioctl.h> 44#include <media/v4l2-ioctl.h>
diff --git a/drivers/media/video/cafe_ccic.c b/drivers/media/video/cafe_ccic.c
index 657c481d255c..10230cb3d210 100644
--- a/drivers/media/video/cafe_ccic.c
+++ b/drivers/media/video/cafe_ccic.c
@@ -1325,7 +1325,7 @@ static void cafe_v4l_vm_close(struct vm_area_struct *vma)
1325 mutex_unlock(&sbuf->cam->s_mutex); 1325 mutex_unlock(&sbuf->cam->s_mutex);
1326} 1326}
1327 1327
1328static struct vm_operations_struct cafe_v4l_vm_ops = { 1328static const struct vm_operations_struct cafe_v4l_vm_ops = {
1329 .open = cafe_v4l_vm_open, 1329 .open = cafe_v4l_vm_open,
1330 .close = cafe_v4l_vm_close 1330 .close = cafe_v4l_vm_close
1331}; 1331};
diff --git a/drivers/media/video/cpia.c b/drivers/media/video/cpia.c
index 43ab0adf3b61..2377313c041a 100644
--- a/drivers/media/video/cpia.c
+++ b/drivers/media/video/cpia.c
@@ -31,6 +31,7 @@
31#include <linux/init.h> 31#include <linux/init.h>
32#include <linux/fs.h> 32#include <linux/fs.h>
33#include <linux/vmalloc.h> 33#include <linux/vmalloc.h>
34#include <linux/sched.h>
34#include <linux/slab.h> 35#include <linux/slab.h>
35#include <linux/proc_fs.h> 36#include <linux/proc_fs.h>
36#include <linux/ctype.h> 37#include <linux/ctype.h>
diff --git a/drivers/media/video/et61x251/et61x251_core.c b/drivers/media/video/et61x251/et61x251_core.c
index 74092f436be6..88987a57cf7b 100644
--- a/drivers/media/video/et61x251/et61x251_core.c
+++ b/drivers/media/video/et61x251/et61x251_core.c
@@ -1496,7 +1496,7 @@ static void et61x251_vm_close(struct vm_area_struct* vma)
1496} 1496}
1497 1497
1498 1498
1499static struct vm_operations_struct et61x251_vm_ops = { 1499static const struct vm_operations_struct et61x251_vm_ops = {
1500 .open = et61x251_vm_open, 1500 .open = et61x251_vm_open,
1501 .close = et61x251_vm_close, 1501 .close = et61x251_vm_close,
1502}; 1502};
diff --git a/drivers/media/video/gspca/gspca.c b/drivers/media/video/gspca/gspca.c
index cf6540da1e42..23d3fb776918 100644
--- a/drivers/media/video/gspca/gspca.c
+++ b/drivers/media/video/gspca/gspca.c
@@ -99,7 +99,7 @@ static void gspca_vm_close(struct vm_area_struct *vma)
99 frame->v4l2_buf.flags &= ~V4L2_BUF_FLAG_MAPPED; 99 frame->v4l2_buf.flags &= ~V4L2_BUF_FLAG_MAPPED;
100} 100}
101 101
102static struct vm_operations_struct gspca_vm_ops = { 102static const struct vm_operations_struct gspca_vm_ops = {
103 .open = gspca_vm_open, 103 .open = gspca_vm_open,
104 .close = gspca_vm_close, 104 .close = gspca_vm_close,
105}; 105};
diff --git a/drivers/media/video/meye.c b/drivers/media/video/meye.c
index d0765bed79c9..4b1bc05a462c 100644
--- a/drivers/media/video/meye.c
+++ b/drivers/media/video/meye.c
@@ -1589,7 +1589,7 @@ static void meye_vm_close(struct vm_area_struct *vma)
1589 meye.vma_use_count[idx]--; 1589 meye.vma_use_count[idx]--;
1590} 1590}
1591 1591
1592static struct vm_operations_struct meye_vm_ops = { 1592static const struct vm_operations_struct meye_vm_ops = {
1593 .open = meye_vm_open, 1593 .open = meye_vm_open,
1594 .close = meye_vm_close, 1594 .close = meye_vm_close,
1595}; 1595};
diff --git a/drivers/media/video/sh_mobile_ceu_camera.c b/drivers/media/video/sh_mobile_ceu_camera.c
index 5ab7c5aefd62..65ac474c517a 100644
--- a/drivers/media/video/sh_mobile_ceu_camera.c
+++ b/drivers/media/video/sh_mobile_ceu_camera.c
@@ -404,7 +404,7 @@ static int sh_mobile_ceu_add_device(struct soc_camera_device *icd)
404 "SuperH Mobile CEU driver attached to camera %d\n", 404 "SuperH Mobile CEU driver attached to camera %d\n",
405 icd->devnum); 405 icd->devnum);
406 406
407 clk_enable(pcdev->clk); 407 pm_runtime_get_sync(ici->v4l2_dev.dev);
408 408
409 ceu_write(pcdev, CAPSR, 1 << 16); /* reset */ 409 ceu_write(pcdev, CAPSR, 1 << 16); /* reset */
410 while (ceu_read(pcdev, CSTSR) & 1) 410 while (ceu_read(pcdev, CSTSR) & 1)
@@ -438,7 +438,7 @@ static void sh_mobile_ceu_remove_device(struct soc_camera_device *icd)
438 } 438 }
439 spin_unlock_irqrestore(&pcdev->lock, flags); 439 spin_unlock_irqrestore(&pcdev->lock, flags);
440 440
441 clk_disable(pcdev->clk); 441 pm_runtime_put_sync(ici->v4l2_dev.dev);
442 442
443 dev_info(icd->dev.parent, 443 dev_info(icd->dev.parent,
444 "SuperH Mobile CEU driver detached from camera %d\n", 444 "SuperH Mobile CEU driver detached from camera %d\n",
diff --git a/drivers/media/video/sn9c102/sn9c102_core.c b/drivers/media/video/sn9c102/sn9c102_core.c
index 9d84c94e8a40..4a7711c3e745 100644
--- a/drivers/media/video/sn9c102/sn9c102_core.c
+++ b/drivers/media/video/sn9c102/sn9c102_core.c
@@ -2077,7 +2077,7 @@ static void sn9c102_vm_close(struct vm_area_struct* vma)
2077} 2077}
2078 2078
2079 2079
2080static struct vm_operations_struct sn9c102_vm_ops = { 2080static const struct vm_operations_struct sn9c102_vm_ops = {
2081 .open = sn9c102_vm_open, 2081 .open = sn9c102_vm_open,
2082 .close = sn9c102_vm_close, 2082 .close = sn9c102_vm_close,
2083}; 2083};
diff --git a/drivers/media/video/stk-webcam.c b/drivers/media/video/stk-webcam.c
index 0b996ea4134e..6b41865f42bd 100644
--- a/drivers/media/video/stk-webcam.c
+++ b/drivers/media/video/stk-webcam.c
@@ -790,7 +790,7 @@ static void stk_v4l_vm_close(struct vm_area_struct *vma)
790 if (sbuf->mapcount == 0) 790 if (sbuf->mapcount == 0)
791 sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_MAPPED; 791 sbuf->v4lbuf.flags &= ~V4L2_BUF_FLAG_MAPPED;
792} 792}
793static struct vm_operations_struct stk_v4l_vm_ops = { 793static const struct vm_operations_struct stk_v4l_vm_ops = {
794 .open = stk_v4l_vm_open, 794 .open = stk_v4l_vm_open,
795 .close = stk_v4l_vm_close 795 .close = stk_v4l_vm_close
796}; 796};
diff --git a/drivers/media/video/uvc/uvc_v4l2.c b/drivers/media/video/uvc/uvc_v4l2.c
index 9e7351569b5d..a2bdd806efab 100644
--- a/drivers/media/video/uvc/uvc_v4l2.c
+++ b/drivers/media/video/uvc/uvc_v4l2.c
@@ -1069,7 +1069,7 @@ static void uvc_vm_close(struct vm_area_struct *vma)
1069 buffer->vma_use_count--; 1069 buffer->vma_use_count--;
1070} 1070}
1071 1071
1072static struct vm_operations_struct uvc_vm_ops = { 1072static const struct vm_operations_struct uvc_vm_ops = {
1073 .open = uvc_vm_open, 1073 .open = uvc_vm_open,
1074 .close = uvc_vm_close, 1074 .close = uvc_vm_close,
1075}; 1075};
diff --git a/drivers/media/video/videobuf-dma-contig.c b/drivers/media/video/videobuf-dma-contig.c
index d09ce83a9429..635ffc7b0391 100644
--- a/drivers/media/video/videobuf-dma-contig.c
+++ b/drivers/media/video/videobuf-dma-contig.c
@@ -105,7 +105,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
105 } 105 }
106} 106}
107 107
108static struct vm_operations_struct videobuf_vm_ops = { 108static const struct vm_operations_struct videobuf_vm_ops = {
109 .open = videobuf_vm_open, 109 .open = videobuf_vm_open,
110 .close = videobuf_vm_close, 110 .close = videobuf_vm_close,
111}; 111};
diff --git a/drivers/media/video/videobuf-dma-sg.c b/drivers/media/video/videobuf-dma-sg.c
index a8dd22ace3fb..53cdd67cebe1 100644
--- a/drivers/media/video/videobuf-dma-sg.c
+++ b/drivers/media/video/videobuf-dma-sg.c
@@ -394,7 +394,7 @@ videobuf_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
394 return 0; 394 return 0;
395} 395}
396 396
397static struct vm_operations_struct videobuf_vm_ops = 397static const struct vm_operations_struct videobuf_vm_ops =
398{ 398{
399 .open = videobuf_vm_open, 399 .open = videobuf_vm_open,
400 .close = videobuf_vm_close, 400 .close = videobuf_vm_close,
diff --git a/drivers/media/video/videobuf-vmalloc.c b/drivers/media/video/videobuf-vmalloc.c
index 30ae30f99ccc..35f3900c5633 100644
--- a/drivers/media/video/videobuf-vmalloc.c
+++ b/drivers/media/video/videobuf-vmalloc.c
@@ -116,7 +116,7 @@ static void videobuf_vm_close(struct vm_area_struct *vma)
116 return; 116 return;
117} 117}
118 118
119static struct vm_operations_struct videobuf_vm_ops = 119static const struct vm_operations_struct videobuf_vm_ops =
120{ 120{
121 .open = videobuf_vm_open, 121 .open = videobuf_vm_open,
122 .close = videobuf_vm_close, 122 .close = videobuf_vm_close,
diff --git a/drivers/media/video/vino.c b/drivers/media/video/vino.c
index cd6a3446ab7e..b034a81d2b1c 100644
--- a/drivers/media/video/vino.c
+++ b/drivers/media/video/vino.c
@@ -3857,7 +3857,7 @@ static void vino_vm_close(struct vm_area_struct *vma)
3857 dprintk("vino_vm_close(): count = %d\n", fb->map_count); 3857 dprintk("vino_vm_close(): count = %d\n", fb->map_count);
3858} 3858}
3859 3859
3860static struct vm_operations_struct vino_vm_ops = { 3860static const struct vm_operations_struct vino_vm_ops = {
3861 .open = vino_vm_open, 3861 .open = vino_vm_open,
3862 .close = vino_vm_close, 3862 .close = vino_vm_close,
3863}; 3863};
diff --git a/drivers/media/video/zc0301/zc0301_core.c b/drivers/media/video/zc0301/zc0301_core.c
index b3c6436b33ba..312a71336fd0 100644
--- a/drivers/media/video/zc0301/zc0301_core.c
+++ b/drivers/media/video/zc0301/zc0301_core.c
@@ -935,7 +935,7 @@ static void zc0301_vm_close(struct vm_area_struct* vma)
935} 935}
936 936
937 937
938static struct vm_operations_struct zc0301_vm_ops = { 938static const struct vm_operations_struct zc0301_vm_ops = {
939 .open = zc0301_vm_open, 939 .open = zc0301_vm_open,
940 .close = zc0301_vm_close, 940 .close = zc0301_vm_close,
941}; 941};
diff --git a/drivers/media/video/zoran/zoran_driver.c b/drivers/media/video/zoran/zoran_driver.c
index bcdefb1bcb3d..47137deafcfd 100644
--- a/drivers/media/video/zoran/zoran_driver.c
+++ b/drivers/media/video/zoran/zoran_driver.c
@@ -3172,7 +3172,7 @@ zoran_vm_close (struct vm_area_struct *vma)
3172 mutex_unlock(&zr->resource_lock); 3172 mutex_unlock(&zr->resource_lock);
3173} 3173}
3174 3174
3175static struct vm_operations_struct zoran_vm_ops = { 3175static const struct vm_operations_struct zoran_vm_ops = {
3176 .open = zoran_vm_open, 3176 .open = zoran_vm_open,
3177 .close = zoran_vm_close, 3177 .close = zoran_vm_close,
3178}; 3178};
diff --git a/drivers/mfd/ab3100-core.c b/drivers/mfd/ab3100-core.c
index 5447da16a170..613481028272 100644
--- a/drivers/mfd/ab3100-core.c
+++ b/drivers/mfd/ab3100-core.c
@@ -57,8 +57,6 @@
57 * The AB3100 is usually assigned address 0x48 (7-bit) 57 * The AB3100 is usually assigned address 0x48 (7-bit)
58 * The chip is defined in the platform i2c_board_data section. 58 * The chip is defined in the platform i2c_board_data section.
59 */ 59 */
60static unsigned short normal_i2c[] = { 0x48, I2C_CLIENT_END };
61I2C_CLIENT_INSMOD_1(ab3100);
62 60
63u8 ab3100_get_chip_type(struct ab3100 *ab3100) 61u8 ab3100_get_chip_type(struct ab3100 *ab3100)
64{ 62{
@@ -966,7 +964,7 @@ static int __exit ab3100_remove(struct i2c_client *client)
966} 964}
967 965
968static const struct i2c_device_id ab3100_id[] = { 966static const struct i2c_device_id ab3100_id[] = {
969 { "ab3100", ab3100 }, 967 { "ab3100", 0 },
970 { } 968 { }
971}; 969};
972MODULE_DEVICE_TABLE(i2c, ab3100_id); 970MODULE_DEVICE_TABLE(i2c, ab3100_id);
diff --git a/drivers/mfd/ucb1400_core.c b/drivers/mfd/ucb1400_core.c
index 2afc08006e6d..fa294b6d600a 100644
--- a/drivers/mfd/ucb1400_core.c
+++ b/drivers/mfd/ucb1400_core.c
@@ -21,6 +21,7 @@
21 */ 21 */
22 22
23#include <linux/module.h> 23#include <linux/module.h>
24#include <linux/sched.h>
24#include <linux/ucb1400.h> 25#include <linux/ucb1400.h>
25 26
26unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel, 27unsigned int ucb1400_adc_read(struct snd_ac97 *ac97, u16 adc_channel,
diff --git a/drivers/misc/Kconfig b/drivers/misc/Kconfig
index df1f86b5c83e..a2ea383105a6 100644
--- a/drivers/misc/Kconfig
+++ b/drivers/misc/Kconfig
@@ -249,5 +249,6 @@ config EP93XX_PWM
249source "drivers/misc/c2port/Kconfig" 249source "drivers/misc/c2port/Kconfig"
250source "drivers/misc/eeprom/Kconfig" 250source "drivers/misc/eeprom/Kconfig"
251source "drivers/misc/cb710/Kconfig" 251source "drivers/misc/cb710/Kconfig"
252source "drivers/misc/iwmc3200top/Kconfig"
252 253
253endif # MISC_DEVICES 254endif # MISC_DEVICES
diff --git a/drivers/misc/Makefile b/drivers/misc/Makefile
index f982d2ecfde7..e311267a355f 100644
--- a/drivers/misc/Makefile
+++ b/drivers/misc/Makefile
@@ -21,5 +21,6 @@ obj-$(CONFIG_HP_ILO) += hpilo.o
21obj-$(CONFIG_ISL29003) += isl29003.o 21obj-$(CONFIG_ISL29003) += isl29003.o
22obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o 22obj-$(CONFIG_EP93XX_PWM) += ep93xx_pwm.o
23obj-$(CONFIG_C2PORT) += c2port/ 23obj-$(CONFIG_C2PORT) += c2port/
24obj-$(CONFIG_IWMC3200TOP) += iwmc3200top/
24obj-y += eeprom/ 25obj-y += eeprom/
25obj-y += cb710/ 26obj-y += cb710/
diff --git a/drivers/misc/eeprom/max6875.c b/drivers/misc/eeprom/max6875.c
index 3c0c58eed347..5a6b2bce8ad5 100644
--- a/drivers/misc/eeprom/max6875.c
+++ b/drivers/misc/eeprom/max6875.c
@@ -33,12 +33,6 @@
33#include <linux/i2c.h> 33#include <linux/i2c.h>
34#include <linux/mutex.h> 34#include <linux/mutex.h>
35 35
36/* Do not scan - the MAX6875 access method will write to some EEPROM chips */
37static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
38
39/* Insmod parameters */
40I2C_CLIENT_INSMOD_1(max6875);
41
42/* The MAX6875 can only read/write 16 bytes at a time */ 36/* The MAX6875 can only read/write 16 bytes at a time */
43#define SLICE_SIZE 16 37#define SLICE_SIZE 16
44#define SLICE_BITS 4 38#define SLICE_BITS 4
@@ -146,31 +140,21 @@ static struct bin_attribute user_eeprom_attr = {
146 .read = max6875_read, 140 .read = max6875_read,
147}; 141};
148 142
149/* Return 0 if detection is successful, -ENODEV otherwise */ 143static int max6875_probe(struct i2c_client *client,
150static int max6875_detect(struct i2c_client *client, int kind, 144 const struct i2c_device_id *id)
151 struct i2c_board_info *info)
152{ 145{
153 struct i2c_adapter *adapter = client->adapter; 146 struct i2c_adapter *adapter = client->adapter;
147 struct max6875_data *data;
148 int err;
154 149
155 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE_DATA 150 if (!i2c_check_functionality(adapter, I2C_FUNC_SMBUS_WRITE_BYTE_DATA
156 | I2C_FUNC_SMBUS_READ_BYTE)) 151 | I2C_FUNC_SMBUS_READ_BYTE))
157 return -ENODEV; 152 return -ENODEV;
158 153
159 /* Only check even addresses */ 154 /* Only bind to even addresses */
160 if (client->addr & 1) 155 if (client->addr & 1)
161 return -ENODEV; 156 return -ENODEV;
162 157
163 strlcpy(info->type, "max6875", I2C_NAME_SIZE);
164
165 return 0;
166}
167
168static int max6875_probe(struct i2c_client *client,
169 const struct i2c_device_id *id)
170{
171 struct max6875_data *data;
172 int err;
173
174 if (!(data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL))) 158 if (!(data = kzalloc(sizeof(struct max6875_data), GFP_KERNEL)))
175 return -ENOMEM; 159 return -ENOMEM;
176 160
@@ -222,9 +206,6 @@ static struct i2c_driver max6875_driver = {
222 .probe = max6875_probe, 206 .probe = max6875_probe,
223 .remove = max6875_remove, 207 .remove = max6875_remove,
224 .id_table = max6875_id, 208 .id_table = max6875_id,
225
226 .detect = max6875_detect,
227 .address_data = &addr_data,
228}; 209};
229 210
230static int __init max6875_init(void) 211static int __init max6875_init(void)
diff --git a/drivers/misc/iwmc3200top/Kconfig b/drivers/misc/iwmc3200top/Kconfig
new file mode 100644
index 000000000000..9e4b88fb57f1
--- /dev/null
+++ b/drivers/misc/iwmc3200top/Kconfig
@@ -0,0 +1,20 @@
1config IWMC3200TOP
2 tristate "Intel Wireless MultiCom Top Driver"
3 depends on MMC && EXPERIMENTAL
4 select FW_LOADER
5 ---help---
6 Intel Wireless MultiCom 3200 Top driver is responsible for
7 for firmware load and enabled coms enumeration
8
9config IWMC3200TOP_DEBUG
10 bool "Enable full debug output of iwmc3200top Driver"
11 depends on IWMC3200TOP
12 ---help---
13 Enable full debug output of iwmc3200top Driver
14
15config IWMC3200TOP_DEBUGFS
16 bool "Enable Debugfs debugging interface for iwmc3200top"
17 depends on IWMC3200TOP
18 ---help---
19 Enable creation of debugfs files for iwmc3200top
20
diff --git a/drivers/misc/iwmc3200top/Makefile b/drivers/misc/iwmc3200top/Makefile
new file mode 100644
index 000000000000..fbf53fb4634e
--- /dev/null
+++ b/drivers/misc/iwmc3200top/Makefile
@@ -0,0 +1,29 @@
1# iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
2# drivers/misc/iwmc3200top/Makefile
3#
4# Copyright (C) 2009 Intel Corporation. All rights reserved.
5#
6# This program is free software; you can redistribute it and/or
7# modify it under the terms of the GNU General Public License version
8# 2 as published by the Free Software Foundation.
9#
10# This program is distributed in the hope that it will be useful,
11# but WITHOUT ANY WARRANTY; without even the implied warranty of
12# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13# GNU General Public License for more details.
14#
15# You should have received a copy of the GNU General Public License
16# along with this program; if not, write to the Free Software
17# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
18# 02110-1301, USA.
19#
20#
21# Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
22# -
23#
24#
25
26obj-$(CONFIG_IWMC3200TOP) += iwmc3200top.o
27iwmc3200top-objs := main.o fw-download.o
28iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUG) += log.o
29iwmc3200top-$(CONFIG_IWMC3200TOP_DEBUGFS) += debugfs.o
diff --git a/drivers/misc/iwmc3200top/debugfs.c b/drivers/misc/iwmc3200top/debugfs.c
new file mode 100644
index 000000000000..0c8ea0a1c8a3
--- /dev/null
+++ b/drivers/misc/iwmc3200top/debugfs.c
@@ -0,0 +1,133 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/debufs.c
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#include <linux/kernel.h>
28#include <linux/string.h>
29#include <linux/ctype.h>
30#include <linux/mmc/sdio_func.h>
31#include <linux/mmc/sdio.h>
32#include <linux/debugfs.h>
33
34#include "iwmc3200top.h"
35#include "fw-msg.h"
36#include "log.h"
37#include "debugfs.h"
38
39
40
41/* Constants definition */
42#define HEXADECIMAL_RADIX 16
43
44/* Functions definition */
45
46
47#define DEBUGFS_ADD(name, parent) do { \
48 dbgfs->dbgfs_##parent##_files.file_##name = \
49 debugfs_create_file(#name, 0644, dbgfs->dir_##parent, priv, \
50 &iwmct_dbgfs_##name##_ops); \
51} while (0)
52
53#define DEBUGFS_RM(name) do { \
54 debugfs_remove(name); \
55 name = NULL; \
56} while (0)
57
58#define DEBUGFS_READ_FUNC(name) \
59ssize_t iwmct_dbgfs_##name##_read(struct file *file, \
60 char __user *user_buf, \
61 size_t count, loff_t *ppos);
62
63#define DEBUGFS_WRITE_FUNC(name) \
64ssize_t iwmct_dbgfs_##name##_write(struct file *file, \
65 const char __user *user_buf, \
66 size_t count, loff_t *ppos);
67
68#define DEBUGFS_READ_FILE_OPS(name) \
69 DEBUGFS_READ_FUNC(name) \
70 static const struct file_operations iwmct_dbgfs_##name##_ops = { \
71 .read = iwmct_dbgfs_##name##_read, \
72 .open = iwmct_dbgfs_open_file_generic, \
73 };
74
75#define DEBUGFS_WRITE_FILE_OPS(name) \
76 DEBUGFS_WRITE_FUNC(name) \
77 static const struct file_operations iwmct_dbgfs_##name##_ops = { \
78 .write = iwmct_dbgfs_##name##_write, \
79 .open = iwmct_dbgfs_open_file_generic, \
80 };
81
82#define DEBUGFS_READ_WRITE_FILE_OPS(name) \
83 DEBUGFS_READ_FUNC(name) \
84 DEBUGFS_WRITE_FUNC(name) \
85 static const struct file_operations iwmct_dbgfs_##name##_ops = {\
86 .write = iwmct_dbgfs_##name##_write, \
87 .read = iwmct_dbgfs_##name##_read, \
88 .open = iwmct_dbgfs_open_file_generic, \
89 };
90
91
92/* Debugfs file ops definitions */
93
94/*
95 * Create the debugfs files and directories
96 *
97 */
98void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name)
99{
100 struct iwmct_debugfs *dbgfs;
101
102 dbgfs = kzalloc(sizeof(struct iwmct_debugfs), GFP_KERNEL);
103 if (!dbgfs) {
104 LOG_ERROR(priv, DEBUGFS, "failed to allocate %zd bytes\n",
105 sizeof(struct iwmct_debugfs));
106 return;
107 }
108
109 priv->dbgfs = dbgfs;
110 dbgfs->name = name;
111 dbgfs->dir_drv = debugfs_create_dir(name, NULL);
112 if (!dbgfs->dir_drv) {
113 LOG_ERROR(priv, DEBUGFS, "failed to create debugfs dir\n");
114 return;
115 }
116
117 return;
118}
119
120/**
121 * Remove the debugfs files and directories
122 *
123 */
124void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs)
125{
126 if (!dbgfs)
127 return;
128
129 DEBUGFS_RM(dbgfs->dir_drv);
130 kfree(dbgfs);
131 dbgfs = NULL;
132}
133
diff --git a/drivers/misc/iwmc3200top/debugfs.h b/drivers/misc/iwmc3200top/debugfs.h
new file mode 100644
index 000000000000..71d45759b40f
--- /dev/null
+++ b/drivers/misc/iwmc3200top/debugfs.h
@@ -0,0 +1,58 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
 * drivers/misc/iwmc3200top/debugfs.h
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
#ifndef __DEBUGFS_H__
#define __DEBUGFS_H__


#ifdef CONFIG_IWMC3200TOP_DEBUGFS

/* Per-device debugfs state, allocated by iwmct_dbgfs_register(). */
struct iwmct_debugfs {
	const char *name;	/* debugfs directory name */
	struct dentry *dir_drv;	/* root debugfs directory */
	struct dir_drv_files {
		/* NOTE(review): no file dentries yet — placeholder */
	} dbgfs_drv_files;
};

void iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name);
void iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs);

#else /* CONFIG_IWMC3200TOP_DEBUGFS */

struct iwmct_debugfs;

/* No-op stubs used when debugfs support is compiled out. */
static inline void
iwmct_dbgfs_register(struct iwmct_priv *priv, const char *name)
{}

static inline void
iwmct_dbgfs_unregister(struct iwmct_debugfs *dbgfs)
{}

#endif /* CONFIG_IWMC3200TOP_DEBUGFS */

#endif /* __DEBUGFS_H__ */
58
diff --git a/drivers/misc/iwmc3200top/fw-download.c b/drivers/misc/iwmc3200top/fw-download.c
new file mode 100644
index 000000000000..33cb693dd37c
--- /dev/null
+++ b/drivers/misc/iwmc3200top/fw-download.c
@@ -0,0 +1,359 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/fw-download.c
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#include <linux/firmware.h>
28#include <linux/mmc/sdio_func.h>
29#include <asm/unaligned.h>
30
31#include "iwmc3200top.h"
32#include "log.h"
33#include "fw-msg.h"
34
35#define CHECKSUM_BYTES_NUM sizeof(u32)
36
37/**
38 init parser struct with file
39 */
40static int iwmct_fw_parser_init(struct iwmct_priv *priv, const u8 *file,
41 size_t file_size, size_t block_size)
42{
43 struct iwmct_parser *parser = &priv->parser;
44 struct iwmct_fw_hdr *fw_hdr = &parser->versions;
45
46 LOG_INFOEX(priv, INIT, "-->\n");
47
48 LOG_INFO(priv, FW_DOWNLOAD, "file_size=%zd\n", file_size);
49
50 parser->file = file;
51 parser->file_size = file_size;
52 parser->cur_pos = 0;
53 parser->buf = NULL;
54
55 parser->buf = kzalloc(block_size, GFP_KERNEL);
56 if (!parser->buf) {
57 LOG_ERROR(priv, FW_DOWNLOAD, "kzalloc error\n");
58 return -ENOMEM;
59 }
60 parser->buf_size = block_size;
61
62 /* extract fw versions */
63 memcpy(fw_hdr, parser->file, sizeof(struct iwmct_fw_hdr));
64 LOG_INFO(priv, FW_DOWNLOAD, "fw versions are:\n"
65 "top %u.%u.%u gps %u.%u.%u bt %u.%u.%u tic %s\n",
66 fw_hdr->top_major, fw_hdr->top_minor, fw_hdr->top_revision,
67 fw_hdr->gps_major, fw_hdr->gps_minor, fw_hdr->gps_revision,
68 fw_hdr->bt_major, fw_hdr->bt_minor, fw_hdr->bt_revision,
69 fw_hdr->tic_name);
70
71 parser->cur_pos += sizeof(struct iwmct_fw_hdr);
72
73 LOG_INFOEX(priv, INIT, "<--\n");
74 return 0;
75}
76
/* Verify the additive 32-bit checksum stored in the last 4 bytes of
 * the image.  All preceding bytes are summed as little-endian words;
 * a trailing partial word is masked in.  Returns true on match. */
static bool iwmct_checksum(struct iwmct_priv *priv)
{
	struct iwmct_parser *parser = &priv->parser;
	__le32 *file = (__le32 *)parser->file;
	int i, pad, steps;
	u32 accum = 0;
	u32 checksum;
	u32 mask = 0xffffffff;

	/* bytes covered by the checksum = file_size minus the checksum
	 * word itself; pad is the length of the trailing partial word.
	 * NOTE(review): assumes file_size >= CHECKSUM_BYTES_NUM. */
	pad = (parser->file_size - CHECKSUM_BYTES_NUM) % 4;
	steps = (parser->file_size - CHECKSUM_BYTES_NUM) / 4;

	LOG_INFO(priv, FW_DOWNLOAD, "pad=%d steps=%d\n", pad, steps);

	for (i = 0; i < steps; i++)
		accum += le32_to_cpu(file[i]);

	if (pad) {
		/* NOTE(review): after le32_to_cpu() the valid (first)
		 * bytes of the partial word sit in the LOW-order bits,
		 * yet this mask keeps the HIGH bits — looks inverted;
		 * confirm against the image generation tool before
		 * changing (images may always be 4-byte aligned). */
		mask <<= 8 * (4 - pad);
		accum += le32_to_cpu(file[steps]) & mask;
	}

	checksum = get_unaligned_le32((__le32 *)(parser->file +
			parser->file_size - CHECKSUM_BYTES_NUM));

	LOG_INFO(priv, FW_DOWNLOAD,
		"compare checksum accum=0x%x to checksum=0x%x\n",
		accum, checksum);

	return checksum == accum;
}
108
109static int iwmct_parse_next_section(struct iwmct_priv *priv, const u8 **p_sec,
110 size_t *sec_size, __le32 *sec_addr)
111{
112 struct iwmct_parser *parser = &priv->parser;
113 struct iwmct_dbg *dbg = &priv->dbg;
114 struct iwmct_fw_sec_hdr *sec_hdr;
115
116 LOG_INFOEX(priv, INIT, "-->\n");
117
118 while (parser->cur_pos + sizeof(struct iwmct_fw_sec_hdr)
119 <= parser->file_size) {
120
121 sec_hdr = (struct iwmct_fw_sec_hdr *)
122 (parser->file + parser->cur_pos);
123 parser->cur_pos += sizeof(struct iwmct_fw_sec_hdr);
124
125 LOG_INFO(priv, FW_DOWNLOAD,
126 "sec hdr: type=%s addr=0x%x size=%d\n",
127 sec_hdr->type, sec_hdr->target_addr,
128 sec_hdr->data_size);
129
130 if (strcmp(sec_hdr->type, "ENT") == 0)
131 parser->entry_point = le32_to_cpu(sec_hdr->target_addr);
132 else if (strcmp(sec_hdr->type, "LBL") == 0)
133 strcpy(dbg->label_fw, parser->file + parser->cur_pos);
134 else if (((strcmp(sec_hdr->type, "TOP") == 0) &&
135 (priv->barker & BARKER_DNLOAD_TOP_MSK)) ||
136 ((strcmp(sec_hdr->type, "GPS") == 0) &&
137 (priv->barker & BARKER_DNLOAD_GPS_MSK)) ||
138 ((strcmp(sec_hdr->type, "BTH") == 0) &&
139 (priv->barker & BARKER_DNLOAD_BT_MSK))) {
140 *sec_addr = sec_hdr->target_addr;
141 *sec_size = le32_to_cpu(sec_hdr->data_size);
142 *p_sec = parser->file + parser->cur_pos;
143 parser->cur_pos += le32_to_cpu(sec_hdr->data_size);
144 return 1;
145 } else if (strcmp(sec_hdr->type, "LOG") != 0)
146 LOG_WARNING(priv, FW_DOWNLOAD,
147 "skipping section type %s\n",
148 sec_hdr->type);
149
150 parser->cur_pos += le32_to_cpu(sec_hdr->data_size);
151 LOG_INFO(priv, FW_DOWNLOAD,
152 "finished with section cur_pos=%zd\n", parser->cur_pos);
153 }
154
155 LOG_INFOEX(priv, INIT, "<--\n");
156 return 0;
157}
158
/*
 * Push one firmware section to the device in staging-buffer sized
 * chunks.  Each chunk is wrapped in an iwmct_fw_load_hdr (WRITE
 * opcode + signature), optionally checksummed, padded to the SDIO
 * block size and sent with iwmct_tx().
 *
 * Returns 0 on success, -ECANCELED when a reset interrupted the
 * download, -EINVAL when the debug block limit truncated the section,
 * or the iwmct_tx() error.
 */
static int iwmct_download_section(struct iwmct_priv *priv, const u8 *p_sec,
				  size_t sec_size, __le32 addr)
{
	struct iwmct_parser *parser = &priv->parser;
	struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf;
	const u8 *cur_block = p_sec;
	size_t sent = 0;
	int cnt = 0;
	int ret = 0;
	u32 cmd = 0;

	LOG_INFOEX(priv, INIT, "-->\n");
	LOG_INFO(priv, FW_DOWNLOAD, "Download address 0x%x size 0x%zx\n",
				addr, sec_size);

	while (sent < sec_size) {
		int i;
		u32 chksm = 0;
		u32 reset = atomic_read(&priv->reset);
		/* actual FW data */
		u32 data_size = min(parser->buf_size - sizeof(*hdr),
				    sec_size - sent);
		/* Pad to block size */
		u32 trans_size = (data_size + sizeof(*hdr) +
				  IWMC_SDIO_BLK_SIZE - 1) &
				  ~(IWMC_SDIO_BLK_SIZE - 1);
		++cnt;

		/* in case of reset, interrupt FW download */
		if (reset) {
			LOG_INFO(priv, FW_DOWNLOAD,
				 "Reset detected. Abort FW download!!!");
			ret = -ECANCELED;
			goto exit;
		}

		memset(parser->buf, 0, parser->buf_size);
		/* NOTE(review): cmd is OR-accumulated across loop
		 * iterations; the bits are identical every pass except
		 * the checksum bit, which may be cleared below for a
		 * non-multiple-of-4 tail and re-set next iteration. */
		cmd |= IWMC_OPCODE_WRITE << CMD_HDR_OPCODE_POS;
		cmd |= IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
		cmd |= (priv->dbg.direct ? 1 : 0) << CMD_HDR_DIRECT_ACCESS_POS;
		cmd |= (priv->dbg.checksum ? 1 : 0) << CMD_HDR_USE_CHECKSUM_POS;
		hdr->data_size = cpu_to_le32(data_size);
		hdr->target_addr = addr;

		/* checksum is allowed for sizes divisible by 4 */
		if (data_size & 0x3)
			cmd &= ~CMD_HDR_USE_CHECKSUM_MSK;

		memcpy(hdr->data, cur_block, data_size);


		if (cmd & CMD_HDR_USE_CHECKSUM_MSK) {

			/* additive checksum over size, address, command
			 * word and the payload words */
			chksm = data_size + le32_to_cpu(addr) + cmd;
			for (i = 0; i < data_size >> 2; i++)
				chksm += ((u32 *)cur_block)[i];

			hdr->block_chksm = cpu_to_le32(chksm);
			LOG_INFO(priv, FW_DOWNLOAD, "Checksum = 0x%X\n",
				 hdr->block_chksm);
		}

		LOG_INFO(priv, FW_DOWNLOAD, "trans#%d, len=%d, sent=%zd, "
				"sec_size=%zd, startAddress 0x%X\n",
				cnt, trans_size, sent, sec_size, addr);

		if (priv->dbg.dump)
			LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, trans_size);


		hdr->cmd = cpu_to_le32(cmd);
		/* send it down */
		/* TODO: add more proper sending and error checking */
		ret = iwmct_tx(priv, 0, parser->buf, trans_size);
		if (ret != 0) {
			LOG_INFO(priv, FW_DOWNLOAD,
				 "iwmct_tx returned %d\n", ret);
			goto exit;
		}

		/* advance the device address by the payload just sent
		 * (addr is kept in little-endian form throughout) */
		addr = cpu_to_le32(le32_to_cpu(addr) + data_size);
		sent += data_size;
		cur_block = p_sec + sent;

		/* debug knob: stop after a configured number of blocks */
		if (priv->dbg.blocks && (cnt + 1) >= priv->dbg.blocks) {
			LOG_INFO(priv, FW_DOWNLOAD,
				 "Block number limit is reached [%d]\n",
				 priv->dbg.blocks);
			break;
		}
	}

	/* the debug block limit may leave the section incomplete */
	if (sent < sec_size)
		ret = -EINVAL;
exit:
	LOG_INFOEX(priv, INIT, "<--\n");
	return ret;
}
257
258static int iwmct_kick_fw(struct iwmct_priv *priv, bool jump)
259{
260 struct iwmct_parser *parser = &priv->parser;
261 struct iwmct_fw_load_hdr *hdr = (struct iwmct_fw_load_hdr *)parser->buf;
262 int ret;
263 u32 cmd;
264
265 LOG_INFOEX(priv, INIT, "-->\n");
266
267 memset(parser->buf, 0, parser->buf_size);
268 cmd = IWMC_CMD_SIGNATURE << CMD_HDR_SIGNATURE_POS;
269 if (jump) {
270 cmd |= IWMC_OPCODE_JUMP << CMD_HDR_OPCODE_POS;
271 hdr->target_addr = cpu_to_le32(parser->entry_point);
272 LOG_INFO(priv, FW_DOWNLOAD, "jump address 0x%x\n",
273 parser->entry_point);
274 } else {
275 cmd |= IWMC_OPCODE_LAST_COMMAND << CMD_HDR_OPCODE_POS;
276 LOG_INFO(priv, FW_DOWNLOAD, "last command\n");
277 }
278
279 hdr->cmd = cpu_to_le32(cmd);
280
281 LOG_HEXDUMP(FW_DOWNLOAD, parser->buf, sizeof(*hdr));
282 /* send it down */
283 /* TODO: add more proper sending and error checking */
284 ret = iwmct_tx(priv, 0, parser->buf, IWMC_SDIO_BLK_SIZE);
285 if (ret)
286 LOG_INFO(priv, FW_DOWNLOAD, "iwmct_tx returned %d", ret);
287
288 LOG_INFOEX(priv, INIT, "<--\n");
289 return 0;
290}
291
292int iwmct_fw_load(struct iwmct_priv *priv)
293{
294 const struct firmware *raw = NULL;
295 __le32 addr;
296 size_t len;
297 const u8 *pdata;
298 const u8 *name = "iwmc3200top.1.fw";
299 int ret = 0;
300
301 /* clear parser struct */
302 memset(&priv->parser, 0, sizeof(struct iwmct_parser));
303 if (!name) {
304 ret = -EINVAL;
305 goto exit;
306 }
307
308 /* get the firmware */
309 ret = request_firmware(&raw, name, &priv->func->dev);
310 if (ret < 0) {
311 LOG_ERROR(priv, FW_DOWNLOAD, "%s request_firmware failed %d\n",
312 name, ret);
313 goto exit;
314 }
315
316 if (raw->size < sizeof(struct iwmct_fw_sec_hdr)) {
317 LOG_ERROR(priv, FW_DOWNLOAD, "%s smaller then (%zd) (%zd)\n",
318 name, sizeof(struct iwmct_fw_sec_hdr), raw->size);
319 goto exit;
320 }
321
322 LOG_INFO(priv, FW_DOWNLOAD, "Read firmware '%s'\n", name);
323
324 ret = iwmct_fw_parser_init(priv, raw->data, raw->size, priv->trans_len);
325 if (ret < 0) {
326 LOG_ERROR(priv, FW_DOWNLOAD,
327 "iwmct_parser_init failed: Reason %d\n", ret);
328 goto exit;
329 }
330
331 /* checksum */
332 if (!iwmct_checksum(priv)) {
333 LOG_ERROR(priv, FW_DOWNLOAD, "checksum error\n");
334 ret = -EINVAL;
335 goto exit;
336 }
337
338 /* download firmware to device */
339 while (iwmct_parse_next_section(priv, &pdata, &len, &addr)) {
340 if (iwmct_download_section(priv, pdata, len, addr)) {
341 LOG_ERROR(priv, FW_DOWNLOAD,
342 "%s download section failed\n", name);
343 ret = -EIO;
344 goto exit;
345 }
346 }
347
348 iwmct_kick_fw(priv, !!(priv->barker & BARKER_DNLOAD_JUMP_MSK));
349
350exit:
351 kfree(priv->parser.buf);
352
353 if (raw)
354 release_firmware(raw);
355
356 raw = NULL;
357
358 return ret;
359}
diff --git a/drivers/misc/iwmc3200top/fw-msg.h b/drivers/misc/iwmc3200top/fw-msg.h
new file mode 100644
index 000000000000..9e26b75bd482
--- /dev/null
+++ b/drivers/misc/iwmc3200top/fw-msg.h
@@ -0,0 +1,113 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/fw-msg.h
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
#ifndef __FWMSG_H__
#define __FWMSG_H__

/* msg_hdr.type: message direction */
#define COMM_TYPE_D2H			0xFF
#define COMM_TYPE_H2D			0xEE

/* msg_hdr.category */
#define COMM_CATEGORY_OPERATIONAL	0x00
#define COMM_CATEGORY_DEBUG		0x01
#define COMM_CATEGORY_TESTABILITY	0x02
#define COMM_CATEGORY_DIAGNOSTICS	0x03

#define OP_DBG_ZSTR_MSG			cpu_to_le16(0x1A)

#define FW_LOG_SRC_MAX			32
#define FW_LOG_SRC_ALL			255

#define FW_STRING_TABLE_ADDR		cpu_to_le32(0x0C000000)

/* host-to-device command opcodes (msg_hdr.opcode, little-endian) */
#define CMD_DBG_LOG_LEVEL		cpu_to_le16(0x0001)
#define CMD_TST_DEV_RESET		cpu_to_le16(0x0060)
#define CMD_TST_FUNC_RESET		cpu_to_le16(0x0062)
#define CMD_TST_IFACE_RESET		cpu_to_le16(0x0064)
#define CMD_TST_CPU_UTILIZATION		cpu_to_le16(0x0065)
#define CMD_TST_TOP_DEEP_SLEEP		cpu_to_le16(0x0080)
#define CMD_TST_WAKEUP			cpu_to_le16(0x0081)
#define CMD_TST_FUNC_WAKEUP		cpu_to_le16(0x0082)
#define CMD_TST_FUNC_DEEP_SLEEP_REQUEST	cpu_to_le16(0x0083)
#define CMD_TST_GET_MEM_DUMP		cpu_to_le16(0x0096)

/* device-to-host opcodes */
#define OP_OPR_ALIVE			cpu_to_le16(0x0010)
#define OP_OPR_CMD_ACK			cpu_to_le16(0x001F)
#define OP_OPR_CMD_NACK			cpu_to_le16(0x0020)
#define OP_TST_MEM_DUMP			cpu_to_le16(0x0043)

#define CMD_FLAG_PADDING_256		0x80

#define FW_HCMD_BLOCK_SIZE		256

/* common wire header carried by every host<->device message */
struct msg_hdr {
	u8 type;		/* COMM_TYPE_* */
	u8 category;		/* COMM_CATEGORY_* */
	__le16 opcode;
	u8 seqnum;
	u8 flags;
	__le16 length;		/* payload length in bytes */
} __attribute__((__packed__));

/* header of one firmware log record */
struct log_hdr {
	__le32 timestamp;
	u8 severity;
	u8 logsource;
	__le16 reserved;
} __attribute__((__packed__));

/* header of one memory-dump fragment */
struct mdump_hdr {
	u8 dmpid;
	u8 frag;
	__le16 size;
	__le32 addr;
} __attribute__((__packed__));

/* top-level message exchanged with the TOP firmware */
struct top_msg {
	struct msg_hdr hdr;
	union {
		/* D2H messages */
		struct {
			struct log_hdr log_hdr;
			u8 data[1];
		} __attribute__((__packed__)) log;

		struct {
			struct log_hdr log_hdr;
			struct mdump_hdr md_hdr;
			u8 data[1];
		} __attribute__((__packed__)) mdump;

		/* H2D messages */
		struct {
			u8 logsource;
			u8 sevmask;
		} __attribute__((__packed__)) logdefs[FW_LOG_SRC_MAX];
		struct mdump_hdr mdump_req;
	} u;
} __attribute__((__packed__));


#endif /* __FWMSG_H__ */
diff --git a/drivers/misc/iwmc3200top/iwmc3200top.h b/drivers/misc/iwmc3200top/iwmc3200top.h
new file mode 100644
index 000000000000..f572fcf177a1
--- /dev/null
+++ b/drivers/misc/iwmc3200top/iwmc3200top.h
@@ -0,0 +1,206 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/iwmc3200top.h
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
#ifndef __IWMC3200TOP_H__
#define __IWMC3200TOP_H__

#include <linux/workqueue.h>

#define DRV_NAME "iwmc3200top"

/* SDIO transport geometry and function register addresses */
#define IWMC_SDIO_BLK_SIZE			256
#define IWMC_DEFAULT_TR_BLK			64
#define IWMC_SDIO_DATA_ADDR			0x0
#define IWMC_SDIO_INTR_ENABLE_ADDR		0x14
#define IWMC_SDIO_INTR_STATUS_ADDR		0x13
#define IWMC_SDIO_INTR_CLEAR_ADDR		0x13
#define IWMC_SDIO_INTR_GET_SIZE_ADDR		0x2C

#define COMM_HUB_HEADER_LENGTH 16
#define LOGGER_HEADER_LENGTH   10


/* Bit layout of the boot "barker" word read from the device; the
 * DNLOAD_* flags select which firmware components to download. */
#define BARKER_DNLOAD_BT_POS		0
#define BARKER_DNLOAD_BT_MSK		BIT(BARKER_DNLOAD_BT_POS)
#define BARKER_DNLOAD_GPS_POS		1
#define BARKER_DNLOAD_GPS_MSK		BIT(BARKER_DNLOAD_GPS_POS)
#define BARKER_DNLOAD_TOP_POS		2
#define BARKER_DNLOAD_TOP_MSK		BIT(BARKER_DNLOAD_TOP_POS)
#define BARKER_DNLOAD_RESERVED1_POS	3
#define BARKER_DNLOAD_RESERVED1_MSK	BIT(BARKER_DNLOAD_RESERVED1_POS)
#define BARKER_DNLOAD_JUMP_POS		4
#define BARKER_DNLOAD_JUMP_MSK		BIT(BARKER_DNLOAD_JUMP_POS)
#define BARKER_DNLOAD_SYNC_POS		5
#define BARKER_DNLOAD_SYNC_MSK		BIT(BARKER_DNLOAD_SYNC_POS)
#define BARKER_DNLOAD_RESERVED2_POS	6
#define BARKER_DNLOAD_RESERVED2_MSK	(0x3 << BARKER_DNLOAD_RESERVED2_POS)
#define BARKER_DNLOAD_BARKER_POS	8
#define BARKER_DNLOAD_BARKER_MSK	(0xffffff << BARKER_DNLOAD_BARKER_POS)

#define IWMC_BARKER_REBOOT	(0xdeadbe << BARKER_DNLOAD_BARKER_POS)
/* whole field barker */
#define IWMC_BARKER_ACK		0xfeedbabe

#define IWMC_CMD_SIGNATURE	0xcbbc
68
/* Layout of the 32-bit command word in iwmct_fw_load_hdr.cmd.
 * Fixes vs. original: OPCODE mask referenced the nonexistent
 * CMD_HDR_OPCODE_MSK_POS (and was misnamed _MSK_MSK), and the
 * RESERVED/SIGNATURE masks wrongly wrapped an already-shifted mask
 * in BIT(), yielding 1UL << (huge shift) instead of the mask. */
#define CMD_HDR_OPCODE_POS		0
#define CMD_HDR_OPCODE_MSK		(0xf << CMD_HDR_OPCODE_POS)
#define CMD_HDR_RESPONSE_CODE_POS	4
#define CMD_HDR_RESPONSE_CODE_MSK	(0xf << CMD_HDR_RESPONSE_CODE_POS)
#define CMD_HDR_USE_CHECKSUM_POS	8
#define CMD_HDR_USE_CHECKSUM_MSK	BIT(CMD_HDR_USE_CHECKSUM_POS)
#define CMD_HDR_RESPONSE_REQUIRED_POS	9
#define CMD_HDR_RESPONSE_REQUIRED_MSK	BIT(CMD_HDR_RESPONSE_REQUIRED_POS)
#define CMD_HDR_DIRECT_ACCESS_POS	10
#define CMD_HDR_DIRECT_ACCESS_MSK	BIT(CMD_HDR_DIRECT_ACCESS_POS)
#define CMD_HDR_RESERVED_POS		11
#define CMD_HDR_RESERVED_MSK		(0x1f << CMD_HDR_RESERVED_POS)
#define CMD_HDR_SIGNATURE_POS		16
#define CMD_HDR_SIGNATURE_MSK		(0xffff << CMD_HDR_SIGNATURE_POS)
83
/* opcode field values for iwmct_fw_load_hdr.cmd (CMD_HDR_OPCODE) */
enum {
	IWMC_OPCODE_PING = 0,
	IWMC_OPCODE_READ = 1,
	IWMC_OPCODE_WRITE = 2,
	IWMC_OPCODE_JUMP = 3,
	IWMC_OPCODE_REBOOT = 4,
	IWMC_OPCODE_PERSISTENT_WRITE = 5,
	IWMC_OPCODE_PERSISTENT_READ = 6,
	IWMC_OPCODE_READ_MODIFY_WRITE = 7,
	IWMC_OPCODE_LAST_COMMAND = 15
};

/* header prepended to every firmware-download transfer */
struct iwmct_fw_load_hdr {
	__le32 cmd;		/* CMD_HDR_*-encoded command word */
	__le32 target_addr;
	__le32 data_size;
	__le32 block_chksm;
	u8 data[0];		/* payload follows the header */
};

/**
 * struct iwmct_fw_hdr
 * holds all sw components versions
 */
struct iwmct_fw_hdr {
	u8 top_major;
	u8 top_minor;
	u8 top_revision;
	u8 gps_major;
	u8 gps_minor;
	u8 gps_revision;
	u8 bt_major;
	u8 bt_minor;
	u8 bt_revision;
	u8 tic_name[31];
};

/**
 * struct iwmct_fw_sec_hdr
 * @type: function type
 * @data_size: section's data size
 * @target_addr: download address
 */
struct iwmct_fw_sec_hdr {
	u8 type[4];
	__le32 data_size;
	__le32 target_addr;
};

/**
 * struct iwmct_parser
 * @file: fw image
 * @file_size: fw size
 * @cur_pos: position in file
 * @buf: temp buf for download
 * @buf_size: size of buf
 * @entry_point: address to jump in fw kick-off
 */
struct iwmct_parser {
	const u8 *file;
	size_t file_size;
	size_t cur_pos;
	u8 *buf;
	size_t buf_size;
	u32 entry_point;
	struct iwmct_fw_hdr versions;	/* versions from the image header */
};


/* queued rx request bookkeeping */
struct iwmct_work_struct {
	struct list_head list;
	ssize_t iosize;
};

/* debug knobs, settable via module params / driver attributes */
struct iwmct_dbg {
	int blocks;		/* stop download after N blocks (0 = off) */
	bool dump;		/* hexdump each transfer */
	bool jump;
	bool direct;
	bool checksum;		/* per-block checksum on download */
	bool fw_download;
	int block_size;
	int download_trans_blks;

	char label_fw[256];	/* build label from the fw "LBL" section */
};

struct iwmct_debugfs;

/* per-device driver context */
struct iwmct_priv {
	struct sdio_func *func;
	struct iwmct_debugfs *dbgfs;
	struct iwmct_parser parser;
	atomic_t reset;		/* non-zero aborts an in-flight download */
	atomic_t dev_sync;
	u32 trans_len;
	u32 barker;		/* boot barker word from the device */
	struct iwmct_dbg dbg;

	/* drivers work queue */
	struct workqueue_struct *wq;
	struct workqueue_struct *bus_rescan_wq;
	struct work_struct bus_rescan_worker;
	struct work_struct isr_worker;

	/* drivers wait queue */
	wait_queue_head_t wait_q;

	/* rx request list */
	struct list_head read_req_list;
};

extern int iwmct_tx(struct iwmct_priv *priv, unsigned int addr,
		    void *src, int count);

extern int iwmct_fw_load(struct iwmct_priv *priv);

extern void iwmct_dbg_init_params(struct iwmct_priv *drv);
extern void iwmct_dbg_init_drv_attrs(struct device_driver *drv);
extern void iwmct_dbg_remove_drv_attrs(struct device_driver *drv);
extern int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len);

#endif /* __IWMC3200TOP_H__ */
diff --git a/drivers/misc/iwmc3200top/log.c b/drivers/misc/iwmc3200top/log.c
new file mode 100644
index 000000000000..d569279698f6
--- /dev/null
+++ b/drivers/misc/iwmc3200top/log.c
@@ -0,0 +1,347 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/log.c
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#include <linux/kernel.h>
28#include <linux/mmc/sdio_func.h>
29#include <linux/ctype.h>
30#include "fw-msg.h"
31#include "iwmc3200top.h"
32#include "log.h"
33
/* Maximal hexadecimal string size of the FW memdump message */
#define LOG_MSG_SIZE_MAX	12400

/* iwmct_logdefs is a global used by log macros */
u8 iwmct_logdefs[LOG_SRC_MAX];
/* firmware-side severity masks, mirrored to the device on store */
static u8 iwmct_fw_logdefs[FW_LOG_SRC_MAX];
40
41
42static int _log_set_log_filter(u8 *logdefs, int size, u8 src, u8 logmask)
43{
44 int i;
45
46 if (src < size)
47 logdefs[src] = logmask;
48 else if (src == LOG_SRC_ALL)
49 for (i = 0; i < size; i++)
50 logdefs[i] = logmask;
51 else
52 return -1;
53
54 return 0;
55}
56
57
/* Set the driver-side log filter for @src (or all sources). */
int iwmct_log_set_filter(u8 src, u8 logmask)
{
	return _log_set_log_filter(iwmct_logdefs, LOG_SRC_MAX, src, logmask);
}


/* Set the firmware-side log filter cache for @src (or all sources). */
int iwmct_log_set_fw_filter(u8 src, u8 logmask)
{
	return _log_set_log_filter(iwmct_fw_logdefs,
				   FW_LOG_SRC_MAX, src, logmask);
}
69
70
71static int log_msg_format_hex(char *str, int slen, u8 *ibuf,
72 int ilen, char *pref)
73{
74 int pos = 0;
75 int i;
76 int len;
77
78 for (pos = 0, i = 0; pos < slen - 2 && pref[i] != '\0'; i++, pos++)
79 str[pos] = pref[i];
80
81 for (i = 0; pos < slen - 2 && i < ilen; pos += len, i++)
82 len = snprintf(&str[pos], slen - pos - 1, " %2.2X", ibuf[i]);
83
84 if (i < ilen)
85 return -1;
86
87 return 0;
88}
89
90/* NOTE: This function is not thread safe.
91 Currently it's called only from sdio rx worker - no race there
92*/
93void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len)
94{
95 struct top_msg *msg;
96 static char logbuf[LOG_MSG_SIZE_MAX];
97
98 msg = (struct top_msg *)buf;
99
100 if (len < sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr)) {
101 LOG_ERROR(priv, FW_MSG, "Log message from TOP "
102 "is too short %d (expected %zd)\n",
103 len, sizeof(msg->hdr) + sizeof(msg->u.log.log_hdr));
104 return;
105 }
106
107 if (!(iwmct_fw_logdefs[msg->u.log.log_hdr.logsource] &
108 BIT(msg->u.log.log_hdr.severity)) ||
109 !(iwmct_logdefs[LOG_SRC_FW_MSG] & BIT(msg->u.log.log_hdr.severity)))
110 return;
111
112 switch (msg->hdr.category) {
113 case COMM_CATEGORY_TESTABILITY:
114 if (!(iwmct_logdefs[LOG_SRC_TST] &
115 BIT(msg->u.log.log_hdr.severity)))
116 return;
117 if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
118 le16_to_cpu(msg->hdr.length) +
119 sizeof(msg->hdr), "<TST>"))
120 LOG_WARNING(priv, TST,
121 "TOP TST message is too long, truncating...");
122 LOG_WARNING(priv, TST, "%s\n", logbuf);
123 break;
124 case COMM_CATEGORY_DEBUG:
125 if (msg->hdr.opcode == OP_DBG_ZSTR_MSG)
126 LOG_INFO(priv, FW_MSG, "%s %s", "<DBG>",
127 ((u8 *)msg) + sizeof(msg->hdr)
128 + sizeof(msg->u.log.log_hdr));
129 else {
130 if (log_msg_format_hex(logbuf, LOG_MSG_SIZE_MAX, buf,
131 le16_to_cpu(msg->hdr.length)
132 + sizeof(msg->hdr),
133 "<DBG>"))
134 LOG_WARNING(priv, FW_MSG,
135 "TOP DBG message is too long,"
136 "truncating...");
137 LOG_WARNING(priv, FW_MSG, "%s\n", logbuf);
138 }
139 break;
140 default:
141 break;
142 }
143}
144
145static int _log_get_filter_str(u8 *logdefs, int logdefsz, char *buf, int size)
146{
147 int i, pos, len;
148 for (i = 0, pos = 0; (pos < size-1) && (i < logdefsz); i++) {
149 len = snprintf(&buf[pos], size - pos - 1, "0x%02X%02X,",
150 i, logdefs[i]);
151 pos += len;
152 }
153 buf[pos-1] = '\n';
154 buf[pos] = '\0';
155
156 if (i < logdefsz)
157 return -1;
158 return 0;
159}
160
/* Return the driver-side filter formatted via _log_get_filter_str(). */
int log_get_filter_str(char *buf, int size)
{
	return _log_get_filter_str(iwmct_logdefs, LOG_SRC_MAX, buf, size);
}

/* Return the firmware-side filter formatted via _log_get_filter_str(). */
int log_get_fw_filter_str(char *buf, int size)
{
	return _log_get_filter_str(iwmct_fw_logdefs, FW_LOG_SRC_MAX, buf, size);
}

#define HEXADECIMAL_RADIX	16	/* filter values are parsed as hex */
#define LOG_SRC_FORMAT		7	/* log level is in format of "0xXXXX," */
173
174ssize_t show_iwmct_log_level(struct device *d,
175 struct device_attribute *attr, char *buf)
176{
177 struct iwmct_priv *priv = dev_get_drvdata(d);
178 char *str_buf;
179 int buf_size;
180 ssize_t ret;
181
182 buf_size = (LOG_SRC_FORMAT * LOG_SRC_MAX) + 1;
183 str_buf = kzalloc(buf_size, GFP_KERNEL);
184 if (!str_buf) {
185 LOG_ERROR(priv, DEBUGFS,
186 "failed to allocate %d bytes\n", buf_size);
187 ret = -ENOMEM;
188 goto exit;
189 }
190
191 if (log_get_filter_str(str_buf, buf_size) < 0) {
192 ret = -EINVAL;
193 goto exit;
194 }
195
196 ret = sprintf(buf, "%s", str_buf);
197
198exit:
199 kfree(str_buf);
200 return ret;
201}
202
203ssize_t store_iwmct_log_level(struct device *d,
204 struct device_attribute *attr,
205 const char *buf, size_t count)
206{
207 struct iwmct_priv *priv = dev_get_drvdata(d);
208 char *token, *str_buf = NULL;
209 long val;
210 ssize_t ret = count;
211 u8 src, mask;
212
213 if (!count)
214 goto exit;
215
216 str_buf = kzalloc(count, GFP_KERNEL);
217 if (!str_buf) {
218 LOG_ERROR(priv, DEBUGFS,
219 "failed to allocate %zd bytes\n", count);
220 ret = -ENOMEM;
221 goto exit;
222 }
223
224 memcpy(str_buf, buf, count);
225
226 while ((token = strsep(&str_buf, ",")) != NULL) {
227 while (isspace(*token))
228 ++token;
229 if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) {
230 LOG_ERROR(priv, DEBUGFS,
231 "failed to convert string to long %s\n",
232 token);
233 ret = -EINVAL;
234 goto exit;
235 }
236
237 mask = val & 0xFF;
238 src = (val & 0XFF00) >> 8;
239 iwmct_log_set_filter(src, mask);
240 }
241
242exit:
243 kfree(str_buf);
244 return ret;
245}
246
/* sysfs "show" for the firmware log filter: prints the cached fw
 * filter as a comma separated "0xSSMM" list. */
ssize_t show_iwmct_log_level_fw(struct device *d,
			struct device_attribute *attr, char *buf)
{
	struct iwmct_priv *priv = dev_get_drvdata(d);
	char *str_buf;
	int buf_size;
	ssize_t ret;

	/* NOTE(review): +2 here vs. +1 in show_iwmct_log_level —
	 * presumably newline + NUL; confirm which one is intended */
	buf_size = (LOG_SRC_FORMAT * FW_LOG_SRC_MAX) + 2;

	str_buf = kzalloc(buf_size, GFP_KERNEL);
	if (!str_buf) {
		LOG_ERROR(priv, DEBUGFS,
			  "failed to allocate %d bytes\n", buf_size);
		ret = -ENOMEM;
		goto exit;
	}

	if (log_get_fw_filter_str(str_buf, buf_size) < 0) {
		ret = -EINVAL;
		goto exit;
	}

	ret = sprintf(buf, "%s", str_buf);

exit:
	kfree(str_buf);
	return ret;
}
276
277ssize_t store_iwmct_log_level_fw(struct device *d,
278 struct device_attribute *attr,
279 const char *buf, size_t count)
280{
281 struct iwmct_priv *priv = dev_get_drvdata(d);
282 struct top_msg cmd;
283 char *token, *str_buf = NULL;
284 ssize_t ret = count;
285 u16 cmdlen = 0;
286 int i;
287 long val;
288 u8 src, mask;
289
290 if (!count)
291 goto exit;
292
293 str_buf = kzalloc(count, GFP_KERNEL);
294 if (!str_buf) {
295 LOG_ERROR(priv, DEBUGFS,
296 "failed to allocate %zd bytes\n", count);
297 ret = -ENOMEM;
298 goto exit;
299 }
300
301 memcpy(str_buf, buf, count);
302
303 cmd.hdr.type = COMM_TYPE_H2D;
304 cmd.hdr.category = COMM_CATEGORY_DEBUG;
305 cmd.hdr.opcode = CMD_DBG_LOG_LEVEL;
306
307 for (i = 0; ((token = strsep(&str_buf, ",")) != NULL) &&
308 (i < FW_LOG_SRC_MAX); i++) {
309
310 while (isspace(*token))
311 ++token;
312
313 if (strict_strtol(token, HEXADECIMAL_RADIX, &val)) {
314 LOG_ERROR(priv, DEBUGFS,
315 "failed to convert string to long %s\n",
316 token);
317 ret = -EINVAL;
318 goto exit;
319 }
320
321 mask = val & 0xFF; /* LSB */
322 src = (val & 0XFF00) >> 8; /* 2nd least significant byte. */
323 iwmct_log_set_fw_filter(src, mask);
324
325 cmd.u.logdefs[i].logsource = src;
326 cmd.u.logdefs[i].sevmask = mask;
327 }
328
329 cmd.hdr.length = cpu_to_le16(i * sizeof(cmd.u.logdefs[0]));
330 cmdlen = (i * sizeof(cmd.u.logdefs[0]) + sizeof(cmd.hdr));
331
332 ret = iwmct_send_hcmd(priv, (u8 *)&cmd, cmdlen);
333 if (ret) {
334 LOG_ERROR(priv, DEBUGFS,
335 "Failed to send %d bytes of fwcmd, ret=%zd\n",
336 cmdlen, ret);
337 goto exit;
338 } else
339 LOG_INFO(priv, DEBUGFS, "fwcmd sent (%d bytes)\n", cmdlen);
340
341 ret = count;
342
343exit:
344 kfree(str_buf);
345 return ret;
346}
347
diff --git a/drivers/misc/iwmc3200top/log.h b/drivers/misc/iwmc3200top/log.h
new file mode 100644
index 000000000000..aba8121f978c
--- /dev/null
+++ b/drivers/misc/iwmc3200top/log.h
@@ -0,0 +1,158 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/log.h
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#ifndef __LOG_H__
28#define __LOG_H__
29
30
31/* log severity:
32 * The log levels here match FW log levels
33 * so values need to stay as is */
34#define LOG_SEV_CRITICAL 0
35#define LOG_SEV_ERROR 1
36#define LOG_SEV_WARNING 2
37#define LOG_SEV_INFO 3
38#define LOG_SEV_INFOEX 4
39
40#define LOG_SEV_FILTER_ALL \
41 (BIT(LOG_SEV_CRITICAL) | \
42 BIT(LOG_SEV_ERROR) | \
43 BIT(LOG_SEV_WARNING) | \
44 BIT(LOG_SEV_INFO) | \
45 BIT(LOG_SEV_INFOEX))
46
47/* log source */
48#define LOG_SRC_INIT 0
49#define LOG_SRC_DEBUGFS 1
50#define LOG_SRC_FW_DOWNLOAD 2
51#define LOG_SRC_FW_MSG 3
52#define LOG_SRC_TST 4
53#define LOG_SRC_IRQ 5
54
55#define LOG_SRC_MAX 6
56#define LOG_SRC_ALL 0xFF
57
58/**
59 * Default initialization runtime log level
60 */
61#ifndef LOG_SEV_FILTER_RUNTIME
62#define LOG_SEV_FILTER_RUNTIME \
63 (BIT(LOG_SEV_CRITICAL) | \
64 BIT(LOG_SEV_ERROR) | \
65 BIT(LOG_SEV_WARNING))
66#endif
67
68#ifndef FW_LOG_SEV_FILTER_RUNTIME
69#define FW_LOG_SEV_FILTER_RUNTIME LOG_SEV_FILTER_ALL
70#endif
71
72#ifdef CONFIG_IWMC3200TOP_DEBUG
73/**
74 * Log macros
75 */
76
77#define priv2dev(priv) (&(priv->func)->dev)
78
79#define LOG_CRITICAL(priv, src, fmt, args...) \
80do { \
81 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_CRITICAL)) \
82 dev_crit(priv2dev(priv), "%s %d: " fmt, \
83 __func__, __LINE__, ##args); \
84} while (0)
85
86#define LOG_ERROR(priv, src, fmt, args...) \
87do { \
88 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_ERROR)) \
89 dev_err(priv2dev(priv), "%s %d: " fmt, \
90 __func__, __LINE__, ##args); \
91} while (0)
92
93#define LOG_WARNING(priv, src, fmt, args...) \
94do { \
95 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_WARNING)) \
96 dev_warn(priv2dev(priv), "%s %d: " fmt, \
97 __func__, __LINE__, ##args); \
98} while (0)
99
100#define LOG_INFO(priv, src, fmt, args...) \
101do { \
102 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFO)) \
103 dev_info(priv2dev(priv), "%s %d: " fmt, \
104 __func__, __LINE__, ##args); \
105} while (0)
106
107#define LOG_INFOEX(priv, src, fmt, args...) \
108do { \
109 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFOEX)) \
110 dev_dbg(priv2dev(priv), "%s %d: " fmt, \
111 __func__, __LINE__, ##args); \
112} while (0)
113
114#define LOG_HEXDUMP(src, ptr, len) \
115do { \
116 if (iwmct_logdefs[LOG_SRC_ ## src] & BIT(LOG_SEV_INFOEX)) \
117 print_hex_dump(KERN_DEBUG, "", DUMP_PREFIX_NONE, \
118 16, 1, ptr, len, false); \
119} while (0)
120
121void iwmct_log_top_message(struct iwmct_priv *priv, u8 *buf, int len);
122
123extern u8 iwmct_logdefs[];
124
125int iwmct_log_set_filter(u8 src, u8 logmask);
126int iwmct_log_set_fw_filter(u8 src, u8 logmask);
127
128ssize_t show_iwmct_log_level(struct device *d,
129 struct device_attribute *attr, char *buf);
130ssize_t store_iwmct_log_level(struct device *d,
131 struct device_attribute *attr,
132 const char *buf, size_t count);
133ssize_t show_iwmct_log_level_fw(struct device *d,
134 struct device_attribute *attr, char *buf);
135ssize_t store_iwmct_log_level_fw(struct device *d,
136 struct device_attribute *attr,
137 const char *buf, size_t count);
138
139#else
140
141#define LOG_CRITICAL(priv, src, fmt, args...)
142#define LOG_ERROR(priv, src, fmt, args...)
143#define LOG_WARNING(priv, src, fmt, args...)
144#define LOG_INFO(priv, src, fmt, args...)
145#define LOG_INFOEX(priv, src, fmt, args...)
146#define LOG_HEXDUMP(src, ptr, len)
147
148static inline void iwmct_log_top_message(struct iwmct_priv *priv,
149 u8 *buf, int len) {}
150static inline int iwmct_log_set_filter(u8 src, u8 logmask) { return 0; }
151static inline int iwmct_log_set_fw_filter(u8 src, u8 logmask) { return 0; }
152
153#endif /* CONFIG_IWMC3200TOP_DEBUG */
154
155int log_get_filter_str(char *buf, int size);
156int log_get_fw_filter_str(char *buf, int size);
157
158#endif /* __LOG_H__ */
diff --git a/drivers/misc/iwmc3200top/main.c b/drivers/misc/iwmc3200top/main.c
new file mode 100644
index 000000000000..02b3dadc8abd
--- /dev/null
+++ b/drivers/misc/iwmc3200top/main.c
@@ -0,0 +1,677 @@
1/*
2 * iwmc3200top - Intel Wireless MultiCom 3200 Top Driver
3 * drivers/misc/iwmc3200top/main.c
4 *
5 * Copyright (C) 2009 Intel Corporation. All rights reserved.
6 *
7 * This program is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU General Public License version
9 * 2 as published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
19 * 02110-1301, USA.
20 *
21 *
22 * Author Name: Maxim Grabarnik <maxim.grabarnink@intel.com>
23 * -
24 *
25 */
26
27#include <linux/module.h>
28#include <linux/init.h>
29#include <linux/kernel.h>
30#include <linux/debugfs.h>
31#include <linux/mmc/sdio_ids.h>
32#include <linux/mmc/sdio_func.h>
33#include <linux/mmc/sdio.h>
34
35#include "iwmc3200top.h"
36#include "log.h"
37#include "fw-msg.h"
38#include "debugfs.h"
39
40
41#define DRIVER_DESCRIPTION "Intel(R) IWMC 3200 Top Driver"
42#define DRIVER_COPYRIGHT "Copyright (c) 2008 Intel Corporation."
43
44#define DRIVER_VERSION "0.1.62"
45
46MODULE_DESCRIPTION(DRIVER_DESCRIPTION);
47MODULE_VERSION(DRIVER_VERSION);
48MODULE_LICENSE("GPL");
49MODULE_AUTHOR(DRIVER_COPYRIGHT);
50
51/*
52 * This workers main task is to wait for OP_OPR_ALIVE
53 * from TOP FW until ALIVE_MSG_TIMOUT timeout is elapsed.
54 * When OP_OPR_ALIVE received it will issue
55 * a call to "bus_rescan_devices".
56 */
57static void iwmct_rescan_worker(struct work_struct *ws)
58{
59 struct iwmct_priv *priv;
60 int ret;
61
62 priv = container_of(ws, struct iwmct_priv, bus_rescan_worker);
63
64 LOG_INFO(priv, FW_MSG, "Calling bus_rescan\n");
65
66 ret = bus_rescan_devices(priv->func->dev.bus);
67 if (ret < 0)
68 LOG_INFO(priv, FW_DOWNLOAD, "bus_rescan_devices FAILED!!!\n");
69}
70
71static void op_top_message(struct iwmct_priv *priv, struct top_msg *msg)
72{
73 switch (msg->hdr.opcode) {
74 case OP_OPR_ALIVE:
75 LOG_INFO(priv, FW_MSG, "Got ALIVE from device, wake rescan\n");
76 queue_work(priv->bus_rescan_wq, &priv->bus_rescan_worker);
77 break;
78 default:
79 LOG_INFO(priv, FW_MSG, "Received msg opcode 0x%X\n",
80 msg->hdr.opcode);
81 break;
82 }
83}
84
85
86static void handle_top_message(struct iwmct_priv *priv, u8 *buf, int len)
87{
88 struct top_msg *msg;
89
90 msg = (struct top_msg *)buf;
91
92 if (msg->hdr.type != COMM_TYPE_D2H) {
93 LOG_ERROR(priv, FW_MSG,
94 "Message from TOP with invalid message type 0x%X\n",
95 msg->hdr.type);
96 return;
97 }
98
99 if (len < sizeof(msg->hdr)) {
100 LOG_ERROR(priv, FW_MSG,
101 "Message from TOP is too short for message header "
102 "received %d bytes, expected at least %zd bytes\n",
103 len, sizeof(msg->hdr));
104 return;
105 }
106
107 if (len < le16_to_cpu(msg->hdr.length) + sizeof(msg->hdr)) {
108 LOG_ERROR(priv, FW_MSG,
109 "Message length (%d bytes) is shorter than "
110 "in header (%d bytes)\n",
111 len, le16_to_cpu(msg->hdr.length));
112 return;
113 }
114
115 switch (msg->hdr.category) {
116 case COMM_CATEGORY_OPERATIONAL:
117 op_top_message(priv, (struct top_msg *)buf);
118 break;
119
120 case COMM_CATEGORY_DEBUG:
121 case COMM_CATEGORY_TESTABILITY:
122 case COMM_CATEGORY_DIAGNOSTICS:
123 iwmct_log_top_message(priv, buf, len);
124 break;
125
126 default:
127 LOG_ERROR(priv, FW_MSG,
128 "Message from TOP with unknown category 0x%X\n",
129 msg->hdr.category);
130 break;
131 }
132}
133
134int iwmct_send_hcmd(struct iwmct_priv *priv, u8 *cmd, u16 len)
135{
136 int ret;
137 u8 *buf;
138
139 LOG_INFOEX(priv, FW_MSG, "Sending hcmd:\n");
140
141 /* add padding to 256 for IWMC */
142 ((struct top_msg *)cmd)->hdr.flags |= CMD_FLAG_PADDING_256;
143
144 LOG_HEXDUMP(FW_MSG, cmd, len);
145
146 if (len > FW_HCMD_BLOCK_SIZE) {
147 LOG_ERROR(priv, FW_MSG, "size %d exceeded hcmd max size %d\n",
148 len, FW_HCMD_BLOCK_SIZE);
149 return -1;
150 }
151
152 buf = kzalloc(FW_HCMD_BLOCK_SIZE, GFP_KERNEL);
153 if (!buf) {
154 LOG_ERROR(priv, FW_MSG, "kzalloc error, buf size %d\n",
155 FW_HCMD_BLOCK_SIZE);
156 return -1;
157 }
158
159 memcpy(buf, cmd, len);
160
161 sdio_claim_host(priv->func);
162 ret = sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR, buf,
163 FW_HCMD_BLOCK_SIZE);
164 sdio_release_host(priv->func);
165
166 kfree(buf);
167 return ret;
168}
169
170int iwmct_tx(struct iwmct_priv *priv, unsigned int addr,
171 void *src, int count)
172{
173 int ret;
174
175 sdio_claim_host(priv->func);
176 ret = sdio_memcpy_toio(priv->func, addr, src, count);
177 sdio_release_host(priv->func);
178
179 return ret;
180}
181
/*
 * iwmct_irq_read_worker - deferred handler for one device interrupt
 *
 * Pops one pending read request (queued by iwmct_irq()), reads iosize
 * bytes from the device, then decides whether the payload is a "barker"
 * (a handshake word repeated across the first 4 dwords) or a regular
 * Top CommHub message:
 *   - regular message: passed to handle_top_message();
 *   - ACK barker: completes the sync handshake and starts FW download;
 *   - REBOOT barker: echoed back if sync was requested, otherwise FW
 *     download starts immediately.
 *
 * The SDIO host is held claimed from entry until either the message
 * path or the download path releases it; every early-exit goes through
 * exit_release so the claim is always dropped exactly once.
 */
static void iwmct_irq_read_worker(struct work_struct *ws)
{
	struct iwmct_priv *priv;
	struct iwmct_work_struct *read_req;
	__le32 *buf = NULL;
	int ret;
	int iosize;
	u32 barker;
	bool is_barker;

	priv = container_of(ws, struct iwmct_priv, isr_worker);

	LOG_INFO(priv, IRQ, "enter iwmct_irq_read_worker %p\n", ws);

	/* --------------------- Handshake with device -------------------- */
	sdio_claim_host(priv->func);

	/* all list manipulations have to be protected by
	 * sdio_claim_host/sdio_release_host */
	if (list_empty(&priv->read_req_list)) {
		LOG_ERROR(priv, IRQ, "read_req_list empty in read worker\n");
		goto exit_release;
	}

	read_req = list_entry(priv->read_req_list.next,
			      struct iwmct_work_struct, list);

	list_del(&read_req->list);
	iosize = read_req->iosize;
	kfree(read_req);

	buf = kzalloc(iosize, GFP_KERNEL);
	if (!buf) {
		LOG_ERROR(priv, IRQ, "kzalloc error, buf size %d\n", iosize);
		goto exit_release;
	}

	LOG_INFO(priv, IRQ, "iosize=%d, buf=%p, func=%d\n",
				iosize, buf, priv->func->num);

	/* read from device */
	ret = sdio_memcpy_fromio(priv->func, buf, IWMC_SDIO_DATA_ADDR, iosize);
	if (ret) {
		LOG_ERROR(priv, IRQ, "error %d reading buffer\n", ret);
		goto exit_release;
	}

	LOG_HEXDUMP(IRQ, (u8 *)buf, iosize);

	barker = le32_to_cpu(buf[0]);

	/* Verify whether it's a barker and if not - treat as regular Rx */
	if (barker == IWMC_BARKER_ACK ||
	    (barker & BARKER_DNLOAD_BARKER_MSK) == IWMC_BARKER_REBOOT) {

		/* Valid Barker is equal on first 4 dwords
		 * (raw __le32 equality is fine for this check;
		 * only buf[0] needs byte-swapping for decoding) */
		is_barker = (buf[1] == buf[0]) &&
			    (buf[2] == buf[0]) &&
			    (buf[3] == buf[0]);

		if (!is_barker) {
			LOG_WARNING(priv, IRQ,
				"Potentially inconsistent barker "
				"%08X_%08X_%08X_%08X\n",
				le32_to_cpu(buf[0]), le32_to_cpu(buf[1]),
				le32_to_cpu(buf[2]), le32_to_cpu(buf[3]));
		}
	} else {
		is_barker = false;
	}

	/* Handle Top CommHub message */
	if (!is_barker) {
		/* release before the (potentially long) message handler */
		sdio_release_host(priv->func);
		handle_top_message(priv, (u8 *)buf, iosize);
		goto exit;
	} else if (barker == IWMC_BARKER_ACK) { /* Handle barkers */
		if (atomic_read(&priv->dev_sync) == 0) {
			LOG_ERROR(priv, IRQ,
				  "ACK barker arrived out-of-sync\n");
			goto exit_release;
		}

		/* Continuing to FW download (after Sync is completed)*/
		atomic_set(&priv->dev_sync, 0);
		LOG_INFO(priv, IRQ, "ACK barker arrived "
				"- starting FW download\n");
	} else { /* REBOOT barker */
		LOG_INFO(priv, IRQ, "Recieved reboot barker: %x\n", barker);
		priv->barker = barker;

		if (barker & BARKER_DNLOAD_SYNC_MSK) {
			/* Send the same barker back */
			ret = sdio_memcpy_toio(priv->func, IWMC_SDIO_DATA_ADDR,
					       buf, iosize);
			if (ret) {
				LOG_ERROR(priv, IRQ,
					 "error %d echoing barker\n", ret);
				goto exit_release;
			}
			LOG_INFO(priv, IRQ, "Echoing barker to device\n");
			/* expect an ACK barker next; see branch above */
			atomic_set(&priv->dev_sync, 1);
			goto exit_release;
		}

		/* Continuing to FW download (without Sync) */
		LOG_INFO(priv, IRQ, "No sync requested "
				"- starting FW download\n");
	}

	sdio_release_host(priv->func);


	LOG_INFO(priv, IRQ, "barker download request 0x%x is:\n", priv->barker);
	LOG_INFO(priv, IRQ, "******* Top FW %s requested ********\n",
			(priv->barker & BARKER_DNLOAD_TOP_MSK) ? "was" : "not");
	LOG_INFO(priv, IRQ, "******* GPS FW %s requested ********\n",
			(priv->barker & BARKER_DNLOAD_GPS_MSK) ? "was" : "not");
	LOG_INFO(priv, IRQ, "******* BT FW %s requested ********\n",
			(priv->barker & BARKER_DNLOAD_BT_MSK) ? "was" : "not");

	if (priv->dbg.fw_download)
		iwmct_fw_load(priv);
	else
		LOG_ERROR(priv, IRQ, "FW download not allowed\n");

	goto exit;

exit_release:
	sdio_release_host(priv->func);
exit:
	kfree(buf);
	LOG_INFO(priv, IRQ, "exit iwmct_irq_read_worker\n");
}
316
317static void iwmct_irq(struct sdio_func *func)
318{
319 struct iwmct_priv *priv;
320 int val, ret;
321 int iosize;
322 int addr = IWMC_SDIO_INTR_GET_SIZE_ADDR;
323 struct iwmct_work_struct *read_req;
324
325 priv = sdio_get_drvdata(func);
326
327 LOG_INFO(priv, IRQ, "enter iwmct_irq\n");
328
329 /* read the function's status register */
330 val = sdio_readb(func, IWMC_SDIO_INTR_STATUS_ADDR, &ret);
331
332 LOG_INFO(priv, IRQ, "iir value = %d, ret=%d\n", val, ret);
333
334 if (!val) {
335 LOG_ERROR(priv, IRQ, "iir = 0, exiting ISR\n");
336 goto exit_clear_intr;
337 }
338
339
340 /*
341 * read 2 bytes of the transaction size
342 * IMPORTANT: sdio transaction size has to be read before clearing
343 * sdio interrupt!!!
344 */
345 val = sdio_readb(priv->func, addr++, &ret);
346 iosize = val;
347 val = sdio_readb(priv->func, addr++, &ret);
348 iosize += val << 8;
349
350 LOG_INFO(priv, IRQ, "READ size %d\n", iosize);
351
352 if (iosize == 0) {
353 LOG_ERROR(priv, IRQ, "READ size %d, exiting ISR\n", iosize);
354 goto exit_clear_intr;
355 }
356
357 /* allocate a work structure to pass iosize to the worker */
358 read_req = kzalloc(sizeof(struct iwmct_work_struct), GFP_KERNEL);
359 if (!read_req) {
360 LOG_ERROR(priv, IRQ, "failed to allocate read_req, exit ISR\n");
361 goto exit_clear_intr;
362 }
363
364 INIT_LIST_HEAD(&read_req->list);
365 read_req->iosize = iosize;
366
367 list_add_tail(&priv->read_req_list, &read_req->list);
368
369 /* clear the function's interrupt request bit (write 1 to clear) */
370 sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
371
372 queue_work(priv->wq, &priv->isr_worker);
373
374 LOG_INFO(priv, IRQ, "exit iwmct_irq\n");
375
376 return;
377
378exit_clear_intr:
379 /* clear the function's interrupt request bit (write 1 to clear) */
380 sdio_writeb(func, 1, IWMC_SDIO_INTR_CLEAR_ADDR, &ret);
381}
382
383
384static int blocks;
385module_param(blocks, int, 0604);
386MODULE_PARM_DESC(blocks, "max_blocks_to_send");
387
388static int dump;
389module_param(dump, bool, 0604);
390MODULE_PARM_DESC(dump, "dump_hex_content");
391
392static int jump = 1;
393module_param(jump, bool, 0604);
394
395static int direct = 1;
396module_param(direct, bool, 0604);
397
398static int checksum = 1;
399module_param(checksum, bool, 0604);
400
401static int fw_download = 1;
402module_param(fw_download, bool, 0604);
403
404static int block_size = IWMC_SDIO_BLK_SIZE;
405module_param(block_size, int, 0404);
406
407static int download_trans_blks = IWMC_DEFAULT_TR_BLK;
408module_param(download_trans_blks, int, 0604);
409
410static int rubbish_barker;
411module_param(rubbish_barker, bool, 0604);
412
413#ifdef CONFIG_IWMC3200TOP_DEBUG
414static int log_level[LOG_SRC_MAX];
415static unsigned int log_level_argc;
416module_param_array(log_level, int, &log_level_argc, 0604);
417MODULE_PARM_DESC(log_level, "log_level");
418
419static int log_level_fw[FW_LOG_SRC_MAX];
420static unsigned int log_level_fw_argc;
421module_param_array(log_level_fw, int, &log_level_fw_argc, 0604);
422MODULE_PARM_DESC(log_level_fw, "log_level_fw");
423#endif
424
/*
 * iwmct_dbg_init_params - latch module parameters into priv->dbg
 *
 * In debug builds the log_level/log_level_fw arrays are applied first;
 * each element packs a log source in bits 15:8 and a severity mask in
 * bits 7:0.  The remaining module parameters are then copied into the
 * per-device debug configuration and echoed to the log.
 */
void iwmct_dbg_init_params(struct iwmct_priv *priv)
{
#ifdef CONFIG_IWMC3200TOP_DEBUG
	int i;

	for (i = 0; i < log_level_argc; i++) {
		dev_notice(&priv->func->dev, "log_level[%d]=0x%X\n",
				i, log_level[i]);
		iwmct_log_set_filter((log_level[i] >> 8) & 0xFF,
				log_level[i] & 0xFF);
	}
	for (i = 0; i < log_level_fw_argc; i++) {
		dev_notice(&priv->func->dev, "log_level_fw[%d]=0x%X\n",
				i, log_level_fw[i]);
		iwmct_log_set_fw_filter((log_level_fw[i] >> 8) & 0xFF,
				log_level_fw[i] & 0xFF);
	}
#endif

	priv->dbg.blocks = blocks;
	LOG_INFO(priv, INIT, "blocks=%d\n", blocks);
	priv->dbg.dump = (bool)dump;
	LOG_INFO(priv, INIT, "dump=%d\n", dump);
	priv->dbg.jump = (bool)jump;
	LOG_INFO(priv, INIT, "jump=%d\n", jump);
	priv->dbg.direct = (bool)direct;
	LOG_INFO(priv, INIT, "direct=%d\n", direct);
	priv->dbg.checksum = (bool)checksum;
	LOG_INFO(priv, INIT, "checksum=%d\n", checksum);
	priv->dbg.fw_download = (bool)fw_download;
	LOG_INFO(priv, INIT, "fw_download=%d\n", fw_download);
	priv->dbg.block_size = block_size;
	LOG_INFO(priv, INIT, "block_size=%d\n", block_size);
	priv->dbg.download_trans_blks = download_trans_blks;
	LOG_INFO(priv, INIT, "download_trans_blks=%d\n", download_trans_blks);
}
461
/*****************************************************************************
 *
 * sysfs attributes
 *
 *****************************************************************************/

/* "cc_label_fw" (read-only): report the firmware label string. */
static ssize_t show_iwmct_fw_version(struct device *d,
				 struct device_attribute *attr, char *buf)
{
	struct iwmct_priv *priv = dev_get_drvdata(d);
	return sprintf(buf, "%s\n", priv->dbg.label_fw);
}
static DEVICE_ATTR(cc_label_fw, S_IRUGO, show_iwmct_fw_version, NULL);

#ifdef CONFIG_IWMC3200TOP_DEBUG
/* read/write log filter controls; only present in debug builds */
static DEVICE_ATTR(log_level, S_IWUSR | S_IRUGO,
		   show_iwmct_log_level, store_iwmct_log_level);
static DEVICE_ATTR(log_level_fw, S_IWUSR | S_IRUGO,
		   show_iwmct_log_level_fw, store_iwmct_log_level_fw);
#endif
481
/* Attributes created in the SDIO function's sysfs directory. */
static struct attribute *iwmct_sysfs_entries[] = {
	&dev_attr_cc_label_fw.attr,
#ifdef CONFIG_IWMC3200TOP_DEBUG
	&dev_attr_log_level.attr,
	&dev_attr_log_level_fw.attr,
#endif
	NULL /* sysfs requires a NULL terminator */
};

static struct attribute_group iwmct_attribute_group = {
	.name = NULL, /* put in device directory */
	.attrs = iwmct_sysfs_entries,
};
495
496
497static int iwmct_probe(struct sdio_func *func,
498 const struct sdio_device_id *id)
499{
500 struct iwmct_priv *priv;
501 int ret;
502 int val = 1;
503 int addr = IWMC_SDIO_INTR_ENABLE_ADDR;
504
505 dev_dbg(&func->dev, "enter iwmct_probe\n");
506
507 dev_dbg(&func->dev, "IRQ polling period id %u msecs, HZ is %d\n",
508 jiffies_to_msecs(2147483647), HZ);
509
510 priv = kzalloc(sizeof(struct iwmct_priv), GFP_KERNEL);
511 if (!priv) {
512 dev_err(&func->dev, "kzalloc error\n");
513 return -ENOMEM;
514 }
515 priv->func = func;
516 sdio_set_drvdata(func, priv);
517
518
519 /* create drivers work queue */
520 priv->wq = create_workqueue(DRV_NAME "_wq");
521 priv->bus_rescan_wq = create_workqueue(DRV_NAME "_rescan_wq");
522 INIT_WORK(&priv->bus_rescan_worker, iwmct_rescan_worker);
523 INIT_WORK(&priv->isr_worker, iwmct_irq_read_worker);
524
525 init_waitqueue_head(&priv->wait_q);
526
527 sdio_claim_host(func);
528 /* FIXME: Remove after it is fixed in the Boot ROM upgrade */
529 func->enable_timeout = 10;
530
531 /* In our HW, setting the block size also wakes up the boot rom. */
532 ret = sdio_set_block_size(func, priv->dbg.block_size);
533 if (ret) {
534 LOG_ERROR(priv, INIT,
535 "sdio_set_block_size() failure: %d\n", ret);
536 goto error_sdio_enable;
537 }
538
539 ret = sdio_enable_func(func);
540 if (ret) {
541 LOG_ERROR(priv, INIT, "sdio_enable_func() failure: %d\n", ret);
542 goto error_sdio_enable;
543 }
544
545 /* init reset and dev_sync states */
546 atomic_set(&priv->reset, 0);
547 atomic_set(&priv->dev_sync, 0);
548
549 /* init read req queue */
550 INIT_LIST_HEAD(&priv->read_req_list);
551
552 /* process configurable parameters */
553 iwmct_dbg_init_params(priv);
554 ret = sysfs_create_group(&func->dev.kobj, &iwmct_attribute_group);
555 if (ret) {
556 LOG_ERROR(priv, INIT, "Failed to register attributes and "
557 "initialize module_params\n");
558 goto error_dev_attrs;
559 }
560
561 iwmct_dbgfs_register(priv, DRV_NAME);
562
563 if (!priv->dbg.direct && priv->dbg.download_trans_blks > 8) {
564 LOG_INFO(priv, INIT,
565 "Reducing transaction to 8 blocks = 2K (from %d)\n",
566 priv->dbg.download_trans_blks);
567 priv->dbg.download_trans_blks = 8;
568 }
569 priv->trans_len = priv->dbg.download_trans_blks * priv->dbg.block_size;
570 LOG_INFO(priv, INIT, "Transaction length = %d\n", priv->trans_len);
571
572 ret = sdio_claim_irq(func, iwmct_irq);
573 if (ret) {
574 LOG_ERROR(priv, INIT, "sdio_claim_irq() failure: %d\n", ret);
575 goto error_claim_irq;
576 }
577
578
579 /* Enable function's interrupt */
580 sdio_writeb(priv->func, val, addr, &ret);
581 if (ret) {
582 LOG_ERROR(priv, INIT, "Failure writing to "
583 "Interrupt Enable Register (%d): %d\n", addr, ret);
584 goto error_enable_int;
585 }
586
587 sdio_release_host(func);
588
589 LOG_INFO(priv, INIT, "exit iwmct_probe\n");
590
591 return ret;
592
593error_enable_int:
594 sdio_release_irq(func);
595error_claim_irq:
596 sdio_disable_func(func);
597error_dev_attrs:
598 iwmct_dbgfs_unregister(priv->dbgfs);
599 sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group);
600error_sdio_enable:
601 sdio_release_host(func);
602 return ret;
603}
604
605static void iwmct_remove(struct sdio_func *func)
606{
607 struct iwmct_work_struct *read_req;
608 struct iwmct_priv *priv = sdio_get_drvdata(func);
609
610 priv = sdio_get_drvdata(func);
611
612 LOG_INFO(priv, INIT, "enter\n");
613
614 sdio_claim_host(func);
615 sdio_release_irq(func);
616 sdio_release_host(func);
617
618 /* Safely destroy osc workqueue */
619 destroy_workqueue(priv->bus_rescan_wq);
620 destroy_workqueue(priv->wq);
621
622 sdio_claim_host(func);
623 sdio_disable_func(func);
624 sysfs_remove_group(&func->dev.kobj, &iwmct_attribute_group);
625 iwmct_dbgfs_unregister(priv->dbgfs);
626 sdio_release_host(func);
627
628 /* free read requests */
629 while (!list_empty(&priv->read_req_list)) {
630 read_req = list_entry(priv->read_req_list.next,
631 struct iwmct_work_struct, list);
632
633 list_del(&read_req->list);
634 kfree(read_req);
635 }
636
637 kfree(priv);
638}
639
640
/* SDIO device IDs this driver binds to. */
static const struct sdio_device_id iwmct_ids[] = {
	/* Intel Wireless MultiCom 3200 Top Driver */
	{ SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1404)},
	{ },	/* Terminating entry */
};

MODULE_DEVICE_TABLE(sdio, iwmct_ids);
648
/* SDIO driver glue: probe/remove callbacks and the id table above. */
static struct sdio_driver iwmct_driver = {
	.probe = iwmct_probe,
	.remove = iwmct_remove,
	.name = DRV_NAME,
	.id_table = iwmct_ids,
};
655
656static int __init iwmct_init(void)
657{
658 int rc;
659
660 /* Default log filter settings */
661 iwmct_log_set_filter(LOG_SRC_ALL, LOG_SEV_FILTER_RUNTIME);
662 iwmct_log_set_filter(LOG_SRC_FW_MSG, LOG_SEV_FILTER_ALL);
663 iwmct_log_set_fw_filter(LOG_SRC_ALL, FW_LOG_SEV_FILTER_RUNTIME);
664
665 rc = sdio_register_driver(&iwmct_driver);
666
667 return rc;
668}
669
/* Module unload: unregister the SDIO driver (per-device teardown is
 * handled by iwmct_remove()). */
static void __exit iwmct_exit(void)
{
	sdio_unregister_driver(&iwmct_driver);
}
674
675module_init(iwmct_init);
676module_exit(iwmct_exit);
677
diff --git a/drivers/misc/phantom.c b/drivers/misc/phantom.c
index fa57b67593ae..90a95ce8dc34 100644
--- a/drivers/misc/phantom.c
+++ b/drivers/misc/phantom.c
@@ -271,7 +271,7 @@ static unsigned int phantom_poll(struct file *file, poll_table *wait)
271 return mask; 271 return mask;
272} 272}
273 273
274static struct file_operations phantom_file_ops = { 274static const struct file_operations phantom_file_ops = {
275 .open = phantom_open, 275 .open = phantom_open,
276 .release = phantom_release, 276 .release = phantom_release,
277 .unlocked_ioctl = phantom_ioctl, 277 .unlocked_ioctl = phantom_ioctl,
diff --git a/drivers/misc/sgi-gru/grufile.c b/drivers/misc/sgi-gru/grufile.c
index aed609832bc2..41c8fe2a928c 100644
--- a/drivers/misc/sgi-gru/grufile.c
+++ b/drivers/misc/sgi-gru/grufile.c
@@ -53,7 +53,6 @@ struct gru_stats_s gru_stats;
53/* Guaranteed user available resources on each node */ 53/* Guaranteed user available resources on each node */
54static int max_user_cbrs, max_user_dsr_bytes; 54static int max_user_cbrs, max_user_dsr_bytes;
55 55
56static struct file_operations gru_fops;
57static struct miscdevice gru_miscdev; 56static struct miscdevice gru_miscdev;
58 57
59 58
@@ -426,7 +425,7 @@ static void __exit gru_exit(void)
426 gru_proc_exit(); 425 gru_proc_exit();
427} 426}
428 427
429static struct file_operations gru_fops = { 428static const struct file_operations gru_fops = {
430 .owner = THIS_MODULE, 429 .owner = THIS_MODULE,
431 .unlocked_ioctl = gru_file_unlocked_ioctl, 430 .unlocked_ioctl = gru_file_unlocked_ioctl,
432 .mmap = gru_file_mmap, 431 .mmap = gru_file_mmap,
@@ -438,7 +437,7 @@ static struct miscdevice gru_miscdev = {
438 .fops = &gru_fops, 437 .fops = &gru_fops,
439}; 438};
440 439
441struct vm_operations_struct gru_vm_ops = { 440const struct vm_operations_struct gru_vm_ops = {
442 .close = gru_vma_close, 441 .close = gru_vma_close,
443 .fault = gru_fault, 442 .fault = gru_fault,
444}; 443};
diff --git a/drivers/misc/sgi-gru/grutables.h b/drivers/misc/sgi-gru/grutables.h
index 34ab3d453919..46990bcfa536 100644
--- a/drivers/misc/sgi-gru/grutables.h
+++ b/drivers/misc/sgi-gru/grutables.h
@@ -624,7 +624,7 @@ static inline int is_kernel_context(struct gru_thread_state *gts)
624 */ 624 */
625struct gru_unload_context_req; 625struct gru_unload_context_req;
626 626
627extern struct vm_operations_struct gru_vm_ops; 627extern const struct vm_operations_struct gru_vm_ops;
628extern struct device *grudev; 628extern struct device *grudev;
629 629
630extern struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma, 630extern struct gru_vma_data *gru_alloc_vma_data(struct vm_area_struct *vma,
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index 610dbd1fcc82..96d10f40fb23 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -240,7 +240,7 @@ static int mmc_ext_csd_release(struct inode *inode, struct file *file)
240 return 0; 240 return 0;
241} 241}
242 242
243static struct file_operations mmc_dbg_ext_csd_fops = { 243static const struct file_operations mmc_dbg_ext_csd_fops = {
244 .open = mmc_ext_csd_open, 244 .open = mmc_ext_csd_open,
245 .read = mmc_ext_csd_read, 245 .read = mmc_ext_csd_read,
246 .release = mmc_ext_csd_release, 246 .release = mmc_ext_csd_release,
diff --git a/drivers/mmc/core/sdio_cis.c b/drivers/mmc/core/sdio_cis.c
index 6636354b48ce..e1035c895808 100644
--- a/drivers/mmc/core/sdio_cis.c
+++ b/drivers/mmc/core/sdio_cis.c
@@ -98,6 +98,22 @@ static const unsigned char speed_val[16] =
98static const unsigned int speed_unit[8] = 98static const unsigned int speed_unit[8] =
99 { 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 }; 99 { 10000, 100000, 1000000, 10000000, 0, 0, 0, 0 };
100 100
101/* FUNCE tuples with these types get passed to SDIO drivers */
102static const unsigned char funce_type_whitelist[] = {
103 4 /* CISTPL_FUNCE_LAN_NODE_ID used in Broadcom cards */
104};
105
106static int cistpl_funce_whitelisted(unsigned char type)
107{
108 int i;
109
110 for (i = 0; i < ARRAY_SIZE(funce_type_whitelist); i++) {
111 if (funce_type_whitelist[i] == type)
112 return 1;
113 }
114 return 0;
115}
116
101static int cistpl_funce_common(struct mmc_card *card, 117static int cistpl_funce_common(struct mmc_card *card,
102 const unsigned char *buf, unsigned size) 118 const unsigned char *buf, unsigned size)
103{ 119{
@@ -120,6 +136,10 @@ static int cistpl_funce_func(struct sdio_func *func,
120 unsigned vsn; 136 unsigned vsn;
121 unsigned min_size; 137 unsigned min_size;
122 138
139 /* let SDIO drivers take care of whitelisted FUNCE tuples */
140 if (cistpl_funce_whitelisted(buf[0]))
141 return -EILSEQ;
142
123 vsn = func->card->cccr.sdio_vsn; 143 vsn = func->card->cccr.sdio_vsn;
124 min_size = (vsn == SDIO_SDIO_REV_1_00) ? 28 : 42; 144 min_size = (vsn == SDIO_SDIO_REV_1_00) ? 28 : 42;
125 145
@@ -154,13 +174,12 @@ static int cistpl_funce(struct mmc_card *card, struct sdio_func *func,
154 else 174 else
155 ret = cistpl_funce_common(card, buf, size); 175 ret = cistpl_funce_common(card, buf, size);
156 176
157 if (ret) { 177 if (ret && ret != -EILSEQ) {
158 printk(KERN_ERR "%s: bad CISTPL_FUNCE size %u " 178 printk(KERN_ERR "%s: bad CISTPL_FUNCE size %u "
159 "type %u\n", mmc_hostname(card->host), size, buf[0]); 179 "type %u\n", mmc_hostname(card->host), size, buf[0]);
160 return ret;
161 } 180 }
162 181
163 return 0; 182 return ret;
164} 183}
165 184
166typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *, 185typedef int (tpl_parse_t)(struct mmc_card *, struct sdio_func *,
@@ -253,21 +272,12 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
253 for (i = 0; i < ARRAY_SIZE(cis_tpl_list); i++) 272 for (i = 0; i < ARRAY_SIZE(cis_tpl_list); i++)
254 if (cis_tpl_list[i].code == tpl_code) 273 if (cis_tpl_list[i].code == tpl_code)
255 break; 274 break;
256 if (i >= ARRAY_SIZE(cis_tpl_list)) { 275 if (i < ARRAY_SIZE(cis_tpl_list)) {
257 /* this tuple is unknown to the core */
258 this->next = NULL;
259 this->code = tpl_code;
260 this->size = tpl_link;
261 *prev = this;
262 prev = &this->next;
263 printk(KERN_DEBUG
264 "%s: queuing CIS tuple 0x%02x length %u\n",
265 mmc_hostname(card->host), tpl_code, tpl_link);
266 } else {
267 const struct cis_tpl *tpl = cis_tpl_list + i; 276 const struct cis_tpl *tpl = cis_tpl_list + i;
268 if (tpl_link < tpl->min_size) { 277 if (tpl_link < tpl->min_size) {
269 printk(KERN_ERR 278 printk(KERN_ERR
270 "%s: bad CIS tuple 0x%02x (length = %u, expected >= %u)\n", 279 "%s: bad CIS tuple 0x%02x"
280 " (length = %u, expected >= %u)\n",
271 mmc_hostname(card->host), 281 mmc_hostname(card->host),
272 tpl_code, tpl_link, tpl->min_size); 282 tpl_code, tpl_link, tpl->min_size);
273 ret = -EINVAL; 283 ret = -EINVAL;
@@ -275,7 +285,30 @@ static int sdio_read_cis(struct mmc_card *card, struct sdio_func *func)
275 ret = tpl->parse(card, func, 285 ret = tpl->parse(card, func,
276 this->data, tpl_link); 286 this->data, tpl_link);
277 } 287 }
278 kfree(this); 288 /*
289 * We don't need the tuple anymore if it was
290 * successfully parsed by the SDIO core or if it is
291 * not going to be parsed by SDIO drivers.
292 */
293 if (!ret || ret != -EILSEQ)
294 kfree(this);
295 } else {
296 /* unknown tuple */
297 ret = -EILSEQ;
298 }
299
300 if (ret == -EILSEQ) {
301 /* this tuple is unknown to the core or whitelisted */
302 this->next = NULL;
303 this->code = tpl_code;
304 this->size = tpl_link;
305 *prev = this;
306 prev = &this->next;
307 printk(KERN_DEBUG
308 "%s: queuing CIS tuple 0x%02x length %u\n",
309 mmc_hostname(card->host), tpl_code, tpl_link);
310 /* keep on analyzing tuples */
311 ret = 0;
279 } 312 }
280 313
281 ptr += tpl_link; 314 ptr += tpl_link;
diff --git a/drivers/mmc/host/Kconfig b/drivers/mmc/host/Kconfig
index 7cb057f3f883..432ae8358c86 100644
--- a/drivers/mmc/host/Kconfig
+++ b/drivers/mmc/host/Kconfig
@@ -276,6 +276,47 @@ config MMC_S3C
276 276
277 If unsure, say N. 277 If unsure, say N.
278 278
279config MMC_S3C_HW_SDIO_IRQ
280 bool "Hardware support for SDIO IRQ"
281 depends on MMC_S3C
282 help
283 Enable the hardware support for SDIO interrupts instead of using
284 the generic polling code.
285
286choice
287 prompt "Samsung S3C SD/MMC transfer code"
288 depends on MMC_S3C
289
290config MMC_S3C_PIO
291 bool "Use PIO transfers only"
292 help
293 Use PIO to transfer data between memory and the hardware.
294
295 PIO is slower than DMA as it requires CPU instructions to
296 move the data. This has been the traditional default for
297 the S3C MCI driver.
298
299config MMC_S3C_DMA
300 bool "Use DMA transfers only (EXPERIMENTAL)"
301 depends on EXPERIMENTAL
302 help
303 Use DMA to transfer data between memory and the hardare.
304
305 Currently, the DMA support in this driver seems to not be
306 working properly and needs to be debugged before this
307 option is useful.
308
309config MMC_S3C_PIODMA
310 bool "Support for both PIO and DMA (EXPERIMENTAL)"
311 help
312 Compile both the PIO and DMA transfer routines into the
313 driver and let the platform select at run-time which one
314 is best.
315
316 See notes for the DMA option.
317
318endchoice
319
279config MMC_SDRICOH_CS 320config MMC_SDRICOH_CS
280 tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)" 321 tristate "MMC/SD driver for Ricoh Bay1Controllers (EXPERIMENTAL)"
281 depends on EXPERIMENTAL && PCI && PCMCIA 322 depends on EXPERIMENTAL && PCI && PCMCIA
diff --git a/drivers/mmc/host/mmci.c b/drivers/mmc/host/mmci.c
index 8741d0f5146a..705a5894a6bb 100644
--- a/drivers/mmc/host/mmci.c
+++ b/drivers/mmc/host/mmci.c
@@ -22,12 +22,13 @@
22#include <linux/clk.h> 22#include <linux/clk.h>
23#include <linux/scatterlist.h> 23#include <linux/scatterlist.h>
24#include <linux/gpio.h> 24#include <linux/gpio.h>
25#include <linux/amba/mmci.h>
26#include <linux/regulator/consumer.h>
25 27
26#include <asm/cacheflush.h> 28#include <asm/cacheflush.h>
27#include <asm/div64.h> 29#include <asm/div64.h>
28#include <asm/io.h> 30#include <asm/io.h>
29#include <asm/sizes.h> 31#include <asm/sizes.h>
30#include <asm/mach/mmc.h>
31 32
32#include "mmci.h" 33#include "mmci.h"
33 34
@@ -38,6 +39,36 @@
38 39
39static unsigned int fmax = 515633; 40static unsigned int fmax = 515633;
40 41
42/*
43 * This must be called with host->lock held
44 */
45static void mmci_set_clkreg(struct mmci_host *host, unsigned int desired)
46{
47 u32 clk = 0;
48
49 if (desired) {
50 if (desired >= host->mclk) {
51 clk = MCI_CLK_BYPASS;
52 host->cclk = host->mclk;
53 } else {
54 clk = host->mclk / (2 * desired) - 1;
55 if (clk >= 256)
56 clk = 255;
57 host->cclk = host->mclk / (2 * (clk + 1));
58 }
59 if (host->hw_designer == 0x80)
60 clk |= MCI_FCEN; /* Bug fix in ST IP block */
61 clk |= MCI_CLK_ENABLE;
62 /* This hasn't proven to be worthwhile */
63 /* clk |= MCI_CLK_PWRSAVE; */
64 }
65
66 if (host->mmc->ios.bus_width == MMC_BUS_WIDTH_4)
67 clk |= MCI_WIDE_BUS;
68
69 writel(clk, host->base + MMCICLOCK);
70}
71
41static void 72static void
42mmci_request_end(struct mmci_host *host, struct mmc_request *mrq) 73mmci_request_end(struct mmci_host *host, struct mmc_request *mrq)
43{ 74{
@@ -419,30 +450,31 @@ static void mmci_request(struct mmc_host *mmc, struct mmc_request *mrq)
419static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios) 450static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
420{ 451{
421 struct mmci_host *host = mmc_priv(mmc); 452 struct mmci_host *host = mmc_priv(mmc);
422 u32 clk = 0, pwr = 0; 453 u32 pwr = 0;
423 454 unsigned long flags;
424 if (ios->clock) {
425 if (ios->clock >= host->mclk) {
426 clk = MCI_CLK_BYPASS;
427 host->cclk = host->mclk;
428 } else {
429 clk = host->mclk / (2 * ios->clock) - 1;
430 if (clk >= 256)
431 clk = 255;
432 host->cclk = host->mclk / (2 * (clk + 1));
433 }
434 if (host->hw_designer == AMBA_VENDOR_ST)
435 clk |= MCI_FCEN; /* Bug fix in ST IP block */
436 clk |= MCI_CLK_ENABLE;
437 }
438
439 if (host->plat->translate_vdd)
440 pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
441 455
442 switch (ios->power_mode) { 456 switch (ios->power_mode) {
443 case MMC_POWER_OFF: 457 case MMC_POWER_OFF:
458 if(host->vcc &&
459 regulator_is_enabled(host->vcc))
460 regulator_disable(host->vcc);
444 break; 461 break;
445 case MMC_POWER_UP: 462 case MMC_POWER_UP:
463#ifdef CONFIG_REGULATOR
464 if (host->vcc)
465 /* This implicitly enables the regulator */
466 mmc_regulator_set_ocr(host->vcc, ios->vdd);
467#endif
468 /*
469 * The translate_vdd function is not used if you have
470 * an external regulator, or your design is really weird.
471 * Using it would mean sending in power control BOTH using
472 * a regulator AND the 4 MMCIPWR bits. If we don't have
473 * a regulator, we might have some other platform specific
474 * power control behind this translate function.
475 */
476 if (!host->vcc && host->plat->translate_vdd)
477 pwr |= host->plat->translate_vdd(mmc_dev(mmc), ios->vdd);
446 /* The ST version does not have this, fall through to POWER_ON */ 478 /* The ST version does not have this, fall through to POWER_ON */
447 if (host->hw_designer != AMBA_VENDOR_ST) { 479 if (host->hw_designer != AMBA_VENDOR_ST) {
448 pwr |= MCI_PWR_UP; 480 pwr |= MCI_PWR_UP;
@@ -465,12 +497,16 @@ static void mmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
465 } 497 }
466 } 498 }
467 499
468 writel(clk, host->base + MMCICLOCK); 500 spin_lock_irqsave(&host->lock, flags);
501
502 mmci_set_clkreg(host, ios->clock);
469 503
470 if (host->pwr != pwr) { 504 if (host->pwr != pwr) {
471 host->pwr = pwr; 505 host->pwr = pwr;
472 writel(pwr, host->base + MMCIPOWER); 506 writel(pwr, host->base + MMCIPOWER);
473 } 507 }
508
509 spin_unlock_irqrestore(&host->lock, flags);
474} 510}
475 511
476static int mmci_get_ro(struct mmc_host *mmc) 512static int mmci_get_ro(struct mmc_host *mmc)
@@ -517,7 +553,7 @@ static void mmci_check_status(unsigned long data)
517 553
518static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id) 554static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
519{ 555{
520 struct mmc_platform_data *plat = dev->dev.platform_data; 556 struct mmci_platform_data *plat = dev->dev.platform_data;
521 struct mmci_host *host; 557 struct mmci_host *host;
522 struct mmc_host *mmc; 558 struct mmc_host *mmc;
523 int ret; 559 int ret;
@@ -583,7 +619,30 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
583 mmc->ops = &mmci_ops; 619 mmc->ops = &mmci_ops;
584 mmc->f_min = (host->mclk + 511) / 512; 620 mmc->f_min = (host->mclk + 511) / 512;
585 mmc->f_max = min(host->mclk, fmax); 621 mmc->f_max = min(host->mclk, fmax);
586 mmc->ocr_avail = plat->ocr_mask; 622#ifdef CONFIG_REGULATOR
623 /* If we're using the regulator framework, try to fetch a regulator */
624 host->vcc = regulator_get(&dev->dev, "vmmc");
625 if (IS_ERR(host->vcc))
626 host->vcc = NULL;
627 else {
628 int mask = mmc_regulator_get_ocrmask(host->vcc);
629
630 if (mask < 0)
631 dev_err(&dev->dev, "error getting OCR mask (%d)\n",
632 mask);
633 else {
634 host->mmc->ocr_avail = (u32) mask;
635 if (plat->ocr_mask)
636 dev_warn(&dev->dev,
637 "Provided ocr_mask/setpower will not be used "
638 "(using regulator instead)\n");
639 }
640 }
641#endif
642 /* Fall back to platform data if no regulator is found */
643 if (host->vcc == NULL)
644 mmc->ocr_avail = plat->ocr_mask;
645 mmc->caps = plat->capabilities;
587 646
588 /* 647 /*
589 * We can do SGIO 648 * We can do SGIO
@@ -619,7 +678,6 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
619 writel(0, host->base + MMCIMASK1); 678 writel(0, host->base + MMCIMASK1);
620 writel(0xfff, host->base + MMCICLEAR); 679 writel(0xfff, host->base + MMCICLEAR);
621 680
622#ifdef CONFIG_GPIOLIB
623 if (gpio_is_valid(plat->gpio_cd)) { 681 if (gpio_is_valid(plat->gpio_cd)) {
624 ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)"); 682 ret = gpio_request(plat->gpio_cd, DRIVER_NAME " (cd)");
625 if (ret == 0) 683 if (ret == 0)
@@ -638,7 +696,6 @@ static int __devinit mmci_probe(struct amba_device *dev, struct amba_id *id)
638 else if (ret != -ENOSYS) 696 else if (ret != -ENOSYS)
639 goto err_gpio_wp; 697 goto err_gpio_wp;
640 } 698 }
641#endif
642 699
643 ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host); 700 ret = request_irq(dev->irq[0], mmci_irq, IRQF_SHARED, DRIVER_NAME " (cmd)", host);
644 if (ret) 701 if (ret)
@@ -720,6 +777,10 @@ static int __devexit mmci_remove(struct amba_device *dev)
720 clk_disable(host->clk); 777 clk_disable(host->clk);
721 clk_put(host->clk); 778 clk_put(host->clk);
722 779
780 if (regulator_is_enabled(host->vcc))
781 regulator_disable(host->vcc);
782 regulator_put(host->vcc);
783
723 mmc_free_host(mmc); 784 mmc_free_host(mmc);
724 785
725 amba_release_regions(dev); 786 amba_release_regions(dev);
diff --git a/drivers/mmc/host/mmci.h b/drivers/mmc/host/mmci.h
index 839f264c9725..1ceb9a90f59b 100644
--- a/drivers/mmc/host/mmci.h
+++ b/drivers/mmc/host/mmci.h
@@ -161,7 +161,7 @@ struct mmci_host {
161 unsigned int mclk; 161 unsigned int mclk;
162 unsigned int cclk; 162 unsigned int cclk;
163 u32 pwr; 163 u32 pwr;
164 struct mmc_platform_data *plat; 164 struct mmci_platform_data *plat;
165 165
166 u8 hw_designer; 166 u8 hw_designer;
167 u8 hw_revision:4; 167 u8 hw_revision:4;
@@ -175,6 +175,7 @@ struct mmci_host {
175 struct scatterlist *sg_ptr; 175 struct scatterlist *sg_ptr;
176 unsigned int sg_off; 176 unsigned int sg_off;
177 unsigned int size; 177 unsigned int size;
178 struct regulator *vcc;
178}; 179};
179 180
180static inline void mmci_init_sg(struct mmci_host *host, struct mmc_data *data) 181static inline void mmci_init_sg(struct mmci_host *host, struct mmc_data *data)
diff --git a/drivers/mmc/host/pxamci.c b/drivers/mmc/host/pxamci.c
index e55ac792d68c..5e0b1529964d 100644
--- a/drivers/mmc/host/pxamci.c
+++ b/drivers/mmc/host/pxamci.c
@@ -28,6 +28,7 @@
28#include <linux/mmc/host.h> 28#include <linux/mmc/host.h>
29#include <linux/io.h> 29#include <linux/io.h>
30#include <linux/regulator/consumer.h> 30#include <linux/regulator/consumer.h>
31#include <linux/gpio.h>
31 32
32#include <asm/sizes.h> 33#include <asm/sizes.h>
33 34
@@ -96,10 +97,18 @@ static inline void pxamci_init_ocr(struct pxamci_host *host)
96 97
97static inline void pxamci_set_power(struct pxamci_host *host, unsigned int vdd) 98static inline void pxamci_set_power(struct pxamci_host *host, unsigned int vdd)
98{ 99{
100 int on;
101
99#ifdef CONFIG_REGULATOR 102#ifdef CONFIG_REGULATOR
100 if (host->vcc) 103 if (host->vcc)
101 mmc_regulator_set_ocr(host->vcc, vdd); 104 mmc_regulator_set_ocr(host->vcc, vdd);
102#endif 105#endif
106 if (!host->vcc && host->pdata &&
107 gpio_is_valid(host->pdata->gpio_power)) {
108 on = ((1 << vdd) & host->pdata->ocr_mask);
109 gpio_set_value(host->pdata->gpio_power,
110 !!on ^ host->pdata->gpio_power_invert);
111 }
103 if (!host->vcc && host->pdata && host->pdata->setpower) 112 if (!host->vcc && host->pdata && host->pdata->setpower)
104 host->pdata->setpower(mmc_dev(host->mmc), vdd); 113 host->pdata->setpower(mmc_dev(host->mmc), vdd);
105} 114}
@@ -421,6 +430,12 @@ static int pxamci_get_ro(struct mmc_host *mmc)
421{ 430{
422 struct pxamci_host *host = mmc_priv(mmc); 431 struct pxamci_host *host = mmc_priv(mmc);
423 432
433 if (host->pdata && gpio_is_valid(host->pdata->gpio_card_ro)) {
434 if (host->pdata->gpio_card_ro_invert)
435 return !gpio_get_value(host->pdata->gpio_card_ro);
436 else
437 return gpio_get_value(host->pdata->gpio_card_ro);
438 }
424 if (host->pdata && host->pdata->get_ro) 439 if (host->pdata && host->pdata->get_ro)
425 return !!host->pdata->get_ro(mmc_dev(mmc)); 440 return !!host->pdata->get_ro(mmc_dev(mmc));
426 /* 441 /*
@@ -534,7 +549,7 @@ static int pxamci_probe(struct platform_device *pdev)
534 struct mmc_host *mmc; 549 struct mmc_host *mmc;
535 struct pxamci_host *host = NULL; 550 struct pxamci_host *host = NULL;
536 struct resource *r, *dmarx, *dmatx; 551 struct resource *r, *dmarx, *dmatx;
537 int ret, irq; 552 int ret, irq, gpio_cd = -1, gpio_ro = -1, gpio_power = -1;
538 553
539 r = platform_get_resource(pdev, IORESOURCE_MEM, 0); 554 r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
540 irq = platform_get_irq(pdev, 0); 555 irq = platform_get_irq(pdev, 0);
@@ -661,13 +676,63 @@ static int pxamci_probe(struct platform_device *pdev)
661 } 676 }
662 host->dma_drcmrtx = dmatx->start; 677 host->dma_drcmrtx = dmatx->start;
663 678
679 if (host->pdata) {
680 gpio_cd = host->pdata->gpio_card_detect;
681 gpio_ro = host->pdata->gpio_card_ro;
682 gpio_power = host->pdata->gpio_power;
683 }
684 if (gpio_is_valid(gpio_power)) {
685 ret = gpio_request(gpio_power, "mmc card power");
686 if (ret) {
687 dev_err(&pdev->dev, "Failed requesting gpio_power %d\n", gpio_power);
688 goto out;
689 }
690 gpio_direction_output(gpio_power,
691 host->pdata->gpio_power_invert);
692 }
693 if (gpio_is_valid(gpio_ro)) {
694 ret = gpio_request(gpio_ro, "mmc card read only");
695 if (ret) {
696 dev_err(&pdev->dev, "Failed requesting gpio_ro %d\n", gpio_power);
697 goto err_gpio_ro;
698 }
699 gpio_direction_input(gpio_ro);
700 }
701 if (gpio_is_valid(gpio_cd)) {
702 ret = gpio_request(gpio_cd, "mmc card detect");
703 if (ret) {
704 dev_err(&pdev->dev, "Failed requesting gpio_cd %d\n", gpio_power);
705 goto err_gpio_cd;
706 }
707 gpio_direction_input(gpio_cd);
708
709 ret = request_irq(gpio_to_irq(gpio_cd), pxamci_detect_irq,
710 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
711 "mmc card detect", mmc);
712 if (ret) {
713 dev_err(&pdev->dev, "failed to request card detect IRQ\n");
714 goto err_request_irq;
715 }
716 }
717
664 if (host->pdata && host->pdata->init) 718 if (host->pdata && host->pdata->init)
665 host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc); 719 host->pdata->init(&pdev->dev, pxamci_detect_irq, mmc);
666 720
721 if (gpio_is_valid(gpio_power) && host->pdata->setpower)
722 dev_warn(&pdev->dev, "gpio_power and setpower() both defined\n");
723 if (gpio_is_valid(gpio_ro) && host->pdata->get_ro)
724 dev_warn(&pdev->dev, "gpio_ro and get_ro() both defined\n");
725
667 mmc_add_host(mmc); 726 mmc_add_host(mmc);
668 727
669 return 0; 728 return 0;
670 729
730err_request_irq:
731 gpio_free(gpio_cd);
732err_gpio_cd:
733 gpio_free(gpio_ro);
734err_gpio_ro:
735 gpio_free(gpio_power);
671 out: 736 out:
672 if (host) { 737 if (host) {
673 if (host->dma >= 0) 738 if (host->dma >= 0)
@@ -688,12 +753,26 @@ static int pxamci_probe(struct platform_device *pdev)
688static int pxamci_remove(struct platform_device *pdev) 753static int pxamci_remove(struct platform_device *pdev)
689{ 754{
690 struct mmc_host *mmc = platform_get_drvdata(pdev); 755 struct mmc_host *mmc = platform_get_drvdata(pdev);
756 int gpio_cd = -1, gpio_ro = -1, gpio_power = -1;
691 757
692 platform_set_drvdata(pdev, NULL); 758 platform_set_drvdata(pdev, NULL);
693 759
694 if (mmc) { 760 if (mmc) {
695 struct pxamci_host *host = mmc_priv(mmc); 761 struct pxamci_host *host = mmc_priv(mmc);
696 762
763 if (host->pdata) {
764 gpio_cd = host->pdata->gpio_card_detect;
765 gpio_ro = host->pdata->gpio_card_ro;
766 gpio_power = host->pdata->gpio_power;
767 }
768 if (gpio_is_valid(gpio_cd)) {
769 free_irq(gpio_to_irq(gpio_cd), mmc);
770 gpio_free(gpio_cd);
771 }
772 if (gpio_is_valid(gpio_ro))
773 gpio_free(gpio_ro);
774 if (gpio_is_valid(gpio_power))
775 gpio_free(gpio_power);
697 if (host->vcc) 776 if (host->vcc)
698 regulator_put(host->vcc); 777 regulator_put(host->vcc);
699 778
@@ -725,20 +804,20 @@ static int pxamci_remove(struct platform_device *pdev)
725} 804}
726 805
727#ifdef CONFIG_PM 806#ifdef CONFIG_PM
728static int pxamci_suspend(struct platform_device *dev, pm_message_t state) 807static int pxamci_suspend(struct device *dev)
729{ 808{
730 struct mmc_host *mmc = platform_get_drvdata(dev); 809 struct mmc_host *mmc = dev_get_drvdata(dev);
731 int ret = 0; 810 int ret = 0;
732 811
733 if (mmc) 812 if (mmc)
734 ret = mmc_suspend_host(mmc, state); 813 ret = mmc_suspend_host(mmc, PMSG_SUSPEND);
735 814
736 return ret; 815 return ret;
737} 816}
738 817
739static int pxamci_resume(struct platform_device *dev) 818static int pxamci_resume(struct device *dev)
740{ 819{
741 struct mmc_host *mmc = platform_get_drvdata(dev); 820 struct mmc_host *mmc = dev_get_drvdata(dev);
742 int ret = 0; 821 int ret = 0;
743 822
744 if (mmc) 823 if (mmc)
@@ -746,19 +825,22 @@ static int pxamci_resume(struct platform_device *dev)
746 825
747 return ret; 826 return ret;
748} 827}
749#else 828
750#define pxamci_suspend NULL 829static struct dev_pm_ops pxamci_pm_ops = {
751#define pxamci_resume NULL 830 .suspend = pxamci_suspend,
831 .resume = pxamci_resume,
832};
752#endif 833#endif
753 834
754static struct platform_driver pxamci_driver = { 835static struct platform_driver pxamci_driver = {
755 .probe = pxamci_probe, 836 .probe = pxamci_probe,
756 .remove = pxamci_remove, 837 .remove = pxamci_remove,
757 .suspend = pxamci_suspend,
758 .resume = pxamci_resume,
759 .driver = { 838 .driver = {
760 .name = DRIVER_NAME, 839 .name = DRIVER_NAME,
761 .owner = THIS_MODULE, 840 .owner = THIS_MODULE,
841#ifdef CONFIG_PM
842 .pm = &pxamci_pm_ops,
843#endif
762 }, 844 },
763}; 845};
764 846
diff --git a/drivers/mmc/host/s3cmci.c b/drivers/mmc/host/s3cmci.c
index 8c08cd7efa7f..99b74a351020 100644
--- a/drivers/mmc/host/s3cmci.c
+++ b/drivers/mmc/host/s3cmci.c
@@ -17,6 +17,8 @@
17#include <linux/mmc/host.h> 17#include <linux/mmc/host.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/cpufreq.h> 19#include <linux/cpufreq.h>
20#include <linux/debugfs.h>
21#include <linux/seq_file.h>
20#include <linux/gpio.h> 22#include <linux/gpio.h>
21#include <linux/irq.h> 23#include <linux/irq.h>
22#include <linux/io.h> 24#include <linux/io.h>
@@ -58,8 +60,6 @@ static const int dbgmap_debug = dbg_err | dbg_debug;
58 dev_dbg(&host->pdev->dev, args); \ 60 dev_dbg(&host->pdev->dev, args); \
59 } while (0) 61 } while (0)
60 62
61#define RESSIZE(ressource) (((ressource)->end - (ressource)->start)+1)
62
63static struct s3c2410_dma_client s3cmci_dma_client = { 63static struct s3c2410_dma_client s3cmci_dma_client = {
64 .name = "s3c-mci", 64 .name = "s3c-mci",
65}; 65};
@@ -164,6 +164,40 @@ static void dbg_dumpregs(struct s3cmci_host *host, char *prefix) { }
164 164
165#endif /* CONFIG_MMC_DEBUG */ 165#endif /* CONFIG_MMC_DEBUG */
166 166
167/**
168 * s3cmci_host_usedma - return whether the host is using dma or pio
169 * @host: The host state
170 *
171 * Return true if the host is using DMA to transfer data, else false
172 * to use PIO mode. Will return static data depending on the driver
173 * configuration.
174 */
175static inline bool s3cmci_host_usedma(struct s3cmci_host *host)
176{
177#ifdef CONFIG_MMC_S3C_PIO
178 return false;
179#elif defined(CONFIG_MMC_S3C_DMA)
180 return true;
181#else
182 return host->dodma;
183#endif
184}
185
186/**
187 * s3cmci_host_canpio - return true if host has pio code available
188 *
189 * Return true if the driver has been compiled with the PIO support code
190 * available.
191 */
192static inline bool s3cmci_host_canpio(void)
193{
194#ifdef CONFIG_MMC_S3C_PIO
195 return true;
196#else
197 return false;
198#endif
199}
200
167static inline u32 enable_imask(struct s3cmci_host *host, u32 imask) 201static inline u32 enable_imask(struct s3cmci_host *host, u32 imask)
168{ 202{
169 u32 newmask; 203 u32 newmask;
@@ -190,7 +224,33 @@ static inline u32 disable_imask(struct s3cmci_host *host, u32 imask)
190 224
191static inline void clear_imask(struct s3cmci_host *host) 225static inline void clear_imask(struct s3cmci_host *host)
192{ 226{
193 writel(0, host->base + host->sdiimsk); 227 u32 mask = readl(host->base + host->sdiimsk);
228
229 /* preserve the SDIO IRQ mask state */
230 mask &= S3C2410_SDIIMSK_SDIOIRQ;
231 writel(mask, host->base + host->sdiimsk);
232}
233
234/**
235 * s3cmci_check_sdio_irq - test whether the SDIO IRQ is being signalled
236 * @host: The host to check.
237 *
238 * Test to see if the SDIO interrupt is being signalled in case the
239 * controller has failed to re-detect a card interrupt. Read GPE8 and
240 * see if it is low and if so, signal a SDIO interrupt.
241 *
242 * This is currently called if a request is finished (we assume that the
243 * bus is now idle) and when the SDIO IRQ is enabled in case the IRQ is
244 * already being indicated.
245*/
246static void s3cmci_check_sdio_irq(struct s3cmci_host *host)
247{
248 if (host->sdio_irqen) {
249 if (gpio_get_value(S3C2410_GPE(8)) == 0) {
250 printk(KERN_DEBUG "%s: signalling irq\n", __func__);
251 mmc_signal_sdio_irq(host->mmc);
252 }
253 }
194} 254}
195 255
196static inline int get_data_buffer(struct s3cmci_host *host, 256static inline int get_data_buffer(struct s3cmci_host *host,
@@ -238,6 +298,64 @@ static inline u32 fifo_free(struct s3cmci_host *host)
238 return 63 - fifostat; 298 return 63 - fifostat;
239} 299}
240 300
301/**
302 * s3cmci_enable_irq - enable IRQ, after having disabled it.
303 * @host: The device state.
304 * @more: True if more IRQs are expected from transfer.
305 *
306 * Enable the main IRQ if needed after it has been disabled.
307 *
308 * The IRQ can be one of the following states:
309 * - disabled during IDLE
310 * - disabled whilst processing data
311 * - enabled during transfer
312 * - enabled whilst awaiting SDIO interrupt detection
313 */
314static void s3cmci_enable_irq(struct s3cmci_host *host, bool more)
315{
316 unsigned long flags;
317 bool enable = false;
318
319 local_irq_save(flags);
320
321 host->irq_enabled = more;
322 host->irq_disabled = false;
323
324 enable = more | host->sdio_irqen;
325
326 if (host->irq_state != enable) {
327 host->irq_state = enable;
328
329 if (enable)
330 enable_irq(host->irq);
331 else
332 disable_irq(host->irq);
333 }
334
335 local_irq_restore(flags);
336}
337
338/**
339 *
340 */
341static void s3cmci_disable_irq(struct s3cmci_host *host, bool transfer)
342{
343 unsigned long flags;
344
345 local_irq_save(flags);
346
347 //printk(KERN_DEBUG "%s: transfer %d\n", __func__, transfer);
348
349 host->irq_disabled = transfer;
350
351 if (transfer && host->irq_state) {
352 host->irq_state = false;
353 disable_irq(host->irq);
354 }
355
356 local_irq_restore(flags);
357}
358
241static void do_pio_read(struct s3cmci_host *host) 359static void do_pio_read(struct s3cmci_host *host)
242{ 360{
243 int res; 361 int res;
@@ -374,8 +492,7 @@ static void pio_tasklet(unsigned long data)
374{ 492{
375 struct s3cmci_host *host = (struct s3cmci_host *) data; 493 struct s3cmci_host *host = (struct s3cmci_host *) data;
376 494
377 495 s3cmci_disable_irq(host, true);
378 disable_irq(host->irq);
379 496
380 if (host->pio_active == XFER_WRITE) 497 if (host->pio_active == XFER_WRITE)
381 do_pio_write(host); 498 do_pio_write(host);
@@ -395,9 +512,10 @@ static void pio_tasklet(unsigned long data)
395 host->mrq->data->error = -EINVAL; 512 host->mrq->data->error = -EINVAL;
396 } 513 }
397 514
515 s3cmci_enable_irq(host, false);
398 finalize_request(host); 516 finalize_request(host);
399 } else 517 } else
400 enable_irq(host->irq); 518 s3cmci_enable_irq(host, true);
401} 519}
402 520
403/* 521/*
@@ -432,17 +550,27 @@ static irqreturn_t s3cmci_irq(int irq, void *dev_id)
432 struct s3cmci_host *host = dev_id; 550 struct s3cmci_host *host = dev_id;
433 struct mmc_command *cmd; 551 struct mmc_command *cmd;
434 u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt, mci_imsk; 552 u32 mci_csta, mci_dsta, mci_fsta, mci_dcnt, mci_imsk;
435 u32 mci_cclear, mci_dclear; 553 u32 mci_cclear = 0, mci_dclear;
436 unsigned long iflags; 554 unsigned long iflags;
437 555
556 mci_dsta = readl(host->base + S3C2410_SDIDSTA);
557 mci_imsk = readl(host->base + host->sdiimsk);
558
559 if (mci_dsta & S3C2410_SDIDSTA_SDIOIRQDETECT) {
560 if (mci_imsk & S3C2410_SDIIMSK_SDIOIRQ) {
561 mci_dclear = S3C2410_SDIDSTA_SDIOIRQDETECT;
562 writel(mci_dclear, host->base + S3C2410_SDIDSTA);
563
564 mmc_signal_sdio_irq(host->mmc);
565 return IRQ_HANDLED;
566 }
567 }
568
438 spin_lock_irqsave(&host->complete_lock, iflags); 569 spin_lock_irqsave(&host->complete_lock, iflags);
439 570
440 mci_csta = readl(host->base + S3C2410_SDICMDSTAT); 571 mci_csta = readl(host->base + S3C2410_SDICMDSTAT);
441 mci_dsta = readl(host->base + S3C2410_SDIDSTA);
442 mci_dcnt = readl(host->base + S3C2410_SDIDCNT); 572 mci_dcnt = readl(host->base + S3C2410_SDIDCNT);
443 mci_fsta = readl(host->base + S3C2410_SDIFSTA); 573 mci_fsta = readl(host->base + S3C2410_SDIFSTA);
444 mci_imsk = readl(host->base + host->sdiimsk);
445 mci_cclear = 0;
446 mci_dclear = 0; 574 mci_dclear = 0;
447 575
448 if ((host->complete_what == COMPLETION_NONE) || 576 if ((host->complete_what == COMPLETION_NONE) ||
@@ -466,7 +594,7 @@ static irqreturn_t s3cmci_irq(int irq, void *dev_id)
466 goto irq_out; 594 goto irq_out;
467 } 595 }
468 596
469 if (!host->dodma) { 597 if (!s3cmci_host_usedma(host)) {
470 if ((host->pio_active == XFER_WRITE) && 598 if ((host->pio_active == XFER_WRITE) &&
471 (mci_fsta & S3C2410_SDIFSTA_TFDET)) { 599 (mci_fsta & S3C2410_SDIFSTA_TFDET)) {
472 600
@@ -673,6 +801,7 @@ static void s3cmci_dma_done_callback(struct s3c2410_dma_chan *dma_ch,
673 dbg(host, dbg_dma, "DMA FINISHED Size:%i DSTA:%08x DCNT:%08x\n", 801 dbg(host, dbg_dma, "DMA FINISHED Size:%i DSTA:%08x DCNT:%08x\n",
674 size, mci_dsta, mci_dcnt); 802 size, mci_dsta, mci_dcnt);
675 803
804 host->dma_complete = 1;
676 host->complete_what = COMPLETION_FINALIZE; 805 host->complete_what = COMPLETION_FINALIZE;
677 806
678out: 807out:
@@ -683,9 +812,9 @@ out:
683fail_request: 812fail_request:
684 host->mrq->data->error = -EINVAL; 813 host->mrq->data->error = -EINVAL;
685 host->complete_what = COMPLETION_FINALIZE; 814 host->complete_what = COMPLETION_FINALIZE;
686 writel(0, host->base + host->sdiimsk); 815 clear_imask(host);
687 goto out;
688 816
817 goto out;
689} 818}
690 819
691static void finalize_request(struct s3cmci_host *host) 820static void finalize_request(struct s3cmci_host *host)
@@ -702,8 +831,9 @@ static void finalize_request(struct s3cmci_host *host)
702 831
703 if (cmd->data && (cmd->error == 0) && 832 if (cmd->data && (cmd->error == 0) &&
704 (cmd->data->error == 0)) { 833 (cmd->data->error == 0)) {
705 if (host->dodma && (!host->dma_complete)) { 834 if (s3cmci_host_usedma(host) && (!host->dma_complete)) {
706 dbg(host, dbg_dma, "DMA Missing!\n"); 835 dbg(host, dbg_dma, "DMA Missing (%d)!\n",
836 host->dma_complete);
707 return; 837 return;
708 } 838 }
709 } 839 }
@@ -728,7 +858,7 @@ static void finalize_request(struct s3cmci_host *host)
728 writel(0, host->base + S3C2410_SDICMDARG); 858 writel(0, host->base + S3C2410_SDICMDARG);
729 writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON); 859 writel(S3C2410_SDIDCON_STOP, host->base + S3C2410_SDIDCON);
730 writel(0, host->base + S3C2410_SDICMDCON); 860 writel(0, host->base + S3C2410_SDICMDCON);
731 writel(0, host->base + host->sdiimsk); 861 clear_imask(host);
732 862
733 if (cmd->data && cmd->error) 863 if (cmd->data && cmd->error)
734 cmd->data->error = cmd->error; 864 cmd->data->error = cmd->error;
@@ -754,7 +884,7 @@ static void finalize_request(struct s3cmci_host *host)
754 /* If we had an error while transfering data we flush the 884 /* If we had an error while transfering data we flush the
755 * DMA channel and the fifo to clear out any garbage. */ 885 * DMA channel and the fifo to clear out any garbage. */
756 if (mrq->data->error != 0) { 886 if (mrq->data->error != 0) {
757 if (host->dodma) 887 if (s3cmci_host_usedma(host))
758 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); 888 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH);
759 889
760 if (host->is2440) { 890 if (host->is2440) {
@@ -776,6 +906,8 @@ static void finalize_request(struct s3cmci_host *host)
776request_done: 906request_done:
777 host->complete_what = COMPLETION_NONE; 907 host->complete_what = COMPLETION_NONE;
778 host->mrq = NULL; 908 host->mrq = NULL;
909
910 s3cmci_check_sdio_irq(host);
779 mmc_request_done(host->mmc, mrq); 911 mmc_request_done(host->mmc, mrq);
780} 912}
781 913
@@ -872,7 +1004,7 @@ static int s3cmci_setup_data(struct s3cmci_host *host, struct mmc_data *data)
872 1004
873 dcon = data->blocks & S3C2410_SDIDCON_BLKNUM_MASK; 1005 dcon = data->blocks & S3C2410_SDIDCON_BLKNUM_MASK;
874 1006
875 if (host->dodma) 1007 if (s3cmci_host_usedma(host))
876 dcon |= S3C2410_SDIDCON_DMAEN; 1008 dcon |= S3C2410_SDIDCON_DMAEN;
877 1009
878 if (host->bus_width == MMC_BUS_WIDTH_4) 1010 if (host->bus_width == MMC_BUS_WIDTH_4)
@@ -950,7 +1082,7 @@ static int s3cmci_prepare_pio(struct s3cmci_host *host, struct mmc_data *data)
950static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data) 1082static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
951{ 1083{
952 int dma_len, i; 1084 int dma_len, i;
953 int rw = (data->flags & MMC_DATA_WRITE) ? 1 : 0; 1085 int rw = data->flags & MMC_DATA_WRITE;
954 1086
955 BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR); 1087 BUG_ON((data->flags & BOTH_DIR) == BOTH_DIR);
956 1088
@@ -958,7 +1090,7 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
958 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH); 1090 s3c2410_dma_ctrl(host->dma, S3C2410_DMAOP_FLUSH);
959 1091
960 dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len, 1092 dma_len = dma_map_sg(mmc_dev(host->mmc), data->sg, data->sg_len,
961 (rw) ? DMA_TO_DEVICE : DMA_FROM_DEVICE); 1093 rw ? DMA_TO_DEVICE : DMA_FROM_DEVICE);
962 1094
963 if (dma_len == 0) 1095 if (dma_len == 0)
964 return -ENOMEM; 1096 return -ENOMEM;
@@ -969,11 +1101,11 @@ static int s3cmci_prepare_dma(struct s3cmci_host *host, struct mmc_data *data)
969 for (i = 0; i < dma_len; i++) { 1101 for (i = 0; i < dma_len; i++) {
970 int res; 1102 int res;
971 1103
972 dbg(host, dbg_dma, "enqueue %i:%u@%u\n", i, 1104 dbg(host, dbg_dma, "enqueue %i: %08x@%u\n", i,
973 sg_dma_address(&data->sg[i]), 1105 sg_dma_address(&data->sg[i]),
974 sg_dma_len(&data->sg[i])); 1106 sg_dma_len(&data->sg[i]));
975 1107
976 res = s3c2410_dma_enqueue(host->dma, (void *) host, 1108 res = s3c2410_dma_enqueue(host->dma, host,
977 sg_dma_address(&data->sg[i]), 1109 sg_dma_address(&data->sg[i]),
978 sg_dma_len(&data->sg[i])); 1110 sg_dma_len(&data->sg[i]));
979 1111
@@ -1018,7 +1150,7 @@ static void s3cmci_send_request(struct mmc_host *mmc)
1018 return; 1150 return;
1019 } 1151 }
1020 1152
1021 if (host->dodma) 1153 if (s3cmci_host_usedma(host))
1022 res = s3cmci_prepare_dma(host, cmd->data); 1154 res = s3cmci_prepare_dma(host, cmd->data);
1023 else 1155 else
1024 res = s3cmci_prepare_pio(host, cmd->data); 1156 res = s3cmci_prepare_pio(host, cmd->data);
@@ -1037,7 +1169,7 @@ static void s3cmci_send_request(struct mmc_host *mmc)
1037 s3cmci_send_command(host, cmd); 1169 s3cmci_send_command(host, cmd);
1038 1170
1039 /* Enable Interrupt */ 1171 /* Enable Interrupt */
1040 enable_irq(host->irq); 1172 s3cmci_enable_irq(host, true);
1041} 1173}
1042 1174
1043static int s3cmci_card_present(struct mmc_host *mmc) 1175static int s3cmci_card_present(struct mmc_host *mmc)
@@ -1049,7 +1181,7 @@ static int s3cmci_card_present(struct mmc_host *mmc)
1049 if (pdata->gpio_detect == 0) 1181 if (pdata->gpio_detect == 0)
1050 return -ENOSYS; 1182 return -ENOSYS;
1051 1183
1052 ret = s3c2410_gpio_getpin(pdata->gpio_detect) ? 0 : 1; 1184 ret = gpio_get_value(pdata->gpio_detect) ? 0 : 1;
1053 return ret ^ pdata->detect_invert; 1185 return ret ^ pdata->detect_invert;
1054} 1186}
1055 1187
@@ -1104,12 +1236,12 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1104 switch (ios->power_mode) { 1236 switch (ios->power_mode) {
1105 case MMC_POWER_ON: 1237 case MMC_POWER_ON:
1106 case MMC_POWER_UP: 1238 case MMC_POWER_UP:
1107 s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPE5_SDCLK); 1239 s3c2410_gpio_cfgpin(S3C2410_GPE(5), S3C2410_GPE5_SDCLK);
1108 s3c2410_gpio_cfgpin(S3C2410_GPE6, S3C2410_GPE6_SDCMD); 1240 s3c2410_gpio_cfgpin(S3C2410_GPE(6), S3C2410_GPE6_SDCMD);
1109 s3c2410_gpio_cfgpin(S3C2410_GPE7, S3C2410_GPE7_SDDAT0); 1241 s3c2410_gpio_cfgpin(S3C2410_GPE(7), S3C2410_GPE7_SDDAT0);
1110 s3c2410_gpio_cfgpin(S3C2410_GPE8, S3C2410_GPE8_SDDAT1); 1242 s3c2410_gpio_cfgpin(S3C2410_GPE(8), S3C2410_GPE8_SDDAT1);
1111 s3c2410_gpio_cfgpin(S3C2410_GPE9, S3C2410_GPE9_SDDAT2); 1243 s3c2410_gpio_cfgpin(S3C2410_GPE(9), S3C2410_GPE9_SDDAT2);
1112 s3c2410_gpio_cfgpin(S3C2410_GPE10, S3C2410_GPE10_SDDAT3); 1244 s3c2410_gpio_cfgpin(S3C2410_GPE(10), S3C2410_GPE10_SDDAT3);
1113 1245
1114 if (host->pdata->set_power) 1246 if (host->pdata->set_power)
1115 host->pdata->set_power(ios->power_mode, ios->vdd); 1247 host->pdata->set_power(ios->power_mode, ios->vdd);
@@ -1121,8 +1253,7 @@ static void s3cmci_set_ios(struct mmc_host *mmc, struct mmc_ios *ios)
1121 1253
1122 case MMC_POWER_OFF: 1254 case MMC_POWER_OFF:
1123 default: 1255 default:
1124 s3c2410_gpio_setpin(S3C2410_GPE5, 0); 1256 gpio_direction_output(S3C2410_GPE(5), 0);
1125 s3c2410_gpio_cfgpin(S3C2410_GPE5, S3C2410_GPIO_OUTPUT);
1126 1257
1127 if (host->is2440) 1258 if (host->is2440)
1128 mci_con |= S3C2440_SDICON_SDRESET; 1259 mci_con |= S3C2440_SDICON_SDRESET;
@@ -1168,7 +1299,7 @@ static int s3cmci_get_ro(struct mmc_host *mmc)
1168 struct s3c24xx_mci_pdata *pdata = host->pdata; 1299 struct s3c24xx_mci_pdata *pdata = host->pdata;
1169 int ret; 1300 int ret;
1170 1301
1171 if (pdata->gpio_wprotect == 0) 1302 if (pdata->no_wprotect)
1172 return 0; 1303 return 0;
1173 1304
1174 ret = s3c2410_gpio_getpin(pdata->gpio_wprotect); 1305 ret = s3c2410_gpio_getpin(pdata->gpio_wprotect);
@@ -1179,11 +1310,52 @@ static int s3cmci_get_ro(struct mmc_host *mmc)
1179 return ret; 1310 return ret;
1180} 1311}
1181 1312
1313static void s3cmci_enable_sdio_irq(struct mmc_host *mmc, int enable)
1314{
1315 struct s3cmci_host *host = mmc_priv(mmc);
1316 unsigned long flags;
1317 u32 con;
1318
1319 local_irq_save(flags);
1320
1321 con = readl(host->base + S3C2410_SDICON);
1322 host->sdio_irqen = enable;
1323
1324 if (enable == host->sdio_irqen)
1325 goto same_state;
1326
1327 if (enable) {
1328 con |= S3C2410_SDICON_SDIOIRQ;
1329 enable_imask(host, S3C2410_SDIIMSK_SDIOIRQ);
1330
1331 if (!host->irq_state && !host->irq_disabled) {
1332 host->irq_state = true;
1333 enable_irq(host->irq);
1334 }
1335 } else {
1336 disable_imask(host, S3C2410_SDIIMSK_SDIOIRQ);
1337 con &= ~S3C2410_SDICON_SDIOIRQ;
1338
1339 if (!host->irq_enabled && host->irq_state) {
1340 disable_irq_nosync(host->irq);
1341 host->irq_state = false;
1342 }
1343 }
1344
1345 writel(con, host->base + S3C2410_SDICON);
1346
1347 same_state:
1348 local_irq_restore(flags);
1349
1350 s3cmci_check_sdio_irq(host);
1351}
1352
1182static struct mmc_host_ops s3cmci_ops = { 1353static struct mmc_host_ops s3cmci_ops = {
1183 .request = s3cmci_request, 1354 .request = s3cmci_request,
1184 .set_ios = s3cmci_set_ios, 1355 .set_ios = s3cmci_set_ios,
1185 .get_ro = s3cmci_get_ro, 1356 .get_ro = s3cmci_get_ro,
1186 .get_cd = s3cmci_card_present, 1357 .get_cd = s3cmci_card_present,
1358 .enable_sdio_irq = s3cmci_enable_sdio_irq,
1187}; 1359};
1188 1360
1189static struct s3c24xx_mci_pdata s3cmci_def_pdata = { 1361static struct s3c24xx_mci_pdata s3cmci_def_pdata = {
@@ -1246,11 +1418,140 @@ static inline void s3cmci_cpufreq_deregister(struct s3cmci_host *host)
1246} 1418}
1247#endif 1419#endif
1248 1420
1249static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440) 1421
1422#ifdef CONFIG_DEBUG_FS
1423
1424static int s3cmci_state_show(struct seq_file *seq, void *v)
1425{
1426 struct s3cmci_host *host = seq->private;
1427
1428 seq_printf(seq, "Register base = 0x%08x\n", (u32)host->base);
1429 seq_printf(seq, "Clock rate = %ld\n", host->clk_rate);
1430 seq_printf(seq, "Prescale = %d\n", host->prescaler);
1431 seq_printf(seq, "is2440 = %d\n", host->is2440);
1432 seq_printf(seq, "IRQ = %d\n", host->irq);
1433 seq_printf(seq, "IRQ enabled = %d\n", host->irq_enabled);
1434 seq_printf(seq, "IRQ disabled = %d\n", host->irq_disabled);
1435 seq_printf(seq, "IRQ state = %d\n", host->irq_state);
1436 seq_printf(seq, "CD IRQ = %d\n", host->irq_cd);
1437 seq_printf(seq, "Do DMA = %d\n", s3cmci_host_usedma(host));
1438 seq_printf(seq, "SDIIMSK at %d\n", host->sdiimsk);
1439 seq_printf(seq, "SDIDATA at %d\n", host->sdidata);
1440
1441 return 0;
1442}
1443
1444static int s3cmci_state_open(struct inode *inode, struct file *file)
1445{
1446 return single_open(file, s3cmci_state_show, inode->i_private);
1447}
1448
1449static const struct file_operations s3cmci_fops_state = {
1450 .owner = THIS_MODULE,
1451 .open = s3cmci_state_open,
1452 .read = seq_read,
1453 .llseek = seq_lseek,
1454 .release = single_release,
1455};
1456
1457#define DBG_REG(_r) { .addr = S3C2410_SDI##_r, .name = #_r }
1458
1459struct s3cmci_reg {
1460 unsigned short addr;
1461 unsigned char *name;
1462} debug_regs[] = {
1463 DBG_REG(CON),
1464 DBG_REG(PRE),
1465 DBG_REG(CMDARG),
1466 DBG_REG(CMDCON),
1467 DBG_REG(CMDSTAT),
1468 DBG_REG(RSP0),
1469 DBG_REG(RSP1),
1470 DBG_REG(RSP2),
1471 DBG_REG(RSP3),
1472 DBG_REG(TIMER),
1473 DBG_REG(BSIZE),
1474 DBG_REG(DCON),
1475 DBG_REG(DCNT),
1476 DBG_REG(DSTA),
1477 DBG_REG(FSTA),
1478 {}
1479};
1480
1481static int s3cmci_regs_show(struct seq_file *seq, void *v)
1482{
1483 struct s3cmci_host *host = seq->private;
1484 struct s3cmci_reg *rptr = debug_regs;
1485
1486 for (; rptr->name; rptr++)
1487 seq_printf(seq, "SDI%s\t=0x%08x\n", rptr->name,
1488 readl(host->base + rptr->addr));
1489
1490 seq_printf(seq, "SDIIMSK\t=0x%08x\n", readl(host->base + host->sdiimsk));
1491
1492 return 0;
1493}
1494
1495static int s3cmci_regs_open(struct inode *inode, struct file *file)
1496{
1497 return single_open(file, s3cmci_regs_show, inode->i_private);
1498}
1499
1500static const struct file_operations s3cmci_fops_regs = {
1501 .owner = THIS_MODULE,
1502 .open = s3cmci_regs_open,
1503 .read = seq_read,
1504 .llseek = seq_lseek,
1505 .release = single_release,
1506};
1507
1508static void s3cmci_debugfs_attach(struct s3cmci_host *host)
1509{
1510 struct device *dev = &host->pdev->dev;
1511
1512 host->debug_root = debugfs_create_dir(dev_name(dev), NULL);
1513 if (IS_ERR(host->debug_root)) {
1514 dev_err(dev, "failed to create debugfs root\n");
1515 return;
1516 }
1517
1518 host->debug_state = debugfs_create_file("state", 0444,
1519 host->debug_root, host,
1520 &s3cmci_fops_state);
1521
1522 if (IS_ERR(host->debug_state))
1523 dev_err(dev, "failed to create debug state file\n");
1524
1525 host->debug_regs = debugfs_create_file("regs", 0444,
1526 host->debug_root, host,
1527 &s3cmci_fops_regs);
1528
1529 if (IS_ERR(host->debug_regs))
1530 dev_err(dev, "failed to create debug regs file\n");
1531}
1532
1533static void s3cmci_debugfs_remove(struct s3cmci_host *host)
1534{
1535 debugfs_remove(host->debug_regs);
1536 debugfs_remove(host->debug_state);
1537 debugfs_remove(host->debug_root);
1538}
1539
1540#else
1541static inline void s3cmci_debugfs_attach(struct s3cmci_host *host) { }
1542static inline void s3cmci_debugfs_remove(struct s3cmci_host *host) { }
1543
1544#endif /* CONFIG_DEBUG_FS */
1545
1546static int __devinit s3cmci_probe(struct platform_device *pdev)
1250{ 1547{
1251 struct s3cmci_host *host; 1548 struct s3cmci_host *host;
1252 struct mmc_host *mmc; 1549 struct mmc_host *mmc;
1253 int ret; 1550 int ret;
1551 int is2440;
1552 int i;
1553
1554 is2440 = platform_get_device_id(pdev)->driver_data;
1254 1555
1255 mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev); 1556 mmc = mmc_alloc_host(sizeof(struct s3cmci_host), &pdev->dev);
1256 if (!mmc) { 1557 if (!mmc) {
@@ -1258,6 +1559,18 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1258 goto probe_out; 1559 goto probe_out;
1259 } 1560 }
1260 1561
1562 for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++) {
1563 ret = gpio_request(i, dev_name(&pdev->dev));
1564 if (ret) {
1565 dev_err(&pdev->dev, "failed to get gpio %d\n", i);
1566
1567 for (i--; i >= S3C2410_GPE(5); i--)
1568 gpio_free(i);
1569
1570 goto probe_free_host;
1571 }
1572 }
1573
1261 host = mmc_priv(mmc); 1574 host = mmc_priv(mmc);
1262 host->mmc = mmc; 1575 host->mmc = mmc;
1263 host->pdev = pdev; 1576 host->pdev = pdev;
@@ -1282,11 +1595,12 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1282 host->clk_div = 2; 1595 host->clk_div = 2;
1283 } 1596 }
1284 1597
1285 host->dodma = 0;
1286 host->complete_what = COMPLETION_NONE; 1598 host->complete_what = COMPLETION_NONE;
1287 host->pio_active = XFER_NONE; 1599 host->pio_active = XFER_NONE;
1288 1600
1289 host->dma = S3CMCI_DMA; 1601#ifdef CONFIG_MMC_S3C_PIODMA
1602 host->dodma = host->pdata->dma;
1603#endif
1290 1604
1291 host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0); 1605 host->mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1292 if (!host->mem) { 1606 if (!host->mem) {
@@ -1294,19 +1608,19 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1294 "failed to get io memory region resouce.\n"); 1608 "failed to get io memory region resouce.\n");
1295 1609
1296 ret = -ENOENT; 1610 ret = -ENOENT;
1297 goto probe_free_host; 1611 goto probe_free_gpio;
1298 } 1612 }
1299 1613
1300 host->mem = request_mem_region(host->mem->start, 1614 host->mem = request_mem_region(host->mem->start,
1301 RESSIZE(host->mem), pdev->name); 1615 resource_size(host->mem), pdev->name);
1302 1616
1303 if (!host->mem) { 1617 if (!host->mem) {
1304 dev_err(&pdev->dev, "failed to request io memory region.\n"); 1618 dev_err(&pdev->dev, "failed to request io memory region.\n");
1305 ret = -ENOENT; 1619 ret = -ENOENT;
1306 goto probe_free_host; 1620 goto probe_free_gpio;
1307 } 1621 }
1308 1622
1309 host->base = ioremap(host->mem->start, RESSIZE(host->mem)); 1623 host->base = ioremap(host->mem->start, resource_size(host->mem));
1310 if (!host->base) { 1624 if (!host->base) {
1311 dev_err(&pdev->dev, "failed to ioremap() io memory region.\n"); 1625 dev_err(&pdev->dev, "failed to ioremap() io memory region.\n");
1312 ret = -EINVAL; 1626 ret = -EINVAL;
@@ -1331,31 +1645,60 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1331 * ensure we don't lock the system with un-serviceable requests. */ 1645 * ensure we don't lock the system with un-serviceable requests. */
1332 1646
1333 disable_irq(host->irq); 1647 disable_irq(host->irq);
1648 host->irq_state = false;
1334 1649
1335 host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect); 1650 if (!host->pdata->no_detect) {
1336 1651 ret = gpio_request(host->pdata->gpio_detect, "s3cmci detect");
1337 if (host->irq_cd >= 0) { 1652 if (ret) {
1338 if (request_irq(host->irq_cd, s3cmci_irq_cd, 1653 dev_err(&pdev->dev, "failed to get detect gpio\n");
1339 IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING,
1340 DRIVER_NAME, host)) {
1341 dev_err(&pdev->dev, "can't get card detect irq.\n");
1342 ret = -ENOENT;
1343 goto probe_free_irq; 1654 goto probe_free_irq;
1344 } 1655 }
1345 } else { 1656
1346 dev_warn(&pdev->dev, "host detect has no irq available\n"); 1657 host->irq_cd = s3c2410_gpio_getirq(host->pdata->gpio_detect);
1347 s3c2410_gpio_cfgpin(host->pdata->gpio_detect, 1658
1348 S3C2410_GPIO_INPUT); 1659 if (host->irq_cd >= 0) {
1660 if (request_irq(host->irq_cd, s3cmci_irq_cd,
1661 IRQF_TRIGGER_RISING |
1662 IRQF_TRIGGER_FALLING,
1663 DRIVER_NAME, host)) {
1664 dev_err(&pdev->dev,
1665 "can't get card detect irq.\n");
1666 ret = -ENOENT;
1667 goto probe_free_gpio_cd;
1668 }
1669 } else {
1670 dev_warn(&pdev->dev,
1671 "host detect has no irq available\n");
1672 gpio_direction_input(host->pdata->gpio_detect);
1673 }
1674 } else
1675 host->irq_cd = -1;
1676
1677 if (!host->pdata->no_wprotect) {
1678 ret = gpio_request(host->pdata->gpio_wprotect, "s3cmci wp");
1679 if (ret) {
1680 dev_err(&pdev->dev, "failed to get writeprotect\n");
1681 goto probe_free_irq_cd;
1682 }
1683
1684 gpio_direction_input(host->pdata->gpio_wprotect);
1349 } 1685 }
1350 1686
1351 if (host->pdata->gpio_wprotect) 1687 /* depending on the dma state, get a dma channel to use. */
1352 s3c2410_gpio_cfgpin(host->pdata->gpio_wprotect,
1353 S3C2410_GPIO_INPUT);
1354 1688
1355 if (s3c2410_dma_request(S3CMCI_DMA, &s3cmci_dma_client, NULL) < 0) { 1689 if (s3cmci_host_usedma(host)) {
1356 dev_err(&pdev->dev, "unable to get DMA channel.\n"); 1690 host->dma = s3c2410_dma_request(DMACH_SDI, &s3cmci_dma_client,
1357 ret = -EBUSY; 1691 host);
1358 goto probe_free_irq_cd; 1692 if (host->dma < 0) {
1693 dev_err(&pdev->dev, "cannot get DMA channel.\n");
1694 if (!s3cmci_host_canpio()) {
1695 ret = -EBUSY;
1696 goto probe_free_gpio_wp;
1697 } else {
1698 dev_warn(&pdev->dev, "falling back to PIO.\n");
1699 host->dodma = 0;
1700 }
1701 }
1359 } 1702 }
1360 1703
1361 host->clk = clk_get(&pdev->dev, "sdi"); 1704 host->clk = clk_get(&pdev->dev, "sdi");
@@ -1363,7 +1706,7 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1363 dev_err(&pdev->dev, "failed to find clock source.\n"); 1706 dev_err(&pdev->dev, "failed to find clock source.\n");
1364 ret = PTR_ERR(host->clk); 1707 ret = PTR_ERR(host->clk);
1365 host->clk = NULL; 1708 host->clk = NULL;
1366 goto probe_free_host; 1709 goto probe_free_dma;
1367 } 1710 }
1368 1711
1369 ret = clk_enable(host->clk); 1712 ret = clk_enable(host->clk);
@@ -1376,7 +1719,11 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1376 1719
1377 mmc->ops = &s3cmci_ops; 1720 mmc->ops = &s3cmci_ops;
1378 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34; 1721 mmc->ocr_avail = MMC_VDD_32_33 | MMC_VDD_33_34;
1722#ifdef CONFIG_MMC_S3C_HW_SDIO_IRQ
1723 mmc->caps = MMC_CAP_4_BIT_DATA | MMC_CAP_SDIO_IRQ;
1724#else
1379 mmc->caps = MMC_CAP_4_BIT_DATA; 1725 mmc->caps = MMC_CAP_4_BIT_DATA;
1726#endif
1380 mmc->f_min = host->clk_rate / (host->clk_div * 256); 1727 mmc->f_min = host->clk_rate / (host->clk_div * 256);
1381 mmc->f_max = host->clk_rate / host->clk_div; 1728 mmc->f_max = host->clk_rate / host->clk_div;
1382 1729
@@ -1408,8 +1755,12 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1408 goto free_cpufreq; 1755 goto free_cpufreq;
1409 } 1756 }
1410 1757
1758 s3cmci_debugfs_attach(host);
1759
1411 platform_set_drvdata(pdev, mmc); 1760 platform_set_drvdata(pdev, mmc);
1412 dev_info(&pdev->dev, "initialisation done.\n"); 1761 dev_info(&pdev->dev, "%s - using %s, %s SDIO IRQ\n", mmc_hostname(mmc),
1762 s3cmci_host_usedma(host) ? "dma" : "pio",
1763 mmc->caps & MMC_CAP_SDIO_IRQ ? "hw" : "sw");
1413 1764
1414 return 0; 1765 return 0;
1415 1766
@@ -1422,6 +1773,18 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1422 clk_free: 1773 clk_free:
1423 clk_put(host->clk); 1774 clk_put(host->clk);
1424 1775
1776 probe_free_dma:
1777 if (s3cmci_host_usedma(host))
1778 s3c2410_dma_free(host->dma, &s3cmci_dma_client);
1779
1780 probe_free_gpio_wp:
1781 if (!host->pdata->no_wprotect)
1782 gpio_free(host->pdata->gpio_wprotect);
1783
1784 probe_free_gpio_cd:
1785 if (!host->pdata->no_detect)
1786 gpio_free(host->pdata->gpio_detect);
1787
1425 probe_free_irq_cd: 1788 probe_free_irq_cd:
1426 if (host->irq_cd >= 0) 1789 if (host->irq_cd >= 0)
1427 free_irq(host->irq_cd, host); 1790 free_irq(host->irq_cd, host);
@@ -1433,10 +1796,15 @@ static int __devinit s3cmci_probe(struct platform_device *pdev, int is2440)
1433 iounmap(host->base); 1796 iounmap(host->base);
1434 1797
1435 probe_free_mem_region: 1798 probe_free_mem_region:
1436 release_mem_region(host->mem->start, RESSIZE(host->mem)); 1799 release_mem_region(host->mem->start, resource_size(host->mem));
1800
1801 probe_free_gpio:
1802 for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++)
1803 gpio_free(i);
1437 1804
1438 probe_free_host: 1805 probe_free_host:
1439 mmc_free_host(mmc); 1806 mmc_free_host(mmc);
1807
1440 probe_out: 1808 probe_out:
1441 return ret; 1809 return ret;
1442} 1810}
@@ -1449,6 +1817,7 @@ static void s3cmci_shutdown(struct platform_device *pdev)
1449 if (host->irq_cd >= 0) 1817 if (host->irq_cd >= 0)
1450 free_irq(host->irq_cd, host); 1818 free_irq(host->irq_cd, host);
1451 1819
1820 s3cmci_debugfs_remove(host);
1452 s3cmci_cpufreq_deregister(host); 1821 s3cmci_cpufreq_deregister(host);
1453 mmc_remove_host(mmc); 1822 mmc_remove_host(mmc);
1454 clk_disable(host->clk); 1823 clk_disable(host->clk);
@@ -1458,104 +1827,102 @@ static int __devexit s3cmci_remove(struct platform_device *pdev)
1458{ 1827{
1459 struct mmc_host *mmc = platform_get_drvdata(pdev); 1828 struct mmc_host *mmc = platform_get_drvdata(pdev);
1460 struct s3cmci_host *host = mmc_priv(mmc); 1829 struct s3cmci_host *host = mmc_priv(mmc);
1830 struct s3c24xx_mci_pdata *pd = host->pdata;
1831 int i;
1461 1832
1462 s3cmci_shutdown(pdev); 1833 s3cmci_shutdown(pdev);
1463 1834
1464 clk_put(host->clk); 1835 clk_put(host->clk);
1465 1836
1466 tasklet_disable(&host->pio_tasklet); 1837 tasklet_disable(&host->pio_tasklet);
1467 s3c2410_dma_free(S3CMCI_DMA, &s3cmci_dma_client); 1838
1839 if (s3cmci_host_usedma(host))
1840 s3c2410_dma_free(host->dma, &s3cmci_dma_client);
1468 1841
1469 free_irq(host->irq, host); 1842 free_irq(host->irq, host);
1470 1843
1844 if (!pd->no_wprotect)
1845 gpio_free(pd->gpio_wprotect);
1846
1847 if (!pd->no_detect)
1848 gpio_free(pd->gpio_detect);
1849
1850 for (i = S3C2410_GPE(5); i <= S3C2410_GPE(10); i++)
1851 gpio_free(i);
1852
1853
1471 iounmap(host->base); 1854 iounmap(host->base);
1472 release_mem_region(host->mem->start, RESSIZE(host->mem)); 1855 release_mem_region(host->mem->start, resource_size(host->mem));
1473 1856
1474 mmc_free_host(mmc); 1857 mmc_free_host(mmc);
1475 return 0; 1858 return 0;
1476} 1859}
1477 1860
1478static int __devinit s3cmci_2410_probe(struct platform_device *dev) 1861static struct platform_device_id s3cmci_driver_ids[] = {
1479{ 1862 {
1480 return s3cmci_probe(dev, 0); 1863 .name = "s3c2410-sdi",
1481} 1864 .driver_data = 0,
1865 }, {
1866 .name = "s3c2412-sdi",
1867 .driver_data = 1,
1868 }, {
1869 .name = "s3c2440-sdi",
1870 .driver_data = 1,
1871 },
1872 { }
1873};
1482 1874
1483static int __devinit s3cmci_2412_probe(struct platform_device *dev) 1875MODULE_DEVICE_TABLE(platform, s3cmci_driver_ids);
1484{
1485 return s3cmci_probe(dev, 1);
1486}
1487 1876
1488static int __devinit s3cmci_2440_probe(struct platform_device *dev)
1489{
1490 return s3cmci_probe(dev, 1);
1491}
1492 1877
1493#ifdef CONFIG_PM 1878#ifdef CONFIG_PM
1494 1879
1495static int s3cmci_suspend(struct platform_device *dev, pm_message_t state) 1880static int s3cmci_suspend(struct device *dev)
1496{ 1881{
1497 struct mmc_host *mmc = platform_get_drvdata(dev); 1882 struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev));
1883 struct pm_message event = { PM_EVENT_SUSPEND };
1498 1884
1499 return mmc_suspend_host(mmc, state); 1885 return mmc_suspend_host(mmc, event);
1500} 1886}
1501 1887
1502static int s3cmci_resume(struct platform_device *dev) 1888static int s3cmci_resume(struct device *dev)
1503{ 1889{
1504 struct mmc_host *mmc = platform_get_drvdata(dev); 1890 struct mmc_host *mmc = platform_get_drvdata(to_platform_device(dev));
1505 1891
1506 return mmc_resume_host(mmc); 1892 return mmc_resume_host(mmc);
1507} 1893}
1508 1894
1509#else /* CONFIG_PM */ 1895static struct dev_pm_ops s3cmci_pm = {
1510#define s3cmci_suspend NULL
1511#define s3cmci_resume NULL
1512#endif /* CONFIG_PM */
1513
1514
1515static struct platform_driver s3cmci_2410_driver = {
1516 .driver.name = "s3c2410-sdi",
1517 .driver.owner = THIS_MODULE,
1518 .probe = s3cmci_2410_probe,
1519 .remove = __devexit_p(s3cmci_remove),
1520 .shutdown = s3cmci_shutdown,
1521 .suspend = s3cmci_suspend, 1896 .suspend = s3cmci_suspend,
1522 .resume = s3cmci_resume, 1897 .resume = s3cmci_resume,
1523}; 1898};
1524 1899
1525static struct platform_driver s3cmci_2412_driver = { 1900#define s3cmci_pm_ops &s3cmci_pm
1526 .driver.name = "s3c2412-sdi", 1901#else /* CONFIG_PM */
1527 .driver.owner = THIS_MODULE, 1902#define s3cmci_pm_ops NULL
1528 .probe = s3cmci_2412_probe, 1903#endif /* CONFIG_PM */
1529 .remove = __devexit_p(s3cmci_remove),
1530 .shutdown = s3cmci_shutdown,
1531 .suspend = s3cmci_suspend,
1532 .resume = s3cmci_resume,
1533};
1534 1904
1535static struct platform_driver s3cmci_2440_driver = { 1905
1536 .driver.name = "s3c2440-sdi", 1906static struct platform_driver s3cmci_driver = {
1537 .driver.owner = THIS_MODULE, 1907 .driver = {
1538 .probe = s3cmci_2440_probe, 1908 .name = "s3c-sdi",
1909 .owner = THIS_MODULE,
1910 .pm = s3cmci_pm_ops,
1911 },
1912 .id_table = s3cmci_driver_ids,
1913 .probe = s3cmci_probe,
1539 .remove = __devexit_p(s3cmci_remove), 1914 .remove = __devexit_p(s3cmci_remove),
1540 .shutdown = s3cmci_shutdown, 1915 .shutdown = s3cmci_shutdown,
1541 .suspend = s3cmci_suspend,
1542 .resume = s3cmci_resume,
1543}; 1916};
1544 1917
1545
1546static int __init s3cmci_init(void) 1918static int __init s3cmci_init(void)
1547{ 1919{
1548 platform_driver_register(&s3cmci_2410_driver); 1920 return platform_driver_register(&s3cmci_driver);
1549 platform_driver_register(&s3cmci_2412_driver);
1550 platform_driver_register(&s3cmci_2440_driver);
1551 return 0;
1552} 1921}
1553 1922
1554static void __exit s3cmci_exit(void) 1923static void __exit s3cmci_exit(void)
1555{ 1924{
1556 platform_driver_unregister(&s3cmci_2410_driver); 1925 platform_driver_unregister(&s3cmci_driver);
1557 platform_driver_unregister(&s3cmci_2412_driver);
1558 platform_driver_unregister(&s3cmci_2440_driver);
1559} 1926}
1560 1927
1561module_init(s3cmci_init); 1928module_init(s3cmci_init);
@@ -1564,6 +1931,3 @@ module_exit(s3cmci_exit);
1564MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver"); 1931MODULE_DESCRIPTION("Samsung S3C MMC/SD Card Interface driver");
1565MODULE_LICENSE("GPL v2"); 1932MODULE_LICENSE("GPL v2");
1566MODULE_AUTHOR("Thomas Kleffel <tk@maintech.de>, Ben Dooks <ben-linux@fluff.org>"); 1933MODULE_AUTHOR("Thomas Kleffel <tk@maintech.de>, Ben Dooks <ben-linux@fluff.org>");
1567MODULE_ALIAS("platform:s3c2410-sdi");
1568MODULE_ALIAS("platform:s3c2412-sdi");
1569MODULE_ALIAS("platform:s3c2440-sdi");
diff --git a/drivers/mmc/host/s3cmci.h b/drivers/mmc/host/s3cmci.h
index ca1ba3d58cfd..c76b53dbeb61 100644
--- a/drivers/mmc/host/s3cmci.h
+++ b/drivers/mmc/host/s3cmci.h
@@ -8,9 +8,6 @@
8 * published by the Free Software Foundation. 8 * published by the Free Software Foundation.
9 */ 9 */
10 10
11/* FIXME: DMA Resource management ?! */
12#define S3CMCI_DMA 0
13
14enum s3cmci_waitfor { 11enum s3cmci_waitfor {
15 COMPLETION_NONE, 12 COMPLETION_NONE,
16 COMPLETION_FINALIZE, 13 COMPLETION_FINALIZE,
@@ -42,6 +39,11 @@ struct s3cmci_host {
42 int dodma; 39 int dodma;
43 int dmatogo; 40 int dmatogo;
44 41
42 bool irq_disabled;
43 bool irq_enabled;
44 bool irq_state;
45 int sdio_irqen;
46
45 struct mmc_request *mrq; 47 struct mmc_request *mrq;
46 int cmd_is_stop; 48 int cmd_is_stop;
47 49
@@ -68,6 +70,12 @@ struct s3cmci_host {
68 unsigned int ccnt, dcnt; 70 unsigned int ccnt, dcnt;
69 struct tasklet_struct pio_tasklet; 71 struct tasklet_struct pio_tasklet;
70 72
73#ifdef CONFIG_DEBUG_FS
74 struct dentry *debug_root;
75 struct dentry *debug_state;
76 struct dentry *debug_regs;
77#endif
78
71#ifdef CONFIG_CPU_FREQ 79#ifdef CONFIG_CPU_FREQ
72 struct notifier_block freq_transition; 80 struct notifier_block freq_transition;
73#endif 81#endif
diff --git a/drivers/mtd/mtd_blkdevs.c b/drivers/mtd/mtd_blkdevs.c
index 0acbf4f5be50..8ca17a3e96ea 100644
--- a/drivers/mtd/mtd_blkdevs.c
+++ b/drivers/mtd/mtd_blkdevs.c
@@ -32,14 +32,6 @@ struct mtd_blkcore_priv {
32 spinlock_t queue_lock; 32 spinlock_t queue_lock;
33}; 33};
34 34
35static int blktrans_discard_request(struct request_queue *q,
36 struct request *req)
37{
38 req->cmd_type = REQ_TYPE_LINUX_BLOCK;
39 req->cmd[0] = REQ_LB_OP_DISCARD;
40 return 0;
41}
42
43static int do_blktrans_request(struct mtd_blktrans_ops *tr, 35static int do_blktrans_request(struct mtd_blktrans_ops *tr,
44 struct mtd_blktrans_dev *dev, 36 struct mtd_blktrans_dev *dev,
45 struct request *req) 37 struct request *req)
@@ -52,10 +44,6 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
52 44
53 buf = req->buffer; 45 buf = req->buffer;
54 46
55 if (req->cmd_type == REQ_TYPE_LINUX_BLOCK &&
56 req->cmd[0] == REQ_LB_OP_DISCARD)
57 return tr->discard(dev, block, nsect);
58
59 if (!blk_fs_request(req)) 47 if (!blk_fs_request(req))
60 return -EIO; 48 return -EIO;
61 49
@@ -63,6 +51,9 @@ static int do_blktrans_request(struct mtd_blktrans_ops *tr,
63 get_capacity(req->rq_disk)) 51 get_capacity(req->rq_disk))
64 return -EIO; 52 return -EIO;
65 53
54 if (blk_discard_rq(req))
55 return tr->discard(dev, block, nsect);
56
66 switch(rq_data_dir(req)) { 57 switch(rq_data_dir(req)) {
67 case READ: 58 case READ:
68 for (; nsect > 0; nsect--, block++, buf += tr->blksize) 59 for (; nsect > 0; nsect--, block++, buf += tr->blksize)
@@ -380,8 +371,8 @@ int register_mtd_blktrans(struct mtd_blktrans_ops *tr)
380 tr->blkcore_priv->rq->queuedata = tr; 371 tr->blkcore_priv->rq->queuedata = tr;
381 blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize); 372 blk_queue_logical_block_size(tr->blkcore_priv->rq, tr->blksize);
382 if (tr->discard) 373 if (tr->discard)
383 blk_queue_set_discard(tr->blkcore_priv->rq, 374 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD,
384 blktrans_discard_request); 375 tr->blkcore_priv->rq);
385 376
386 tr->blkshift = ffs(tr->blksize) - 1; 377 tr->blkshift = ffs(tr->blksize) - 1;
387 378
diff --git a/drivers/net/3c59x.c b/drivers/net/3c59x.c
index 975e25b19ebe..32031eaf4910 100644
--- a/drivers/net/3c59x.c
+++ b/drivers/net/3c59x.c
@@ -2560,7 +2560,7 @@ boomerang_rx(struct net_device *dev)
2560 struct sk_buff *skb; 2560 struct sk_buff *skb;
2561 entry = vp->dirty_rx % RX_RING_SIZE; 2561 entry = vp->dirty_rx % RX_RING_SIZE;
2562 if (vp->rx_skbuff[entry] == NULL) { 2562 if (vp->rx_skbuff[entry] == NULL) {
2563 skb = netdev_alloc_skb(dev, PKT_BUF_SZ + NET_IP_ALIGN); 2563 skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
2564 if (skb == NULL) { 2564 if (skb == NULL) {
2565 static unsigned long last_jif; 2565 static unsigned long last_jif;
2566 if (time_after(jiffies, last_jif + 10 * HZ)) { 2566 if (time_after(jiffies, last_jif + 10 * HZ)) {
@@ -2572,7 +2572,6 @@ boomerang_rx(struct net_device *dev)
2572 break; /* Bad news! */ 2572 break; /* Bad news! */
2573 } 2573 }
2574 2574
2575 skb_reserve(skb, NET_IP_ALIGN);
2576 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE)); 2575 vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(VORTEX_PCI(vp), skb->data, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
2577 vp->rx_skbuff[entry] = skb; 2576 vp->rx_skbuff[entry] = skb;
2578 } 2577 }
diff --git a/drivers/net/8139cp.c b/drivers/net/8139cp.c
index 83a1922e68e0..ab451bb8995a 100644
--- a/drivers/net/8139cp.c
+++ b/drivers/net/8139cp.c
@@ -549,14 +549,12 @@ rx_status_loop:
549 pr_debug("%s: rx slot %d status 0x%x len %d\n", 549 pr_debug("%s: rx slot %d status 0x%x len %d\n",
550 dev->name, rx_tail, status, len); 550 dev->name, rx_tail, status, len);
551 551
552 new_skb = netdev_alloc_skb(dev, buflen + NET_IP_ALIGN); 552 new_skb = netdev_alloc_skb_ip_align(dev, buflen);
553 if (!new_skb) { 553 if (!new_skb) {
554 dev->stats.rx_dropped++; 554 dev->stats.rx_dropped++;
555 goto rx_next; 555 goto rx_next;
556 } 556 }
557 557
558 skb_reserve(new_skb, NET_IP_ALIGN);
559
560 dma_unmap_single(&cp->pdev->dev, mapping, 558 dma_unmap_single(&cp->pdev->dev, mapping,
561 buflen, PCI_DMA_FROMDEVICE); 559 buflen, PCI_DMA_FROMDEVICE);
562 560
@@ -1057,12 +1055,10 @@ static int cp_refill_rx(struct cp_private *cp)
1057 struct sk_buff *skb; 1055 struct sk_buff *skb;
1058 dma_addr_t mapping; 1056 dma_addr_t mapping;
1059 1057
1060 skb = netdev_alloc_skb(dev, cp->rx_buf_sz + NET_IP_ALIGN); 1058 skb = netdev_alloc_skb_ip_align(dev, cp->rx_buf_sz);
1061 if (!skb) 1059 if (!skb)
1062 goto err_out; 1060 goto err_out;
1063 1061
1064 skb_reserve(skb, NET_IP_ALIGN);
1065
1066 mapping = dma_map_single(&cp->pdev->dev, skb->data, 1062 mapping = dma_map_single(&cp->pdev->dev, skb->data,
1067 cp->rx_buf_sz, PCI_DMA_FROMDEVICE); 1063 cp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1068 cp->rx_skb[i] = skb; 1064 cp->rx_skb[i] = skb;
diff --git a/drivers/net/8139too.c b/drivers/net/8139too.c
index 4a3628755026..7e333f73b228 100644
--- a/drivers/net/8139too.c
+++ b/drivers/net/8139too.c
@@ -2004,9 +2004,8 @@ no_early_rx:
2004 /* Malloc up new buffer, compatible with net-2e. */ 2004 /* Malloc up new buffer, compatible with net-2e. */
2005 /* Omit the four octet CRC from the length. */ 2005 /* Omit the four octet CRC from the length. */
2006 2006
2007 skb = netdev_alloc_skb(dev, pkt_size + NET_IP_ALIGN); 2007 skb = netdev_alloc_skb_ip_align(dev, pkt_size);
2008 if (likely(skb)) { 2008 if (likely(skb)) {
2009 skb_reserve (skb, NET_IP_ALIGN); /* 16 byte align the IP fields. */
2010#if RX_BUF_IDX == 3 2009#if RX_BUF_IDX == 3
2011 wrap_copy(skb, rx_ring, ring_offset+4, pkt_size); 2010 wrap_copy(skb, rx_ring, ring_offset+4, pkt_size);
2012#else 2011#else
diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig
index e19ca4bb7510..e012c2e0825a 100644
--- a/drivers/net/Kconfig
+++ b/drivers/net/Kconfig
@@ -1001,7 +1001,7 @@ config SMC911X
1001 1001
1002config SMSC911X 1002config SMSC911X
1003 tristate "SMSC LAN911x/LAN921x families embedded ethernet support" 1003 tristate "SMSC LAN911x/LAN921x families embedded ethernet support"
1004 depends on ARM || SUPERH || BLACKFIN 1004 depends on ARM || SUPERH || BLACKFIN || MIPS
1005 select CRC32 1005 select CRC32
1006 select MII 1006 select MII
1007 select PHYLIB 1007 select PHYLIB
diff --git a/drivers/net/arm/ks8695net.c b/drivers/net/arm/ks8695net.c
index 2a7b7745cc55..be256b34cea8 100644
--- a/drivers/net/arm/ks8695net.c
+++ b/drivers/net/arm/ks8695net.c
@@ -35,11 +35,13 @@
35 35
36#include <mach/regs-switch.h> 36#include <mach/regs-switch.h>
37#include <mach/regs-misc.h> 37#include <mach/regs-misc.h>
38#include <asm/mach/irq.h>
39#include <mach/regs-irq.h>
38 40
39#include "ks8695net.h" 41#include "ks8695net.h"
40 42
41#define MODULENAME "ks8695_ether" 43#define MODULENAME "ks8695_ether"
42#define MODULEVERSION "1.01" 44#define MODULEVERSION "1.02"
43 45
44/* 46/*
45 * Transmit and device reset timeout, default 5 seconds. 47 * Transmit and device reset timeout, default 5 seconds.
@@ -95,6 +97,9 @@ struct ks8695_skbuff {
95#define MAX_RX_DESC 16 97#define MAX_RX_DESC 16
96#define MAX_RX_DESC_MASK 0xf 98#define MAX_RX_DESC_MASK 0xf
97 99
100/*napi_weight have better more than rx DMA buffers*/
101#define NAPI_WEIGHT 64
102
98#define MAX_RXBUF_SIZE 0x700 103#define MAX_RXBUF_SIZE 0x700
99 104
100#define TX_RING_DMA_SIZE (sizeof(struct tx_ring_desc) * MAX_TX_DESC) 105#define TX_RING_DMA_SIZE (sizeof(struct tx_ring_desc) * MAX_TX_DESC)
@@ -120,6 +125,7 @@ enum ks8695_dtype {
120 * @dev: The platform device object for this interface 125 * @dev: The platform device object for this interface
121 * @dtype: The type of this device 126 * @dtype: The type of this device
122 * @io_regs: The ioremapped registers for this interface 127 * @io_regs: The ioremapped registers for this interface
128 * @napi : Add support NAPI for Rx
123 * @rx_irq_name: The textual name of the RX IRQ from the platform data 129 * @rx_irq_name: The textual name of the RX IRQ from the platform data
124 * @tx_irq_name: The textual name of the TX IRQ from the platform data 130 * @tx_irq_name: The textual name of the TX IRQ from the platform data
125 * @link_irq_name: The textual name of the link IRQ from the 131 * @link_irq_name: The textual name of the link IRQ from the
@@ -143,6 +149,7 @@ enum ks8695_dtype {
143 * @rx_ring_dma: The DMA mapped equivalent of rx_ring 149 * @rx_ring_dma: The DMA mapped equivalent of rx_ring
144 * @rx_buffers: The sk_buff mappings for the RX ring 150 * @rx_buffers: The sk_buff mappings for the RX ring
145 * @next_rx_desc_read: The next RX descriptor to read from on IRQ 151 * @next_rx_desc_read: The next RX descriptor to read from on IRQ
152 * @rx_lock: A lock to protect Rx irq function
146 * @msg_enable: The flags for which messages to emit 153 * @msg_enable: The flags for which messages to emit
147 */ 154 */
148struct ks8695_priv { 155struct ks8695_priv {
@@ -152,6 +159,8 @@ struct ks8695_priv {
152 enum ks8695_dtype dtype; 159 enum ks8695_dtype dtype;
153 void __iomem *io_regs; 160 void __iomem *io_regs;
154 161
162 struct napi_struct napi;
163
155 const char *rx_irq_name, *tx_irq_name, *link_irq_name; 164 const char *rx_irq_name, *tx_irq_name, *link_irq_name;
156 int rx_irq, tx_irq, link_irq; 165 int rx_irq, tx_irq, link_irq;
157 166
@@ -172,6 +181,7 @@ struct ks8695_priv {
172 dma_addr_t rx_ring_dma; 181 dma_addr_t rx_ring_dma;
173 struct ks8695_skbuff rx_buffers[MAX_RX_DESC]; 182 struct ks8695_skbuff rx_buffers[MAX_RX_DESC];
174 int next_rx_desc_read; 183 int next_rx_desc_read;
184 spinlock_t rx_lock;
175 185
176 int msg_enable; 186 int msg_enable;
177}; 187};
@@ -392,29 +402,74 @@ ks8695_tx_irq(int irq, void *dev_id)
392} 402}
393 403
394/** 404/**
405 * ks8695_get_rx_enable_bit - Get rx interrupt enable/status bit
406 * @ksp: Private data for the KS8695 Ethernet
407 *
408 * For KS8695 document:
409 * Interrupt Enable Register (offset 0xE204)
410 * Bit29 : WAN MAC Receive Interrupt Enable
411 * Bit16 : LAN MAC Receive Interrupt Enable
412 * Interrupt Status Register (Offset 0xF208)
413 * Bit29: WAN MAC Receive Status
414 * Bit16: LAN MAC Receive Status
415 * So, this Rx interrrupt enable/status bit number is equal
416 * as Rx IRQ number.
417 */
418static inline u32 ks8695_get_rx_enable_bit(struct ks8695_priv *ksp)
419{
420 return ksp->rx_irq;
421}
422
423/**
395 * ks8695_rx_irq - Receive IRQ handler 424 * ks8695_rx_irq - Receive IRQ handler
396 * @irq: The IRQ which went off (ignored) 425 * @irq: The IRQ which went off (ignored)
397 * @dev_id: The net_device for the interrupt 426 * @dev_id: The net_device for the interrupt
398 * 427 *
399 * Process the RX ring, passing any received packets up to the 428 * Inform NAPI that packet reception needs to be scheduled
400 * host. If we received anything other than errors, we then
401 * refill the ring.
402 */ 429 */
430
403static irqreturn_t 431static irqreturn_t
404ks8695_rx_irq(int irq, void *dev_id) 432ks8695_rx_irq(int irq, void *dev_id)
405{ 433{
406 struct net_device *ndev = (struct net_device *)dev_id; 434 struct net_device *ndev = (struct net_device *)dev_id;
407 struct ks8695_priv *ksp = netdev_priv(ndev); 435 struct ks8695_priv *ksp = netdev_priv(ndev);
436
437 spin_lock(&ksp->rx_lock);
438
439 if (napi_schedule_prep(&ksp->napi)) {
440 unsigned long status = readl(KS8695_IRQ_VA + KS8695_INTEN);
441 unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
442 /*disable rx interrupt*/
443 status &= ~mask_bit;
444 writel(status , KS8695_IRQ_VA + KS8695_INTEN);
445 __napi_schedule(&ksp->napi);
446 }
447
448 spin_unlock(&ksp->rx_lock);
449 return IRQ_HANDLED;
450}
451
452/**
453 * ks8695_rx - Receive packets called by NAPI poll method
454 * @ksp: Private data for the KS8695 Ethernet
455 * @budget: The max packets would be receive
456 */
457
458static int ks8695_rx(struct ks8695_priv *ksp, int budget)
459{
460 struct net_device *ndev = ksp->ndev;
408 struct sk_buff *skb; 461 struct sk_buff *skb;
409 int buff_n; 462 int buff_n;
410 u32 flags; 463 u32 flags;
411 int pktlen; 464 int pktlen;
412 int last_rx_processed = -1; 465 int last_rx_processed = -1;
466 int received = 0;
413 467
414 buff_n = ksp->next_rx_desc_read; 468 buff_n = ksp->next_rx_desc_read;
415 do { 469 while (received < budget
416 if (ksp->rx_buffers[buff_n].skb && 470 && ksp->rx_buffers[buff_n].skb
417 !(ksp->rx_ring[buff_n].status & cpu_to_le32(RDES_OWN))) { 471 && (!(ksp->rx_ring[buff_n].status &
472 cpu_to_le32(RDES_OWN)))) {
418 rmb(); 473 rmb();
419 flags = le32_to_cpu(ksp->rx_ring[buff_n].status); 474 flags = le32_to_cpu(ksp->rx_ring[buff_n].status);
420 /* Found an SKB which we own, this means we 475 /* Found an SKB which we own, this means we
@@ -464,7 +519,7 @@ ks8695_rx_irq(int irq, void *dev_id)
464 /* Relinquish the SKB to the network layer */ 519 /* Relinquish the SKB to the network layer */
465 skb_put(skb, pktlen); 520 skb_put(skb, pktlen);
466 skb->protocol = eth_type_trans(skb, ndev); 521 skb->protocol = eth_type_trans(skb, ndev);
467 netif_rx(skb); 522 netif_receive_skb(skb);
468 523
469 /* Record stats */ 524 /* Record stats */
470 ndev->stats.rx_packets++; 525 ndev->stats.rx_packets++;
@@ -478,29 +533,55 @@ rx_failure:
478 /* Give the ring entry back to the hardware */ 533 /* Give the ring entry back to the hardware */
479 ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN); 534 ksp->rx_ring[buff_n].status = cpu_to_le32(RDES_OWN);
480rx_finished: 535rx_finished:
536 received++;
481 /* And note this as processed so we can start 537 /* And note this as processed so we can start
482 * from here next time 538 * from here next time
483 */ 539 */
484 last_rx_processed = buff_n; 540 last_rx_processed = buff_n;
485 } else { 541 buff_n = (buff_n + 1) & MAX_RX_DESC_MASK;
486 /* Ran out of things to process, stop now */ 542 /*And note which RX descriptor we last did */
487 break; 543 if (likely(last_rx_processed != -1))
488 } 544 ksp->next_rx_desc_read =
489 buff_n = (buff_n + 1) & MAX_RX_DESC_MASK; 545 (last_rx_processed + 1) &
490 } while (buff_n != ksp->next_rx_desc_read); 546 MAX_RX_DESC_MASK;
491 547 }
492 /* And note which RX descriptor we last did anything with */
493 if (likely(last_rx_processed != -1))
494 ksp->next_rx_desc_read =
495 (last_rx_processed + 1) & MAX_RX_DESC_MASK;
496
497 /* And refill the buffers */ 548 /* And refill the buffers */
498 ks8695_refill_rxbuffers(ksp); 549 ks8695_refill_rxbuffers(ksp);
499 550
500 /* Kick the RX DMA engine, in case it became suspended */ 551 /* Kick the RX DMA engine, in case it became
552 * suspended */
501 ks8695_writereg(ksp, KS8695_DRSC, 0); 553 ks8695_writereg(ksp, KS8695_DRSC, 0);
554 return received;
555}
502 556
503 return IRQ_HANDLED; 557
558/**
559 * ks8695_poll - Receive packet by NAPI poll method
560 * @ksp: Private data for the KS8695 Ethernet
561 * @budget: The remaining number packets for network subsystem
562 *
563 * Invoked by the network core when it requests for new
564 * packets from the driver
565 */
566static int ks8695_poll(struct napi_struct *napi, int budget)
567{
568 struct ks8695_priv *ksp = container_of(napi, struct ks8695_priv, napi);
569 unsigned long work_done;
570
571 unsigned long isr = readl(KS8695_IRQ_VA + KS8695_INTEN);
572 unsigned long mask_bit = 1 << ks8695_get_rx_enable_bit(ksp);
573
574 work_done = ks8695_rx(ksp, budget);
575
576 if (work_done < budget) {
577 unsigned long flags;
578 spin_lock_irqsave(&ksp->rx_lock, flags);
579 /*enable rx interrupt*/
580 writel(isr | mask_bit, KS8695_IRQ_VA + KS8695_INTEN);
581 __napi_complete(napi);
582 spin_unlock_irqrestore(&ksp->rx_lock, flags);
583 }
584 return work_done;
504} 585}
505 586
506/** 587/**
@@ -1253,6 +1334,7 @@ ks8695_stop(struct net_device *ndev)
1253 struct ks8695_priv *ksp = netdev_priv(ndev); 1334 struct ks8695_priv *ksp = netdev_priv(ndev);
1254 1335
1255 netif_stop_queue(ndev); 1336 netif_stop_queue(ndev);
1337 napi_disable(&ksp->napi);
1256 netif_carrier_off(ndev); 1338 netif_carrier_off(ndev);
1257 1339
1258 ks8695_shutdown(ksp); 1340 ks8695_shutdown(ksp);
@@ -1287,6 +1369,7 @@ ks8695_open(struct net_device *ndev)
1287 return ret; 1369 return ret;
1288 } 1370 }
1289 1371
1372 napi_enable(&ksp->napi);
1290 netif_start_queue(ndev); 1373 netif_start_queue(ndev);
1291 1374
1292 return 0; 1375 return 0;
@@ -1472,6 +1555,8 @@ ks8695_probe(struct platform_device *pdev)
1472 SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops); 1555 SET_ETHTOOL_OPS(ndev, &ks8695_ethtool_ops);
1473 ndev->watchdog_timeo = msecs_to_jiffies(watchdog); 1556 ndev->watchdog_timeo = msecs_to_jiffies(watchdog);
1474 1557
1558 netif_napi_add(ndev, &ksp->napi, ks8695_poll, NAPI_WEIGHT);
1559
1475 /* Retrieve the default MAC addr from the chip. */ 1560 /* Retrieve the default MAC addr from the chip. */
1476 /* The bootloader should have left it in there for us. */ 1561 /* The bootloader should have left it in there for us. */
1477 1562
@@ -1505,6 +1590,7 @@ ks8695_probe(struct platform_device *pdev)
1505 1590
1506 /* And initialise the queue's lock */ 1591 /* And initialise the queue's lock */
1507 spin_lock_init(&ksp->txq_lock); 1592 spin_lock_init(&ksp->txq_lock);
1593 spin_lock_init(&ksp->rx_lock);
1508 1594
1509 /* Specify the RX DMA ring buffer */ 1595 /* Specify the RX DMA ring buffer */
1510 ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE; 1596 ksp->rx_ring = ksp->ring_base + TX_RING_DMA_SIZE;
@@ -1626,6 +1712,7 @@ ks8695_drv_remove(struct platform_device *pdev)
1626 struct ks8695_priv *ksp = netdev_priv(ndev); 1712 struct ks8695_priv *ksp = netdev_priv(ndev);
1627 1713
1628 platform_set_drvdata(pdev, NULL); 1714 platform_set_drvdata(pdev, NULL);
1715 netif_napi_del(&ksp->napi);
1629 1716
1630 unregister_netdev(ndev); 1717 unregister_netdev(ndev);
1631 ks8695_release_device(ksp); 1718 ks8695_release_device(ksp);
diff --git a/drivers/net/arm/w90p910_ether.c b/drivers/net/arm/w90p910_ether.c
index 25e2627eb118..b7f3866d546f 100644
--- a/drivers/net/arm/w90p910_ether.c
+++ b/drivers/net/arm/w90p910_ether.c
@@ -160,8 +160,8 @@ struct w90p910_ether {
160 struct mii_if_info mii; 160 struct mii_if_info mii;
161 struct timer_list check_timer; 161 struct timer_list check_timer;
162 void __iomem *reg; 162 void __iomem *reg;
163 unsigned int rxirq; 163 int rxirq;
164 unsigned int txirq; 164 int txirq;
165 unsigned int cur_tx; 165 unsigned int cur_tx;
166 unsigned int cur_rx; 166 unsigned int cur_rx;
167 unsigned int finish_tx; 167 unsigned int finish_tx;
diff --git a/drivers/net/atl1c/atl1c.h b/drivers/net/atl1c/atl1c.h
index 2a1120ad2e74..a348a22551d9 100644
--- a/drivers/net/atl1c/atl1c.h
+++ b/drivers/net/atl1c/atl1c.h
@@ -470,12 +470,28 @@ struct atl1c_ring_header {
470struct atl1c_buffer { 470struct atl1c_buffer {
471 struct sk_buff *skb; /* socket buffer */ 471 struct sk_buff *skb; /* socket buffer */
472 u16 length; /* rx buffer length */ 472 u16 length; /* rx buffer length */
473 u16 state; /* state of buffer */ 473 u16 flags; /* information of buffer */
474#define ATL1_BUFFER_FREE 0 474#define ATL1C_BUFFER_FREE 0x0001
475#define ATL1_BUFFER_BUSY 1 475#define ATL1C_BUFFER_BUSY 0x0002
476#define ATL1C_BUFFER_STATE_MASK 0x0003
477
478#define ATL1C_PCIMAP_SINGLE 0x0004
479#define ATL1C_PCIMAP_PAGE 0x0008
480#define ATL1C_PCIMAP_TYPE_MASK 0x000C
481
476 dma_addr_t dma; 482 dma_addr_t dma;
477}; 483};
478 484
485#define ATL1C_SET_BUFFER_STATE(buff, state) do { \
486 ((buff)->flags) &= ~ATL1C_BUFFER_STATE_MASK; \
487 ((buff)->flags) |= (state); \
488 } while (0)
489
490#define ATL1C_SET_PCIMAP_TYPE(buff, type) do { \
491 ((buff)->flags) &= ~ATL1C_PCIMAP_TYPE_MASK; \
492 ((buff)->flags) |= (type); \
493 } while (0)
494
479/* transimit packet descriptor (tpd) ring */ 495/* transimit packet descriptor (tpd) ring */
480struct atl1c_tpd_ring { 496struct atl1c_tpd_ring {
481 void *desc; /* descriptor ring virtual address */ 497 void *desc; /* descriptor ring virtual address */
diff --git a/drivers/net/atl1c/atl1c_main.c b/drivers/net/atl1c/atl1c_main.c
index 1372e9a99f5b..5ef9e23435f4 100644
--- a/drivers/net/atl1c/atl1c_main.c
+++ b/drivers/net/atl1c/atl1c_main.c
@@ -710,6 +710,29 @@ static int __devinit atl1c_sw_init(struct atl1c_adapter *adapter)
710 return 0; 710 return 0;
711} 711}
712 712
713static inline void atl1c_clean_buffer(struct pci_dev *pdev,
714 struct atl1c_buffer *buffer_info, int in_irq)
715{
716 if (buffer_info->flags & ATL1C_BUFFER_FREE)
717 return;
718 if (buffer_info->dma) {
719 if (buffer_info->flags & ATL1C_PCIMAP_SINGLE)
720 pci_unmap_single(pdev, buffer_info->dma,
721 buffer_info->length, PCI_DMA_TODEVICE);
722 else if (buffer_info->flags & ATL1C_PCIMAP_PAGE)
723 pci_unmap_page(pdev, buffer_info->dma,
724 buffer_info->length, PCI_DMA_TODEVICE);
725 }
726 if (buffer_info->skb) {
727 if (in_irq)
728 dev_kfree_skb_irq(buffer_info->skb);
729 else
730 dev_kfree_skb(buffer_info->skb);
731 }
732 buffer_info->dma = 0;
733 buffer_info->skb = NULL;
734 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
735}
713/* 736/*
714 * atl1c_clean_tx_ring - Free Tx-skb 737 * atl1c_clean_tx_ring - Free Tx-skb
715 * @adapter: board private structure 738 * @adapter: board private structure
@@ -725,22 +748,12 @@ static void atl1c_clean_tx_ring(struct atl1c_adapter *adapter,
725 ring_count = tpd_ring->count; 748 ring_count = tpd_ring->count;
726 for (index = 0; index < ring_count; index++) { 749 for (index = 0; index < ring_count; index++) {
727 buffer_info = &tpd_ring->buffer_info[index]; 750 buffer_info = &tpd_ring->buffer_info[index];
728 if (buffer_info->state == ATL1_BUFFER_FREE) 751 atl1c_clean_buffer(pdev, buffer_info, 0);
729 continue;
730 if (buffer_info->dma)
731 pci_unmap_single(pdev, buffer_info->dma,
732 buffer_info->length,
733 PCI_DMA_TODEVICE);
734 if (buffer_info->skb)
735 dev_kfree_skb(buffer_info->skb);
736 buffer_info->dma = 0;
737 buffer_info->skb = NULL;
738 buffer_info->state = ATL1_BUFFER_FREE;
739 } 752 }
740 753
741 /* Zero out Tx-buffers */ 754 /* Zero out Tx-buffers */
742 memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) * 755 memset(tpd_ring->desc, 0, sizeof(struct atl1c_tpd_desc) *
743 ring_count); 756 ring_count);
744 atomic_set(&tpd_ring->next_to_clean, 0); 757 atomic_set(&tpd_ring->next_to_clean, 0);
745 tpd_ring->next_to_use = 0; 758 tpd_ring->next_to_use = 0;
746} 759}
@@ -760,16 +773,7 @@ static void atl1c_clean_rx_ring(struct atl1c_adapter *adapter)
760 for (i = 0; i < adapter->num_rx_queues; i++) { 773 for (i = 0; i < adapter->num_rx_queues; i++) {
761 for (j = 0; j < rfd_ring[i].count; j++) { 774 for (j = 0; j < rfd_ring[i].count; j++) {
762 buffer_info = &rfd_ring[i].buffer_info[j]; 775 buffer_info = &rfd_ring[i].buffer_info[j];
763 if (buffer_info->state == ATL1_BUFFER_FREE) 776 atl1c_clean_buffer(pdev, buffer_info, 0);
764 continue;
765 if (buffer_info->dma)
766 pci_unmap_single(pdev, buffer_info->dma,
767 buffer_info->length,
768 PCI_DMA_FROMDEVICE);
769 if (buffer_info->skb)
770 dev_kfree_skb(buffer_info->skb);
771 buffer_info->state = ATL1_BUFFER_FREE;
772 buffer_info->skb = NULL;
773 } 777 }
774 /* zero out the descriptor ring */ 778 /* zero out the descriptor ring */
775 memset(rfd_ring[i].desc, 0, rfd_ring[i].size); 779 memset(rfd_ring[i].desc, 0, rfd_ring[i].size);
@@ -796,7 +800,8 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
796 atomic_set(&tpd_ring[i].next_to_clean, 0); 800 atomic_set(&tpd_ring[i].next_to_clean, 0);
797 buffer_info = tpd_ring[i].buffer_info; 801 buffer_info = tpd_ring[i].buffer_info;
798 for (j = 0; j < tpd_ring->count; j++) 802 for (j = 0; j < tpd_ring->count; j++)
799 buffer_info[i].state = ATL1_BUFFER_FREE; 803 ATL1C_SET_BUFFER_STATE(&buffer_info[i],
804 ATL1C_BUFFER_FREE);
800 } 805 }
801 for (i = 0; i < adapter->num_rx_queues; i++) { 806 for (i = 0; i < adapter->num_rx_queues; i++) {
802 rfd_ring[i].next_to_use = 0; 807 rfd_ring[i].next_to_use = 0;
@@ -805,7 +810,7 @@ static void atl1c_init_ring_ptrs(struct atl1c_adapter *adapter)
805 rrd_ring[i].next_to_clean = 0; 810 rrd_ring[i].next_to_clean = 0;
806 for (j = 0; j < rfd_ring[i].count; j++) { 811 for (j = 0; j < rfd_ring[i].count; j++) {
807 buffer_info = &rfd_ring[i].buffer_info[j]; 812 buffer_info = &rfd_ring[i].buffer_info[j];
808 buffer_info->state = ATL1_BUFFER_FREE; 813 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_FREE);
809 } 814 }
810 } 815 }
811} 816}
@@ -1447,6 +1452,7 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
1447 struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *) 1452 struct atl1c_tpd_ring *tpd_ring = (struct atl1c_tpd_ring *)
1448 &adapter->tpd_ring[type]; 1453 &adapter->tpd_ring[type];
1449 struct atl1c_buffer *buffer_info; 1454 struct atl1c_buffer *buffer_info;
1455 struct pci_dev *pdev = adapter->pdev;
1450 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean); 1456 u16 next_to_clean = atomic_read(&tpd_ring->next_to_clean);
1451 u16 hw_next_to_clean; 1457 u16 hw_next_to_clean;
1452 u16 shift; 1458 u16 shift;
@@ -1462,16 +1468,7 @@ static bool atl1c_clean_tx_irq(struct atl1c_adapter *adapter,
1462 1468
1463 while (next_to_clean != hw_next_to_clean) { 1469 while (next_to_clean != hw_next_to_clean) {
1464 buffer_info = &tpd_ring->buffer_info[next_to_clean]; 1470 buffer_info = &tpd_ring->buffer_info[next_to_clean];
1465 if (buffer_info->state == ATL1_BUFFER_BUSY) { 1471 atl1c_clean_buffer(pdev, buffer_info, 1);
1466 pci_unmap_page(adapter->pdev, buffer_info->dma,
1467 buffer_info->length, PCI_DMA_TODEVICE);
1468 buffer_info->dma = 0;
1469 if (buffer_info->skb) {
1470 dev_kfree_skb_irq(buffer_info->skb);
1471 buffer_info->skb = NULL;
1472 }
1473 buffer_info->state = ATL1_BUFFER_FREE;
1474 }
1475 if (++next_to_clean == tpd_ring->count) 1472 if (++next_to_clean == tpd_ring->count)
1476 next_to_clean = 0; 1473 next_to_clean = 0;
1477 atomic_set(&tpd_ring->next_to_clean, next_to_clean); 1474 atomic_set(&tpd_ring->next_to_clean, next_to_clean);
@@ -1587,7 +1584,7 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid
1587 buffer_info = &rfd_ring->buffer_info[rfd_next_to_use]; 1584 buffer_info = &rfd_ring->buffer_info[rfd_next_to_use];
1588 next_info = &rfd_ring->buffer_info[next_next]; 1585 next_info = &rfd_ring->buffer_info[next_next];
1589 1586
1590 while (next_info->state == ATL1_BUFFER_FREE) { 1587 while (next_info->flags & ATL1C_BUFFER_FREE) {
1591 rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use); 1588 rfd_desc = ATL1C_RFD_DESC(rfd_ring, rfd_next_to_use);
1592 1589
1593 skb = dev_alloc_skb(adapter->rx_buffer_len); 1590 skb = dev_alloc_skb(adapter->rx_buffer_len);
@@ -1603,12 +1600,13 @@ static int atl1c_alloc_rx_buffer(struct atl1c_adapter *adapter, const int ringid
1603 * the 14 byte MAC header is removed 1600 * the 14 byte MAC header is removed
1604 */ 1601 */
1605 vir_addr = skb->data; 1602 vir_addr = skb->data;
1606 buffer_info->state = ATL1_BUFFER_BUSY; 1603 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
1607 buffer_info->skb = skb; 1604 buffer_info->skb = skb;
1608 buffer_info->length = adapter->rx_buffer_len; 1605 buffer_info->length = adapter->rx_buffer_len;
1609 buffer_info->dma = pci_map_single(pdev, vir_addr, 1606 buffer_info->dma = pci_map_single(pdev, vir_addr,
1610 buffer_info->length, 1607 buffer_info->length,
1611 PCI_DMA_FROMDEVICE); 1608 PCI_DMA_FROMDEVICE);
1609 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE);
1612 rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma); 1610 rfd_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
1613 rfd_next_to_use = next_next; 1611 rfd_next_to_use = next_next;
1614 if (++next_next == rfd_ring->count) 1612 if (++next_next == rfd_ring->count)
@@ -1653,7 +1651,8 @@ static void atl1c_clean_rfd(struct atl1c_rfd_ring *rfd_ring,
1653 RRS_RX_RFD_INDEX_MASK; 1651 RRS_RX_RFD_INDEX_MASK;
1654 for (i = 0; i < num; i++) { 1652 for (i = 0; i < num; i++) {
1655 buffer_info[rfd_index].skb = NULL; 1653 buffer_info[rfd_index].skb = NULL;
1656 buffer_info[rfd_index].state = ATL1_BUFFER_FREE; 1654 ATL1C_SET_BUFFER_STATE(&buffer_info[rfd_index],
1655 ATL1C_BUFFER_FREE);
1657 if (++rfd_index == rfd_ring->count) 1656 if (++rfd_index == rfd_ring->count)
1658 rfd_index = 0; 1657 rfd_index = 0;
1659 } 1658 }
@@ -1967,7 +1966,8 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
1967 buffer_info->length = map_len; 1966 buffer_info->length = map_len;
1968 buffer_info->dma = pci_map_single(adapter->pdev, 1967 buffer_info->dma = pci_map_single(adapter->pdev,
1969 skb->data, hdr_len, PCI_DMA_TODEVICE); 1968 skb->data, hdr_len, PCI_DMA_TODEVICE);
1970 buffer_info->state = ATL1_BUFFER_BUSY; 1969 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
1970 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE);
1971 mapped_len += map_len; 1971 mapped_len += map_len;
1972 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); 1972 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
1973 use_tpd->buffer_len = cpu_to_le16(buffer_info->length); 1973 use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
@@ -1981,16 +1981,14 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
1981 else { 1981 else {
1982 use_tpd = atl1c_get_tpd(adapter, type); 1982 use_tpd = atl1c_get_tpd(adapter, type);
1983 memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc)); 1983 memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
1984 use_tpd = atl1c_get_tpd(adapter, type);
1985 memcpy(use_tpd, tpd, sizeof(struct atl1c_tpd_desc));
1986 } 1984 }
1987 buffer_info = atl1c_get_tx_buffer(adapter, use_tpd); 1985 buffer_info = atl1c_get_tx_buffer(adapter, use_tpd);
1988 buffer_info->length = buf_len - mapped_len; 1986 buffer_info->length = buf_len - mapped_len;
1989 buffer_info->dma = 1987 buffer_info->dma =
1990 pci_map_single(adapter->pdev, skb->data + mapped_len, 1988 pci_map_single(adapter->pdev, skb->data + mapped_len,
1991 buffer_info->length, PCI_DMA_TODEVICE); 1989 buffer_info->length, PCI_DMA_TODEVICE);
1992 buffer_info->state = ATL1_BUFFER_BUSY; 1990 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
1993 1991 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_SINGLE);
1994 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); 1992 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
1995 use_tpd->buffer_len = cpu_to_le16(buffer_info->length); 1993 use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
1996 } 1994 }
@@ -2010,8 +2008,8 @@ static void atl1c_tx_map(struct atl1c_adapter *adapter,
2010 frag->page_offset, 2008 frag->page_offset,
2011 buffer_info->length, 2009 buffer_info->length,
2012 PCI_DMA_TODEVICE); 2010 PCI_DMA_TODEVICE);
2013 buffer_info->state = ATL1_BUFFER_BUSY; 2011 ATL1C_SET_BUFFER_STATE(buffer_info, ATL1C_BUFFER_BUSY);
2014 2012 ATL1C_SET_PCIMAP_TYPE(buffer_info, ATL1C_PCIMAP_PAGE);
2015 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma); 2013 use_tpd->buffer_addr = cpu_to_le64(buffer_info->dma);
2016 use_tpd->buffer_len = cpu_to_le16(buffer_info->length); 2014 use_tpd->buffer_len = cpu_to_le16(buffer_info->length);
2017 } 2015 }
diff --git a/drivers/net/atl1e/atl1e_main.c b/drivers/net/atl1e/atl1e_main.c
index 955da733c2ad..8b889ab544b0 100644
--- a/drivers/net/atl1e/atl1e_main.c
+++ b/drivers/net/atl1e/atl1e_main.c
@@ -1433,14 +1433,12 @@ static void atl1e_clean_rx_irq(struct atl1e_adapter *adapter, u8 que,
1433 1433
1434 packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) & 1434 packet_size = ((prrs->word1 >> RRS_PKT_SIZE_SHIFT) &
1435 RRS_PKT_SIZE_MASK) - 4; /* CRC */ 1435 RRS_PKT_SIZE_MASK) - 4; /* CRC */
1436 skb = netdev_alloc_skb(netdev, 1436 skb = netdev_alloc_skb_ip_align(netdev, packet_size);
1437 packet_size + NET_IP_ALIGN);
1438 if (skb == NULL) { 1437 if (skb == NULL) {
1439 dev_warn(&pdev->dev, "%s: Memory squeeze," 1438 dev_warn(&pdev->dev, "%s: Memory squeeze,"
1440 "deferring packet.\n", netdev->name); 1439 "deferring packet.\n", netdev->name);
1441 goto skip_pkt; 1440 goto skip_pkt;
1442 } 1441 }
1443 skb_reserve(skb, NET_IP_ALIGN);
1444 skb->dev = netdev; 1442 skb->dev = netdev;
1445 memcpy(skb->data, (u8 *)(prrs + 1), packet_size); 1443 memcpy(skb->data, (u8 *)(prrs + 1), packet_size);
1446 skb_put(skb, packet_size); 1444 skb_put(skb, packet_size);
diff --git a/drivers/net/atlx/atl1.c b/drivers/net/atlx/atl1.c
index 00569dc1313c..963df502260a 100644
--- a/drivers/net/atlx/atl1.c
+++ b/drivers/net/atlx/atl1.c
@@ -1864,21 +1864,14 @@ static u16 atl1_alloc_rx_buffers(struct atl1_adapter *adapter)
1864 1864
1865 rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use); 1865 rfd_desc = ATL1_RFD_DESC(rfd_ring, rfd_next_to_use);
1866 1866
1867 skb = netdev_alloc_skb(adapter->netdev, 1867 skb = netdev_alloc_skb_ip_align(adapter->netdev,
1868 adapter->rx_buffer_len + NET_IP_ALIGN); 1868 adapter->rx_buffer_len);
1869 if (unlikely(!skb)) { 1869 if (unlikely(!skb)) {
1870 /* Better luck next round */ 1870 /* Better luck next round */
1871 adapter->netdev->stats.rx_dropped++; 1871 adapter->netdev->stats.rx_dropped++;
1872 break; 1872 break;
1873 } 1873 }
1874 1874
1875 /*
1876 * Make buffer alignment 2 beyond a 16 byte boundary
1877 * this will result in a 16 byte aligned IP header after
1878 * the 14 byte MAC header is removed
1879 */
1880 skb_reserve(skb, NET_IP_ALIGN);
1881
1882 buffer_info->alloced = 1; 1875 buffer_info->alloced = 1;
1883 buffer_info->skb = skb; 1876 buffer_info->skb = skb;
1884 buffer_info->length = (u16) adapter->rx_buffer_len; 1877 buffer_info->length = (u16) adapter->rx_buffer_len;
diff --git a/drivers/net/atlx/atl2.c b/drivers/net/atlx/atl2.c
index ab688862093f..0d268075bad5 100644
--- a/drivers/net/atlx/atl2.c
+++ b/drivers/net/atlx/atl2.c
@@ -409,7 +409,7 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
409 if (rxd->status.ok && rxd->status.pkt_size >= 60) { 409 if (rxd->status.ok && rxd->status.pkt_size >= 60) {
410 int rx_size = (int)(rxd->status.pkt_size - 4); 410 int rx_size = (int)(rxd->status.pkt_size - 4);
411 /* alloc new buffer */ 411 /* alloc new buffer */
412 skb = netdev_alloc_skb(netdev, rx_size + NET_IP_ALIGN); 412 skb = netdev_alloc_skb_ip_align(netdev, rx_size);
413 if (NULL == skb) { 413 if (NULL == skb) {
414 printk(KERN_WARNING 414 printk(KERN_WARNING
415 "%s: Mem squeeze, deferring packet.\n", 415 "%s: Mem squeeze, deferring packet.\n",
@@ -421,7 +421,6 @@ static void atl2_intr_rx(struct atl2_adapter *adapter)
421 netdev->stats.rx_dropped++; 421 netdev->stats.rx_dropped++;
422 break; 422 break;
423 } 423 }
424 skb_reserve(skb, NET_IP_ALIGN);
425 skb->dev = netdev; 424 skb->dev = netdev;
426 memcpy(skb->data, rxd->packet, rx_size); 425 memcpy(skb->data, rxd->packet, rx_size);
427 skb_put(skb, rx_size); 426 skb_put(skb, rx_size);
diff --git a/drivers/net/bcm63xx_enet.c b/drivers/net/bcm63xx_enet.c
index ba29dc319b34..1f6c5486d715 100644
--- a/drivers/net/bcm63xx_enet.c
+++ b/drivers/net/bcm63xx_enet.c
@@ -320,16 +320,13 @@ static int bcm_enet_receive_queue(struct net_device *dev, int budget)
320 if (len < copybreak) { 320 if (len < copybreak) {
321 struct sk_buff *nskb; 321 struct sk_buff *nskb;
322 322
323 nskb = netdev_alloc_skb(dev, len + NET_IP_ALIGN); 323 nskb = netdev_alloc_skb_ip_align(dev, len);
324 if (!nskb) { 324 if (!nskb) {
325 /* forget packet, just rearm desc */ 325 /* forget packet, just rearm desc */
326 priv->stats.rx_dropped++; 326 priv->stats.rx_dropped++;
327 continue; 327 continue;
328 } 328 }
329 329
330 /* since we're copying the data, we can align
331 * them properly */
332 skb_reserve(nskb, NET_IP_ALIGN);
333 dma_sync_single_for_cpu(kdev, desc->address, 330 dma_sync_single_for_cpu(kdev, desc->address,
334 len, DMA_FROM_DEVICE); 331 len, DMA_FROM_DEVICE);
335 memcpy(nskb->data, skb->data, len); 332 memcpy(nskb->data, skb->data, len);
diff --git a/drivers/net/benet/be.h b/drivers/net/benet/be.h
index 3b79a225628a..67e165cf3f4e 100644
--- a/drivers/net/benet/be.h
+++ b/drivers/net/benet/be.h
@@ -35,20 +35,31 @@
35#define DRV_VER "2.101.205" 35#define DRV_VER "2.101.205"
36#define DRV_NAME "be2net" 36#define DRV_NAME "be2net"
37#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC" 37#define BE_NAME "ServerEngines BladeEngine2 10Gbps NIC"
38#define BE3_NAME "ServerEngines BladeEngine3 10Gbps NIC"
38#define OC_NAME "Emulex OneConnect 10Gbps NIC" 39#define OC_NAME "Emulex OneConnect 10Gbps NIC"
40#define OC_NAME1 "Emulex OneConnect 10Gbps NIC (be3)"
39#define DRV_DESC BE_NAME "Driver" 41#define DRV_DESC BE_NAME "Driver"
40 42
41#define BE_VENDOR_ID 0x19a2 43#define BE_VENDOR_ID 0x19a2
42#define BE_DEVICE_ID1 0x211 44#define BE_DEVICE_ID1 0x211
45#define BE_DEVICE_ID2 0x221
43#define OC_DEVICE_ID1 0x700 46#define OC_DEVICE_ID1 0x700
44#define OC_DEVICE_ID2 0x701 47#define OC_DEVICE_ID2 0x701
48#define OC_DEVICE_ID3 0x710
45 49
46static inline char *nic_name(struct pci_dev *pdev) 50static inline char *nic_name(struct pci_dev *pdev)
47{ 51{
48 if (pdev->device == OC_DEVICE_ID1 || pdev->device == OC_DEVICE_ID2) 52 switch (pdev->device) {
53 case OC_DEVICE_ID1:
54 case OC_DEVICE_ID2:
49 return OC_NAME; 55 return OC_NAME;
50 else 56 case OC_DEVICE_ID3:
57 return OC_NAME1;
58 case BE_DEVICE_ID2:
59 return BE3_NAME;
60 default:
51 return BE_NAME; 61 return BE_NAME;
62 }
52} 63}
53 64
54/* Number of bytes of an RX frame that are copied to skb->data */ 65/* Number of bytes of an RX frame that are copied to skb->data */
@@ -181,7 +192,6 @@ struct be_drvr_stats {
181 192
182struct be_stats_obj { 193struct be_stats_obj {
183 struct be_drvr_stats drvr_stats; 194 struct be_drvr_stats drvr_stats;
184 struct net_device_stats net_stats;
185 struct be_dma_mem cmd; 195 struct be_dma_mem cmd;
186}; 196};
187 197
diff --git a/drivers/net/benet/be_cmds.c b/drivers/net/benet/be_cmds.c
index 28a0eda92680..cc75dd0df0d8 100644
--- a/drivers/net/benet/be_cmds.c
+++ b/drivers/net/benet/be_cmds.c
@@ -834,7 +834,7 @@ int be_cmd_get_stats(struct be_adapter *adapter, struct be_dma_mem *nonemb_cmd)
834 834
835/* Uses synchronous mcc */ 835/* Uses synchronous mcc */
836int be_cmd_link_status_query(struct be_adapter *adapter, 836int be_cmd_link_status_query(struct be_adapter *adapter,
837 bool *link_up) 837 bool *link_up, u8 *mac_speed, u16 *link_speed)
838{ 838{
839 struct be_mcc_wrb *wrb; 839 struct be_mcc_wrb *wrb;
840 struct be_cmd_req_link_status *req; 840 struct be_cmd_req_link_status *req;
@@ -855,8 +855,11 @@ int be_cmd_link_status_query(struct be_adapter *adapter,
855 status = be_mcc_notify_wait(adapter); 855 status = be_mcc_notify_wait(adapter);
856 if (!status) { 856 if (!status) {
857 struct be_cmd_resp_link_status *resp = embedded_payload(wrb); 857 struct be_cmd_resp_link_status *resp = embedded_payload(wrb);
858 if (resp->mac_speed != PHY_LINK_SPEED_ZERO) 858 if (resp->mac_speed != PHY_LINK_SPEED_ZERO) {
859 *link_up = true; 859 *link_up = true;
860 *link_speed = le16_to_cpu(resp->link_speed);
861 *mac_speed = resp->mac_speed;
862 }
860 } 863 }
861 864
862 spin_unlock_bh(&adapter->mcc_lock); 865 spin_unlock_bh(&adapter->mcc_lock);
@@ -1129,6 +1132,95 @@ int be_cmd_reset_function(struct be_adapter *adapter)
1129 return status; 1132 return status;
1130} 1133}
1131 1134
1135/* Uses sync mcc */
1136int be_cmd_set_beacon_state(struct be_adapter *adapter, u8 port_num,
1137 u8 bcn, u8 sts, u8 state)
1138{
1139 struct be_mcc_wrb *wrb;
1140 struct be_cmd_req_enable_disable_beacon *req;
1141 int status;
1142
1143 spin_lock_bh(&adapter->mcc_lock);
1144
1145 wrb = wrb_from_mccq(adapter);
1146 req = embedded_payload(wrb);
1147
1148 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1149
1150 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1151 OPCODE_COMMON_ENABLE_DISABLE_BEACON, sizeof(*req));
1152
1153 req->port_num = port_num;
1154 req->beacon_state = state;
1155 req->beacon_duration = bcn;
1156 req->status_duration = sts;
1157
1158 status = be_mcc_notify_wait(adapter);
1159
1160 spin_unlock_bh(&adapter->mcc_lock);
1161 return status;
1162}
1163
1164/* Uses sync mcc */
1165int be_cmd_get_beacon_state(struct be_adapter *adapter, u8 port_num, u32 *state)
1166{
1167 struct be_mcc_wrb *wrb;
1168 struct be_cmd_req_get_beacon_state *req;
1169 int status;
1170
1171 spin_lock_bh(&adapter->mcc_lock);
1172
1173 wrb = wrb_from_mccq(adapter);
1174 req = embedded_payload(wrb);
1175
1176 be_wrb_hdr_prepare(wrb, sizeof(*req), true, 0);
1177
1178 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1179 OPCODE_COMMON_GET_BEACON_STATE, sizeof(*req));
1180
1181 req->port_num = port_num;
1182
1183 status = be_mcc_notify_wait(adapter);
1184 if (!status) {
1185 struct be_cmd_resp_get_beacon_state *resp =
1186 embedded_payload(wrb);
1187 *state = resp->beacon_state;
1188 }
1189
1190 spin_unlock_bh(&adapter->mcc_lock);
1191 return status;
1192}
1193
1194/* Uses sync mcc */
1195int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
1196 u8 *connector)
1197{
1198 struct be_mcc_wrb *wrb;
1199 struct be_cmd_req_port_type *req;
1200 int status;
1201
1202 spin_lock_bh(&adapter->mcc_lock);
1203
1204 wrb = wrb_from_mccq(adapter);
1205 req = embedded_payload(wrb);
1206
1207 be_wrb_hdr_prepare(wrb, sizeof(struct be_cmd_resp_port_type), true, 0);
1208
1209 be_cmd_hdr_prepare(&req->hdr, CMD_SUBSYSTEM_COMMON,
1210 OPCODE_COMMON_READ_TRANSRECV_DATA, sizeof(*req));
1211
1212 req->port = cpu_to_le32(port);
1213 req->page_num = cpu_to_le32(TR_PAGE_A0);
1214 status = be_mcc_notify_wait(adapter);
1215 if (!status) {
1216 struct be_cmd_resp_port_type *resp = embedded_payload(wrb);
1217 *connector = resp->data.connector;
1218 }
1219
1220 spin_unlock_bh(&adapter->mcc_lock);
1221 return status;
1222}
1223
1132int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd, 1224int be_cmd_write_flashrom(struct be_adapter *adapter, struct be_dma_mem *cmd,
1133 u32 flash_type, u32 flash_opcode, u32 buf_size) 1225 u32 flash_type, u32 flash_opcode, u32 buf_size)
1134{ 1226{
diff --git a/drivers/net/benet/be_cmds.h b/drivers/net/benet/be_cmds.h
index e5f9676cf1bc..69dc017c814b 100644
--- a/drivers/net/benet/be_cmds.h
+++ b/drivers/net/benet/be_cmds.h
@@ -138,6 +138,9 @@ struct be_mcc_mailbox {
138#define OPCODE_COMMON_NTWK_PMAC_ADD 59 138#define OPCODE_COMMON_NTWK_PMAC_ADD 59
139#define OPCODE_COMMON_NTWK_PMAC_DEL 60 139#define OPCODE_COMMON_NTWK_PMAC_DEL 60
140#define OPCODE_COMMON_FUNCTION_RESET 61 140#define OPCODE_COMMON_FUNCTION_RESET 61
141#define OPCODE_COMMON_ENABLE_DISABLE_BEACON 69
142#define OPCODE_COMMON_GET_BEACON_STATE 70
143#define OPCODE_COMMON_READ_TRANSRECV_DATA 73
141 144
142#define OPCODE_ETH_ACPI_CONFIG 2 145#define OPCODE_ETH_ACPI_CONFIG 2
143#define OPCODE_ETH_PROMISCUOUS 3 146#define OPCODE_ETH_PROMISCUOUS 3
@@ -633,9 +636,47 @@ struct be_cmd_resp_link_status {
633 u8 mac_fault; 636 u8 mac_fault;
634 u8 mgmt_mac_duplex; 637 u8 mgmt_mac_duplex;
635 u8 mgmt_mac_speed; 638 u8 mgmt_mac_speed;
636 u16 rsvd0; 639 u16 link_speed;
640 u32 rsvd0;
637} __packed; 641} __packed;
638 642
643/******************** Port Identification ***************************/
644/* Identifies the type of port attached to NIC */
645struct be_cmd_req_port_type {
646 struct be_cmd_req_hdr hdr;
647 u32 page_num;
648 u32 port;
649};
650
651enum {
652 TR_PAGE_A0 = 0xa0,
653 TR_PAGE_A2 = 0xa2
654};
655
656struct be_cmd_resp_port_type {
657 struct be_cmd_resp_hdr hdr;
658 u32 page_num;
659 u32 port;
660 struct data {
661 u8 identifier;
662 u8 identifier_ext;
663 u8 connector;
664 u8 transceiver[8];
665 u8 rsvd0[3];
666 u8 length_km;
667 u8 length_hm;
668 u8 length_om1;
669 u8 length_om2;
670 u8 length_cu;
671 u8 length_cu_m;
672 u8 vendor_name[16];
673 u8 rsvd;
674 u8 vendor_oui[3];
675 u8 vendor_pn[16];
676 u8 vendor_rev[4];
677 } data;
678};
679
639/******************** Get FW Version *******************/ 680/******************** Get FW Version *******************/
640struct be_cmd_req_get_fw_version { 681struct be_cmd_req_get_fw_version {
641 struct be_cmd_req_hdr hdr; 682 struct be_cmd_req_hdr hdr;
@@ -699,6 +740,37 @@ struct be_cmd_resp_query_fw_cfg {
699 u32 rsvd[26]; 740 u32 rsvd[26];
700}; 741};
701 742
743/******************** Port Beacon ***************************/
744
745#define BEACON_STATE_ENABLED 0x1
746#define BEACON_STATE_DISABLED 0x0
747
748struct be_cmd_req_enable_disable_beacon {
749 struct be_cmd_req_hdr hdr;
750 u8 port_num;
751 u8 beacon_state;
752 u8 beacon_duration;
753 u8 status_duration;
754} __packed;
755
756struct be_cmd_resp_enable_disable_beacon {
757 struct be_cmd_resp_hdr resp_hdr;
758 u32 rsvd0;
759} __packed;
760
761struct be_cmd_req_get_beacon_state {
762 struct be_cmd_req_hdr hdr;
763 u8 port_num;
764 u8 rsvd0;
765 u16 rsvd1;
766} __packed;
767
768struct be_cmd_resp_get_beacon_state {
769 struct be_cmd_resp_hdr resp_hdr;
770 u8 beacon_state;
771 u8 rsvd0[3];
772} __packed;
773
702/****************** Firmware Flash ******************/ 774/****************** Firmware Flash ******************/
703struct flashrom_params { 775struct flashrom_params {
704 u32 op_code; 776 u32 op_code;
@@ -743,7 +815,7 @@ extern int be_cmd_rxq_create(struct be_adapter *adapter,
743extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q, 815extern int be_cmd_q_destroy(struct be_adapter *adapter, struct be_queue_info *q,
744 int type); 816 int type);
745extern int be_cmd_link_status_query(struct be_adapter *adapter, 817extern int be_cmd_link_status_query(struct be_adapter *adapter,
746 bool *link_up); 818 bool *link_up, u8 *mac_speed, u16 *link_speed);
747extern int be_cmd_reset(struct be_adapter *adapter); 819extern int be_cmd_reset(struct be_adapter *adapter);
748extern int be_cmd_get_stats(struct be_adapter *adapter, 820extern int be_cmd_get_stats(struct be_adapter *adapter,
749 struct be_dma_mem *nonemb_cmd); 821 struct be_dma_mem *nonemb_cmd);
@@ -765,6 +837,12 @@ extern int be_cmd_query_fw_cfg(struct be_adapter *adapter,
765 u32 *port_num, u32 *cap); 837 u32 *port_num, u32 *cap);
766extern int be_cmd_reset_function(struct be_adapter *adapter); 838extern int be_cmd_reset_function(struct be_adapter *adapter);
767extern int be_process_mcc(struct be_adapter *adapter); 839extern int be_process_mcc(struct be_adapter *adapter);
840extern int be_cmd_set_beacon_state(struct be_adapter *adapter,
841 u8 port_num, u8 beacon, u8 status, u8 state);
842extern int be_cmd_get_beacon_state(struct be_adapter *adapter,
843 u8 port_num, u32 *state);
844extern int be_cmd_read_port_type(struct be_adapter *adapter, u32 port,
845 u8 *connector);
768extern int be_cmd_write_flashrom(struct be_adapter *adapter, 846extern int be_cmd_write_flashrom(struct be_adapter *adapter,
769 struct be_dma_mem *cmd, u32 flash_oper, 847 struct be_dma_mem *cmd, u32 flash_oper,
770 u32 flash_opcode, u32 buf_size); 848 u32 flash_opcode, u32 buf_size);
diff --git a/drivers/net/benet/be_ethtool.c b/drivers/net/benet/be_ethtool.c
index f0fd95b43c07..e8f92831021a 100644
--- a/drivers/net/benet/be_ethtool.c
+++ b/drivers/net/benet/be_ethtool.c
@@ -234,7 +234,7 @@ be_get_ethtool_stats(struct net_device *netdev,
234 struct be_rxf_stats *rxf_stats = &hw_stats->rxf; 234 struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
235 struct be_port_rxf_stats *port_stats = 235 struct be_port_rxf_stats *port_stats =
236 &rxf_stats->port[adapter->port_num]; 236 &rxf_stats->port[adapter->port_num];
237 struct net_device_stats *net_stats = &adapter->stats.net_stats; 237 struct net_device_stats *net_stats = &netdev->stats;
238 struct be_erx_stats *erx_stats = &hw_stats->erx; 238 struct be_erx_stats *erx_stats = &hw_stats->erx;
239 void *p = NULL; 239 void *p = NULL;
240 int i; 240 int i;
@@ -281,16 +281,55 @@ be_get_stat_strings(struct net_device *netdev, uint32_t stringset,
281 } 281 }
282} 282}
283 283
284static int be_get_stats_count(struct net_device *netdev) 284static int be_get_sset_count(struct net_device *netdev, int stringset)
285{ 285{
286 return ETHTOOL_STATS_NUM; 286 switch (stringset) {
287 case ETH_SS_STATS:
288 return ETHTOOL_STATS_NUM;
289 default:
290 return -EINVAL;
291 }
287} 292}
288 293
289static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 294static int be_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
290{ 295{
291 ecmd->speed = SPEED_10000; 296 struct be_adapter *adapter = netdev_priv(netdev);
297 u8 mac_speed = 0, connector = 0;
298 u16 link_speed = 0;
299 bool link_up = false;
300
301 be_cmd_link_status_query(adapter, &link_up, &mac_speed, &link_speed);
302
303 /* link_speed is in units of 10 Mbps */
304 if (link_speed) {
305 ecmd->speed = link_speed*10;
306 } else {
307 switch (mac_speed) {
308 case PHY_LINK_SPEED_1GBPS:
309 ecmd->speed = SPEED_1000;
310 break;
311 case PHY_LINK_SPEED_10GBPS:
312 ecmd->speed = SPEED_10000;
313 break;
314 }
315 }
292 ecmd->duplex = DUPLEX_FULL; 316 ecmd->duplex = DUPLEX_FULL;
293 ecmd->autoneg = AUTONEG_DISABLE; 317 ecmd->autoneg = AUTONEG_DISABLE;
318 ecmd->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_TP);
319
320 be_cmd_read_port_type(adapter, adapter->port_num, &connector);
321 switch (connector) {
322 case 7:
323 ecmd->port = PORT_FIBRE;
324 break;
325 default:
326 ecmd->port = PORT_TP;
327 break;
328 }
329
330 ecmd->phy_address = adapter->port_num;
331 ecmd->transceiver = XCVR_INTERNAL;
332
294 return 0; 333 return 0;
295} 334}
296 335
@@ -335,6 +374,35 @@ be_set_pauseparam(struct net_device *netdev, struct ethtool_pauseparam *ecmd)
335} 374}
336 375
337static int 376static int
377be_phys_id(struct net_device *netdev, u32 data)
378{
379 struct be_adapter *adapter = netdev_priv(netdev);
380 int status;
381 u32 cur;
382
383 if (!netif_running(netdev))
384 return 0;
385
386 be_cmd_get_beacon_state(adapter, adapter->port_num, &cur);
387
388 if (cur == BEACON_STATE_ENABLED)
389 return 0;
390
391 if (data < 2)
392 data = 2;
393
394 status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
395 BEACON_STATE_ENABLED);
396 set_current_state(TASK_INTERRUPTIBLE);
397 schedule_timeout(data*HZ);
398
399 status = be_cmd_set_beacon_state(adapter, adapter->port_num, 0, 0,
400 BEACON_STATE_DISABLED);
401
402 return status;
403}
404
405static int
338be_do_flash(struct net_device *netdev, struct ethtool_flash *efl) 406be_do_flash(struct net_device *netdev, struct ethtool_flash *efl)
339{ 407{
340 struct be_adapter *adapter = netdev_priv(netdev); 408 struct be_adapter *adapter = netdev_priv(netdev);
@@ -366,7 +434,8 @@ const struct ethtool_ops be_ethtool_ops = {
366 .get_tso = ethtool_op_get_tso, 434 .get_tso = ethtool_op_get_tso,
367 .set_tso = ethtool_op_set_tso, 435 .set_tso = ethtool_op_set_tso,
368 .get_strings = be_get_stat_strings, 436 .get_strings = be_get_stat_strings,
369 .get_stats_count = be_get_stats_count, 437 .phys_id = be_phys_id,
438 .get_sset_count = be_get_sset_count,
370 .get_ethtool_stats = be_get_ethtool_stats, 439 .get_ethtool_stats = be_get_ethtool_stats,
371 .flash_device = be_do_flash, 440 .flash_device = be_do_flash,
372}; 441};
diff --git a/drivers/net/benet/be_main.c b/drivers/net/benet/be_main.c
index 876b357101fa..c0bd20356eaf 100644
--- a/drivers/net/benet/be_main.c
+++ b/drivers/net/benet/be_main.c
@@ -31,8 +31,10 @@ MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
31 31
32static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = { 32static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
33 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) }, 33 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
34 { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
34 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) }, 35 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
35 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) }, 36 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
37 { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID3) },
36 { 0 } 38 { 0 }
37}; 39};
38MODULE_DEVICE_TABLE(pci, be_dev_ids); 40MODULE_DEVICE_TABLE(pci, be_dev_ids);
@@ -141,7 +143,7 @@ void netdev_stats_update(struct be_adapter *adapter)
141 struct be_rxf_stats *rxf_stats = &hw_stats->rxf; 143 struct be_rxf_stats *rxf_stats = &hw_stats->rxf;
142 struct be_port_rxf_stats *port_stats = 144 struct be_port_rxf_stats *port_stats =
143 &rxf_stats->port[adapter->port_num]; 145 &rxf_stats->port[adapter->port_num];
144 struct net_device_stats *dev_stats = &adapter->stats.net_stats; 146 struct net_device_stats *dev_stats = &adapter->netdev->stats;
145 struct be_erx_stats *erx_stats = &hw_stats->erx; 147 struct be_erx_stats *erx_stats = &hw_stats->erx;
146 148
147 dev_stats->rx_packets = port_stats->rx_total_frames; 149 dev_stats->rx_packets = port_stats->rx_total_frames;
@@ -269,9 +271,7 @@ static void be_rx_eqd_update(struct be_adapter *adapter)
269 271
270static struct net_device_stats *be_get_stats(struct net_device *dev) 272static struct net_device_stats *be_get_stats(struct net_device *dev)
271{ 273{
272 struct be_adapter *adapter = netdev_priv(dev); 274 return &dev->stats;
273
274 return &adapter->stats.net_stats;
275} 275}
276 276
277static u32 be_calc_rate(u64 bytes, unsigned long ticks) 277static u32 be_calc_rate(u64 bytes, unsigned long ticks)
@@ -758,7 +758,7 @@ static void be_rx_compl_process(struct be_adapter *adapter,
758 if ((adapter->cap == 0x400) && !vtm) 758 if ((adapter->cap == 0x400) && !vtm)
759 vlanf = 0; 759 vlanf = 0;
760 760
761 skb = netdev_alloc_skb(adapter->netdev, BE_HDR_LEN + NET_IP_ALIGN); 761 skb = netdev_alloc_skb_ip_align(adapter->netdev, BE_HDR_LEN);
762 if (!skb) { 762 if (!skb) {
763 if (net_ratelimit()) 763 if (net_ratelimit())
764 dev_warn(&adapter->pdev->dev, "skb alloc failed\n"); 764 dev_warn(&adapter->pdev->dev, "skb alloc failed\n");
@@ -766,8 +766,6 @@ static void be_rx_compl_process(struct be_adapter *adapter,
766 return; 766 return;
767 } 767 }
768 768
769 skb_reserve(skb, NET_IP_ALIGN);
770
771 skb_fill_rx_data(adapter, skb, rxcp); 769 skb_fill_rx_data(adapter, skb, rxcp);
772 770
773 if (do_pkt_csum(rxcp, adapter->rx_csum)) 771 if (do_pkt_csum(rxcp, adapter->rx_csum))
@@ -1590,6 +1588,8 @@ static int be_open(struct net_device *netdev)
1590 struct be_eq_obj *tx_eq = &adapter->tx_eq; 1588 struct be_eq_obj *tx_eq = &adapter->tx_eq;
1591 bool link_up; 1589 bool link_up;
1592 int status; 1590 int status;
1591 u8 mac_speed;
1592 u16 link_speed;
1593 1593
1594 /* First time posting */ 1594 /* First time posting */
1595 be_post_rx_frags(adapter); 1595 be_post_rx_frags(adapter);
@@ -1608,7 +1608,8 @@ static int be_open(struct net_device *netdev)
1608 /* Rx compl queue may be in unarmed state; rearm it */ 1608 /* Rx compl queue may be in unarmed state; rearm it */
1609 be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0); 1609 be_cq_notify(adapter, adapter->rx_obj.cq.id, true, 0);
1610 1610
1611 status = be_cmd_link_status_query(adapter, &link_up); 1611 status = be_cmd_link_status_query(adapter, &link_up, &mac_speed,
1612 &link_speed);
1612 if (status) 1613 if (status)
1613 goto ret_sts; 1614 goto ret_sts;
1614 be_link_status_update(adapter, link_up); 1615 be_link_status_update(adapter, link_up);
diff --git a/drivers/net/bnx2.c b/drivers/net/bnx2.c
index 08cddb6ff740..539d23b594ce 100644
--- a/drivers/net/bnx2.c
+++ b/drivers/net/bnx2.c
@@ -1466,6 +1466,8 @@ bnx2_enable_forced_2g5(struct bnx2 *bp)
1466 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { 1466 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1467 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); 1467 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1468 bmcr |= BCM5708S_BMCR_FORCE_2500; 1468 bmcr |= BCM5708S_BMCR_FORCE_2500;
1469 } else {
1470 return;
1469 } 1471 }
1470 1472
1471 if (bp->autoneg & AUTONEG_SPEED) { 1473 if (bp->autoneg & AUTONEG_SPEED) {
@@ -1500,6 +1502,8 @@ bnx2_disable_forced_2g5(struct bnx2 *bp)
1500 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) { 1502 } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1501 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr); 1503 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1502 bmcr &= ~BCM5708S_BMCR_FORCE_2500; 1504 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1505 } else {
1506 return;
1503 } 1507 }
1504 1508
1505 if (bp->autoneg & AUTONEG_SPEED) 1509 if (bp->autoneg & AUTONEG_SPEED)
diff --git a/drivers/net/bnx2x.h b/drivers/net/bnx2x.h
index bbf842284ebb..928942b74ce6 100644
--- a/drivers/net/bnx2x.h
+++ b/drivers/net/bnx2x.h
@@ -24,6 +24,10 @@
24#define BCM_VLAN 1 24#define BCM_VLAN 1
25#endif 25#endif
26 26
27#if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
28#define BCM_CNIC 1
29#include "cnic_if.h"
30#endif
27 31
28#define BNX2X_MULTI_QUEUE 32#define BNX2X_MULTI_QUEUE
29 33
@@ -762,7 +766,11 @@ struct bnx2x_eth_stats {
762 (offsetof(struct bnx2x_eth_stats, stat_name) / 4) 766 (offsetof(struct bnx2x_eth_stats, stat_name) / 4)
763 767
764 768
769#ifdef BCM_CNIC
770#define MAX_CONTEXT 15
771#else
765#define MAX_CONTEXT 16 772#define MAX_CONTEXT 16
773#endif
766 774
767union cdu_context { 775union cdu_context {
768 struct eth_context eth; 776 struct eth_context eth;
@@ -811,13 +819,21 @@ struct bnx2x {
811 struct bnx2x_fastpath fp[MAX_CONTEXT]; 819 struct bnx2x_fastpath fp[MAX_CONTEXT];
812 void __iomem *regview; 820 void __iomem *regview;
813 void __iomem *doorbells; 821 void __iomem *doorbells;
822#ifdef BCM_CNIC
823#define BNX2X_DB_SIZE (18*BCM_PAGE_SIZE)
824#else
814#define BNX2X_DB_SIZE (16*BCM_PAGE_SIZE) 825#define BNX2X_DB_SIZE (16*BCM_PAGE_SIZE)
826#endif
815 827
816 struct net_device *dev; 828 struct net_device *dev;
817 struct pci_dev *pdev; 829 struct pci_dev *pdev;
818 830
819 atomic_t intr_sem; 831 atomic_t intr_sem;
832#ifdef BCM_CNIC
833 struct msix_entry msix_table[MAX_CONTEXT+2];
834#else
820 struct msix_entry msix_table[MAX_CONTEXT+1]; 835 struct msix_entry msix_table[MAX_CONTEXT+1];
836#endif
821#define INT_MODE_INTx 1 837#define INT_MODE_INTx 1
822#define INT_MODE_MSI 2 838#define INT_MODE_MSI 2
823#define INT_MODE_MSIX 3 839#define INT_MODE_MSIX 3
@@ -863,8 +879,8 @@ struct bnx2x {
863 879
864 /* Flags for marking that there is a STAT_QUERY or 880 /* Flags for marking that there is a STAT_QUERY or
865 SET_MAC ramrod pending */ 881 SET_MAC ramrod pending */
866 u8 stats_pending; 882 int stats_pending;
867 u8 set_mac_pending; 883 int set_mac_pending;
868 884
869 /* End of fields used in the performance code paths */ 885 /* End of fields used in the performance code paths */
870 886
@@ -884,6 +900,7 @@ struct bnx2x {
884#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG) 900#define BP_NOMCP(bp) (bp->flags & NO_MCP_FLAG)
885#define HW_VLAN_TX_FLAG 0x400 901#define HW_VLAN_TX_FLAG 0x400
886#define HW_VLAN_RX_FLAG 0x800 902#define HW_VLAN_RX_FLAG 0x800
903#define MF_FUNC_DIS 0x1000
887 904
888 int func; 905 int func;
889#define BP_PORT(bp) (bp->func % PORT_MAX) 906#define BP_PORT(bp) (bp->func % PORT_MAX)
@@ -891,6 +908,11 @@ struct bnx2x {
891#define BP_E1HVN(bp) (bp->func >> 1) 908#define BP_E1HVN(bp) (bp->func >> 1)
892#define BP_L_ID(bp) (BP_E1HVN(bp) << 2) 909#define BP_L_ID(bp) (BP_E1HVN(bp) << 2)
893 910
911#ifdef BCM_CNIC
912#define BCM_CNIC_CID_START 16
913#define BCM_ISCSI_ETH_CL_ID 17
914#endif
915
894 int pm_cap; 916 int pm_cap;
895 int pcie_cap; 917 int pcie_cap;
896 int mrrs; 918 int mrrs;
@@ -944,7 +966,6 @@ struct bnx2x {
944#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000 966#define BNX2X_STATE_CLOSING_WAIT4_HALT 0x4000
945#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000 967#define BNX2X_STATE_CLOSING_WAIT4_DELETE 0x5000
946#define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000 968#define BNX2X_STATE_CLOSING_WAIT4_UNLOAD 0x6000
947#define BNX2X_STATE_DISABLED 0xd000
948#define BNX2X_STATE_DIAG 0xe000 969#define BNX2X_STATE_DIAG 0xe000
949#define BNX2X_STATE_ERROR 0xf000 970#define BNX2X_STATE_ERROR 0xf000
950 971
@@ -960,28 +981,51 @@ struct bnx2x {
960#define BNX2X_MAX_MULTICAST 64 981#define BNX2X_MAX_MULTICAST 64
961#define BNX2X_MAX_EMUL_MULTI 16 982#define BNX2X_MAX_EMUL_MULTI 16
962 983
984 u32 rx_mode_cl_mask;
985
963 dma_addr_t def_status_blk_mapping; 986 dma_addr_t def_status_blk_mapping;
964 987
965 struct bnx2x_slowpath *slowpath; 988 struct bnx2x_slowpath *slowpath;
966 dma_addr_t slowpath_mapping; 989 dma_addr_t slowpath_mapping;
967 990
968#ifdef BCM_ISCSI
969 void *t1;
970 dma_addr_t t1_mapping;
971 void *t2;
972 dma_addr_t t2_mapping;
973 void *timers;
974 dma_addr_t timers_mapping;
975 void *qm;
976 dma_addr_t qm_mapping;
977#endif
978
979 int dropless_fc; 991 int dropless_fc;
980 992
993#ifdef BCM_CNIC
994 u32 cnic_flags;
995#define BNX2X_CNIC_FLAG_MAC_SET 1
996
997 void *t1;
998 dma_addr_t t1_mapping;
999 void *t2;
1000 dma_addr_t t2_mapping;
1001 void *timers;
1002 dma_addr_t timers_mapping;
1003 void *qm;
1004 dma_addr_t qm_mapping;
1005 struct cnic_ops *cnic_ops;
1006 void *cnic_data;
1007 u32 cnic_tag;
1008 struct cnic_eth_dev cnic_eth_dev;
1009 struct host_status_block *cnic_sb;
1010 dma_addr_t cnic_sb_mapping;
1011#define CNIC_SB_ID(bp) BP_L_ID(bp)
1012 struct eth_spe *cnic_kwq;
1013 struct eth_spe *cnic_kwq_prod;
1014 struct eth_spe *cnic_kwq_cons;
1015 struct eth_spe *cnic_kwq_last;
1016 u16 cnic_kwq_pending;
1017 u16 cnic_spq_pending;
1018 struct mutex cnic_mutex;
1019 u8 iscsi_mac[6];
1020#endif
1021
981 int dmae_ready; 1022 int dmae_ready;
982 /* used to synchronize dmae accesses */ 1023 /* used to synchronize dmae accesses */
983 struct mutex dmae_mutex; 1024 struct mutex dmae_mutex;
984 1025
1026 /* used to protect the FW mail box */
1027 struct mutex fw_mb_mutex;
1028
985 /* used to synchronize stats collecting */ 1029 /* used to synchronize stats collecting */
986 int stats_state; 1030 int stats_state;
987 /* used by dmae command loader */ 1031 /* used by dmae command loader */
@@ -1147,7 +1191,7 @@ static inline u32 reg_poll(struct bnx2x *bp, u32 reg, u32 expected, int ms,
1147#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1) 1191#define MAX_SP_DESC_CNT (SP_DESC_CNT - 1)
1148 1192
1149 1193
1150#define BNX2X_BTR 3 1194#define BNX2X_BTR 1
1151#define MAX_SPQ_PENDING 8 1195#define MAX_SPQ_PENDING 8
1152 1196
1153 1197
diff --git a/drivers/net/bnx2x_hsi.h b/drivers/net/bnx2x_hsi.h
index 8e2261fad485..52585338ada8 100644
--- a/drivers/net/bnx2x_hsi.h
+++ b/drivers/net/bnx2x_hsi.h
@@ -7,6 +7,20 @@
7 * the Free Software Foundation. 7 * the Free Software Foundation.
8 */ 8 */
9 9
10struct license_key {
11 u32 reserved[6];
12
13#if defined(__BIG_ENDIAN)
14 u16 max_iscsi_init_conn;
15 u16 max_iscsi_trgt_conn;
16#elif defined(__LITTLE_ENDIAN)
17 u16 max_iscsi_trgt_conn;
18 u16 max_iscsi_init_conn;
19#endif
20
21 u32 reserved_a[6];
22};
23
10 24
11#define PORT_0 0 25#define PORT_0 0
12#define PORT_1 1 26#define PORT_1 1
@@ -250,6 +264,7 @@ struct port_hw_cfg { /* port 0: 0x12c port 1: 0x2bc */
250#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800 264#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101 0x00000800
251#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900 265#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727 0x00000900
252#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00 266#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC 0x00000a00
267#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823 0x00000b00
253#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00 268#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE 0x0000fd00
254#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00 269#define PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN 0x0000ff00
255 270
@@ -881,7 +896,7 @@ struct shmem_region { /* SharedMem Offset (size) */
881 896
882 struct shm_dev_info dev_info; /* 0x8 (0x438) */ 897 struct shm_dev_info dev_info; /* 0x8 (0x438) */
883 898
884 u8 reserved[52*PORT_MAX]; 899 struct license_key drv_lic_key[PORT_MAX]; /* 0x440 (52*2=0x68) */
885 900
886 /* FW information (for internal FW use) */ 901 /* FW information (for internal FW use) */
887 u32 fw_info_fio_offset; /* 0x4a8 (0x4) */ 902 u32 fw_info_fio_offset; /* 0x4a8 (0x4) */
@@ -1245,8 +1260,8 @@ struct host_func_stats {
1245 1260
1246 1261
1247#define BCM_5710_FW_MAJOR_VERSION 5 1262#define BCM_5710_FW_MAJOR_VERSION 5
1248#define BCM_5710_FW_MINOR_VERSION 0 1263#define BCM_5710_FW_MINOR_VERSION 2
1249#define BCM_5710_FW_REVISION_VERSION 21 1264#define BCM_5710_FW_REVISION_VERSION 7
1250#define BCM_5710_FW_ENGINEERING_VERSION 0 1265#define BCM_5710_FW_ENGINEERING_VERSION 0
1251#define BCM_5710_FW_COMPILE_FLAGS 1 1266#define BCM_5710_FW_COMPILE_FLAGS 1
1252 1267
diff --git a/drivers/net/bnx2x_link.c b/drivers/net/bnx2x_link.c
index e32d3370862e..41b9b7bd3d8e 100644
--- a/drivers/net/bnx2x_link.c
+++ b/drivers/net/bnx2x_link.c
@@ -1107,18 +1107,21 @@ static void bnx2x_set_parallel_detection(struct link_params *params,
1107 MDIO_REG_BANK_SERDES_DIGITAL, 1107 MDIO_REG_BANK_SERDES_DIGITAL,
1108 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, 1108 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1109 &control2); 1109 &control2);
1110 1110 if (params->speed_cap_mask & PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
1111 1111 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1112 control2 |= MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN; 1112 else
1113 1113 control2 &= ~MDIO_SERDES_DIGITAL_A_1000X_CONTROL2_PRL_DT_EN;
1114 1114 DP(NETIF_MSG_LINK, "params->speed_cap_mask = 0x%x, control2 = 0x%x\n",
1115 params->speed_cap_mask, control2);
1115 CL45_WR_OVER_CL22(bp, params->port, 1116 CL45_WR_OVER_CL22(bp, params->port,
1116 params->phy_addr, 1117 params->phy_addr,
1117 MDIO_REG_BANK_SERDES_DIGITAL, 1118 MDIO_REG_BANK_SERDES_DIGITAL,
1118 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2, 1119 MDIO_SERDES_DIGITAL_A_1000X_CONTROL2,
1119 control2); 1120 control2);
1120 1121
1121 if (phy_flags & PHY_XGXS_FLAG) { 1122 if ((phy_flags & PHY_XGXS_FLAG) &&
1123 (params->speed_cap_mask &
1124 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)) {
1122 DP(NETIF_MSG_LINK, "XGXS\n"); 1125 DP(NETIF_MSG_LINK, "XGXS\n");
1123 1126
1124 CL45_WR_OVER_CL22(bp, params->port, 1127 CL45_WR_OVER_CL22(bp, params->port,
@@ -1225,7 +1228,7 @@ static void bnx2x_set_autoneg(struct link_params *params,
1225 params->phy_addr, 1228 params->phy_addr,
1226 MDIO_REG_BANK_CL73_USERB0, 1229 MDIO_REG_BANK_CL73_USERB0,
1227 MDIO_CL73_USERB0_CL73_UCTRL, 1230 MDIO_CL73_USERB0_CL73_UCTRL,
1228 MDIO_CL73_USERB0_CL73_UCTRL_USTAT1_MUXSEL); 1231 0xe);
1229 1232
1230 /* Enable BAM Station Manager*/ 1233 /* Enable BAM Station Manager*/
1231 CL45_WR_OVER_CL22(bp, params->port, 1234 CL45_WR_OVER_CL22(bp, params->port,
@@ -1236,29 +1239,25 @@ static void bnx2x_set_autoneg(struct link_params *params,
1236 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN | 1239 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_STATION_MNGR_EN |
1237 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN); 1240 MDIO_CL73_USERB0_CL73_BAM_CTRL1_BAM_NP_AFTER_BP_EN);
1238 1241
1239 /* Merge CL73 and CL37 aneg resolution */ 1242 /* Advertise CL73 link speeds */
1240 CL45_RD_OVER_CL22(bp, params->port,
1241 params->phy_addr,
1242 MDIO_REG_BANK_CL73_USERB0,
1243 MDIO_CL73_USERB0_CL73_BAM_CTRL3,
1244 &reg_val);
1245
1246 if (params->speed_cap_mask &
1247 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
1248 /* Set the CL73 AN speed */
1249 CL45_RD_OVER_CL22(bp, params->port, 1243 CL45_RD_OVER_CL22(bp, params->port,
1250 params->phy_addr, 1244 params->phy_addr,
1251 MDIO_REG_BANK_CL73_IEEEB1, 1245 MDIO_REG_BANK_CL73_IEEEB1,
1252 MDIO_CL73_IEEEB1_AN_ADV2, 1246 MDIO_CL73_IEEEB1_AN_ADV2,
1253 &reg_val); 1247 &reg_val);
1248 if (params->speed_cap_mask &
1249 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G)
1250 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4;
1251 if (params->speed_cap_mask &
1252 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G)
1253 reg_val |= MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX;
1254 1254
1255 CL45_WR_OVER_CL22(bp, params->port, 1255 CL45_WR_OVER_CL22(bp, params->port,
1256 params->phy_addr, 1256 params->phy_addr,
1257 MDIO_REG_BANK_CL73_IEEEB1, 1257 MDIO_REG_BANK_CL73_IEEEB1,
1258 MDIO_CL73_IEEEB1_AN_ADV2, 1258 MDIO_CL73_IEEEB1_AN_ADV2,
1259 reg_val | MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4); 1259 reg_val);
1260 1260
1261 }
1262 /* CL73 Autoneg Enabled */ 1261 /* CL73 Autoneg Enabled */
1263 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN; 1262 reg_val = MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN;
1264 1263
@@ -1351,6 +1350,7 @@ static void bnx2x_set_brcm_cl37_advertisment(struct link_params *params)
1351 1350
1352static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc) 1351static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc)
1353{ 1352{
1353 struct bnx2x *bp = params->bp;
1354 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX; 1354 *ieee_fc = MDIO_COMBO_IEEE0_AUTO_NEG_ADV_FULL_DUPLEX;
1355 /* resolve pause mode and advertisement 1355 /* resolve pause mode and advertisement
1356 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */ 1356 * Please refer to Table 28B-3 of the 802.3ab-1999 spec */
@@ -1380,18 +1380,30 @@ static void bnx2x_calc_ieee_aneg_adv(struct link_params *params, u16 *ieee_fc)
1380 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE; 1380 *ieee_fc |= MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE;
1381 break; 1381 break;
1382 } 1382 }
1383 DP(NETIF_MSG_LINK, "ieee_fc = 0x%x\n", *ieee_fc);
1383} 1384}
1384 1385
1385static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params, 1386static void bnx2x_set_ieee_aneg_advertisment(struct link_params *params,
1386 u16 ieee_fc) 1387 u16 ieee_fc)
1387{ 1388{
1388 struct bnx2x *bp = params->bp; 1389 struct bnx2x *bp = params->bp;
1390 u16 val;
1389 /* for AN, we are always publishing full duplex */ 1391 /* for AN, we are always publishing full duplex */
1390 1392
1391 CL45_WR_OVER_CL22(bp, params->port, 1393 CL45_WR_OVER_CL22(bp, params->port,
1392 params->phy_addr, 1394 params->phy_addr,
1393 MDIO_REG_BANK_COMBO_IEEE0, 1395 MDIO_REG_BANK_COMBO_IEEE0,
1394 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc); 1396 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, ieee_fc);
1397 CL45_RD_OVER_CL22(bp, params->port,
1398 params->phy_addr,
1399 MDIO_REG_BANK_CL73_IEEEB1,
1400 MDIO_CL73_IEEEB1_AN_ADV1, &val);
1401 val &= ~MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH;
1402 val |= ((ieee_fc<<3) & MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK);
1403 CL45_WR_OVER_CL22(bp, params->port,
1404 params->phy_addr,
1405 MDIO_REG_BANK_CL73_IEEEB1,
1406 MDIO_CL73_IEEEB1_AN_ADV1, val);
1395} 1407}
1396 1408
1397static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73) 1409static void bnx2x_restart_autoneg(struct link_params *params, u8 enable_cl73)
@@ -1609,6 +1621,39 @@ static u8 bnx2x_ext_phy_resolve_fc(struct link_params *params,
1609 return ret; 1621 return ret;
1610} 1622}
1611 1623
1624static u8 bnx2x_direct_parallel_detect_used(struct link_params *params)
1625{
1626 struct bnx2x *bp = params->bp;
1627 u16 pd_10g, status2_1000x;
1628 CL45_RD_OVER_CL22(bp, params->port,
1629 params->phy_addr,
1630 MDIO_REG_BANK_SERDES_DIGITAL,
1631 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
1632 &status2_1000x);
1633 CL45_RD_OVER_CL22(bp, params->port,
1634 params->phy_addr,
1635 MDIO_REG_BANK_SERDES_DIGITAL,
1636 MDIO_SERDES_DIGITAL_A_1000X_STATUS2,
1637 &status2_1000x);
1638 if (status2_1000x & MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED) {
1639 DP(NETIF_MSG_LINK, "1G parallel detect link on port %d\n",
1640 params->port);
1641 return 1;
1642 }
1643
1644 CL45_RD_OVER_CL22(bp, params->port,
1645 params->phy_addr,
1646 MDIO_REG_BANK_10G_PARALLEL_DETECT,
1647 MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS,
1648 &pd_10g);
1649
1650 if (pd_10g & MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK) {
1651 DP(NETIF_MSG_LINK, "10G parallel detect link on port %d\n",
1652 params->port);
1653 return 1;
1654 }
1655 return 0;
1656}
1612 1657
1613static void bnx2x_flow_ctrl_resolve(struct link_params *params, 1658static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1614 struct link_vars *vars, 1659 struct link_vars *vars,
@@ -1627,21 +1672,53 @@ static void bnx2x_flow_ctrl_resolve(struct link_params *params,
1627 (!(vars->phy_flags & PHY_SGMII_FLAG)) && 1672 (!(vars->phy_flags & PHY_SGMII_FLAG)) &&
1628 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) == 1673 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1629 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) { 1674 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)) {
1630 CL45_RD_OVER_CL22(bp, params->port, 1675 if (bnx2x_direct_parallel_detect_used(params)) {
1631 params->phy_addr, 1676 vars->flow_ctrl = params->req_fc_auto_adv;
1632 MDIO_REG_BANK_COMBO_IEEE0, 1677 return;
1633 MDIO_COMBO_IEEE0_AUTO_NEG_ADV, 1678 }
1634 &ld_pause); 1679 if ((gp_status &
1635 CL45_RD_OVER_CL22(bp, params->port, 1680 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
1636 params->phy_addr, 1681 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) ==
1637 MDIO_REG_BANK_COMBO_IEEE0, 1682 (MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_AUTONEG_COMPLETE |
1638 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1, 1683 MDIO_GP_STATUS_TOP_AN_STATUS1_CL73_MR_LP_NP_AN_ABLE)) {
1639 &lp_pause); 1684
1640 pause_result = (ld_pause & 1685 CL45_RD_OVER_CL22(bp, params->port,
1686 params->phy_addr,
1687 MDIO_REG_BANK_CL73_IEEEB1,
1688 MDIO_CL73_IEEEB1_AN_ADV1,
1689 &ld_pause);
1690 CL45_RD_OVER_CL22(bp, params->port,
1691 params->phy_addr,
1692 MDIO_REG_BANK_CL73_IEEEB1,
1693 MDIO_CL73_IEEEB1_AN_LP_ADV1,
1694 &lp_pause);
1695 pause_result = (ld_pause &
1696 MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK)
1697 >> 8;
1698 pause_result |= (lp_pause &
1699 MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK)
1700 >> 10;
1701 DP(NETIF_MSG_LINK, "pause_result CL73 0x%x\n",
1702 pause_result);
1703 } else {
1704
1705 CL45_RD_OVER_CL22(bp, params->port,
1706 params->phy_addr,
1707 MDIO_REG_BANK_COMBO_IEEE0,
1708 MDIO_COMBO_IEEE0_AUTO_NEG_ADV,
1709 &ld_pause);
1710 CL45_RD_OVER_CL22(bp, params->port,
1711 params->phy_addr,
1712 MDIO_REG_BANK_COMBO_IEEE0,
1713 MDIO_COMBO_IEEE0_AUTO_NEG_LINK_PARTNER_ABILITY1,
1714 &lp_pause);
1715 pause_result = (ld_pause &
1641 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5; 1716 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>5;
1642 pause_result |= (lp_pause & 1717 pause_result |= (lp_pause &
1643 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7; 1718 MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK)>>7;
1644 DP(NETIF_MSG_LINK, "pause_result 0x%x\n", pause_result); 1719 DP(NETIF_MSG_LINK, "pause_result CL37 0x%x\n",
1720 pause_result);
1721 }
1645 bnx2x_pause_resolve(vars, pause_result); 1722 bnx2x_pause_resolve(vars, pause_result);
1646 } else if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) && 1723 } else if ((params->req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
1647 (bnx2x_ext_phy_resolve_fc(params, vars))) { 1724 (bnx2x_ext_phy_resolve_fc(params, vars))) {
@@ -1853,6 +1930,8 @@ static u8 bnx2x_link_settings_status(struct link_params *params,
1853 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) == 1930 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1854 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) || 1931 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
1855 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) == 1932 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1933 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) ||
1934 (XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
1856 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726))) { 1935 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726))) {
1857 vars->autoneg = AUTO_NEG_ENABLED; 1936 vars->autoneg = AUTO_NEG_ENABLED;
1858 1937
@@ -1987,8 +2066,7 @@ static u8 bnx2x_emac_program(struct link_params *params,
1987 GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE, 2066 GRCBASE_EMAC0 + port*0x400 + EMAC_REG_EMAC_MODE,
1988 mode); 2067 mode);
1989 2068
1990 bnx2x_set_led(bp, params->port, LED_MODE_OPER, 2069 bnx2x_set_led(params, LED_MODE_OPER, line_speed);
1991 line_speed, params->hw_led_mode, params->chip_id);
1992 return 0; 2070 return 0;
1993} 2071}
1994 2072
@@ -2122,6 +2200,8 @@ static void bnx2x_ext_phy_reset(struct link_params *params,
2122 MDIO_PMA_REG_CTRL, 2200 MDIO_PMA_REG_CTRL,
2123 1<<15); 2201 1<<15);
2124 break; 2202 break;
2203 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
2204 break;
2125 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE: 2205 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
2126 DP(NETIF_MSG_LINK, "XGXS PHY Failure detected\n"); 2206 DP(NETIF_MSG_LINK, "XGXS PHY Failure detected\n");
2127 break; 2207 break;
@@ -2512,16 +2592,11 @@ static void bnx2x_bcm8726_external_rom_boot(struct link_params *params)
2512 /* Need to wait 100ms after reset */ 2592 /* Need to wait 100ms after reset */
2513 msleep(100); 2593 msleep(100);
2514 2594
2515 /* Set serial boot control for external load */
2516 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2517 MDIO_PMA_DEVAD,
2518 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
2519
2520 /* Micro controller re-boot */ 2595 /* Micro controller re-boot */
2521 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2596 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2522 MDIO_PMA_DEVAD, 2597 MDIO_PMA_DEVAD,
2523 MDIO_PMA_REG_GEN_CTRL, 2598 MDIO_PMA_REG_GEN_CTRL,
2524 MDIO_PMA_REG_GEN_CTRL_ROM_RESET_INTERNAL_MP); 2599 0x018B);
2525 2600
2526 /* Set soft reset */ 2601 /* Set soft reset */
2527 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2602 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
@@ -2529,14 +2604,10 @@ static void bnx2x_bcm8726_external_rom_boot(struct link_params *params)
2529 MDIO_PMA_REG_GEN_CTRL, 2604 MDIO_PMA_REG_GEN_CTRL,
2530 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET); 2605 MDIO_PMA_REG_GEN_CTRL_ROM_MICRO_RESET);
2531 2606
2532 /* Set PLL register value to be same like in P13 ver */
2533 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2607 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2534 MDIO_PMA_DEVAD, 2608 MDIO_PMA_DEVAD,
2535 MDIO_PMA_REG_PLL_CTRL, 2609 MDIO_PMA_REG_MISC_CTRL1, 0x0001);
2536 0x73A0);
2537 2610
2538 /* Clear soft reset.
2539 Will automatically reset micro-controller re-boot */
2540 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr, 2611 bnx2x_cl45_write(bp, port, ext_phy_type, ext_phy_addr,
2541 MDIO_PMA_DEVAD, 2612 MDIO_PMA_DEVAD,
2542 MDIO_PMA_REG_GEN_CTRL, 2613 MDIO_PMA_REG_GEN_CTRL,
@@ -3462,8 +3533,8 @@ static void bnx2x_8481_set_10G_led_mode(struct link_params *params,
3462 MDIO_PMA_REG_8481_LINK_SIGNAL, 3533 MDIO_PMA_REG_8481_LINK_SIGNAL,
3463 &val1); 3534 &val1);
3464 /* Set bit 2 to 0, and bits [1:0] to 10 */ 3535 /* Set bit 2 to 0, and bits [1:0] to 10 */
3465 val1 &= ~((1<<0) | (1<<2)); /* Clear bits 0,2*/ 3536 val1 &= ~((1<<0) | (1<<2) | (1<<7)); /* Clear bits 0,2,7*/
3466 val1 |= (1<<1); /* Set bit 1 */ 3537 val1 |= ((1<<1) | (1<<6)); /* Set bit 1, 6 */
3467 3538
3468 bnx2x_cl45_write(bp, params->port, 3539 bnx2x_cl45_write(bp, params->port,
3469 ext_phy_type, 3540 ext_phy_type,
@@ -3497,36 +3568,19 @@ static void bnx2x_8481_set_10G_led_mode(struct link_params *params,
3497 MDIO_PMA_REG_8481_LED2_MASK, 3568 MDIO_PMA_REG_8481_LED2_MASK,
3498 0); 3569 0);
3499 3570
3500 /* LED3 (10G/1G/100/10G Activity) */ 3571 /* Unmask LED3 for 10G link */
3501 bnx2x_cl45_read(bp, params->port,
3502 ext_phy_type,
3503 ext_phy_addr,
3504 MDIO_PMA_DEVAD,
3505 MDIO_PMA_REG_8481_LINK_SIGNAL,
3506 &val1);
3507 /* Enable blink based on source 4(Activity) */
3508 val1 &= ~((1<<7) | (1<<8)); /* Clear bits 7,8 */
3509 val1 |= (1<<6); /* Set only bit 6 */
3510 bnx2x_cl45_write(bp, params->port, 3572 bnx2x_cl45_write(bp, params->port,
3511 ext_phy_type, 3573 ext_phy_type,
3512 ext_phy_addr, 3574 ext_phy_addr,
3513 MDIO_PMA_DEVAD, 3575 MDIO_PMA_DEVAD,
3514 MDIO_PMA_REG_8481_LINK_SIGNAL,
3515 val1);
3516
3517 bnx2x_cl45_read(bp, params->port,
3518 ext_phy_type,
3519 ext_phy_addr,
3520 MDIO_PMA_DEVAD,
3521 MDIO_PMA_REG_8481_LED3_MASK, 3576 MDIO_PMA_REG_8481_LED3_MASK,
3522 &val1); 3577 0x6);
3523 val1 |= (1<<4); /* Unmask LED3 for 10G link */
3524 bnx2x_cl45_write(bp, params->port, 3578 bnx2x_cl45_write(bp, params->port,
3525 ext_phy_type, 3579 ext_phy_type,
3526 ext_phy_addr, 3580 ext_phy_addr,
3527 MDIO_PMA_DEVAD, 3581 MDIO_PMA_DEVAD,
3528 MDIO_PMA_REG_8481_LED3_MASK, 3582 MDIO_PMA_REG_8481_LED3_BLINK,
3529 val1); 3583 0);
3530} 3584}
3531 3585
3532 3586
@@ -3544,7 +3598,10 @@ static void bnx2x_init_internal_phy(struct link_params *params,
3544 bnx2x_set_preemphasis(params); 3598 bnx2x_set_preemphasis(params);
3545 3599
3546 /* forced speed requested? */ 3600 /* forced speed requested? */
3547 if (vars->line_speed != SPEED_AUTO_NEG) { 3601 if (vars->line_speed != SPEED_AUTO_NEG ||
3602 ((XGXS_EXT_PHY_TYPE(params->ext_phy_config) ==
3603 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) &&
3604 params->loopback_mode == LOOPBACK_EXT)) {
3548 DP(NETIF_MSG_LINK, "not SGMII, no AN\n"); 3605 DP(NETIF_MSG_LINK, "not SGMII, no AN\n");
3549 3606
3550 /* disable autoneg */ 3607 /* disable autoneg */
@@ -3693,19 +3750,6 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
3693 } 3750 }
3694 } 3751 }
3695 /* Force speed */ 3752 /* Force speed */
3696 /* First enable LASI */
3697 bnx2x_cl45_write(bp, params->port,
3698 ext_phy_type,
3699 ext_phy_addr,
3700 MDIO_PMA_DEVAD,
3701 MDIO_PMA_REG_RX_ALARM_CTRL,
3702 0x0400);
3703 bnx2x_cl45_write(bp, params->port,
3704 ext_phy_type,
3705 ext_phy_addr,
3706 MDIO_PMA_DEVAD,
3707 MDIO_PMA_REG_LASI_CTRL, 0x0004);
3708
3709 if (params->req_line_speed == SPEED_10000) { 3753 if (params->req_line_speed == SPEED_10000) {
3710 DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n"); 3754 DP(NETIF_MSG_LINK, "XGXS 8706 force 10Gbps\n");
3711 3755
@@ -3715,6 +3759,9 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
3715 MDIO_PMA_DEVAD, 3759 MDIO_PMA_DEVAD,
3716 MDIO_PMA_REG_DIGITAL_CTRL, 3760 MDIO_PMA_REG_DIGITAL_CTRL,
3717 0x400); 3761 0x400);
3762 bnx2x_cl45_write(bp, params->port, ext_phy_type,
3763 ext_phy_addr, MDIO_PMA_DEVAD,
3764 MDIO_PMA_REG_LASI_CTRL, 1);
3718 } else { 3765 } else {
3719 /* Force 1Gbps using autoneg with 1G 3766 /* Force 1Gbps using autoneg with 1G
3720 advertisment */ 3767 advertisment */
@@ -3756,6 +3803,17 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
3756 MDIO_AN_DEVAD, 3803 MDIO_AN_DEVAD,
3757 MDIO_AN_REG_CTRL, 3804 MDIO_AN_REG_CTRL,
3758 0x1200); 3805 0x1200);
3806 bnx2x_cl45_write(bp, params->port,
3807 ext_phy_type,
3808 ext_phy_addr,
3809 MDIO_PMA_DEVAD,
3810 MDIO_PMA_REG_RX_ALARM_CTRL,
3811 0x0400);
3812 bnx2x_cl45_write(bp, params->port,
3813 ext_phy_type,
3814 ext_phy_addr,
3815 MDIO_PMA_DEVAD,
3816 MDIO_PMA_REG_LASI_CTRL, 0x0004);
3759 3817
3760 } 3818 }
3761 bnx2x_save_bcm_spirom_ver(bp, params->port, 3819 bnx2x_save_bcm_spirom_ver(bp, params->port,
@@ -4291,6 +4349,7 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
4291 break; 4349 break;
4292 } 4350 }
4293 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481: 4351 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
4352 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
4294 /* This phy uses the NIG latch mechanism since link 4353 /* This phy uses the NIG latch mechanism since link
4295 indication arrives through its LED4 and not via 4354 indication arrives through its LED4 and not via
4296 its LASI signal, so we get steady signal 4355 its LASI signal, so we get steady signal
@@ -4298,6 +4357,12 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
4298 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4, 4357 bnx2x_bits_en(bp, NIG_REG_LATCH_BC_0 + params->port*4,
4299 1 << NIG_LATCH_BC_ENABLE_MI_INT); 4358 1 << NIG_LATCH_BC_ENABLE_MI_INT);
4300 4359
4360 bnx2x_cl45_write(bp, params->port,
4361 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
4362 ext_phy_addr,
4363 MDIO_PMA_DEVAD,
4364 MDIO_PMA_REG_CTRL, 0x0000);
4365
4301 bnx2x_8481_set_led4(params, ext_phy_type, ext_phy_addr); 4366 bnx2x_8481_set_led4(params, ext_phy_type, ext_phy_addr);
4302 if (params->req_line_speed == SPEED_AUTO_NEG) { 4367 if (params->req_line_speed == SPEED_AUTO_NEG) {
4303 4368
@@ -4394,17 +4459,12 @@ static u8 bnx2x_ext_phy_init(struct link_params *params, struct link_vars *vars)
4394 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) { 4459 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G) {
4395 DP(NETIF_MSG_LINK, "Advertising 10G\n"); 4460 DP(NETIF_MSG_LINK, "Advertising 10G\n");
4396 /* Restart autoneg for 10G*/ 4461 /* Restart autoneg for 10G*/
4397 bnx2x_cl45_read(bp, params->port, 4462
4398 ext_phy_type,
4399 ext_phy_addr,
4400 MDIO_AN_DEVAD,
4401 MDIO_AN_REG_CTRL, &val);
4402 val |= 0x200;
4403 bnx2x_cl45_write(bp, params->port, 4463 bnx2x_cl45_write(bp, params->port,
4404 ext_phy_type, 4464 ext_phy_type,
4405 ext_phy_addr, 4465 ext_phy_addr,
4406 MDIO_AN_DEVAD, 4466 MDIO_AN_DEVAD,
4407 MDIO_AN_REG_CTRL, val); 4467 MDIO_AN_REG_CTRL, 0x3200);
4408 } 4468 }
4409 } else { 4469 } else {
4410 /* Force speed */ 4470 /* Force speed */
@@ -5148,6 +5208,7 @@ static u8 bnx2x_ext_phy_is_link_up(struct link_params *params,
5148 } 5208 }
5149 break; 5209 break;
5150 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481: 5210 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
5211 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
5151 /* Check 10G-BaseT link status */ 5212 /* Check 10G-BaseT link status */
5152 /* Check PMD signal ok */ 5213 /* Check PMD signal ok */
5153 bnx2x_cl45_read(bp, params->port, ext_phy_type, 5214 bnx2x_cl45_read(bp, params->port, ext_phy_type,
@@ -5363,8 +5424,10 @@ static void bnx2x_link_int_ack(struct link_params *params,
5363 (NIG_STATUS_XGXS0_LINK10G | 5424 (NIG_STATUS_XGXS0_LINK10G |
5364 NIG_STATUS_XGXS0_LINK_STATUS | 5425 NIG_STATUS_XGXS0_LINK_STATUS |
5365 NIG_STATUS_SERDES0_LINK_STATUS)); 5426 NIG_STATUS_SERDES0_LINK_STATUS));
5366 if (XGXS_EXT_PHY_TYPE(params->ext_phy_config) 5427 if ((XGXS_EXT_PHY_TYPE(params->ext_phy_config)
5367 == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481) { 5428 == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481) ||
5429 (XGXS_EXT_PHY_TYPE(params->ext_phy_config)
5430 == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823)) {
5368 bnx2x_8481_rearm_latch_signal(bp, port, is_mi_int); 5431 bnx2x_8481_rearm_latch_signal(bp, port, is_mi_int);
5369 } 5432 }
5370 if (vars->phy_link_up) { 5433 if (vars->phy_link_up) {
@@ -5477,6 +5540,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
5477 status = bnx2x_format_ver(spirom_ver, version, len); 5540 status = bnx2x_format_ver(spirom_ver, version, len);
5478 break; 5541 break;
5479 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481: 5542 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
5543 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
5480 spirom_ver = ((spirom_ver & 0xF80) >> 7) << 16 | 5544 spirom_ver = ((spirom_ver & 0xF80) >> 7) << 16 |
5481 (spirom_ver & 0x7F); 5545 (spirom_ver & 0x7F);
5482 status = bnx2x_format_ver(spirom_ver, version, len); 5546 status = bnx2x_format_ver(spirom_ver, version, len);
@@ -5728,13 +5792,15 @@ u8 bnx2x_override_led_value(struct bnx2x *bp, u8 port,
5728} 5792}
5729 5793
5730 5794
5731u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed, 5795u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed)
5732 u16 hw_led_mode, u32 chip_id)
5733{ 5796{
5797 u8 port = params->port;
5798 u16 hw_led_mode = params->hw_led_mode;
5734 u8 rc = 0; 5799 u8 rc = 0;
5735 u32 tmp; 5800 u32 tmp;
5736 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0; 5801 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5737 5802 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(params->ext_phy_config);
5803 struct bnx2x *bp = params->bp;
5738 DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode); 5804 DP(NETIF_MSG_LINK, "bnx2x_set_led: port %x, mode %d\n", port, mode);
5739 DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n", 5805 DP(NETIF_MSG_LINK, "speed 0x%x, hw_led_mode 0x%x\n",
5740 speed, hw_led_mode); 5806 speed, hw_led_mode);
@@ -5749,7 +5815,14 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
5749 break; 5815 break;
5750 5816
5751 case LED_MODE_OPER: 5817 case LED_MODE_OPER:
5752 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, hw_led_mode); 5818 if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT) {
5819 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4, 0);
5820 REG_WR(bp, NIG_REG_LED_10G_P0 + port*4, 1);
5821 } else {
5822 REG_WR(bp, NIG_REG_LED_MODE_P0 + port*4,
5823 hw_led_mode);
5824 }
5825
5753 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 + 5826 REG_WR(bp, NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0 +
5754 port*4, 0); 5827 port*4, 0);
5755 /* Set blinking rate to ~15.9Hz */ 5828 /* Set blinking rate to ~15.9Hz */
@@ -5761,7 +5834,7 @@ u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed,
5761 EMAC_WR(bp, EMAC_REG_EMAC_LED, 5834 EMAC_WR(bp, EMAC_REG_EMAC_LED,
5762 (tmp & (~EMAC_LED_OVERRIDE))); 5835 (tmp & (~EMAC_LED_OVERRIDE)));
5763 5836
5764 if (!CHIP_IS_E1H(bp) && 5837 if (CHIP_IS_E1(bp) &&
5765 ((speed == SPEED_2500) || 5838 ((speed == SPEED_2500) ||
5766 (speed == SPEED_1000) || 5839 (speed == SPEED_1000) ||
5767 (speed == SPEED_100) || 5840 (speed == SPEED_100) ||
@@ -5864,6 +5937,7 @@ static u8 bnx2x_link_initialize(struct link_params *params,
5864 5937
5865 if (non_ext_phy || 5938 if (non_ext_phy ||
5866 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) || 5939 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
5940 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) ||
5867 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) || 5941 (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) ||
5868 (params->loopback_mode == LOOPBACK_EXT_PHY)) { 5942 (params->loopback_mode == LOOPBACK_EXT_PHY)) {
5869 if (params->req_line_speed == SPEED_AUTO_NEG) 5943 if (params->req_line_speed == SPEED_AUTO_NEG)
@@ -6030,10 +6104,7 @@ u8 bnx2x_phy_init(struct link_params *params, struct link_vars *vars)
6030 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE + 6104 REG_WR(bp, NIG_REG_EGRESS_DRAIN0_MODE +
6031 params->port*4, 0); 6105 params->port*4, 0);
6032 6106
6033 bnx2x_set_led(bp, params->port, LED_MODE_OPER, 6107 bnx2x_set_led(params, LED_MODE_OPER, vars->line_speed);
6034 vars->line_speed, params->hw_led_mode,
6035 params->chip_id);
6036
6037 } else 6108 } else
6038 /* No loopback */ 6109 /* No loopback */
6039 { 6110 {
@@ -6091,15 +6162,13 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
6091{ 6162{
6092 struct bnx2x *bp = params->bp; 6163 struct bnx2x *bp = params->bp;
6093 u32 ext_phy_config = params->ext_phy_config; 6164 u32 ext_phy_config = params->ext_phy_config;
6094 u16 hw_led_mode = params->hw_led_mode;
6095 u32 chip_id = params->chip_id;
6096 u8 port = params->port; 6165 u8 port = params->port;
6097 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config); 6166 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
6098 u32 val = REG_RD(bp, params->shmem_base + 6167 u32 val = REG_RD(bp, params->shmem_base +
6099 offsetof(struct shmem_region, dev_info. 6168 offsetof(struct shmem_region, dev_info.
6100 port_feature_config[params->port]. 6169 port_feature_config[params->port].
6101 config)); 6170 config));
6102 6171 DP(NETIF_MSG_LINK, "Resetting the link of port %d\n", port);
6103 /* disable attentions */ 6172 /* disable attentions */
6104 vars->link_status = 0; 6173 vars->link_status = 0;
6105 bnx2x_update_mng(params, vars->link_status); 6174 bnx2x_update_mng(params, vars->link_status);
@@ -6127,7 +6196,7 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
6127 * Hold it as vars low 6196 * Hold it as vars low
6128 */ 6197 */
6129 /* clear link led */ 6198 /* clear link led */
6130 bnx2x_set_led(bp, port, LED_MODE_OFF, 0, hw_led_mode, chip_id); 6199 bnx2x_set_led(params, LED_MODE_OFF, 0);
6131 if (reset_ext_phy) { 6200 if (reset_ext_phy) {
6132 switch (ext_phy_type) { 6201 switch (ext_phy_type) {
6133 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT: 6202 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
@@ -6163,6 +6232,22 @@ u8 bnx2x_link_reset(struct link_params *params, struct link_vars *vars,
6163 bnx2x_8726_reset_phy(bp, params->port, ext_phy_addr); 6232 bnx2x_8726_reset_phy(bp, params->port, ext_phy_addr);
6164 break; 6233 break;
6165 } 6234 }
6235 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
6236 {
6237 u8 ext_phy_addr =
6238 XGXS_EXT_PHY_ADDR(params->ext_phy_config);
6239 bnx2x_cl45_write(bp, port,
6240 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
6241 ext_phy_addr,
6242 MDIO_AN_DEVAD,
6243 MDIO_AN_REG_CTRL, 0x0000);
6244 bnx2x_cl45_write(bp, port,
6245 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481,
6246 ext_phy_addr,
6247 MDIO_PMA_DEVAD,
6248 MDIO_PMA_REG_CTRL, 1);
6249 break;
6250 }
6166 default: 6251 default:
6167 /* HW reset */ 6252 /* HW reset */
6168 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1, 6253 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
@@ -6198,9 +6283,7 @@ static u8 bnx2x_update_link_down(struct link_params *params,
6198 u8 port = params->port; 6283 u8 port = params->port;
6199 6284
6200 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port); 6285 DP(NETIF_MSG_LINK, "Port %x: Link is down\n", port);
6201 bnx2x_set_led(bp, port, LED_MODE_OFF, 6286 bnx2x_set_led(params, LED_MODE_OFF, 0);
6202 0, params->hw_led_mode,
6203 params->chip_id);
6204 6287
6205 /* indicate no mac active */ 6288 /* indicate no mac active */
6206 vars->mac_type = MAC_TYPE_NONE; 6289 vars->mac_type = MAC_TYPE_NONE;
@@ -6237,15 +6320,13 @@ static u8 bnx2x_update_link_up(struct link_params *params,
6237 vars->link_status |= LINK_STATUS_LINK_UP; 6320 vars->link_status |= LINK_STATUS_LINK_UP;
6238 if (link_10g) { 6321 if (link_10g) {
6239 bnx2x_bmac_enable(params, vars, 0); 6322 bnx2x_bmac_enable(params, vars, 0);
6240 bnx2x_set_led(bp, port, LED_MODE_OPER, 6323 bnx2x_set_led(params, LED_MODE_OPER, SPEED_10000);
6241 SPEED_10000, params->hw_led_mode,
6242 params->chip_id);
6243
6244 } else { 6324 } else {
6245 bnx2x_emac_enable(params, vars, 0);
6246 rc = bnx2x_emac_program(params, vars->line_speed, 6325 rc = bnx2x_emac_program(params, vars->line_speed,
6247 vars->duplex); 6326 vars->duplex);
6248 6327
6328 bnx2x_emac_enable(params, vars, 0);
6329
6249 /* AN complete? */ 6330 /* AN complete? */
6250 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) { 6331 if (gp_status & MDIO_AN_CL73_OR_37_COMPLETE) {
6251 if (!(vars->phy_flags & 6332 if (!(vars->phy_flags &
@@ -6343,6 +6424,7 @@ u8 bnx2x_link_update(struct link_params *params, struct link_vars *vars)
6343 6424
6344 if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) && 6425 if ((ext_phy_type != PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT) &&
6345 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) && 6426 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) &&
6427 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706) &&
6346 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) && 6428 (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726) &&
6347 (ext_phy_link_up && !vars->phy_link_up)) 6429 (ext_phy_link_up && !vars->phy_link_up))
6348 bnx2x_init_internal_phy(params, vars, 0); 6430 bnx2x_init_internal_phy(params, vars, 0);
@@ -6578,6 +6660,13 @@ static u8 bnx2x_8726_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6578 return 0; 6660 return 0;
6579} 6661}
6580 6662
6663
6664static u8 bnx2x_84823_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6665{
6666 /* HW reset */
6667 bnx2x_ext_phy_hw_reset(bp, 1);
6668 return 0;
6669}
6581u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base) 6670u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6582{ 6671{
6583 u8 rc = 0; 6672 u8 rc = 0;
@@ -6607,7 +6696,9 @@ u8 bnx2x_common_init_phy(struct bnx2x *bp, u32 shmem_base)
6607 /* GPIO1 affects both ports, so there's need to pull 6696 /* GPIO1 affects both ports, so there's need to pull
6608 it for single port alone */ 6697 it for single port alone */
6609 rc = bnx2x_8726_common_init_phy(bp, shmem_base); 6698 rc = bnx2x_8726_common_init_phy(bp, shmem_base);
6610 6699 break;
6700 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM84823:
6701 rc = bnx2x_84823_common_init_phy(bp, shmem_base);
6611 break; 6702 break;
6612 default: 6703 default:
6613 DP(NETIF_MSG_LINK, 6704 DP(NETIF_MSG_LINK,
diff --git a/drivers/net/bnx2x_link.h b/drivers/net/bnx2x_link.h
index f3e252264e1b..40c2981de8ed 100644
--- a/drivers/net/bnx2x_link.h
+++ b/drivers/net/bnx2x_link.h
@@ -178,8 +178,7 @@ u8 bnx2x_get_ext_phy_fw_version(struct link_params *params, u8 driver_loaded,
178 Basically, the CLC takes care of the led for the link, but in case one needs 178 Basically, the CLC takes care of the led for the link, but in case one needs
179 to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to 179 to set/unset the led unnaturally, set the "mode" to LED_MODE_OPER to
180 blink the led, and LED_MODE_OFF to set the led off.*/ 180 blink the led, and LED_MODE_OFF to set the led off.*/
181u8 bnx2x_set_led(struct bnx2x *bp, u8 port, u8 mode, u32 speed, 181u8 bnx2x_set_led(struct link_params *params, u8 mode, u32 speed);
182 u16 hw_led_mode, u32 chip_id);
183#define LED_MODE_OFF 0 182#define LED_MODE_OFF 0
184#define LED_MODE_OPER 2 183#define LED_MODE_OPER 2
185 184
diff --git a/drivers/net/bnx2x_main.c b/drivers/net/bnx2x_main.c
index 20f0ed956df2..e2cf686d1118 100644
--- a/drivers/net/bnx2x_main.c
+++ b/drivers/net/bnx2x_main.c
@@ -49,6 +49,7 @@
49#include <linux/prefetch.h> 49#include <linux/prefetch.h>
50#include <linux/zlib.h> 50#include <linux/zlib.h>
51#include <linux/io.h> 51#include <linux/io.h>
52#include <linux/stringify.h>
52 53
53 54
54#include "bnx2x.h" 55#include "bnx2x.h"
@@ -56,15 +57,20 @@
56#include "bnx2x_init_ops.h" 57#include "bnx2x_init_ops.h"
57#include "bnx2x_dump.h" 58#include "bnx2x_dump.h"
58 59
59#define DRV_MODULE_VERSION "1.52.1" 60#define DRV_MODULE_VERSION "1.52.1-4"
60#define DRV_MODULE_RELDATE "2009/08/12" 61#define DRV_MODULE_RELDATE "2009/11/09"
61#define BNX2X_BC_VER 0x040200 62#define BNX2X_BC_VER 0x040200
62 63
63#include <linux/firmware.h> 64#include <linux/firmware.h>
64#include "bnx2x_fw_file_hdr.h" 65#include "bnx2x_fw_file_hdr.h"
65/* FW files */ 66/* FW files */
66#define FW_FILE_PREFIX_E1 "bnx2x-e1-" 67#define FW_FILE_VERSION \
67#define FW_FILE_PREFIX_E1H "bnx2x-e1h-" 68 __stringify(BCM_5710_FW_MAJOR_VERSION) "." \
69 __stringify(BCM_5710_FW_MINOR_VERSION) "." \
70 __stringify(BCM_5710_FW_REVISION_VERSION) "." \
71 __stringify(BCM_5710_FW_ENGINEERING_VERSION)
72#define FW_FILE_NAME_E1 "bnx2x-e1-" FW_FILE_VERSION ".fw"
73#define FW_FILE_NAME_E1H "bnx2x-e1h-" FW_FILE_VERSION ".fw"
68 74
69/* Time in jiffies before concluding the transmitter is hung */ 75/* Time in jiffies before concluding the transmitter is hung */
70#define TX_TIMEOUT (5*HZ) 76#define TX_TIMEOUT (5*HZ)
@@ -77,6 +83,8 @@ MODULE_AUTHOR("Eliezer Tamir");
77MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver"); 83MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
78MODULE_LICENSE("GPL"); 84MODULE_LICENSE("GPL");
79MODULE_VERSION(DRV_MODULE_VERSION); 85MODULE_VERSION(DRV_MODULE_VERSION);
86MODULE_FIRMWARE(FW_FILE_NAME_E1);
87MODULE_FIRMWARE(FW_FILE_NAME_E1H);
80 88
81static int multi_mode = 1; 89static int multi_mode = 1;
82module_param(multi_mode, int, 0); 90module_param(multi_mode, int, 0);
@@ -742,6 +750,9 @@ static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
742 if (msix) { 750 if (msix) {
743 synchronize_irq(bp->msix_table[0].vector); 751 synchronize_irq(bp->msix_table[0].vector);
744 offset = 1; 752 offset = 1;
753#ifdef BCM_CNIC
754 offset++;
755#endif
745 for_each_queue(bp, i) 756 for_each_queue(bp, i)
746 synchronize_irq(bp->msix_table[i + offset].vector); 757 synchronize_irq(bp->msix_table[i + offset].vector);
747 } else 758 } else
@@ -966,6 +977,9 @@ static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
966 } 977 }
967} 978}
968 979
980#ifdef BCM_CNIC
981static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
982#endif
969 983
970static void bnx2x_sp_event(struct bnx2x_fastpath *fp, 984static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
971 union eth_rx_cqe *rr_cqe) 985 union eth_rx_cqe *rr_cqe)
@@ -1022,16 +1036,24 @@ static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
1022 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED; 1036 bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
1023 break; 1037 break;
1024 1038
1039#ifdef BCM_CNIC
1040 case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
1041 DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
1042 bnx2x_cnic_cfc_comp(bp, cid);
1043 break;
1044#endif
1025 1045
1026 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN): 1046 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
1027 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG): 1047 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
1028 DP(NETIF_MSG_IFUP, "got set mac ramrod\n"); 1048 DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
1029 bp->set_mac_pending = 0; 1049 bp->set_mac_pending--;
1050 smp_wmb();
1030 break; 1051 break;
1031 1052
1032 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT): 1053 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
1033 case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
1034 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n"); 1054 DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
1055 bp->set_mac_pending--;
1056 smp_wmb();
1035 break; 1057 break;
1036 1058
1037 default: 1059 default:
@@ -1804,6 +1826,20 @@ static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
1804 } 1826 }
1805 } 1827 }
1806 1828
1829#ifdef BCM_CNIC
1830 mask = 0x2 << CNIC_SB_ID(bp);
1831 if (status & (mask | 0x1)) {
1832 struct cnic_ops *c_ops = NULL;
1833
1834 rcu_read_lock();
1835 c_ops = rcu_dereference(bp->cnic_ops);
1836 if (c_ops)
1837 c_ops->cnic_handler(bp->cnic_data, NULL);
1838 rcu_read_unlock();
1839
1840 status &= ~mask;
1841 }
1842#endif
1807 1843
1808 if (unlikely(status & 0x1)) { 1844 if (unlikely(status & 0x1)) {
1809 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); 1845 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
@@ -2128,18 +2164,30 @@ static void bnx2x_calc_fc_adv(struct bnx2x *bp)
2128 2164
2129static void bnx2x_link_report(struct bnx2x *bp) 2165static void bnx2x_link_report(struct bnx2x *bp)
2130{ 2166{
2131 if (bp->state == BNX2X_STATE_DISABLED) { 2167 if (bp->flags & MF_FUNC_DIS) {
2132 netif_carrier_off(bp->dev); 2168 netif_carrier_off(bp->dev);
2133 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name); 2169 printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
2134 return; 2170 return;
2135 } 2171 }
2136 2172
2137 if (bp->link_vars.link_up) { 2173 if (bp->link_vars.link_up) {
2174 u16 line_speed;
2175
2138 if (bp->state == BNX2X_STATE_OPEN) 2176 if (bp->state == BNX2X_STATE_OPEN)
2139 netif_carrier_on(bp->dev); 2177 netif_carrier_on(bp->dev);
2140 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name); 2178 printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);
2141 2179
2142 printk("%d Mbps ", bp->link_vars.line_speed); 2180 line_speed = bp->link_vars.line_speed;
2181 if (IS_E1HMF(bp)) {
2182 u16 vn_max_rate;
2183
2184 vn_max_rate =
2185 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
2186 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2187 if (vn_max_rate < line_speed)
2188 line_speed = vn_max_rate;
2189 }
2190 printk("%d Mbps ", line_speed);
2143 2191
2144 if (bp->link_vars.duplex == DUPLEX_FULL) 2192 if (bp->link_vars.duplex == DUPLEX_FULL)
2145 printk("full duplex"); 2193 printk("full duplex");
@@ -2304,8 +2352,14 @@ static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
2304 } 2352 }
2305 2353
2306 /* ... only if all min rates are zeros - disable fairness */ 2354 /* ... only if all min rates are zeros - disable fairness */
2307 if (all_zero) 2355 if (all_zero) {
2308 bp->vn_weight_sum = 0; 2356 bp->cmng.flags.cmng_enables &=
2357 ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2358 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
2359 " fairness will be disabled\n");
2360 } else
2361 bp->cmng.flags.cmng_enables |=
2362 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
2309} 2363}
2310 2364
2311static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func) 2365static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
@@ -2324,17 +2378,14 @@ static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2324 } else { 2378 } else {
2325 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >> 2379 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2326 FUNC_MF_CFG_MIN_BW_SHIFT) * 100; 2380 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2327 /* If fairness is enabled (not all min rates are zeroes) and 2381 /* If min rate is zero - set it to 1 */
2328 if current min rate is zero - set it to 1. 2382 if (!vn_min_rate)
2329 This is a requirement of the algorithm. */
2330 if (bp->vn_weight_sum && (vn_min_rate == 0))
2331 vn_min_rate = DEF_MIN_RATE; 2383 vn_min_rate = DEF_MIN_RATE;
2332 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >> 2384 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2333 FUNC_MF_CFG_MAX_BW_SHIFT) * 100; 2385 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2334 } 2386 }
2335
2336 DP(NETIF_MSG_IFUP, 2387 DP(NETIF_MSG_IFUP,
2337 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n", 2388 "func %d: vn_min_rate %d vn_max_rate %d vn_weight_sum %d\n",
2338 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum); 2389 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2339 2390
2340 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn)); 2391 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
@@ -2405,8 +2456,7 @@ static void bnx2x_link_attn(struct bnx2x *bp)
2405 memset(&(pstats->mac_stx[0]), 0, 2456 memset(&(pstats->mac_stx[0]), 0,
2406 sizeof(struct mac_stx)); 2457 sizeof(struct mac_stx));
2407 } 2458 }
2408 if ((bp->state == BNX2X_STATE_OPEN) || 2459 if (bp->state == BNX2X_STATE_OPEN)
2409 (bp->state == BNX2X_STATE_DISABLED))
2410 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP); 2460 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2411 } 2461 }
2412 2462
@@ -2449,9 +2499,7 @@ static void bnx2x_link_attn(struct bnx2x *bp)
2449 2499
2450static void bnx2x__link_status_update(struct bnx2x *bp) 2500static void bnx2x__link_status_update(struct bnx2x *bp)
2451{ 2501{
2452 int func = BP_FUNC(bp); 2502 if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
2453
2454 if (bp->state != BNX2X_STATE_OPEN)
2455 return; 2503 return;
2456 2504
2457 bnx2x_link_status_update(&bp->link_params, &bp->link_vars); 2505 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
@@ -2461,7 +2509,6 @@ static void bnx2x__link_status_update(struct bnx2x *bp)
2461 else 2509 else
2462 bnx2x_stats_handle(bp, STATS_EVENT_STOP); 2510 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2463 2511
2464 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2465 bnx2x_calc_vn_weight_sum(bp); 2512 bnx2x_calc_vn_weight_sum(bp);
2466 2513
2467 /* indicate link status */ 2514 /* indicate link status */
@@ -2501,6 +2548,7 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2501 u32 cnt = 1; 2548 u32 cnt = 1;
2502 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10; 2549 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
2503 2550
2551 mutex_lock(&bp->fw_mb_mutex);
2504 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq)); 2552 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
2505 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq)); 2553 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
2506 2554
@@ -2510,8 +2558,8 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2510 2558
2511 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header); 2559 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
2512 2560
2513 /* Give the FW up to 2 second (200*10ms) */ 2561 /* Give the FW up to 5 second (500*10ms) */
2514 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200)); 2562 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
2515 2563
2516 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n", 2564 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
2517 cnt*delay, rc, seq); 2565 cnt*delay, rc, seq);
@@ -2525,32 +2573,24 @@ u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
2525 bnx2x_fw_dump(bp); 2573 bnx2x_fw_dump(bp);
2526 rc = 0; 2574 rc = 0;
2527 } 2575 }
2576 mutex_unlock(&bp->fw_mb_mutex);
2528 2577
2529 return rc; 2578 return rc;
2530} 2579}
2531 2580
2532static void bnx2x_set_storm_rx_mode(struct bnx2x *bp); 2581static void bnx2x_set_storm_rx_mode(struct bnx2x *bp);
2533static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set); 2582static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set);
2534static void bnx2x_set_rx_mode(struct net_device *dev); 2583static void bnx2x_set_rx_mode(struct net_device *dev);
2535 2584
2536static void bnx2x_e1h_disable(struct bnx2x *bp) 2585static void bnx2x_e1h_disable(struct bnx2x *bp)
2537{ 2586{
2538 int port = BP_PORT(bp); 2587 int port = BP_PORT(bp);
2539 int i;
2540
2541 bp->rx_mode = BNX2X_RX_MODE_NONE;
2542 bnx2x_set_storm_rx_mode(bp);
2543 2588
2544 netif_tx_disable(bp->dev); 2589 netif_tx_disable(bp->dev);
2545 bp->dev->trans_start = jiffies; /* prevent tx timeout */ 2590 bp->dev->trans_start = jiffies; /* prevent tx timeout */
2546 2591
2547 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); 2592 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
2548 2593
2549 bnx2x_set_mac_addr_e1h(bp, 0);
2550
2551 for (i = 0; i < MC_HASH_SIZE; i++)
2552 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
2553
2554 netif_carrier_off(bp->dev); 2594 netif_carrier_off(bp->dev);
2555} 2595}
2556 2596
@@ -2560,13 +2600,13 @@ static void bnx2x_e1h_enable(struct bnx2x *bp)
2560 2600
2561 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1); 2601 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
2562 2602
2563 bnx2x_set_mac_addr_e1h(bp, 1);
2564
2565 /* Tx queue should be only reenabled */ 2603 /* Tx queue should be only reenabled */
2566 netif_tx_wake_all_queues(bp->dev); 2604 netif_tx_wake_all_queues(bp->dev);
2567 2605
2568 /* Initialize the receive filter. */ 2606 /*
2569 bnx2x_set_rx_mode(bp->dev); 2607 * Should not call netif_carrier_on since it will be called if the link
2608 * is up when checking for link state
2609 */
2570} 2610}
2571 2611
2572static void bnx2x_update_min_max(struct bnx2x *bp) 2612static void bnx2x_update_min_max(struct bnx2x *bp)
@@ -2605,21 +2645,23 @@ static void bnx2x_update_min_max(struct bnx2x *bp)
2605 2645
2606static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event) 2646static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2607{ 2647{
2608 int func = BP_FUNC(bp);
2609
2610 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event); 2648 DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
2611 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2612 2649
2613 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) { 2650 if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
2614 2651
2652 /*
2653 * This is the only place besides the function initialization
2654 * where the bp->flags can change so it is done without any
2655 * locks
2656 */
2615 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { 2657 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
2616 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n"); 2658 DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
2617 bp->state = BNX2X_STATE_DISABLED; 2659 bp->flags |= MF_FUNC_DIS;
2618 2660
2619 bnx2x_e1h_disable(bp); 2661 bnx2x_e1h_disable(bp);
2620 } else { 2662 } else {
2621 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n"); 2663 DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
2622 bp->state = BNX2X_STATE_OPEN; 2664 bp->flags &= ~MF_FUNC_DIS;
2623 2665
2624 bnx2x_e1h_enable(bp); 2666 bnx2x_e1h_enable(bp);
2625 } 2667 }
@@ -2638,11 +2680,40 @@ static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
2638 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK); 2680 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK);
2639} 2681}
2640 2682
2683/* must be called under the spq lock */
2684static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
2685{
2686 struct eth_spe *next_spe = bp->spq_prod_bd;
2687
2688 if (bp->spq_prod_bd == bp->spq_last_bd) {
2689 bp->spq_prod_bd = bp->spq;
2690 bp->spq_prod_idx = 0;
2691 DP(NETIF_MSG_TIMER, "end of spq\n");
2692 } else {
2693 bp->spq_prod_bd++;
2694 bp->spq_prod_idx++;
2695 }
2696 return next_spe;
2697}
2698
2699/* must be called under the spq lock */
2700static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
2701{
2702 int func = BP_FUNC(bp);
2703
2704 /* Make sure that BD data is updated before writing the producer */
2705 wmb();
2706
2707 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2708 bp->spq_prod_idx);
2709 mmiowb();
2710}
2711
2641/* the slow path queue is odd since completions arrive on the fastpath ring */ 2712/* the slow path queue is odd since completions arrive on the fastpath ring */
2642static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid, 2713static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2643 u32 data_hi, u32 data_lo, int common) 2714 u32 data_hi, u32 data_lo, int common)
2644{ 2715{
2645 int func = BP_FUNC(bp); 2716 struct eth_spe *spe;
2646 2717
2647 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/, 2718 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2648 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n", 2719 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
@@ -2664,38 +2735,23 @@ static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2664 return -EBUSY; 2735 return -EBUSY;
2665 } 2736 }
2666 2737
2738 spe = bnx2x_sp_get_next(bp);
2739
2667 /* CID needs port number to be encoded int it */ 2740 /* CID needs port number to be encoded int it */
2668 bp->spq_prod_bd->hdr.conn_and_cmd_data = 2741 spe->hdr.conn_and_cmd_data =
2669 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) | 2742 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2670 HW_CID(bp, cid))); 2743 HW_CID(bp, cid)));
2671 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE); 2744 spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2672 if (common) 2745 if (common)
2673 bp->spq_prod_bd->hdr.type |= 2746 spe->hdr.type |=
2674 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT)); 2747 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2675 2748
2676 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi); 2749 spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2677 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo); 2750 spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2678 2751
2679 bp->spq_left--; 2752 bp->spq_left--;
2680 2753
2681 if (bp->spq_prod_bd == bp->spq_last_bd) { 2754 bnx2x_sp_prod_update(bp);
2682 bp->spq_prod_bd = bp->spq;
2683 bp->spq_prod_idx = 0;
2684 DP(NETIF_MSG_TIMER, "end of spq\n");
2685
2686 } else {
2687 bp->spq_prod_bd++;
2688 bp->spq_prod_idx++;
2689 }
2690
2691 /* Make sure that BD data is updated before writing the producer */
2692 wmb();
2693
2694 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2695 bp->spq_prod_idx);
2696
2697 mmiowb();
2698
2699 spin_unlock_bh(&bp->spq_lock); 2755 spin_unlock_bh(&bp->spq_lock);
2700 return 0; 2756 return 0;
2701} 2757}
@@ -3024,6 +3080,8 @@ static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
3024 int func = BP_FUNC(bp); 3080 int func = BP_FUNC(bp);
3025 3081
3026 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0); 3082 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
3083 bp->mf_config = SHMEM_RD(bp,
3084 mf_cfg.func_mf_config[func].config);
3027 val = SHMEM_RD(bp, func_mb[func].drv_status); 3085 val = SHMEM_RD(bp, func_mb[func].drv_status);
3028 if (val & DRV_STATUS_DCC_EVENT_MASK) 3086 if (val & DRV_STATUS_DCC_EVENT_MASK)
3029 bnx2x_dcc_event(bp, 3087 bnx2x_dcc_event(bp,
@@ -3227,6 +3285,17 @@ static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
3227 return IRQ_HANDLED; 3285 return IRQ_HANDLED;
3228#endif 3286#endif
3229 3287
3288#ifdef BCM_CNIC
3289 {
3290 struct cnic_ops *c_ops;
3291
3292 rcu_read_lock();
3293 c_ops = rcu_dereference(bp->cnic_ops);
3294 if (c_ops)
3295 c_ops->cnic_handler(bp->cnic_data, NULL);
3296 rcu_read_unlock();
3297 }
3298#endif
3230 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0); 3299 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3231 3300
3232 return IRQ_HANDLED; 3301 return IRQ_HANDLED;
@@ -4640,8 +4709,7 @@ static void bnx2x_timer(unsigned long data)
4640 } 4709 }
4641 } 4710 }
4642 4711
4643 if ((bp->state == BNX2X_STATE_OPEN) || 4712 if (bp->state == BNX2X_STATE_OPEN)
4644 (bp->state == BNX2X_STATE_DISABLED))
4645 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE); 4713 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4646 4714
4647timer_restart: 4715timer_restart:
@@ -4860,21 +4928,21 @@ static void bnx2x_update_coalesce(struct bnx2x *bp)
4860 REG_WR8(bp, BAR_CSTRORM_INTMEM + 4928 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4861 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id, 4929 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4862 U_SB_ETH_RX_CQ_INDEX), 4930 U_SB_ETH_RX_CQ_INDEX),
4863 bp->rx_ticks/12); 4931 bp->rx_ticks/(4 * BNX2X_BTR));
4864 REG_WR16(bp, BAR_CSTRORM_INTMEM + 4932 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4865 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, 4933 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4866 U_SB_ETH_RX_CQ_INDEX), 4934 U_SB_ETH_RX_CQ_INDEX),
4867 (bp->rx_ticks/12) ? 0 : 1); 4935 (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4868 4936
4869 /* HC_INDEX_C_ETH_TX_CQ_CONS */ 4937 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4870 REG_WR8(bp, BAR_CSTRORM_INTMEM + 4938 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4871 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id, 4939 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4872 C_SB_ETH_TX_CQ_INDEX), 4940 C_SB_ETH_TX_CQ_INDEX),
4873 bp->tx_ticks/12); 4941 bp->tx_ticks/(4 * BNX2X_BTR));
4874 REG_WR16(bp, BAR_CSTRORM_INTMEM + 4942 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4875 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, 4943 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4876 C_SB_ETH_TX_CQ_INDEX), 4944 C_SB_ETH_TX_CQ_INDEX),
4877 (bp->tx_ticks/12) ? 0 : 1); 4945 (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
4878 } 4946 }
4879} 4947}
4880 4948
@@ -5235,7 +5303,7 @@ static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
5235{ 5303{
5236 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0}; 5304 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
5237 int mode = bp->rx_mode; 5305 int mode = bp->rx_mode;
5238 int mask = (1 << BP_L_ID(bp)); 5306 int mask = bp->rx_mode_cl_mask;
5239 int func = BP_FUNC(bp); 5307 int func = BP_FUNC(bp);
5240 int port = BP_PORT(bp); 5308 int port = BP_PORT(bp);
5241 int i; 5309 int i;
@@ -5348,6 +5416,7 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
5348 (*(u32 *)&tstorm_config)); 5416 (*(u32 *)&tstorm_config));
5349 5417
5350 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */ 5418 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5419 bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
5351 bnx2x_set_storm_rx_mode(bp); 5420 bnx2x_set_storm_rx_mode(bp);
5352 5421
5353 for_each_queue(bp, i) { 5422 for_each_queue(bp, i) {
@@ -5504,20 +5573,18 @@ static void bnx2x_init_internal_func(struct bnx2x *bp)
5504 bp->link_vars.line_speed = SPEED_10000; 5573 bp->link_vars.line_speed = SPEED_10000;
5505 bnx2x_init_port_minmax(bp); 5574 bnx2x_init_port_minmax(bp);
5506 5575
5576 if (!BP_NOMCP(bp))
5577 bp->mf_config =
5578 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
5507 bnx2x_calc_vn_weight_sum(bp); 5579 bnx2x_calc_vn_weight_sum(bp);
5508 5580
5509 for (vn = VN_0; vn < E1HVN_MAX; vn++) 5581 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5510 bnx2x_init_vn_minmax(bp, 2*vn + port); 5582 bnx2x_init_vn_minmax(bp, 2*vn + port);
5511 5583
5512 /* Enable rate shaping and fairness */ 5584 /* Enable rate shaping and fairness */
5513 bp->cmng.flags.cmng_enables = 5585 bp->cmng.flags.cmng_enables |=
5514 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN; 5586 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5515 if (bp->vn_weight_sum) 5587
5516 bp->cmng.flags.cmng_enables |=
5517 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5518 else
5519 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5520 " fairness will be disabled\n");
5521 } else { 5588 } else {
5522 /* rate shaping and fairness are disabled */ 5589 /* rate shaping and fairness are disabled */
5523 DP(NETIF_MSG_IFUP, 5590 DP(NETIF_MSG_IFUP,
@@ -5565,7 +5632,11 @@ static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5565 fp->state = BNX2X_FP_STATE_CLOSED; 5632 fp->state = BNX2X_FP_STATE_CLOSED;
5566 fp->index = i; 5633 fp->index = i;
5567 fp->cl_id = BP_L_ID(bp) + i; 5634 fp->cl_id = BP_L_ID(bp) + i;
5635#ifdef BCM_CNIC
5636 fp->sb_id = fp->cl_id + 1;
5637#else
5568 fp->sb_id = fp->cl_id; 5638 fp->sb_id = fp->cl_id;
5639#endif
5569 /* Suitable Rx and Tx SBs are served by the same client */ 5640 /* Suitable Rx and Tx SBs are served by the same client */
5570 if (i >= bp->num_rx_queues) 5641 if (i >= bp->num_rx_queues)
5571 fp->cl_id -= bp->num_rx_queues; 5642 fp->cl_id -= bp->num_rx_queues;
@@ -5867,7 +5938,7 @@ static int bnx2x_int_mem_test(struct bnx2x *bp)
5867 msleep(50); 5938 msleep(50);
5868 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); 5939 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5869 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); 5940 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5870#ifndef BCM_ISCSI 5941#ifndef BCM_CNIC
5871 /* set NIC mode */ 5942 /* set NIC mode */
5872 REG_WR(bp, PRS_REG_NIC_MODE, 1); 5943 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5873#endif 5944#endif
@@ -6006,6 +6077,9 @@ static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
6006static int bnx2x_init_common(struct bnx2x *bp) 6077static int bnx2x_init_common(struct bnx2x *bp)
6007{ 6078{
6008 u32 val, i; 6079 u32 val, i;
6080#ifdef BCM_CNIC
6081 u32 wb_write[2];
6082#endif
6009 6083
6010 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp)); 6084 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
6011 6085
@@ -6048,7 +6122,7 @@ static int bnx2x_init_common(struct bnx2x *bp)
6048#endif 6122#endif
6049 6123
6050 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2); 6124 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
6051#ifdef BCM_ISCSI 6125#ifdef BCM_CNIC
6052 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5); 6126 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
6053 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5); 6127 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
6054 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5); 6128 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
@@ -6091,11 +6165,26 @@ static int bnx2x_init_common(struct bnx2x *bp)
6091 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3); 6165 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
6092 6166
6093 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE); 6167 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
6168
6169#ifdef BCM_CNIC
6170 wb_write[0] = 0;
6171 wb_write[1] = 0;
6172 for (i = 0; i < 64; i++) {
6173 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
6174 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
6175
6176 if (CHIP_IS_E1H(bp)) {
6177 REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
6178 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
6179 wb_write, 2);
6180 }
6181 }
6182#endif
6094 /* soft reset pulse */ 6183 /* soft reset pulse */
6095 REG_WR(bp, QM_REG_SOFT_RESET, 1); 6184 REG_WR(bp, QM_REG_SOFT_RESET, 1);
6096 REG_WR(bp, QM_REG_SOFT_RESET, 0); 6185 REG_WR(bp, QM_REG_SOFT_RESET, 0);
6097 6186
6098#ifdef BCM_ISCSI 6187#ifdef BCM_CNIC
6099 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE); 6188 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
6100#endif 6189#endif
6101 6190
@@ -6109,8 +6198,10 @@ static int bnx2x_init_common(struct bnx2x *bp)
6109 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE); 6198 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
6110 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE); 6199 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
6111 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf); 6200 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
6201#ifndef BCM_CNIC
6112 /* set NIC mode */ 6202 /* set NIC mode */
6113 REG_WR(bp, PRS_REG_NIC_MODE, 1); 6203 REG_WR(bp, PRS_REG_NIC_MODE, 1);
6204#endif
6114 if (CHIP_IS_E1H(bp)) 6205 if (CHIP_IS_E1H(bp))
6115 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp)); 6206 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
6116 6207
@@ -6145,6 +6236,18 @@ static int bnx2x_init_common(struct bnx2x *bp)
6145 /* TODO: replace with something meaningful */ 6236 /* TODO: replace with something meaningful */
6146 } 6237 }
6147 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE); 6238 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
6239#ifdef BCM_CNIC
6240 REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
6241 REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
6242 REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
6243 REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
6244 REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
6245 REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
6246 REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
6247 REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
6248 REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
6249 REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
6250#endif
6148 REG_WR(bp, SRC_REG_SOFT_RST, 0); 6251 REG_WR(bp, SRC_REG_SOFT_RST, 0);
6149 6252
6150 if (sizeof(union cdu_context) != 1024) 6253 if (sizeof(union cdu_context) != 1024)
@@ -6261,38 +6364,14 @@ static int bnx2x_init_port(struct bnx2x *bp)
6261 bnx2x_init_block(bp, TCM_BLOCK, init_stage); 6364 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
6262 bnx2x_init_block(bp, UCM_BLOCK, init_stage); 6365 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
6263 bnx2x_init_block(bp, CCM_BLOCK, init_stage); 6366 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
6264#ifdef BCM_ISCSI
6265 /* Port0 1
6266 * Port1 385 */
6267 i++;
6268 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
6269 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
6270 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6271 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6272
6273 /* Port0 2
6274 * Port1 386 */
6275 i++;
6276 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
6277 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
6278 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6279 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6280
6281 /* Port0 3
6282 * Port1 387 */
6283 i++;
6284 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
6285 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
6286 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
6287 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6288#endif
6289 bnx2x_init_block(bp, XCM_BLOCK, init_stage); 6367 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
6290 6368
6291#ifdef BCM_ISCSI 6369#ifdef BCM_CNIC
6292 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20); 6370 REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
6293 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
6294 6371
6295 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage); 6372 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
6373 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
6374 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
6296#endif 6375#endif
6297 bnx2x_init_block(bp, DQ_BLOCK, init_stage); 6376 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
6298 6377
@@ -6350,18 +6429,8 @@ static int bnx2x_init_port(struct bnx2x *bp)
6350 msleep(5); 6429 msleep(5);
6351 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0); 6430 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6352 6431
6353#ifdef BCM_ISCSI 6432#ifdef BCM_CNIC
6354 /* tell the searcher where the T2 table is */ 6433 bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
6355 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6356
6357 wb_write[0] = U64_LO(bp->t2_mapping);
6358 wb_write[1] = U64_HI(bp->t2_mapping);
6359 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6360 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6361 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6362 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6363
6364 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6365#endif 6434#endif
6366 bnx2x_init_block(bp, CDU_BLOCK, init_stage); 6435 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6367 bnx2x_init_block(bp, CFC_BLOCK, init_stage); 6436 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
@@ -6470,7 +6539,12 @@ static int bnx2x_init_port(struct bnx2x *bp)
6470#define PXP_ONE_ILT(x) (((x) << 10) | x) 6539#define PXP_ONE_ILT(x) (((x) << 10) | x)
6471#define PXP_ILT_RANGE(f, l) (((l) << 10) | f) 6540#define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
6472 6541
6542#ifdef BCM_CNIC
6543#define CNIC_ILT_LINES 127
6544#define CNIC_CTX_PER_ILT 16
6545#else
6473#define CNIC_ILT_LINES 0 6546#define CNIC_ILT_LINES 0
6547#endif
6474 6548
6475static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr) 6549static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6476{ 6550{
@@ -6509,6 +6583,46 @@ static int bnx2x_init_func(struct bnx2x *bp)
6509 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4, 6583 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6510 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES)); 6584 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6511 6585
6586#ifdef BCM_CNIC
6587 i += 1 + CNIC_ILT_LINES;
6588 bnx2x_ilt_wr(bp, i, bp->timers_mapping);
6589 if (CHIP_IS_E1(bp))
6590 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
6591 else {
6592 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
6593 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
6594 }
6595
6596 i++;
6597 bnx2x_ilt_wr(bp, i, bp->qm_mapping);
6598 if (CHIP_IS_E1(bp))
6599 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
6600 else {
6601 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
6602 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
6603 }
6604
6605 i++;
6606 bnx2x_ilt_wr(bp, i, bp->t1_mapping);
6607 if (CHIP_IS_E1(bp))
6608 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
6609 else {
6610 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
6611 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
6612 }
6613
6614 /* tell the searcher where the T2 table is */
6615 REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
6616
6617 bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
6618 U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
6619
6620 bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
6621 U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
6622 U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
6623
6624 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
6625#endif
6512 6626
6513 if (CHIP_IS_E1H(bp)) { 6627 if (CHIP_IS_E1H(bp)) {
6514 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func); 6628 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
@@ -6593,6 +6707,9 @@ static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6593 bnx2x_zero_def_sb(bp); 6707 bnx2x_zero_def_sb(bp);
6594 for_each_queue(bp, i) 6708 for_each_queue(bp, i)
6595 bnx2x_zero_sb(bp, BP_L_ID(bp) + i); 6709 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6710#ifdef BCM_CNIC
6711 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6712#endif
6596 6713
6597init_hw_err: 6714init_hw_err:
6598 bnx2x_gunzip_end(bp); 6715 bnx2x_gunzip_end(bp);
@@ -6668,11 +6785,13 @@ static void bnx2x_free_mem(struct bnx2x *bp)
6668 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping, 6785 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6669 sizeof(struct bnx2x_slowpath)); 6786 sizeof(struct bnx2x_slowpath));
6670 6787
6671#ifdef BCM_ISCSI 6788#ifdef BCM_CNIC
6672 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024); 6789 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6673 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024); 6790 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6674 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024); 6791 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6675 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024); 6792 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6793 BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
6794 sizeof(struct host_status_block));
6676#endif 6795#endif
6677 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE); 6796 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6678 6797
@@ -6751,32 +6870,26 @@ static int bnx2x_alloc_mem(struct bnx2x *bp)
6751 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping, 6870 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6752 sizeof(struct bnx2x_slowpath)); 6871 sizeof(struct bnx2x_slowpath));
6753 6872
6754#ifdef BCM_ISCSI 6873#ifdef BCM_CNIC
6755 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024); 6874 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6756 6875
6757 /* Initialize T1 */
6758 for (i = 0; i < 64*1024; i += 64) {
6759 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6760 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6761 }
6762
6763 /* allocate searcher T2 table 6876 /* allocate searcher T2 table
6764 we allocate 1/4 of alloc num for T2 6877 we allocate 1/4 of alloc num for T2
6765 (which is not entered into the ILT) */ 6878 (which is not entered into the ILT) */
6766 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024); 6879 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6767 6880
6768 /* Initialize T2 */ 6881 /* Initialize T2 (for 1024 connections) */
6769 for (i = 0; i < 16*1024; i += 64) 6882 for (i = 0; i < 16*1024; i += 64)
6770 * (u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64; 6883 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6771
6772 /* now fixup the last line in the block to point to the next block */
6773 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
6774 6884
6775 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */ 6885 /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
6776 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024); 6886 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6777 6887
6778 /* QM queues (128*MAX_CONN) */ 6888 /* QM queues (128*MAX_CONN) */
6779 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024); 6889 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6890
6891 BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
6892 sizeof(struct host_status_block));
6780#endif 6893#endif
6781 6894
6782 /* Slow path ring */ 6895 /* Slow path ring */
@@ -6852,6 +6965,9 @@ static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6852 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n", 6965 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6853 bp->msix_table[0].vector); 6966 bp->msix_table[0].vector);
6854 6967
6968#ifdef BCM_CNIC
6969 offset++;
6970#endif
6855 for_each_queue(bp, i) { 6971 for_each_queue(bp, i) {
6856 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq " 6972 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6857 "state %x\n", i, bp->msix_table[i + offset].vector, 6973 "state %x\n", i, bp->msix_table[i + offset].vector,
@@ -6885,6 +7001,12 @@ static int bnx2x_enable_msix(struct bnx2x *bp)
6885 bp->msix_table[0].entry = igu_vec; 7001 bp->msix_table[0].entry = igu_vec;
6886 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec); 7002 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6887 7003
7004#ifdef BCM_CNIC
7005 igu_vec = BP_L_ID(bp) + offset;
7006 bp->msix_table[1].entry = igu_vec;
7007 DP(NETIF_MSG_IFUP, "msix_table[1].entry = %d (CNIC)\n", igu_vec);
7008 offset++;
7009#endif
6888 for_each_queue(bp, i) { 7010 for_each_queue(bp, i) {
6889 igu_vec = BP_L_ID(bp) + offset + i; 7011 igu_vec = BP_L_ID(bp) + offset + i;
6890 bp->msix_table[i + offset].entry = igu_vec; 7012 bp->msix_table[i + offset].entry = igu_vec;
@@ -6915,6 +7037,9 @@ static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6915 return -EBUSY; 7037 return -EBUSY;
6916 } 7038 }
6917 7039
7040#ifdef BCM_CNIC
7041 offset++;
7042#endif
6918 for_each_queue(bp, i) { 7043 for_each_queue(bp, i) {
6919 struct bnx2x_fastpath *fp = &bp->fp[i]; 7044 struct bnx2x_fastpath *fp = &bp->fp[i];
6920 7045
@@ -7022,7 +7147,19 @@ static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
7022 * Init service functions 7147 * Init service functions
7023 */ 7148 */
7024 7149
7025static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set) 7150/**
7151 * Sets a MAC in a CAM for a few L2 Clients for E1 chip
7152 *
7153 * @param bp driver descriptor
7154 * @param set set or clear an entry (1 or 0)
7155 * @param mac pointer to a buffer containing a MAC
7156 * @param cl_bit_vec bit vector of clients to register a MAC for
7157 * @param cam_offset offset in a CAM to use
7158 * @param with_bcast set broadcast MAC as well
7159 */
7160static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
7161 u32 cl_bit_vec, u8 cam_offset,
7162 u8 with_bcast)
7026{ 7163{
7027 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config); 7164 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
7028 int port = BP_PORT(bp); 7165 int port = BP_PORT(bp);
@@ -7031,25 +7168,25 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
7031 * unicasts 0-31:port0 32-63:port1 7168 * unicasts 0-31:port0 32-63:port1
7032 * multicast 64-127:port0 128-191:port1 7169 * multicast 64-127:port0 128-191:port1
7033 */ 7170 */
7034 config->hdr.length = 2; 7171 config->hdr.length = 1 + (with_bcast ? 1 : 0);
7035 config->hdr.offset = port ? 32 : 0; 7172 config->hdr.offset = cam_offset;
7036 config->hdr.client_id = bp->fp->cl_id; 7173 config->hdr.client_id = 0xff;
7037 config->hdr.reserved1 = 0; 7174 config->hdr.reserved1 = 0;
7038 7175
7039 /* primary MAC */ 7176 /* primary MAC */
7040 config->config_table[0].cam_entry.msb_mac_addr = 7177 config->config_table[0].cam_entry.msb_mac_addr =
7041 swab16(*(u16 *)&bp->dev->dev_addr[0]); 7178 swab16(*(u16 *)&mac[0]);
7042 config->config_table[0].cam_entry.middle_mac_addr = 7179 config->config_table[0].cam_entry.middle_mac_addr =
7043 swab16(*(u16 *)&bp->dev->dev_addr[2]); 7180 swab16(*(u16 *)&mac[2]);
7044 config->config_table[0].cam_entry.lsb_mac_addr = 7181 config->config_table[0].cam_entry.lsb_mac_addr =
7045 swab16(*(u16 *)&bp->dev->dev_addr[4]); 7182 swab16(*(u16 *)&mac[4]);
7046 config->config_table[0].cam_entry.flags = cpu_to_le16(port); 7183 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
7047 if (set) 7184 if (set)
7048 config->config_table[0].target_table_entry.flags = 0; 7185 config->config_table[0].target_table_entry.flags = 0;
7049 else 7186 else
7050 CAM_INVALIDATE(config->config_table[0]); 7187 CAM_INVALIDATE(config->config_table[0]);
7051 config->config_table[0].target_table_entry.clients_bit_vector = 7188 config->config_table[0].target_table_entry.clients_bit_vector =
7052 cpu_to_le32(1 << BP_L_ID(bp)); 7189 cpu_to_le32(cl_bit_vec);
7053 config->config_table[0].target_table_entry.vlan_id = 0; 7190 config->config_table[0].target_table_entry.vlan_id = 0;
7054 7191
7055 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n", 7192 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
@@ -7059,47 +7196,58 @@ static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
7059 config->config_table[0].cam_entry.lsb_mac_addr); 7196 config->config_table[0].cam_entry.lsb_mac_addr);
7060 7197
7061 /* broadcast */ 7198 /* broadcast */
7062 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff); 7199 if (with_bcast) {
7063 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff); 7200 config->config_table[1].cam_entry.msb_mac_addr =
7064 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff); 7201 cpu_to_le16(0xffff);
7065 config->config_table[1].cam_entry.flags = cpu_to_le16(port); 7202 config->config_table[1].cam_entry.middle_mac_addr =
7066 if (set) 7203 cpu_to_le16(0xffff);
7067 config->config_table[1].target_table_entry.flags = 7204 config->config_table[1].cam_entry.lsb_mac_addr =
7068 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST; 7205 cpu_to_le16(0xffff);
7069 else 7206 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
7070 CAM_INVALIDATE(config->config_table[1]); 7207 if (set)
7071 config->config_table[1].target_table_entry.clients_bit_vector = 7208 config->config_table[1].target_table_entry.flags =
7072 cpu_to_le32(1 << BP_L_ID(bp)); 7209 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
7073 config->config_table[1].target_table_entry.vlan_id = 0; 7210 else
7211 CAM_INVALIDATE(config->config_table[1]);
7212 config->config_table[1].target_table_entry.clients_bit_vector =
7213 cpu_to_le32(cl_bit_vec);
7214 config->config_table[1].target_table_entry.vlan_id = 0;
7215 }
7074 7216
7075 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 7217 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7076 U64_HI(bnx2x_sp_mapping(bp, mac_config)), 7218 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
7077 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); 7219 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
7078} 7220}
7079 7221
7080static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set) 7222/**
7223 * Sets a MAC in a CAM for a few L2 Clients for E1H chip
7224 *
7225 * @param bp driver descriptor
7226 * @param set set or clear an entry (1 or 0)
7227 * @param mac pointer to a buffer containing a MAC
7228 * @param cl_bit_vec bit vector of clients to register a MAC for
7229 * @param cam_offset offset in a CAM to use
7230 */
7231static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
7232 u32 cl_bit_vec, u8 cam_offset)
7081{ 7233{
7082 struct mac_configuration_cmd_e1h *config = 7234 struct mac_configuration_cmd_e1h *config =
7083 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config); 7235 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
7084 7236
7085 /* CAM allocation for E1H
7086 * unicasts: by func number
7087 * multicast: 20+FUNC*20, 20 each
7088 */
7089 config->hdr.length = 1; 7237 config->hdr.length = 1;
7090 config->hdr.offset = BP_FUNC(bp); 7238 config->hdr.offset = cam_offset;
7091 config->hdr.client_id = bp->fp->cl_id; 7239 config->hdr.client_id = 0xff;
7092 config->hdr.reserved1 = 0; 7240 config->hdr.reserved1 = 0;
7093 7241
7094 /* primary MAC */ 7242 /* primary MAC */
7095 config->config_table[0].msb_mac_addr = 7243 config->config_table[0].msb_mac_addr =
7096 swab16(*(u16 *)&bp->dev->dev_addr[0]); 7244 swab16(*(u16 *)&mac[0]);
7097 config->config_table[0].middle_mac_addr = 7245 config->config_table[0].middle_mac_addr =
7098 swab16(*(u16 *)&bp->dev->dev_addr[2]); 7246 swab16(*(u16 *)&mac[2]);
7099 config->config_table[0].lsb_mac_addr = 7247 config->config_table[0].lsb_mac_addr =
7100 swab16(*(u16 *)&bp->dev->dev_addr[4]); 7248 swab16(*(u16 *)&mac[4]);
7101 config->config_table[0].clients_bit_vector = 7249 config->config_table[0].clients_bit_vector =
7102 cpu_to_le32(1 << BP_L_ID(bp)); 7250 cpu_to_le32(cl_bit_vec);
7103 config->config_table[0].vlan_id = 0; 7251 config->config_table[0].vlan_id = 0;
7104 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov); 7252 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
7105 if (set) 7253 if (set)
@@ -7108,11 +7256,11 @@ static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
7108 config->config_table[0].flags = 7256 config->config_table[0].flags =
7109 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE; 7257 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
7110 7258
7111 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n", 7259 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID mask %d\n",
7112 (set ? "setting" : "clearing"), 7260 (set ? "setting" : "clearing"),
7113 config->config_table[0].msb_mac_addr, 7261 config->config_table[0].msb_mac_addr,
7114 config->config_table[0].middle_mac_addr, 7262 config->config_table[0].middle_mac_addr,
7115 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp)); 7263 config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
7116 7264
7117 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 7265 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7118 U64_HI(bnx2x_sp_mapping(bp, mac_config)), 7266 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
@@ -7164,6 +7312,69 @@ static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
7164 return -EBUSY; 7312 return -EBUSY;
7165} 7313}
7166 7314
7315static void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
7316{
7317 bp->set_mac_pending++;
7318 smp_wmb();
7319
7320 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
7321 (1 << bp->fp->cl_id), BP_FUNC(bp));
7322
7323 /* Wait for a completion */
7324 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7325}
7326
7327static void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
7328{
7329 bp->set_mac_pending++;
7330 smp_wmb();
7331
7332 bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
7333 (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
7334 1);
7335
7336 /* Wait for a completion */
7337 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7338}
7339
7340#ifdef BCM_CNIC
7341/**
7342 * Set iSCSI MAC(s) at the next enties in the CAM after the ETH
7343 * MAC(s). This function will wait until the ramdord completion
7344 * returns.
7345 *
7346 * @param bp driver handle
7347 * @param set set or clear the CAM entry
7348 *
7349 * @return 0 if cussess, -ENODEV if ramrod doesn't return.
7350 */
7351static int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
7352{
7353 u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
7354
7355 bp->set_mac_pending++;
7356 smp_wmb();
7357
7358 /* Send a SET_MAC ramrod */
7359 if (CHIP_IS_E1(bp))
7360 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
7361 cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
7362 1);
7363 else
7364 /* CAM allocation for E1H
7365 * unicasts: by func number
7366 * multicast: 20+FUNC*20, 20 each
7367 */
7368 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
7369 cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
7370
7371 /* Wait for a completion when setting */
7372 bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
7373
7374 return 0;
7375}
7376#endif
7377
7167static int bnx2x_setup_leading(struct bnx2x *bp) 7378static int bnx2x_setup_leading(struct bnx2x *bp)
7168{ 7379{
7169 int rc; 7380 int rc;
@@ -7275,11 +7486,6 @@ static int bnx2x_set_int_mode(struct bnx2x *bp)
7275 rc = bnx2x_enable_msix(bp); 7486 rc = bnx2x_enable_msix(bp);
7276 if (rc) { 7487 if (rc) {
7277 /* failed to enable MSI-X */ 7488 /* failed to enable MSI-X */
7278 if (bp->multi_mode)
7279 BNX2X_ERR("Multi requested but failed to "
7280 "enable MSI-X (rx %d tx %d), "
7281 "set number of queues to 1\n",
7282 bp->num_rx_queues, bp->num_tx_queues);
7283 bp->num_rx_queues = 1; 7489 bp->num_rx_queues = 1;
7284 bp->num_tx_queues = 1; 7490 bp->num_tx_queues = 1;
7285 } 7491 }
@@ -7289,6 +7495,10 @@ static int bnx2x_set_int_mode(struct bnx2x *bp)
7289 return rc; 7495 return rc;
7290} 7496}
7291 7497
7498#ifdef BCM_CNIC
7499static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd);
7500static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp);
7501#endif
7292 7502
7293/* must be called with rtnl_lock */ 7503/* must be called with rtnl_lock */
7294static int bnx2x_nic_load(struct bnx2x *bp, int load_mode) 7504static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
@@ -7427,20 +7637,37 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7427 if (CHIP_IS_E1H(bp)) 7637 if (CHIP_IS_E1H(bp))
7428 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) { 7638 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7429 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n"); 7639 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7430 bp->state = BNX2X_STATE_DISABLED; 7640 bp->flags |= MF_FUNC_DIS;
7431 } 7641 }
7432 7642
7433 if (bp->state == BNX2X_STATE_OPEN) { 7643 if (bp->state == BNX2X_STATE_OPEN) {
7644#ifdef BCM_CNIC
7645 /* Enable Timer scan */
7646 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 1);
7647#endif
7434 for_each_nondefault_queue(bp, i) { 7648 for_each_nondefault_queue(bp, i) {
7435 rc = bnx2x_setup_multi(bp, i); 7649 rc = bnx2x_setup_multi(bp, i);
7436 if (rc) 7650 if (rc)
7651#ifdef BCM_CNIC
7652 goto load_error4;
7653#else
7437 goto load_error3; 7654 goto load_error3;
7655#endif
7438 } 7656 }
7439 7657
7440 if (CHIP_IS_E1(bp)) 7658 if (CHIP_IS_E1(bp))
7441 bnx2x_set_mac_addr_e1(bp, 1); 7659 bnx2x_set_eth_mac_addr_e1(bp, 1);
7442 else 7660 else
7443 bnx2x_set_mac_addr_e1h(bp, 1); 7661 bnx2x_set_eth_mac_addr_e1h(bp, 1);
7662#ifdef BCM_CNIC
7663 /* Set iSCSI L2 MAC */
7664 mutex_lock(&bp->cnic_mutex);
7665 if (bp->cnic_eth_dev.drv_state & CNIC_DRV_STATE_REGD) {
7666 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7667 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7668 }
7669 mutex_unlock(&bp->cnic_mutex);
7670#endif
7444 } 7671 }
7445 7672
7446 if (bp->port.pmf) 7673 if (bp->port.pmf)
@@ -7481,9 +7708,19 @@ static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
7481 /* start the timer */ 7708 /* start the timer */
7482 mod_timer(&bp->timer, jiffies + bp->current_interval); 7709 mod_timer(&bp->timer, jiffies + bp->current_interval);
7483 7710
7711#ifdef BCM_CNIC
7712 bnx2x_setup_cnic_irq_info(bp);
7713 if (bp->state == BNX2X_STATE_OPEN)
7714 bnx2x_cnic_notify(bp, CNIC_CTL_START_CMD);
7715#endif
7484 7716
7485 return 0; 7717 return 0;
7486 7718
7719#ifdef BCM_CNIC
7720load_error4:
7721 /* Disable Timer scan */
7722 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + BP_PORT(bp)*4, 0);
7723#endif
7487load_error3: 7724load_error3:
7488 bnx2x_int_disable_sync(bp, 1); 7725 bnx2x_int_disable_sync(bp, 1);
7489 if (!BP_NOMCP(bp)) { 7726 if (!BP_NOMCP(bp)) {
@@ -7591,6 +7828,19 @@ static void bnx2x_reset_func(struct bnx2x *bp)
7591 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0); 7828 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7592 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0); 7829 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7593 7830
7831#ifdef BCM_CNIC
7832 /* Disable Timer scan */
7833 REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
7834 /*
7835 * Wait for at least 10ms and up to 2 second for the timers scan to
7836 * complete
7837 */
7838 for (i = 0; i < 200; i++) {
7839 msleep(10);
7840 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
7841 break;
7842 }
7843#endif
7594 /* Clear ILT */ 7844 /* Clear ILT */
7595 base = FUNC_ILT_BASE(func); 7845 base = FUNC_ILT_BASE(func);
7596 for (i = base; i < base + ILT_PER_FUNC; i++) 7846 for (i = base; i < base + ILT_PER_FUNC; i++)
@@ -7657,6 +7907,9 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7657 u32 reset_code = 0; 7907 u32 reset_code = 0;
7658 int i, cnt, rc; 7908 int i, cnt, rc;
7659 7909
7910#ifdef BCM_CNIC
7911 bnx2x_cnic_notify(bp, CNIC_CTL_STOP_CMD);
7912#endif
7660 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT; 7913 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7661 7914
7662 /* Set "drop all" */ 7915 /* Set "drop all" */
@@ -7703,7 +7956,7 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7703 struct mac_configuration_cmd *config = 7956 struct mac_configuration_cmd *config =
7704 bnx2x_sp(bp, mcast_config); 7957 bnx2x_sp(bp, mcast_config);
7705 7958
7706 bnx2x_set_mac_addr_e1(bp, 0); 7959 bnx2x_set_eth_mac_addr_e1(bp, 0);
7707 7960
7708 for (i = 0; i < config->hdr.length; i++) 7961 for (i = 0; i < config->hdr.length; i++)
7709 CAM_INVALIDATE(config->config_table[i]); 7962 CAM_INVALIDATE(config->config_table[i]);
@@ -7716,6 +7969,9 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7716 config->hdr.client_id = bp->fp->cl_id; 7969 config->hdr.client_id = bp->fp->cl_id;
7717 config->hdr.reserved1 = 0; 7970 config->hdr.reserved1 = 0;
7718 7971
7972 bp->set_mac_pending++;
7973 smp_wmb();
7974
7719 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 7975 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7720 U64_HI(bnx2x_sp_mapping(bp, mcast_config)), 7976 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7721 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0); 7977 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
@@ -7723,13 +7979,22 @@ static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7723 } else { /* E1H */ 7979 } else { /* E1H */
7724 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0); 7980 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7725 7981
7726 bnx2x_set_mac_addr_e1h(bp, 0); 7982 bnx2x_set_eth_mac_addr_e1h(bp, 0);
7727 7983
7728 for (i = 0; i < MC_HASH_SIZE; i++) 7984 for (i = 0; i < MC_HASH_SIZE; i++)
7729 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0); 7985 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7730 7986
7731 REG_WR(bp, MISC_REG_E1HMF_MODE, 0); 7987 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7732 } 7988 }
7989#ifdef BCM_CNIC
7990 /* Clear iSCSI L2 MAC */
7991 mutex_lock(&bp->cnic_mutex);
7992 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7993 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7994 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7995 }
7996 mutex_unlock(&bp->cnic_mutex);
7997#endif
7733 7998
7734 if (unload_mode == UNLOAD_NORMAL) 7999 if (unload_mode == UNLOAD_NORMAL)
7735 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS; 8000 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
@@ -8506,6 +8771,14 @@ static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
8506 bp->link_params.req_flow_ctrl, bp->port.advertising); 8771 bp->link_params.req_flow_ctrl, bp->port.advertising);
8507} 8772}
8508 8773
8774static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
8775{
8776 mac_hi = cpu_to_be16(mac_hi);
8777 mac_lo = cpu_to_be32(mac_lo);
8778 memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
8779 memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
8780}
8781
8509static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp) 8782static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8510{ 8783{
8511 int port = BP_PORT(bp); 8784 int port = BP_PORT(bp);
@@ -8587,14 +8860,15 @@ static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8587 8860
8588 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper); 8861 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8589 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower); 8862 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8590 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff); 8863 bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
8591 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8592 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8593 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8594 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8595 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8596 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN); 8864 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8597 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN); 8865 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8866
8867#ifdef BCM_CNIC
8868 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
8869 val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
8870 bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
8871#endif
8598} 8872}
8599 8873
8600static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp) 8874static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
@@ -8690,6 +8964,10 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8690 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */ 8964 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8691 8965
8692 mutex_init(&bp->port.phy_mutex); 8966 mutex_init(&bp->port.phy_mutex);
8967 mutex_init(&bp->fw_mb_mutex);
8968#ifdef BCM_CNIC
8969 mutex_init(&bp->cnic_mutex);
8970#endif
8693 8971
8694 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task); 8972 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8695 INIT_WORK(&bp->reset_task, bnx2x_reset_task); 8973 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
@@ -8738,8 +9016,9 @@ static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8738 9016
8739 bp->rx_csum = 1; 9017 bp->rx_csum = 1;
8740 9018
8741 bp->tx_ticks = 50; 9019 /* make sure that the numbers are in the right granularity */
8742 bp->rx_ticks = 25; 9020 bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
9021 bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
8743 9022
8744 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ); 9023 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8745 bp->current_interval = (poll ? poll : timer_interval); 9024 bp->current_interval = (poll ? poll : timer_interval);
@@ -8765,20 +9044,23 @@ static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8765 cmd->supported = bp->port.supported; 9044 cmd->supported = bp->port.supported;
8766 cmd->advertising = bp->port.advertising; 9045 cmd->advertising = bp->port.advertising;
8767 9046
8768 if (netif_carrier_ok(dev)) { 9047 if ((bp->state == BNX2X_STATE_OPEN) &&
9048 !(bp->flags & MF_FUNC_DIS) &&
9049 (bp->link_vars.link_up)) {
8769 cmd->speed = bp->link_vars.line_speed; 9050 cmd->speed = bp->link_vars.line_speed;
8770 cmd->duplex = bp->link_vars.duplex; 9051 cmd->duplex = bp->link_vars.duplex;
8771 } else { 9052 if (IS_E1HMF(bp)) {
8772 cmd->speed = bp->link_params.req_line_speed; 9053 u16 vn_max_rate;
8773 cmd->duplex = bp->link_params.req_duplex;
8774 }
8775 if (IS_E1HMF(bp)) {
8776 u16 vn_max_rate;
8777 9054
8778 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >> 9055 vn_max_rate =
9056 ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8779 FUNC_MF_CFG_MAX_BW_SHIFT) * 100; 9057 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8780 if (vn_max_rate < cmd->speed) 9058 if (vn_max_rate < cmd->speed)
8781 cmd->speed = vn_max_rate; 9059 cmd->speed = vn_max_rate;
9060 }
9061 } else {
9062 cmd->speed = -1;
9063 cmd->duplex = -1;
8782 } 9064 }
8783 9065
8784 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) { 9066 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
@@ -9163,6 +9445,9 @@ static u32 bnx2x_get_link(struct net_device *dev)
9163{ 9445{
9164 struct bnx2x *bp = netdev_priv(dev); 9446 struct bnx2x *bp = netdev_priv(dev);
9165 9447
9448 if (bp->flags & MF_FUNC_DIS)
9449 return 0;
9450
9166 return bp->link_vars.link_up; 9451 return bp->link_vars.link_up;
9167} 9452}
9168 9453
@@ -9567,8 +9852,7 @@ static int bnx2x_set_eeprom(struct net_device *dev,
9567 9852
9568 } else if (eeprom->magic == 0x50485952) { 9853 } else if (eeprom->magic == 0x50485952) {
9569 /* 'PHYR' (0x50485952): re-init link after FW upgrade */ 9854 /* 'PHYR' (0x50485952): re-init link after FW upgrade */
9570 if ((bp->state == BNX2X_STATE_OPEN) || 9855 if (bp->state == BNX2X_STATE_OPEN) {
9571 (bp->state == BNX2X_STATE_DISABLED)) {
9572 bnx2x_acquire_phy_lock(bp); 9856 bnx2x_acquire_phy_lock(bp);
9573 rc |= bnx2x_link_reset(&bp->link_params, 9857 rc |= bnx2x_link_reset(&bp->link_params,
9574 &bp->link_vars, 1); 9858 &bp->link_vars, 1);
@@ -9818,11 +10102,6 @@ static const struct {
9818 { "idle check (online)" } 10102 { "idle check (online)" }
9819}; 10103};
9820 10104
9821static int bnx2x_self_test_count(struct net_device *dev)
9822{
9823 return BNX2X_NUM_TESTS;
9824}
9825
9826static int bnx2x_test_registers(struct bnx2x *bp) 10105static int bnx2x_test_registers(struct bnx2x *bp)
9827{ 10106{
9828 int idx, i, rc = -ENODEV; 10107 int idx, i, rc = -ENODEV;
@@ -10223,14 +10502,16 @@ static int bnx2x_test_intr(struct bnx2x *bp)
10223 config->hdr.client_id = bp->fp->cl_id; 10502 config->hdr.client_id = bp->fp->cl_id;
10224 config->hdr.reserved1 = 0; 10503 config->hdr.reserved1 = 0;
10225 10504
10505 bp->set_mac_pending++;
10506 smp_wmb();
10226 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 10507 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
10227 U64_HI(bnx2x_sp_mapping(bp, mac_config)), 10508 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
10228 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0); 10509 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
10229 if (rc == 0) { 10510 if (rc == 0) {
10230 bp->set_mac_pending++;
10231 for (i = 0; i < 10; i++) { 10511 for (i = 0; i < 10; i++) {
10232 if (!bp->set_mac_pending) 10512 if (!bp->set_mac_pending)
10233 break; 10513 break;
10514 smp_rmb();
10234 msleep_interruptible(10); 10515 msleep_interruptible(10);
10235 } 10516 }
10236 if (i == 10) 10517 if (i == 10)
@@ -10264,7 +10545,7 @@ static void bnx2x_self_test(struct net_device *dev,
10264 /* disable input for TX port IF */ 10545 /* disable input for TX port IF */
10265 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0); 10546 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
10266 10547
10267 link_up = bp->link_vars.link_up; 10548 link_up = (bnx2x_link_test(bp) == 0);
10268 bnx2x_nic_unload(bp, UNLOAD_NORMAL); 10549 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
10269 bnx2x_nic_load(bp, LOAD_DIAG); 10550 bnx2x_nic_load(bp, LOAD_DIAG);
10270 /* wait until link state is restored */ 10551 /* wait until link state is restored */
@@ -10436,6 +10717,36 @@ static const struct {
10436#define IS_E1HMF_MODE_STAT(bp) \ 10717#define IS_E1HMF_MODE_STAT(bp) \
10437 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS)) 10718 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10438 10719
10720static int bnx2x_get_sset_count(struct net_device *dev, int stringset)
10721{
10722 struct bnx2x *bp = netdev_priv(dev);
10723 int i, num_stats;
10724
10725 switch(stringset) {
10726 case ETH_SS_STATS:
10727 if (is_multi(bp)) {
10728 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10729 if (!IS_E1HMF_MODE_STAT(bp))
10730 num_stats += BNX2X_NUM_STATS;
10731 } else {
10732 if (IS_E1HMF_MODE_STAT(bp)) {
10733 num_stats = 0;
10734 for (i = 0; i < BNX2X_NUM_STATS; i++)
10735 if (IS_FUNC_STAT(i))
10736 num_stats++;
10737 } else
10738 num_stats = BNX2X_NUM_STATS;
10739 }
10740 return num_stats;
10741
10742 case ETH_SS_TEST:
10743 return BNX2X_NUM_TESTS;
10744
10745 default:
10746 return -EINVAL;
10747 }
10748}
10749
10439static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 10750static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10440{ 10751{
10441 struct bnx2x *bp = netdev_priv(dev); 10752 struct bnx2x *bp = netdev_priv(dev);
@@ -10473,28 +10784,6 @@ static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10473 } 10784 }
10474} 10785}
10475 10786
10476static int bnx2x_get_stats_count(struct net_device *dev)
10477{
10478 struct bnx2x *bp = netdev_priv(dev);
10479 int i, num_stats;
10480
10481 if (is_multi(bp)) {
10482 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10483 if (!IS_E1HMF_MODE_STAT(bp))
10484 num_stats += BNX2X_NUM_STATS;
10485 } else {
10486 if (IS_E1HMF_MODE_STAT(bp)) {
10487 num_stats = 0;
10488 for (i = 0; i < BNX2X_NUM_STATS; i++)
10489 if (IS_FUNC_STAT(i))
10490 num_stats++;
10491 } else
10492 num_stats = BNX2X_NUM_STATS;
10493 }
10494
10495 return num_stats;
10496}
10497
10498static void bnx2x_get_ethtool_stats(struct net_device *dev, 10787static void bnx2x_get_ethtool_stats(struct net_device *dev,
10499 struct ethtool_stats *stats, u64 *buf) 10788 struct ethtool_stats *stats, u64 *buf)
10500{ 10789{
@@ -10570,7 +10859,6 @@ static void bnx2x_get_ethtool_stats(struct net_device *dev,
10570static int bnx2x_phys_id(struct net_device *dev, u32 data) 10859static int bnx2x_phys_id(struct net_device *dev, u32 data)
10571{ 10860{
10572 struct bnx2x *bp = netdev_priv(dev); 10861 struct bnx2x *bp = netdev_priv(dev);
10573 int port = BP_PORT(bp);
10574 int i; 10862 int i;
10575 10863
10576 if (!netif_running(dev)) 10864 if (!netif_running(dev))
@@ -10584,13 +10872,10 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
10584 10872
10585 for (i = 0; i < (data * 2); i++) { 10873 for (i = 0; i < (data * 2); i++) {
10586 if ((i % 2) == 0) 10874 if ((i % 2) == 0)
10587 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000, 10875 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10588 bp->link_params.hw_led_mode, 10876 SPEED_1000);
10589 bp->link_params.chip_id);
10590 else 10877 else
10591 bnx2x_set_led(bp, port, LED_MODE_OFF, 0, 10878 bnx2x_set_led(&bp->link_params, LED_MODE_OFF, 0);
10592 bp->link_params.hw_led_mode,
10593 bp->link_params.chip_id);
10594 10879
10595 msleep_interruptible(500); 10880 msleep_interruptible(500);
10596 if (signal_pending(current)) 10881 if (signal_pending(current))
@@ -10598,10 +10883,8 @@ static int bnx2x_phys_id(struct net_device *dev, u32 data)
10598 } 10883 }
10599 10884
10600 if (bp->link_vars.link_up) 10885 if (bp->link_vars.link_up)
10601 bnx2x_set_led(bp, port, LED_MODE_OPER, 10886 bnx2x_set_led(&bp->link_params, LED_MODE_OPER,
10602 bp->link_vars.line_speed, 10887 bp->link_vars.line_speed);
10603 bp->link_params.hw_led_mode,
10604 bp->link_params.chip_id);
10605 10888
10606 return 0; 10889 return 0;
10607} 10890}
@@ -10637,11 +10920,10 @@ static const struct ethtool_ops bnx2x_ethtool_ops = {
10637 .set_sg = ethtool_op_set_sg, 10920 .set_sg = ethtool_op_set_sg,
10638 .get_tso = ethtool_op_get_tso, 10921 .get_tso = ethtool_op_get_tso,
10639 .set_tso = bnx2x_set_tso, 10922 .set_tso = bnx2x_set_tso,
10640 .self_test_count = bnx2x_self_test_count,
10641 .self_test = bnx2x_self_test, 10923 .self_test = bnx2x_self_test,
10924 .get_sset_count = bnx2x_get_sset_count,
10642 .get_strings = bnx2x_get_strings, 10925 .get_strings = bnx2x_get_strings,
10643 .phys_id = bnx2x_phys_id, 10926 .phys_id = bnx2x_phys_id,
10644 .get_stats_count = bnx2x_get_stats_count,
10645 .get_ethtool_stats = bnx2x_get_ethtool_stats, 10927 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10646}; 10928};
10647 10929
@@ -10843,10 +11125,10 @@ static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10843 } 11125 }
10844 11126
10845 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4) 11127 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10846 rc |= XMIT_GSO_V4; 11128 rc |= (XMIT_GSO_V4 | XMIT_CSUM_V4 | XMIT_CSUM_TCP);
10847 11129
10848 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) 11130 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10849 rc |= XMIT_GSO_V6; 11131 rc |= (XMIT_GSO_V6 | XMIT_CSUM_TCP | XMIT_CSUM_V6);
10850 11132
10851 return rc; 11133 return rc;
10852} 11134}
@@ -11321,6 +11603,9 @@ static void bnx2x_set_rx_mode(struct net_device *dev)
11321 config->hdr.client_id = bp->fp->cl_id; 11603 config->hdr.client_id = bp->fp->cl_id;
11322 config->hdr.reserved1 = 0; 11604 config->hdr.reserved1 = 0;
11323 11605
11606 bp->set_mac_pending++;
11607 smp_wmb();
11608
11324 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0, 11609 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
11325 U64_HI(bnx2x_sp_mapping(bp, mcast_config)), 11610 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
11326 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 11611 U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
@@ -11370,9 +11655,9 @@ static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
11370 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 11655 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
11371 if (netif_running(dev)) { 11656 if (netif_running(dev)) {
11372 if (CHIP_IS_E1(bp)) 11657 if (CHIP_IS_E1(bp))
11373 bnx2x_set_mac_addr_e1(bp, 1); 11658 bnx2x_set_eth_mac_addr_e1(bp, 1);
11374 else 11659 else
11375 bnx2x_set_mac_addr_e1h(bp, 1); 11660 bnx2x_set_eth_mac_addr_e1h(bp, 1);
11376 } 11661 }
11377 11662
11378 return 0; 11663 return 0;
@@ -11830,21 +12115,14 @@ static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
11830 12115
11831static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev) 12116static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
11832{ 12117{
11833 char fw_file_name[40] = {0}; 12118 const char *fw_file_name;
11834 struct bnx2x_fw_file_hdr *fw_hdr; 12119 struct bnx2x_fw_file_hdr *fw_hdr;
11835 int rc, offset; 12120 int rc;
11836 12121
11837 /* Create a FW file name */
11838 if (CHIP_IS_E1(bp)) 12122 if (CHIP_IS_E1(bp))
11839 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1); 12123 fw_file_name = FW_FILE_NAME_E1;
11840 else 12124 else
11841 offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H); 12125 fw_file_name = FW_FILE_NAME_E1H;
11842
11843 sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
11844 BCM_5710_FW_MAJOR_VERSION,
11845 BCM_5710_FW_MINOR_VERSION,
11846 BCM_5710_FW_REVISION_VERSION,
11847 BCM_5710_FW_ENGINEERING_VERSION);
11848 12126
11849 printk(KERN_INFO PFX "Loading %s\n", fw_file_name); 12127 printk(KERN_INFO PFX "Loading %s\n", fw_file_name);
11850 12128
@@ -12276,4 +12554,287 @@ static void __exit bnx2x_cleanup(void)
12276module_init(bnx2x_init); 12554module_init(bnx2x_init);
12277module_exit(bnx2x_cleanup); 12555module_exit(bnx2x_cleanup);
12278 12556
12557#ifdef BCM_CNIC
12558
12559/* count denotes the number of new completions we have seen */
12560static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
12561{
12562 struct eth_spe *spe;
12563
12564#ifdef BNX2X_STOP_ON_ERROR
12565 if (unlikely(bp->panic))
12566 return;
12567#endif
12568
12569 spin_lock_bh(&bp->spq_lock);
12570 bp->cnic_spq_pending -= count;
12571
12572 for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
12573 bp->cnic_spq_pending++) {
12574
12575 if (!bp->cnic_kwq_pending)
12576 break;
12577
12578 spe = bnx2x_sp_get_next(bp);
12579 *spe = *bp->cnic_kwq_cons;
12580
12581 bp->cnic_kwq_pending--;
12582
12583 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
12584 bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
12585
12586 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
12587 bp->cnic_kwq_cons = bp->cnic_kwq;
12588 else
12589 bp->cnic_kwq_cons++;
12590 }
12591 bnx2x_sp_prod_update(bp);
12592 spin_unlock_bh(&bp->spq_lock);
12593}
12594
12595static int bnx2x_cnic_sp_queue(struct net_device *dev,
12596 struct kwqe_16 *kwqes[], u32 count)
12597{
12598 struct bnx2x *bp = netdev_priv(dev);
12599 int i;
12600
12601#ifdef BNX2X_STOP_ON_ERROR
12602 if (unlikely(bp->panic))
12603 return -EIO;
12604#endif
12605
12606 spin_lock_bh(&bp->spq_lock);
12607
12608 for (i = 0; i < count; i++) {
12609 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
12610
12611 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
12612 break;
12613
12614 *bp->cnic_kwq_prod = *spe;
12615
12616 bp->cnic_kwq_pending++;
12617
12618 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
12619 spe->hdr.conn_and_cmd_data, spe->hdr.type,
12620 spe->data.mac_config_addr.hi,
12621 spe->data.mac_config_addr.lo,
12622 bp->cnic_kwq_pending);
12623
12624 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
12625 bp->cnic_kwq_prod = bp->cnic_kwq;
12626 else
12627 bp->cnic_kwq_prod++;
12628 }
12629
12630 spin_unlock_bh(&bp->spq_lock);
12631
12632 if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
12633 bnx2x_cnic_sp_post(bp, 0);
12634
12635 return i;
12636}
12637
12638static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12639{
12640 struct cnic_ops *c_ops;
12641 int rc = 0;
12642
12643 mutex_lock(&bp->cnic_mutex);
12644 c_ops = bp->cnic_ops;
12645 if (c_ops)
12646 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12647 mutex_unlock(&bp->cnic_mutex);
12648
12649 return rc;
12650}
12651
12652static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
12653{
12654 struct cnic_ops *c_ops;
12655 int rc = 0;
12656
12657 rcu_read_lock();
12658 c_ops = rcu_dereference(bp->cnic_ops);
12659 if (c_ops)
12660 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
12661 rcu_read_unlock();
12662
12663 return rc;
12664}
12665
12666/*
12667 * for commands that have no data
12668 */
12669static int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
12670{
12671 struct cnic_ctl_info ctl = {0};
12672
12673 ctl.cmd = cmd;
12674
12675 return bnx2x_cnic_ctl_send(bp, &ctl);
12676}
12677
12678static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
12679{
12680 struct cnic_ctl_info ctl;
12681
12682 /* first we tell CNIC and only then we count this as a completion */
12683 ctl.cmd = CNIC_CTL_COMPLETION_CMD;
12684 ctl.data.comp.cid = cid;
12685
12686 bnx2x_cnic_ctl_send_bh(bp, &ctl);
12687 bnx2x_cnic_sp_post(bp, 1);
12688}
12689
12690static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
12691{
12692 struct bnx2x *bp = netdev_priv(dev);
12693 int rc = 0;
12694
12695 switch (ctl->cmd) {
12696 case DRV_CTL_CTXTBL_WR_CMD: {
12697 u32 index = ctl->data.io.offset;
12698 dma_addr_t addr = ctl->data.io.dma_addr;
12699
12700 bnx2x_ilt_wr(bp, index, addr);
12701 break;
12702 }
12703
12704 case DRV_CTL_COMPLETION_CMD: {
12705 int count = ctl->data.comp.comp_count;
12706
12707 bnx2x_cnic_sp_post(bp, count);
12708 break;
12709 }
12710
12711 /* rtnl_lock is held. */
12712 case DRV_CTL_START_L2_CMD: {
12713 u32 cli = ctl->data.ring.client_id;
12714
12715 bp->rx_mode_cl_mask |= (1 << cli);
12716 bnx2x_set_storm_rx_mode(bp);
12717 break;
12718 }
12719
12720 /* rtnl_lock is held. */
12721 case DRV_CTL_STOP_L2_CMD: {
12722 u32 cli = ctl->data.ring.client_id;
12723
12724 bp->rx_mode_cl_mask &= ~(1 << cli);
12725 bnx2x_set_storm_rx_mode(bp);
12726 break;
12727 }
12728
12729 default:
12730 BNX2X_ERR("unknown command %x\n", ctl->cmd);
12731 rc = -EINVAL;
12732 }
12733
12734 return rc;
12735}
12736
12737static void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
12738{
12739 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12740
12741 if (bp->flags & USING_MSIX_FLAG) {
12742 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
12743 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
12744 cp->irq_arr[0].vector = bp->msix_table[1].vector;
12745 } else {
12746 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
12747 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
12748 }
12749 cp->irq_arr[0].status_blk = bp->cnic_sb;
12750 cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
12751 cp->irq_arr[1].status_blk = bp->def_status_blk;
12752 cp->irq_arr[1].status_blk_num = DEF_SB_ID;
12753
12754 cp->num_irq = 2;
12755}
12756
12757static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
12758 void *data)
12759{
12760 struct bnx2x *bp = netdev_priv(dev);
12761 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12762
12763 if (ops == NULL)
12764 return -EINVAL;
12765
12766 if (atomic_read(&bp->intr_sem) != 0)
12767 return -EBUSY;
12768
12769 bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
12770 if (!bp->cnic_kwq)
12771 return -ENOMEM;
12772
12773 bp->cnic_kwq_cons = bp->cnic_kwq;
12774 bp->cnic_kwq_prod = bp->cnic_kwq;
12775 bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
12776
12777 bp->cnic_spq_pending = 0;
12778 bp->cnic_kwq_pending = 0;
12779
12780 bp->cnic_data = data;
12781
12782 cp->num_irq = 0;
12783 cp->drv_state = CNIC_DRV_STATE_REGD;
12784
12785 bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
12786
12787 bnx2x_setup_cnic_irq_info(bp);
12788 bnx2x_set_iscsi_eth_mac_addr(bp, 1);
12789 bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
12790 rcu_assign_pointer(bp->cnic_ops, ops);
12791
12792 return 0;
12793}
12794
12795static int bnx2x_unregister_cnic(struct net_device *dev)
12796{
12797 struct bnx2x *bp = netdev_priv(dev);
12798 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12799
12800 mutex_lock(&bp->cnic_mutex);
12801 if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
12802 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
12803 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
12804 }
12805 cp->drv_state = 0;
12806 rcu_assign_pointer(bp->cnic_ops, NULL);
12807 mutex_unlock(&bp->cnic_mutex);
12808 synchronize_rcu();
12809 kfree(bp->cnic_kwq);
12810 bp->cnic_kwq = NULL;
12811
12812 return 0;
12813}
12814
12815struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
12816{
12817 struct bnx2x *bp = netdev_priv(dev);
12818 struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
12819
12820 cp->drv_owner = THIS_MODULE;
12821 cp->chip_id = CHIP_ID(bp);
12822 cp->pdev = bp->pdev;
12823 cp->io_base = bp->regview;
12824 cp->io_base2 = bp->doorbells;
12825 cp->max_kwqe_pending = 8;
12826 cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
12827 cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
12828 cp->ctx_tbl_len = CNIC_ILT_LINES;
12829 cp->starting_cid = BCM_CNIC_CID_START;
12830 cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
12831 cp->drv_ctl = bnx2x_drv_ctl;
12832 cp->drv_register_cnic = bnx2x_register_cnic;
12833 cp->drv_unregister_cnic = bnx2x_unregister_cnic;
12834
12835 return cp;
12836}
12837EXPORT_SYMBOL(bnx2x_cnic_probe);
12838
12839#endif /* BCM_CNIC */
12279 12840
diff --git a/drivers/net/bnx2x_reg.h b/drivers/net/bnx2x_reg.h
index aa76cbada5e2..b668173ffcb4 100644
--- a/drivers/net/bnx2x_reg.h
+++ b/drivers/net/bnx2x_reg.h
@@ -4772,18 +4772,28 @@
4772#define PCI_ID_VAL2 0x438 4772#define PCI_ID_VAL2 0x438
4773 4773
4774 4774
4775#define MDIO_REG_BANK_CL73_IEEEB0 0x0 4775#define MDIO_REG_BANK_CL73_IEEEB0 0x0
4776#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0 4776#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL 0x0
4777#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200 4777#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_RESTART_AN 0x0200
4778#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN 0x1000 4778#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_AN_EN 0x1000
4779#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST 0x8000 4779#define MDIO_CL73_IEEEB0_CL73_AN_CONTROL_MAIN_RST 0x8000
4780 4780
4781#define MDIO_REG_BANK_CL73_IEEEB1 0x10 4781#define MDIO_REG_BANK_CL73_IEEEB1 0x10
4782#define MDIO_CL73_IEEEB1_AN_ADV2 0x01 4782#define MDIO_CL73_IEEEB1_AN_ADV1 0x00
4783#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE 0x0400
4784#define MDIO_CL73_IEEEB1_AN_ADV1_ASYMMETRIC 0x0800
4785#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_BOTH 0x0C00
4786#define MDIO_CL73_IEEEB1_AN_ADV1_PAUSE_MASK 0x0C00
4787#define MDIO_CL73_IEEEB1_AN_ADV2 0x01
4783#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M 0x0000 4788#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M 0x0000
4784#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX 0x0020 4789#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_1000M_KX 0x0020
4785#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 0x0040 4790#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KX4 0x0040
4786#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR 0x0080 4791#define MDIO_CL73_IEEEB1_AN_ADV2_ADVR_10G_KR 0x0080
4792#define MDIO_CL73_IEEEB1_AN_LP_ADV1 0x03
4793#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE 0x0400
4794#define MDIO_CL73_IEEEB1_AN_LP_ADV1_ASYMMETRIC 0x0800
4795#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_BOTH 0x0C00
4796#define MDIO_CL73_IEEEB1_AN_LP_ADV1_PAUSE_MASK 0x0C00
4787 4797
4788#define MDIO_REG_BANK_RX0 0x80b0 4798#define MDIO_REG_BANK_RX0 0x80b0
4789#define MDIO_RX0_RX_STATUS 0x10 4799#define MDIO_RX0_RX_STATUS 0x10
@@ -4910,6 +4920,8 @@
4910 4920
4911 4921
4912#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130 4922#define MDIO_REG_BANK_10G_PARALLEL_DETECT 0x8130
4923#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS 0x10
4924#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_STATUS_PD_LINK 0x8000
4913#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL 0x11 4925#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL 0x11
4914#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN 0x1 4926#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_CONTROL_PARDET10G_EN 0x1
4915#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK 0x13 4927#define MDIO_10G_PARALLEL_DETECT_PAR_DET_10G_LINK 0x13
@@ -4934,6 +4946,8 @@
4934#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G 0x0010 4946#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_1G 0x0010
4935#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M 0x0008 4947#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_100M 0x0008
4936#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M 0x0000 4948#define MDIO_SERDES_DIGITAL_A_1000X_STATUS1_SPEED_10M 0x0000
4949#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2 0x15
4950#define MDIO_SERDES_DIGITAL_A_1000X_STATUS2_AN_DISABLED 0x0002
4937#define MDIO_SERDES_DIGITAL_MISC1 0x18 4951#define MDIO_SERDES_DIGITAL_MISC1 0x18
4938#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK 0xE000 4952#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_MASK 0xE000
4939#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M 0x0000 4953#define MDIO_SERDES_DIGITAL_MISC1_REFCLK_SEL_25M 0x0000
@@ -5115,6 +5129,7 @@ Theotherbitsarereservedandshouldbezero*/
5115#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c 5129#define MDIO_PMA_REG_8481_LED1_MASK 0xa82c
5116#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f 5130#define MDIO_PMA_REG_8481_LED2_MASK 0xa82f
5117#define MDIO_PMA_REG_8481_LED3_MASK 0xa832 5131#define MDIO_PMA_REG_8481_LED3_MASK 0xa832
5132#define MDIO_PMA_REG_8481_LED3_BLINK 0xa834
5118#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835 5133#define MDIO_PMA_REG_8481_SIGNAL_MASK 0xa835
5119#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b 5134#define MDIO_PMA_REG_8481_LINK_SIGNAL 0xa83b
5120 5135
diff --git a/drivers/net/bonding/bond_3ad.c b/drivers/net/bonding/bond_3ad.c
index c3fa31c9f2a7..88c3fe80b355 100644
--- a/drivers/net/bonding/bond_3ad.c
+++ b/drivers/net/bonding/bond_3ad.c
@@ -446,6 +446,48 @@ static u16 __ad_timer_to_ticks(u16 timer_type, u16 par)
446///////////////////////////////////////////////////////////////////////////////// 446/////////////////////////////////////////////////////////////////////////////////
447 447
448/** 448/**
449 * __choose_matched - update a port's matched variable from a received lacpdu
450 * @lacpdu: the lacpdu we've received
451 * @port: the port we're looking at
452 *
453 * Update the value of the matched variable, using parameter values from a
454 * newly received lacpdu. Parameter values for the partner carried in the
455 * received PDU are compared with the corresponding operational parameter
456 * values for the actor. Matched is set to TRUE if all of these parameters
457 * match and the PDU parameter partner_state.aggregation has the same value as
458 * actor_oper_port_state.aggregation and lacp will actively maintain the link
459 * in the aggregation. Matched is also set to TRUE if the value of
460 * actor_state.aggregation in the received PDU is set to FALSE, i.e., indicates
461 * an individual link and lacp will actively maintain the link. Otherwise,
462 * matched is set to FALSE. LACP is considered to be actively maintaining the
463 * link if either the PDU's actor_state.lacp_activity variable is TRUE or both
464 * the actor's actor_oper_port_state.lacp_activity and the PDU's
465 * partner_state.lacp_activity variables are TRUE.
466 *
467 * Note: the AD_PORT_MATCHED "variable" is not specified by 802.3ad; it is
468 * used here to implement the language from 802.3ad 43.4.9 that requires
469 * recordPDU to "match" the LACPDU parameters to the stored values.
470 */
471static void __choose_matched(struct lacpdu *lacpdu, struct port *port)
472{
473 // check if all parameters are alike
474 if (((ntohs(lacpdu->partner_port) == port->actor_port_number) &&
475 (ntohs(lacpdu->partner_port_priority) == port->actor_port_priority) &&
476 !MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) &&
477 (ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) &&
478 (ntohs(lacpdu->partner_key) == port->actor_oper_port_key) &&
479 ((lacpdu->partner_state & AD_STATE_AGGREGATION) == (port->actor_oper_port_state & AD_STATE_AGGREGATION))) ||
480 // or this is individual link(aggregation == FALSE)
481 ((lacpdu->actor_state & AD_STATE_AGGREGATION) == 0)
482 ) {
483 // update the state machine Matched variable
484 port->sm_vars |= AD_PORT_MATCHED;
485 } else {
486 port->sm_vars &= ~AD_PORT_MATCHED;
487 }
488}
489
490/**
449 * __record_pdu - record parameters from a received lacpdu 491 * __record_pdu - record parameters from a received lacpdu
450 * @lacpdu: the lacpdu we've received 492 * @lacpdu: the lacpdu we've received
451 * @port: the port we're looking at 493 * @port: the port we're looking at
@@ -459,6 +501,7 @@ static void __record_pdu(struct lacpdu *lacpdu, struct port *port)
459 if (lacpdu && port) { 501 if (lacpdu && port) {
460 struct port_params *partner = &port->partner_oper; 502 struct port_params *partner = &port->partner_oper;
461 503
504 __choose_matched(lacpdu, port);
462 // record the new parameter values for the partner operational 505 // record the new parameter values for the partner operational
463 partner->port_number = ntohs(lacpdu->actor_port); 506 partner->port_number = ntohs(lacpdu->actor_port);
464 partner->port_priority = ntohs(lacpdu->actor_port_priority); 507 partner->port_priority = ntohs(lacpdu->actor_port_priority);
@@ -563,47 +606,6 @@ static void __update_default_selected(struct port *port)
563} 606}
564 607
565/** 608/**
566 * __choose_matched - update a port's matched variable from a received lacpdu
567 * @lacpdu: the lacpdu we've received
568 * @port: the port we're looking at
569 *
570 * Update the value of the matched variable, using parameter values from a
571 * newly received lacpdu. Parameter values for the partner carried in the
572 * received PDU are compared with the corresponding operational parameter
573 * values for the actor. Matched is set to TRUE if all of these parameters
574 * match and the PDU parameter partner_state.aggregation has the same value as
575 * actor_oper_port_state.aggregation and lacp will actively maintain the link
576 * in the aggregation. Matched is also set to TRUE if the value of
577 * actor_state.aggregation in the received PDU is set to FALSE, i.e., indicates
578 * an individual link and lacp will actively maintain the link. Otherwise,
579 * matched is set to FALSE. LACP is considered to be actively maintaining the
580 * link if either the PDU's actor_state.lacp_activity variable is TRUE or both
581 * the actor's actor_oper_port_state.lacp_activity and the PDU's
582 * partner_state.lacp_activity variables are TRUE.
583 */
584static void __choose_matched(struct lacpdu *lacpdu, struct port *port)
585{
586 // validate lacpdu and port
587 if (lacpdu && port) {
588 // check if all parameters are alike
589 if (((ntohs(lacpdu->partner_port) == port->actor_port_number) &&
590 (ntohs(lacpdu->partner_port_priority) == port->actor_port_priority) &&
591 !MAC_ADDRESS_COMPARE(&(lacpdu->partner_system), &(port->actor_system)) &&
592 (ntohs(lacpdu->partner_system_priority) == port->actor_system_priority) &&
593 (ntohs(lacpdu->partner_key) == port->actor_oper_port_key) &&
594 ((lacpdu->partner_state & AD_STATE_AGGREGATION) == (port->actor_oper_port_state & AD_STATE_AGGREGATION))) ||
595 // or this is individual link(aggregation == FALSE)
596 ((lacpdu->actor_state & AD_STATE_AGGREGATION) == 0)
597 ) {
598 // update the state machine Matched variable
599 port->sm_vars |= AD_PORT_MATCHED;
600 } else {
601 port->sm_vars &= ~AD_PORT_MATCHED;
602 }
603 }
604}
605
606/**
607 * __update_ntt - update a port's ntt variable from a received lacpdu 609 * __update_ntt - update a port's ntt variable from a received lacpdu
608 * @lacpdu: the lacpdu we've received 610 * @lacpdu: the lacpdu we've received
609 * @port: the port we're looking at 611 * @port: the port we're looking at
@@ -1134,7 +1136,6 @@ static void ad_rx_machine(struct lacpdu *lacpdu, struct port *port)
1134 __update_selected(lacpdu, port); 1136 __update_selected(lacpdu, port);
1135 __update_ntt(lacpdu, port); 1137 __update_ntt(lacpdu, port);
1136 __record_pdu(lacpdu, port); 1138 __record_pdu(lacpdu, port);
1137 __choose_matched(lacpdu, port);
1138 port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT)); 1139 port->sm_rx_timer_counter = __ad_timer_to_ticks(AD_CURRENT_WHILE_TIMER, (u16)(port->actor_oper_port_state & AD_STATE_LACP_TIMEOUT));
1139 port->actor_oper_port_state &= ~AD_STATE_EXPIRED; 1140 port->actor_oper_port_state &= ~AD_STATE_EXPIRED;
1140 // verify that if the aggregator is enabled, the port is enabled too. 1141 // verify that if the aggregator is enabled, the port is enabled too.
@@ -1956,7 +1957,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
1956 struct port *port, *prev_port, *temp_port; 1957 struct port *port, *prev_port, *temp_port;
1957 struct aggregator *aggregator, *new_aggregator, *temp_aggregator; 1958 struct aggregator *aggregator, *new_aggregator, *temp_aggregator;
1958 int select_new_active_agg = 0; 1959 int select_new_active_agg = 0;
1959 1960
1960 // find the aggregator related to this slave 1961 // find the aggregator related to this slave
1961 aggregator = &(SLAVE_AD_INFO(slave).aggregator); 1962 aggregator = &(SLAVE_AD_INFO(slave).aggregator);
1962 1963
@@ -2024,7 +2025,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
2024 2025
2025 // clear the aggregator 2026 // clear the aggregator
2026 ad_clear_agg(aggregator); 2027 ad_clear_agg(aggregator);
2027 2028
2028 if (select_new_active_agg) { 2029 if (select_new_active_agg) {
2029 ad_agg_selection_logic(__get_first_agg(port)); 2030 ad_agg_selection_logic(__get_first_agg(port));
2030 } 2031 }
@@ -2075,7 +2076,7 @@ void bond_3ad_unbind_slave(struct slave *slave)
2075 } 2076 }
2076 } 2077 }
2077 } 2078 }
2078 port->slave=NULL; 2079 port->slave=NULL;
2079} 2080}
2080 2081
2081/** 2082/**
@@ -2301,7 +2302,7 @@ void bond_3ad_handle_link_change(struct slave *slave, char link)
2301} 2302}
2302 2303
2303/* 2304/*
2304 * set link state for bonding master: if we have an active 2305 * set link state for bonding master: if we have an active
2305 * aggregator, we're up, if not, we're down. Presumes that we cannot 2306 * aggregator, we're up, if not, we're down. Presumes that we cannot
2306 * have an active aggregator if there are no slaves with link up. 2307 * have an active aggregator if there are no slaves with link up.
2307 * 2308 *
@@ -2395,7 +2396,7 @@ int bond_3ad_xmit_xor(struct sk_buff *skb, struct net_device *dev)
2395 goto out; 2396 goto out;
2396 } 2397 }
2397 2398
2398 slave_agg_no = bond->xmit_hash_policy(skb, dev, slaves_in_agg); 2399 slave_agg_no = bond->xmit_hash_policy(skb, slaves_in_agg);
2399 2400
2400 bond_for_each_slave(bond, slave, i) { 2401 bond_for_each_slave(bond, slave, i) {
2401 struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator; 2402 struct aggregator *agg = SLAVE_AD_INFO(slave).port.aggregator;
@@ -2445,9 +2446,6 @@ int bond_3ad_lacpdu_recv(struct sk_buff *skb, struct net_device *dev, struct pac
2445 struct slave *slave = NULL; 2446 struct slave *slave = NULL;
2446 int ret = NET_RX_DROP; 2447 int ret = NET_RX_DROP;
2447 2448
2448 if (dev_net(dev) != &init_net)
2449 goto out;
2450
2451 if (!(dev->flags & IFF_MASTER)) 2449 if (!(dev->flags & IFF_MASTER))
2452 goto out; 2450 goto out;
2453 2451
@@ -2468,4 +2466,3 @@ out:
2468 2466
2469 return ret; 2467 return ret;
2470} 2468}
2471
diff --git a/drivers/net/bonding/bond_alb.c b/drivers/net/bonding/bond_alb.c
index 9b5936f072dc..0d30d1e5e53f 100644
--- a/drivers/net/bonding/bond_alb.c
+++ b/drivers/net/bonding/bond_alb.c
@@ -355,9 +355,6 @@ static int rlb_arp_recv(struct sk_buff *skb, struct net_device *bond_dev, struct
355 struct arp_pkt *arp = (struct arp_pkt *)skb->data; 355 struct arp_pkt *arp = (struct arp_pkt *)skb->data;
356 int res = NET_RX_DROP; 356 int res = NET_RX_DROP;
357 357
358 if (dev_net(bond_dev) != &init_net)
359 goto out;
360
361 while (bond_dev->priv_flags & IFF_802_1Q_VLAN) 358 while (bond_dev->priv_flags & IFF_802_1Q_VLAN)
362 bond_dev = vlan_dev_real_dev(bond_dev); 359 bond_dev = vlan_dev_real_dev(bond_dev);
363 360
diff --git a/drivers/net/bonding/bond_ipv6.c b/drivers/net/bonding/bond_ipv6.c
index 83921abae12d..b72e1dc8cf8f 100644
--- a/drivers/net/bonding/bond_ipv6.c
+++ b/drivers/net/bonding/bond_ipv6.c
@@ -25,6 +25,7 @@
25#include <net/ipv6.h> 25#include <net/ipv6.h>
26#include <net/ndisc.h> 26#include <net/ndisc.h>
27#include <net/addrconf.h> 27#include <net/addrconf.h>
28#include <net/netns/generic.h>
28#include "bonding.h" 29#include "bonding.h"
29 30
30/* 31/*
@@ -152,11 +153,9 @@ static int bond_inet6addr_event(struct notifier_block *this,
152 struct net_device *vlan_dev, *event_dev = ifa->idev->dev; 153 struct net_device *vlan_dev, *event_dev = ifa->idev->dev;
153 struct bonding *bond; 154 struct bonding *bond;
154 struct vlan_entry *vlan; 155 struct vlan_entry *vlan;
156 struct bond_net *bn = net_generic(dev_net(event_dev), bond_net_id);
155 157
156 if (dev_net(event_dev) != &init_net) 158 list_for_each_entry(bond, &bn->dev_list, bond_list) {
157 return NOTIFY_DONE;
158
159 list_for_each_entry(bond, &bond_dev_list, bond_list) {
160 if (bond->dev == event_dev) { 159 if (bond->dev == event_dev) {
161 switch (event) { 160 switch (event) {
162 case NETDEV_UP: 161 case NETDEV_UP:
diff --git a/drivers/net/bonding/bond_main.c b/drivers/net/bonding/bond_main.c
index 40fb5eefc72e..ecea6c294132 100644
--- a/drivers/net/bonding/bond_main.c
+++ b/drivers/net/bonding/bond_main.c
@@ -75,6 +75,7 @@
75#include <linux/jiffies.h> 75#include <linux/jiffies.h>
76#include <net/route.h> 76#include <net/route.h>
77#include <net/net_namespace.h> 77#include <net/net_namespace.h>
78#include <net/netns/generic.h>
78#include "bonding.h" 79#include "bonding.h"
79#include "bond_3ad.h" 80#include "bond_3ad.h"
80#include "bond_alb.h" 81#include "bond_alb.h"
@@ -94,6 +95,7 @@ static int downdelay;
94static int use_carrier = 1; 95static int use_carrier = 1;
95static char *mode; 96static char *mode;
96static char *primary; 97static char *primary;
98static char *primary_reselect;
97static char *lacp_rate; 99static char *lacp_rate;
98static char *ad_select; 100static char *ad_select;
99static char *xmit_hash_policy; 101static char *xmit_hash_policy;
@@ -126,6 +128,14 @@ MODULE_PARM_DESC(mode, "Mode of operation : 0 for balance-rr, "
126 "6 for balance-alb"); 128 "6 for balance-alb");
127module_param(primary, charp, 0); 129module_param(primary, charp, 0);
128MODULE_PARM_DESC(primary, "Primary network device to use"); 130MODULE_PARM_DESC(primary, "Primary network device to use");
131module_param(primary_reselect, charp, 0);
132MODULE_PARM_DESC(primary_reselect, "Reselect primary slave "
133 "once it comes up; "
134 "0 for always (default), "
135 "1 for only if speed of primary is "
136 "better, "
137 "2 for only on active slave "
138 "failure");
129module_param(lacp_rate, charp, 0); 139module_param(lacp_rate, charp, 0);
130MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner " 140MODULE_PARM_DESC(lacp_rate, "LACPDU tx rate to request from 802.3ad partner "
131 "(slow/fast)"); 141 "(slow/fast)");
@@ -148,11 +158,7 @@ MODULE_PARM_DESC(fail_over_mac, "For active-backup, do not set all slaves to the
148static const char * const version = 158static const char * const version =
149 DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n"; 159 DRV_DESCRIPTION ": v" DRV_VERSION " (" DRV_RELDATE ")\n";
150 160
151LIST_HEAD(bond_dev_list); 161int bond_net_id;
152
153#ifdef CONFIG_PROC_FS
154static struct proc_dir_entry *bond_proc_dir;
155#endif
156 162
157static __be32 arp_target[BOND_MAX_ARP_TARGETS]; 163static __be32 arp_target[BOND_MAX_ARP_TARGETS];
158static int arp_ip_count; 164static int arp_ip_count;
@@ -200,6 +206,13 @@ const struct bond_parm_tbl fail_over_mac_tbl[] = {
200{ NULL, -1}, 206{ NULL, -1},
201}; 207};
202 208
209const struct bond_parm_tbl pri_reselect_tbl[] = {
210{ "always", BOND_PRI_RESELECT_ALWAYS},
211{ "better", BOND_PRI_RESELECT_BETTER},
212{ "failure", BOND_PRI_RESELECT_FAILURE},
213{ NULL, -1},
214};
215
203struct bond_parm_tbl ad_select_tbl[] = { 216struct bond_parm_tbl ad_select_tbl[] = {
204{ "stable", BOND_AD_STABLE}, 217{ "stable", BOND_AD_STABLE},
205{ "bandwidth", BOND_AD_BANDWIDTH}, 218{ "bandwidth", BOND_AD_BANDWIDTH},
@@ -211,7 +224,7 @@ struct bond_parm_tbl ad_select_tbl[] = {
211 224
212static void bond_send_gratuitous_arp(struct bonding *bond); 225static void bond_send_gratuitous_arp(struct bonding *bond);
213static int bond_init(struct net_device *bond_dev); 226static int bond_init(struct net_device *bond_dev);
214static void bond_deinit(struct net_device *bond_dev); 227static void bond_uninit(struct net_device *bond_dev);
215 228
216/*---------------------------- General routines -----------------------------*/ 229/*---------------------------- General routines -----------------------------*/
217 230
@@ -1070,6 +1083,25 @@ out:
1070 1083
1071} 1084}
1072 1085
1086static bool bond_should_change_active(struct bonding *bond)
1087{
1088 struct slave *prim = bond->primary_slave;
1089 struct slave *curr = bond->curr_active_slave;
1090
1091 if (!prim || !curr || curr->link != BOND_LINK_UP)
1092 return true;
1093 if (bond->force_primary) {
1094 bond->force_primary = false;
1095 return true;
1096 }
1097 if (bond->params.primary_reselect == BOND_PRI_RESELECT_BETTER &&
1098 (prim->speed < curr->speed ||
1099 (prim->speed == curr->speed && prim->duplex <= curr->duplex)))
1100 return false;
1101 if (bond->params.primary_reselect == BOND_PRI_RESELECT_FAILURE)
1102 return false;
1103 return true;
1104}
1073 1105
1074/** 1106/**
1075 * find_best_interface - select the best available slave to be the active one 1107 * find_best_interface - select the best available slave to be the active one
@@ -1084,7 +1116,7 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
1084 int mintime = bond->params.updelay; 1116 int mintime = bond->params.updelay;
1085 int i; 1117 int i;
1086 1118
1087 new_active = old_active = bond->curr_active_slave; 1119 new_active = bond->curr_active_slave;
1088 1120
1089 if (!new_active) { /* there were no active slaves left */ 1121 if (!new_active) { /* there were no active slaves left */
1090 if (bond->slave_cnt > 0) /* found one slave */ 1122 if (bond->slave_cnt > 0) /* found one slave */
@@ -1094,7 +1126,8 @@ static struct slave *bond_find_best_slave(struct bonding *bond)
1094 } 1126 }
1095 1127
1096 if ((bond->primary_slave) && 1128 if ((bond->primary_slave) &&
1097 bond->primary_slave->link == BOND_LINK_UP) { 1129 bond->primary_slave->link == BOND_LINK_UP &&
1130 bond_should_change_active(bond)) {
1098 new_active = bond->primary_slave; 1131 new_active = bond->primary_slave;
1099 } 1132 }
1100 1133
@@ -1678,8 +1711,10 @@ int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev)
1678 1711
1679 if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) { 1712 if (USES_PRIMARY(bond->params.mode) && bond->params.primary[0]) {
1680 /* if there is a primary slave, remember it */ 1713 /* if there is a primary slave, remember it */
1681 if (strcmp(bond->params.primary, new_slave->dev->name) == 0) 1714 if (strcmp(bond->params.primary, new_slave->dev->name) == 0) {
1682 bond->primary_slave = new_slave; 1715 bond->primary_slave = new_slave;
1716 bond->force_primary = true;
1717 }
1683 } 1718 }
1684 1719
1685 write_lock_bh(&bond->curr_slave_lock); 1720 write_lock_bh(&bond->curr_slave_lock);
@@ -1965,25 +2000,6 @@ int bond_release(struct net_device *bond_dev, struct net_device *slave_dev)
1965} 2000}
1966 2001
1967/* 2002/*
1968* Destroy a bonding device.
1969* Must be under rtnl_lock when this function is called.
1970*/
1971static void bond_uninit(struct net_device *bond_dev)
1972{
1973 struct bonding *bond = netdev_priv(bond_dev);
1974
1975 bond_deinit(bond_dev);
1976 bond_destroy_sysfs_entry(bond);
1977
1978 if (bond->wq)
1979 destroy_workqueue(bond->wq);
1980
1981 netif_addr_lock_bh(bond_dev);
1982 bond_mc_list_destroy(bond);
1983 netif_addr_unlock_bh(bond_dev);
1984}
1985
1986/*
1987* First release a slave and than destroy the bond if no more slaves are left. 2003* First release a slave and than destroy the bond if no more slaves are left.
1988* Must be under rtnl_lock when this function is called. 2004* Must be under rtnl_lock when this function is called.
1989*/ 2005*/
@@ -2567,7 +2583,7 @@ static void bond_arp_send_all(struct bonding *bond, struct slave *slave)
2567 fl.fl4_dst = targets[i]; 2583 fl.fl4_dst = targets[i];
2568 fl.fl4_tos = RTO_ONLINK; 2584 fl.fl4_tos = RTO_ONLINK;
2569 2585
2570 rv = ip_route_output_key(&init_net, &rt, &fl); 2586 rv = ip_route_output_key(dev_net(bond->dev), &rt, &fl);
2571 if (rv) { 2587 if (rv) {
2572 if (net_ratelimit()) { 2588 if (net_ratelimit()) {
2573 pr_warning(DRV_NAME 2589 pr_warning(DRV_NAME
@@ -2675,9 +2691,6 @@ static int bond_arp_rcv(struct sk_buff *skb, struct net_device *dev, struct pack
2675 unsigned char *arp_ptr; 2691 unsigned char *arp_ptr;
2676 __be32 sip, tip; 2692 __be32 sip, tip;
2677 2693
2678 if (dev_net(dev) != &init_net)
2679 goto out;
2680
2681 if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER)) 2694 if (!(dev->priv_flags & IFF_BONDING) || !(dev->flags & IFF_MASTER))
2682 goto out; 2695 goto out;
2683 2696
@@ -3201,11 +3214,14 @@ static void bond_info_show_master(struct seq_file *seq)
3201 } 3214 }
3202 3215
3203 if (USES_PRIMARY(bond->params.mode)) { 3216 if (USES_PRIMARY(bond->params.mode)) {
3204 seq_printf(seq, "Primary Slave: %s\n", 3217 seq_printf(seq, "Primary Slave: %s",
3205 (bond->primary_slave) ? 3218 (bond->primary_slave) ?
3206 bond->primary_slave->dev->name : "None"); 3219 bond->primary_slave->dev->name : "None");
3220 if (bond->primary_slave)
3221 seq_printf(seq, " (primary_reselect %s)",
3222 pri_reselect_tbl[bond->params.primary_reselect].modename);
3207 3223
3208 seq_printf(seq, "Currently Active Slave: %s\n", 3224 seq_printf(seq, "\nCurrently Active Slave: %s\n",
3209 (curr) ? curr->dev->name : "None"); 3225 (curr) ? curr->dev->name : "None");
3210 } 3226 }
3211 3227
@@ -3334,13 +3350,14 @@ static const struct file_operations bond_info_fops = {
3334 .release = seq_release, 3350 .release = seq_release,
3335}; 3351};
3336 3352
3337static int bond_create_proc_entry(struct bonding *bond) 3353static void bond_create_proc_entry(struct bonding *bond)
3338{ 3354{
3339 struct net_device *bond_dev = bond->dev; 3355 struct net_device *bond_dev = bond->dev;
3356 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
3340 3357
3341 if (bond_proc_dir) { 3358 if (bn->proc_dir) {
3342 bond->proc_entry = proc_create_data(bond_dev->name, 3359 bond->proc_entry = proc_create_data(bond_dev->name,
3343 S_IRUGO, bond_proc_dir, 3360 S_IRUGO, bn->proc_dir,
3344 &bond_info_fops, bond); 3361 &bond_info_fops, bond);
3345 if (bond->proc_entry == NULL) 3362 if (bond->proc_entry == NULL)
3346 pr_warning(DRV_NAME 3363 pr_warning(DRV_NAME
@@ -3349,14 +3366,15 @@ static int bond_create_proc_entry(struct bonding *bond)
3349 else 3366 else
3350 memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ); 3367 memcpy(bond->proc_file_name, bond_dev->name, IFNAMSIZ);
3351 } 3368 }
3352
3353 return 0;
3354} 3369}
3355 3370
3356static void bond_remove_proc_entry(struct bonding *bond) 3371static void bond_remove_proc_entry(struct bonding *bond)
3357{ 3372{
3358 if (bond_proc_dir && bond->proc_entry) { 3373 struct net_device *bond_dev = bond->dev;
3359 remove_proc_entry(bond->proc_file_name, bond_proc_dir); 3374 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
3375
3376 if (bn->proc_dir && bond->proc_entry) {
3377 remove_proc_entry(bond->proc_file_name, bn->proc_dir);
3360 memset(bond->proc_file_name, 0, IFNAMSIZ); 3378 memset(bond->proc_file_name, 0, IFNAMSIZ);
3361 bond->proc_entry = NULL; 3379 bond->proc_entry = NULL;
3362 } 3380 }
@@ -3365,11 +3383,11 @@ static void bond_remove_proc_entry(struct bonding *bond)
3365/* Create the bonding directory under /proc/net, if doesn't exist yet. 3383/* Create the bonding directory under /proc/net, if doesn't exist yet.
3366 * Caller must hold rtnl_lock. 3384 * Caller must hold rtnl_lock.
3367 */ 3385 */
3368static void bond_create_proc_dir(void) 3386static void bond_create_proc_dir(struct bond_net *bn)
3369{ 3387{
3370 if (!bond_proc_dir) { 3388 if (!bn->proc_dir) {
3371 bond_proc_dir = proc_mkdir(DRV_NAME, init_net.proc_net); 3389 bn->proc_dir = proc_mkdir(DRV_NAME, bn->net->proc_net);
3372 if (!bond_proc_dir) 3390 if (!bn->proc_dir)
3373 pr_warning(DRV_NAME 3391 pr_warning(DRV_NAME
3374 ": Warning: cannot create /proc/net/%s\n", 3392 ": Warning: cannot create /proc/net/%s\n",
3375 DRV_NAME); 3393 DRV_NAME);
@@ -3379,17 +3397,17 @@ static void bond_create_proc_dir(void)
3379/* Destroy the bonding directory under /proc/net, if empty. 3397/* Destroy the bonding directory under /proc/net, if empty.
3380 * Caller must hold rtnl_lock. 3398 * Caller must hold rtnl_lock.
3381 */ 3399 */
3382static void bond_destroy_proc_dir(void) 3400static void bond_destroy_proc_dir(struct bond_net *bn)
3383{ 3401{
3384 if (bond_proc_dir) { 3402 if (bn->proc_dir) {
3385 remove_proc_entry(DRV_NAME, init_net.proc_net); 3403 remove_proc_entry(DRV_NAME, bn->net->proc_net);
3386 bond_proc_dir = NULL; 3404 bn->proc_dir = NULL;
3387 } 3405 }
3388} 3406}
3389 3407
3390#else /* !CONFIG_PROC_FS */ 3408#else /* !CONFIG_PROC_FS */
3391 3409
3392static int bond_create_proc_entry(struct bonding *bond) 3410static void bond_create_proc_entry(struct bonding *bond)
3393{ 3411{
3394} 3412}
3395 3413
@@ -3397,11 +3415,11 @@ static void bond_remove_proc_entry(struct bonding *bond)
3397{ 3415{
3398} 3416}
3399 3417
3400static void bond_create_proc_dir(void) 3418static void bond_create_proc_dir(struct bond_net *bn)
3401{ 3419{
3402} 3420}
3403 3421
3404static void bond_destroy_proc_dir(void) 3422static void bond_destroy_proc_dir(struct bond_net *bn)
3405{ 3423{
3406} 3424}
3407 3425
@@ -3418,9 +3436,6 @@ static int bond_event_changename(struct bonding *bond)
3418 bond_remove_proc_entry(bond); 3436 bond_remove_proc_entry(bond);
3419 bond_create_proc_entry(bond); 3437 bond_create_proc_entry(bond);
3420 3438
3421 bond_destroy_sysfs_entry(bond);
3422 bond_create_sysfs_entry(bond);
3423
3424 return NOTIFY_DONE; 3439 return NOTIFY_DONE;
3425} 3440}
3426 3441
@@ -3432,9 +3447,6 @@ static int bond_master_netdev_event(unsigned long event,
3432 switch (event) { 3447 switch (event) {
3433 case NETDEV_CHANGENAME: 3448 case NETDEV_CHANGENAME:
3434 return bond_event_changename(event_bond); 3449 return bond_event_changename(event_bond);
3435 case NETDEV_UNREGISTER:
3436 bond_release_all(event_bond->dev);
3437 break;
3438 default: 3450 default:
3439 break; 3451 break;
3440 } 3452 }
@@ -3526,9 +3538,6 @@ static int bond_netdev_event(struct notifier_block *this,
3526{ 3538{
3527 struct net_device *event_dev = (struct net_device *)ptr; 3539 struct net_device *event_dev = (struct net_device *)ptr;
3528 3540
3529 if (dev_net(event_dev) != &init_net)
3530 return NOTIFY_DONE;
3531
3532 pr_debug("event_dev: %s, event: %lx\n", 3541 pr_debug("event_dev: %s, event: %lx\n",
3533 (event_dev ? event_dev->name : "None"), 3542 (event_dev ? event_dev->name : "None"),
3534 event); 3543 event);
@@ -3561,13 +3570,11 @@ static int bond_inetaddr_event(struct notifier_block *this, unsigned long event,
3561{ 3570{
3562 struct in_ifaddr *ifa = ptr; 3571 struct in_ifaddr *ifa = ptr;
3563 struct net_device *vlan_dev, *event_dev = ifa->ifa_dev->dev; 3572 struct net_device *vlan_dev, *event_dev = ifa->ifa_dev->dev;
3573 struct bond_net *bn = net_generic(dev_net(event_dev), bond_net_id);
3564 struct bonding *bond; 3574 struct bonding *bond;
3565 struct vlan_entry *vlan; 3575 struct vlan_entry *vlan;
3566 3576
3567 if (dev_net(ifa->ifa_dev->dev) != &init_net) 3577 list_for_each_entry(bond, &bn->dev_list, bond_list) {
3568 return NOTIFY_DONE;
3569
3570 list_for_each_entry(bond, &bond_dev_list, bond_list) {
3571 if (bond->dev == event_dev) { 3578 if (bond->dev == event_dev) {
3572 switch (event) { 3579 switch (event) {
3573 case NETDEV_UP: 3580 case NETDEV_UP:
@@ -3657,8 +3664,7 @@ void bond_unregister_arp(struct bonding *bond)
3657 * Hash for the output device based upon layer 2 and layer 3 data. If 3664 * Hash for the output device based upon layer 2 and layer 3 data. If
3658 * the packet is not IP mimic bond_xmit_hash_policy_l2() 3665 * the packet is not IP mimic bond_xmit_hash_policy_l2()
3659 */ 3666 */
3660static int bond_xmit_hash_policy_l23(struct sk_buff *skb, 3667static int bond_xmit_hash_policy_l23(struct sk_buff *skb, int count)
3661 struct net_device *bond_dev, int count)
3662{ 3668{
3663 struct ethhdr *data = (struct ethhdr *)skb->data; 3669 struct ethhdr *data = (struct ethhdr *)skb->data;
3664 struct iphdr *iph = ip_hdr(skb); 3670 struct iphdr *iph = ip_hdr(skb);
@@ -3676,8 +3682,7 @@ static int bond_xmit_hash_policy_l23(struct sk_buff *skb,
3676 * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is 3682 * the packet is a frag or not TCP or UDP, just use layer 3 data. If it is
3677 * altogether not IP, mimic bond_xmit_hash_policy_l2() 3683 * altogether not IP, mimic bond_xmit_hash_policy_l2()
3678 */ 3684 */
3679static int bond_xmit_hash_policy_l34(struct sk_buff *skb, 3685static int bond_xmit_hash_policy_l34(struct sk_buff *skb, int count)
3680 struct net_device *bond_dev, int count)
3681{ 3686{
3682 struct ethhdr *data = (struct ethhdr *)skb->data; 3687 struct ethhdr *data = (struct ethhdr *)skb->data;
3683 struct iphdr *iph = ip_hdr(skb); 3688 struct iphdr *iph = ip_hdr(skb);
@@ -3701,8 +3706,7 @@ static int bond_xmit_hash_policy_l34(struct sk_buff *skb,
3701/* 3706/*
3702 * Hash for the output device based upon layer 2 data 3707 * Hash for the output device based upon layer 2 data
3703 */ 3708 */
3704static int bond_xmit_hash_policy_l2(struct sk_buff *skb, 3709static int bond_xmit_hash_policy_l2(struct sk_buff *skb, int count)
3705 struct net_device *bond_dev, int count)
3706{ 3710{
3707 struct ethhdr *data = (struct ethhdr *)skb->data; 3711 struct ethhdr *data = (struct ethhdr *)skb->data;
3708 3712
@@ -3939,7 +3943,7 @@ static int bond_do_ioctl(struct net_device *bond_dev, struct ifreq *ifr, int cmd
3939 if (!capable(CAP_NET_ADMIN)) 3943 if (!capable(CAP_NET_ADMIN))
3940 return -EPERM; 3944 return -EPERM;
3941 3945
3942 slave_dev = dev_get_by_name(&init_net, ifr->ifr_slave); 3946 slave_dev = dev_get_by_name(dev_net(bond_dev), ifr->ifr_slave);
3943 3947
3944 pr_debug("slave_dev=%p: \n", slave_dev); 3948 pr_debug("slave_dev=%p: \n", slave_dev);
3945 3949
@@ -4295,7 +4299,7 @@ static int bond_xmit_xor(struct sk_buff *skb, struct net_device *bond_dev)
4295 if (!BOND_IS_OK(bond)) 4299 if (!BOND_IS_OK(bond))
4296 goto out; 4300 goto out;
4297 4301
4298 slave_no = bond->xmit_hash_policy(skb, bond_dev, bond->slave_cnt); 4302 slave_no = bond->xmit_hash_policy(skb, bond->slave_cnt);
4299 4303
4300 bond_for_each_slave(bond, slave, i) { 4304 bond_for_each_slave(bond, slave, i) {
4301 slave_no--; 4305 slave_no--;
@@ -4576,37 +4580,29 @@ static void bond_work_cancel_all(struct bonding *bond)
4576 cancel_delayed_work(&bond->ad_work); 4580 cancel_delayed_work(&bond->ad_work);
4577} 4581}
4578 4582
4579/* De-initialize device specific data. 4583/*
4580 * Caller must hold rtnl_lock. 4584* Destroy a bonding device.
4581 */ 4585* Must be under rtnl_lock when this function is called.
4582static void bond_deinit(struct net_device *bond_dev) 4586*/
4587static void bond_uninit(struct net_device *bond_dev)
4583{ 4588{
4584 struct bonding *bond = netdev_priv(bond_dev); 4589 struct bonding *bond = netdev_priv(bond_dev);
4585 4590
4591 /* Release the bonded slaves */
4592 bond_release_all(bond_dev);
4593
4586 list_del(&bond->bond_list); 4594 list_del(&bond->bond_list);
4587 4595
4588 bond_work_cancel_all(bond); 4596 bond_work_cancel_all(bond);
4589 4597
4590 bond_remove_proc_entry(bond); 4598 bond_remove_proc_entry(bond);
4591}
4592
4593/* Unregister and free all bond devices.
4594 * Caller must hold rtnl_lock.
4595 */
4596static void bond_free_all(void)
4597{
4598 struct bonding *bond, *nxt;
4599
4600 list_for_each_entry_safe(bond, nxt, &bond_dev_list, bond_list) {
4601 struct net_device *bond_dev = bond->dev;
4602 4599
4603 bond_work_cancel_all(bond); 4600 if (bond->wq)
4604 /* Release the bonded slaves */ 4601 destroy_workqueue(bond->wq);
4605 bond_release_all(bond_dev);
4606 unregister_netdevice(bond_dev);
4607 }
4608 4602
4609 bond_destroy_proc_dir(); 4603 netif_addr_lock_bh(bond_dev);
4604 bond_mc_list_destroy(bond);
4605 netif_addr_unlock_bh(bond_dev);
4610} 4606}
4611 4607
4612/*------------------------- Module initialization ---------------------------*/ 4608/*------------------------- Module initialization ---------------------------*/
@@ -4646,7 +4642,7 @@ int bond_parse_parm(const char *buf, const struct bond_parm_tbl *tbl)
4646 4642
4647static int bond_check_params(struct bond_params *params) 4643static int bond_check_params(struct bond_params *params)
4648{ 4644{
4649 int arp_validate_value, fail_over_mac_value; 4645 int arp_validate_value, fail_over_mac_value, primary_reselect_value;
4650 4646
4651 /* 4647 /*
4652 * Convert string parameters. 4648 * Convert string parameters.
@@ -4665,7 +4661,8 @@ static int bond_check_params(struct bond_params *params)
4665 if ((bond_mode != BOND_MODE_XOR) && 4661 if ((bond_mode != BOND_MODE_XOR) &&
4666 (bond_mode != BOND_MODE_8023AD)) { 4662 (bond_mode != BOND_MODE_8023AD)) {
4667 pr_info(DRV_NAME 4663 pr_info(DRV_NAME
4668 ": xor_mode param is irrelevant in mode %s\n", 4664 ": xmit_hash_policy param is irrelevant in"
4665 " mode %s\n",
4669 bond_mode_name(bond_mode)); 4666 bond_mode_name(bond_mode));
4670 } else { 4667 } else {
4671 xmit_hashtype = bond_parse_parm(xmit_hash_policy, 4668 xmit_hashtype = bond_parse_parm(xmit_hash_policy,
@@ -4945,6 +4942,20 @@ static int bond_check_params(struct bond_params *params)
4945 primary = NULL; 4942 primary = NULL;
4946 } 4943 }
4947 4944
4945 if (primary && primary_reselect) {
4946 primary_reselect_value = bond_parse_parm(primary_reselect,
4947 pri_reselect_tbl);
4948 if (primary_reselect_value == -1) {
4949 pr_err(DRV_NAME
4950 ": Error: Invalid primary_reselect \"%s\"\n",
4951 primary_reselect ==
4952 NULL ? "NULL" : primary_reselect);
4953 return -EINVAL;
4954 }
4955 } else {
4956 primary_reselect_value = BOND_PRI_RESELECT_ALWAYS;
4957 }
4958
4948 if (fail_over_mac) { 4959 if (fail_over_mac) {
4949 fail_over_mac_value = bond_parse_parm(fail_over_mac, 4960 fail_over_mac_value = bond_parse_parm(fail_over_mac,
4950 fail_over_mac_tbl); 4961 fail_over_mac_tbl);
@@ -4976,6 +4987,7 @@ static int bond_check_params(struct bond_params *params)
4976 params->use_carrier = use_carrier; 4987 params->use_carrier = use_carrier;
4977 params->lacp_fast = lacp_fast; 4988 params->lacp_fast = lacp_fast;
4978 params->primary[0] = 0; 4989 params->primary[0] = 0;
4990 params->primary_reselect = primary_reselect_value;
4979 params->fail_over_mac = fail_over_mac_value; 4991 params->fail_over_mac = fail_over_mac_value;
4980 4992
4981 if (primary) { 4993 if (primary) {
@@ -5012,6 +5024,7 @@ static void bond_set_lockdep_class(struct net_device *dev)
5012static int bond_init(struct net_device *bond_dev) 5024static int bond_init(struct net_device *bond_dev)
5013{ 5025{
5014 struct bonding *bond = netdev_priv(bond_dev); 5026 struct bonding *bond = netdev_priv(bond_dev);
5027 struct bond_net *bn = net_generic(dev_net(bond_dev), bond_net_id);
5015 5028
5016 pr_debug("Begin bond_init for %s\n", bond_dev->name); 5029 pr_debug("Begin bond_init for %s\n", bond_dev->name);
5017 5030
@@ -5024,30 +5037,41 @@ static int bond_init(struct net_device *bond_dev)
5024 netif_carrier_off(bond_dev); 5037 netif_carrier_off(bond_dev);
5025 5038
5026 bond_create_proc_entry(bond); 5039 bond_create_proc_entry(bond);
5027 list_add_tail(&bond->bond_list, &bond_dev_list); 5040 list_add_tail(&bond->bond_list, &bn->dev_list);
5028 5041
5042 bond_prepare_sysfs_group(bond);
5029 return 0; 5043 return 0;
5030} 5044}
5031 5045
5046static int bond_validate(struct nlattr *tb[], struct nlattr *data[])
5047{
5048 if (tb[IFLA_ADDRESS]) {
5049 if (nla_len(tb[IFLA_ADDRESS]) != ETH_ALEN)
5050 return -EINVAL;
5051 if (!is_valid_ether_addr(nla_data(tb[IFLA_ADDRESS])))
5052 return -EADDRNOTAVAIL;
5053 }
5054 return 0;
5055}
5056
5057static struct rtnl_link_ops bond_link_ops __read_mostly = {
5058 .kind = "bond",
5059 .priv_size = sizeof(struct bonding),
5060 .setup = bond_setup,
5061 .validate = bond_validate,
5062};
5063
5032/* Create a new bond based on the specified name and bonding parameters. 5064/* Create a new bond based on the specified name and bonding parameters.
5033 * If name is NULL, obtain a suitable "bond%d" name for us. 5065 * If name is NULL, obtain a suitable "bond%d" name for us.
5034 * Caller must NOT hold rtnl_lock; we need to release it here before we 5066 * Caller must NOT hold rtnl_lock; we need to release it here before we
5035 * set up our sysfs entries. 5067 * set up our sysfs entries.
5036 */ 5068 */
5037int bond_create(const char *name) 5069int bond_create(struct net *net, const char *name)
5038{ 5070{
5039 struct net_device *bond_dev; 5071 struct net_device *bond_dev;
5040 int res; 5072 int res;
5041 5073
5042 rtnl_lock(); 5074 rtnl_lock();
5043 /* Check to see if the bond already exists. */
5044 /* FIXME: pass netns from caller */
5045 if (name && __dev_get_by_name(&init_net, name)) {
5046 pr_err(DRV_NAME ": cannot add bond %s; already exists\n",
5047 name);
5048 res = -EEXIST;
5049 goto out_rtnl;
5050 }
5051 5075
5052 bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "", 5076 bond_dev = alloc_netdev(sizeof(struct bonding), name ? name : "",
5053 bond_setup); 5077 bond_setup);
@@ -5055,9 +5079,12 @@ int bond_create(const char *name)
5055 pr_err(DRV_NAME ": %s: eek! can't alloc netdev!\n", 5079 pr_err(DRV_NAME ": %s: eek! can't alloc netdev!\n",
5056 name); 5080 name);
5057 res = -ENOMEM; 5081 res = -ENOMEM;
5058 goto out_rtnl; 5082 goto out;
5059 } 5083 }
5060 5084
5085 dev_net_set(bond_dev, net);
5086 bond_dev->rtnl_link_ops = &bond_link_ops;
5087
5061 if (!name) { 5088 if (!name) {
5062 res = dev_alloc_name(bond_dev, "bond%d"); 5089 res = dev_alloc_name(bond_dev, "bond%d");
5063 if (res < 0) 5090 if (res < 0)
@@ -5065,27 +5092,55 @@ int bond_create(const char *name)
5065 } 5092 }
5066 5093
5067 res = register_netdevice(bond_dev); 5094 res = register_netdevice(bond_dev);
5068 if (res < 0)
5069 goto out_bond;
5070
5071 res = bond_create_sysfs_entry(netdev_priv(bond_dev));
5072 if (res < 0)
5073 goto out_unreg;
5074 5095
5096out:
5075 rtnl_unlock(); 5097 rtnl_unlock();
5076 return 0; 5098 return res;
5077
5078out_unreg:
5079 unregister_netdevice(bond_dev);
5080out_bond:
5081 bond_deinit(bond_dev);
5082out_netdev: 5099out_netdev:
5083 free_netdev(bond_dev); 5100 free_netdev(bond_dev);
5084out_rtnl: 5101 goto out;
5085 rtnl_unlock();
5086 return res;
5087} 5102}
5088 5103
5104static int bond_net_init(struct net *net)
5105{
5106 struct bond_net *bn;
5107 int err;
5108
5109 err = -ENOMEM;
5110 bn = kzalloc(sizeof(struct bond_net), GFP_KERNEL);
5111 if (bn == NULL)
5112 goto out;
5113
5114 bn->net = net;
5115 INIT_LIST_HEAD(&bn->dev_list);
5116
5117 err = net_assign_generic(net, bond_net_id, bn);
5118 if (err)
5119 goto out_free;
5120
5121 bond_create_proc_dir(bn);
5122out:
5123 return err;
5124out_free:
5125 kfree(bn);
5126 goto out;
5127}
5128
5129static void bond_net_exit(struct net *net)
5130{
5131 struct bond_net *bn;
5132
5133 bn = net_generic(net, bond_net_id);
5134
5135 bond_destroy_proc_dir(bn);
5136 kfree(bn);
5137}
5138
5139static struct pernet_operations bond_net_ops = {
5140 .init = bond_net_init,
5141 .exit = bond_net_exit,
5142};
5143
5089static int __init bonding_init(void) 5144static int __init bonding_init(void)
5090{ 5145{
5091 int i; 5146 int i;
@@ -5097,10 +5152,16 @@ static int __init bonding_init(void)
5097 if (res) 5152 if (res)
5098 goto out; 5153 goto out;
5099 5154
5100 bond_create_proc_dir(); 5155 res = register_pernet_gen_subsys(&bond_net_id, &bond_net_ops);
5156 if (res)
5157 goto out;
5158
5159 res = rtnl_link_register(&bond_link_ops);
5160 if (res)
5161 goto err_link;
5101 5162
5102 for (i = 0; i < max_bonds; i++) { 5163 for (i = 0; i < max_bonds; i++) {
5103 res = bond_create(NULL); 5164 res = bond_create(&init_net, NULL);
5104 if (res) 5165 if (res)
5105 goto err; 5166 goto err;
5106 } 5167 }
@@ -5112,14 +5173,13 @@ static int __init bonding_init(void)
5112 register_netdevice_notifier(&bond_netdev_notifier); 5173 register_netdevice_notifier(&bond_netdev_notifier);
5113 register_inetaddr_notifier(&bond_inetaddr_notifier); 5174 register_inetaddr_notifier(&bond_inetaddr_notifier);
5114 bond_register_ipv6_notifier(); 5175 bond_register_ipv6_notifier();
5115
5116 goto out;
5117err:
5118 rtnl_lock();
5119 bond_free_all();
5120 rtnl_unlock();
5121out: 5176out:
5122 return res; 5177 return res;
5178err:
5179 rtnl_link_unregister(&bond_link_ops);
5180err_link:
5181 unregister_pernet_gen_subsys(bond_net_id, &bond_net_ops);
5182 goto out;
5123 5183
5124} 5184}
5125 5185
@@ -5131,9 +5191,8 @@ static void __exit bonding_exit(void)
5131 5191
5132 bond_destroy_sysfs(); 5192 bond_destroy_sysfs();
5133 5193
5134 rtnl_lock(); 5194 rtnl_link_unregister(&bond_link_ops);
5135 bond_free_all(); 5195 unregister_pernet_gen_subsys(bond_net_id, &bond_net_ops);
5136 rtnl_unlock();
5137} 5196}
5138 5197
5139module_init(bonding_init); 5198module_init(bonding_init);
@@ -5142,3 +5201,4 @@ MODULE_LICENSE("GPL");
5142MODULE_VERSION(DRV_VERSION); 5201MODULE_VERSION(DRV_VERSION);
5143MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION); 5202MODULE_DESCRIPTION(DRV_DESCRIPTION ", v" DRV_VERSION);
5144MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others"); 5203MODULE_AUTHOR("Thomas Davis, tadavis@lbl.gov and many others");
5204MODULE_ALIAS_RTNL_LINK("bond");
diff --git a/drivers/net/bonding/bond_sysfs.c b/drivers/net/bonding/bond_sysfs.c
index ff449de6f3c0..a59094f8bb6b 100644
--- a/drivers/net/bonding/bond_sysfs.c
+++ b/drivers/net/bonding/bond_sysfs.c
@@ -35,6 +35,8 @@
35#include <linux/rtnetlink.h> 35#include <linux/rtnetlink.h>
36#include <linux/etherdevice.h> 36#include <linux/etherdevice.h>
37#include <net/net_namespace.h> 37#include <net/net_namespace.h>
38#include <net/netns/generic.h>
39#include <linux/nsproxy.h>
38 40
39#include "bonding.h" 41#include "bonding.h"
40 42
@@ -47,12 +49,14 @@
47 */ 49 */
48static ssize_t bonding_show_bonds(struct class *cls, char *buf) 50static ssize_t bonding_show_bonds(struct class *cls, char *buf)
49{ 51{
52 struct net *net = current->nsproxy->net_ns;
53 struct bond_net *bn = net_generic(net, bond_net_id);
50 int res = 0; 54 int res = 0;
51 struct bonding *bond; 55 struct bonding *bond;
52 56
53 rtnl_lock(); 57 rtnl_lock();
54 58
55 list_for_each_entry(bond, &bond_dev_list, bond_list) { 59 list_for_each_entry(bond, &bn->dev_list, bond_list) {
56 if (res > (PAGE_SIZE - IFNAMSIZ)) { 60 if (res > (PAGE_SIZE - IFNAMSIZ)) {
57 /* not enough space for another interface name */ 61 /* not enough space for another interface name */
58 if ((PAGE_SIZE - res) > 10) 62 if ((PAGE_SIZE - res) > 10)
@@ -69,11 +73,12 @@ static ssize_t bonding_show_bonds(struct class *cls, char *buf)
69 return res; 73 return res;
70} 74}
71 75
72static struct net_device *bond_get_by_name(const char *ifname) 76static struct net_device *bond_get_by_name(struct net *net, const char *ifname)
73{ 77{
78 struct bond_net *bn = net_generic(net, bond_net_id);
74 struct bonding *bond; 79 struct bonding *bond;
75 80
76 list_for_each_entry(bond, &bond_dev_list, bond_list) { 81 list_for_each_entry(bond, &bn->dev_list, bond_list) {
77 if (strncmp(bond->dev->name, ifname, IFNAMSIZ) == 0) 82 if (strncmp(bond->dev->name, ifname, IFNAMSIZ) == 0)
78 return bond->dev; 83 return bond->dev;
79 } 84 }
@@ -91,6 +96,7 @@ static struct net_device *bond_get_by_name(const char *ifname)
91static ssize_t bonding_store_bonds(struct class *cls, 96static ssize_t bonding_store_bonds(struct class *cls,
92 const char *buffer, size_t count) 97 const char *buffer, size_t count)
93{ 98{
99 struct net *net = current->nsproxy->net_ns;
94 char command[IFNAMSIZ + 1] = {0, }; 100 char command[IFNAMSIZ + 1] = {0, };
95 char *ifname; 101 char *ifname;
96 int rv, res = count; 102 int rv, res = count;
@@ -104,7 +110,7 @@ static ssize_t bonding_store_bonds(struct class *cls,
104 if (command[0] == '+') { 110 if (command[0] == '+') {
105 pr_info(DRV_NAME 111 pr_info(DRV_NAME
106 ": %s is being created...\n", ifname); 112 ": %s is being created...\n", ifname);
107 rv = bond_create(ifname); 113 rv = bond_create(net, ifname);
108 if (rv) { 114 if (rv) {
109 pr_info(DRV_NAME ": Bond creation failed.\n"); 115 pr_info(DRV_NAME ": Bond creation failed.\n");
110 res = rv; 116 res = rv;
@@ -113,7 +119,7 @@ static ssize_t bonding_store_bonds(struct class *cls,
113 struct net_device *bond_dev; 119 struct net_device *bond_dev;
114 120
115 rtnl_lock(); 121 rtnl_lock();
116 bond_dev = bond_get_by_name(ifname); 122 bond_dev = bond_get_by_name(net, ifname);
117 if (bond_dev) { 123 if (bond_dev) {
118 pr_info(DRV_NAME ": %s is being deleted...\n", 124 pr_info(DRV_NAME ": %s is being deleted...\n",
119 ifname); 125 ifname);
@@ -238,8 +244,7 @@ static ssize_t bonding_store_slaves(struct device *d,
238 /* Got a slave name in ifname. Is it already in the list? */ 244 /* Got a slave name in ifname. Is it already in the list? */
239 found = 0; 245 found = 0;
240 246
241 /* FIXME: get netns from sysfs object */ 247 dev = __dev_get_by_name(dev_net(bond->dev), ifname);
242 dev = __dev_get_by_name(&init_net, ifname);
243 if (!dev) { 248 if (!dev) {
244 pr_info(DRV_NAME 249 pr_info(DRV_NAME
245 ": %s: Interface %s does not exist!\n", 250 ": %s: Interface %s does not exist!\n",
@@ -1213,6 +1218,58 @@ static DEVICE_ATTR(primary, S_IRUGO | S_IWUSR,
1213 bonding_show_primary, bonding_store_primary); 1218 bonding_show_primary, bonding_store_primary);
1214 1219
1215/* 1220/*
1221 * Show and set the primary_reselect flag.
1222 */
1223static ssize_t bonding_show_primary_reselect(struct device *d,
1224 struct device_attribute *attr,
1225 char *buf)
1226{
1227 struct bonding *bond = to_bond(d);
1228
1229 return sprintf(buf, "%s %d\n",
1230 pri_reselect_tbl[bond->params.primary_reselect].modename,
1231 bond->params.primary_reselect);
1232}
1233
1234static ssize_t bonding_store_primary_reselect(struct device *d,
1235 struct device_attribute *attr,
1236 const char *buf, size_t count)
1237{
1238 int new_value, ret = count;
1239 struct bonding *bond = to_bond(d);
1240
1241 if (!rtnl_trylock())
1242 return restart_syscall();
1243
1244 new_value = bond_parse_parm(buf, pri_reselect_tbl);
1245 if (new_value < 0) {
1246 pr_err(DRV_NAME
1247 ": %s: Ignoring invalid primary_reselect value %.*s.\n",
1248 bond->dev->name,
1249 (int) strlen(buf) - 1, buf);
1250 ret = -EINVAL;
1251 goto out;
1252 }
1253
1254 bond->params.primary_reselect = new_value;
1255 pr_info(DRV_NAME ": %s: setting primary_reselect to %s (%d).\n",
1256 bond->dev->name, pri_reselect_tbl[new_value].modename,
1257 new_value);
1258
1259 read_lock(&bond->lock);
1260 write_lock_bh(&bond->curr_slave_lock);
1261 bond_select_active_slave(bond);
1262 write_unlock_bh(&bond->curr_slave_lock);
1263 read_unlock(&bond->lock);
1264out:
1265 rtnl_unlock();
1266 return ret;
1267}
1268static DEVICE_ATTR(primary_reselect, S_IRUGO | S_IWUSR,
1269 bonding_show_primary_reselect,
1270 bonding_store_primary_reselect);
1271
1272/*
1216 * Show and set the use_carrier flag. 1273 * Show and set the use_carrier flag.
1217 */ 1274 */
1218static ssize_t bonding_show_carrier(struct device *d, 1275static ssize_t bonding_show_carrier(struct device *d,
@@ -1501,6 +1558,7 @@ static struct attribute *per_bond_attrs[] = {
1501 &dev_attr_num_unsol_na.attr, 1558 &dev_attr_num_unsol_na.attr,
1502 &dev_attr_miimon.attr, 1559 &dev_attr_miimon.attr,
1503 &dev_attr_primary.attr, 1560 &dev_attr_primary.attr,
1561 &dev_attr_primary_reselect.attr,
1504 &dev_attr_use_carrier.attr, 1562 &dev_attr_use_carrier.attr,
1505 &dev_attr_active_slave.attr, 1563 &dev_attr_active_slave.attr,
1506 &dev_attr_mii_status.attr, 1564 &dev_attr_mii_status.attr,
@@ -1563,24 +1621,8 @@ void bond_destroy_sysfs(void)
1563 * Initialize sysfs for each bond. This sets up and registers 1621 * Initialize sysfs for each bond. This sets up and registers
1564 * the 'bondctl' directory for each individual bond under /sys/class/net. 1622 * the 'bondctl' directory for each individual bond under /sys/class/net.
1565 */ 1623 */
1566int bond_create_sysfs_entry(struct bonding *bond) 1624void bond_prepare_sysfs_group(struct bonding *bond)
1567{ 1625{
1568 struct net_device *dev = bond->dev; 1626 bond->dev->sysfs_groups[0] = &bonding_group;
1569 int err;
1570
1571 err = sysfs_create_group(&(dev->dev.kobj), &bonding_group);
1572 if (err)
1573 pr_emerg("eek! didn't create group!\n");
1574
1575 return err;
1576}
1577/*
1578 * Remove sysfs entries for each bond.
1579 */
1580void bond_destroy_sysfs_entry(struct bonding *bond)
1581{
1582 struct net_device *dev = bond->dev;
1583
1584 sysfs_remove_group(&(dev->dev.kobj), &bonding_group);
1585} 1627}
1586 1628
diff --git a/drivers/net/bonding/bonding.h b/drivers/net/bonding/bonding.h
index 68247714466f..a51ae7dc8d51 100644
--- a/drivers/net/bonding/bonding.h
+++ b/drivers/net/bonding/bonding.h
@@ -23,15 +23,13 @@
23#include "bond_3ad.h" 23#include "bond_3ad.h"
24#include "bond_alb.h" 24#include "bond_alb.h"
25 25
26#define DRV_VERSION "3.5.0" 26#define DRV_VERSION "3.6.0"
27#define DRV_RELDATE "November 4, 2008" 27#define DRV_RELDATE "September 26, 2009"
28#define DRV_NAME "bonding" 28#define DRV_NAME "bonding"
29#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver" 29#define DRV_DESCRIPTION "Ethernet Channel Bonding Driver"
30 30
31#define BOND_MAX_ARP_TARGETS 16 31#define BOND_MAX_ARP_TARGETS 16
32 32
33extern struct list_head bond_dev_list;
34
35#define IS_UP(dev) \ 33#define IS_UP(dev) \
36 ((((dev)->flags & IFF_UP) == IFF_UP) && \ 34 ((((dev)->flags & IFF_UP) == IFF_UP) && \
37 netif_running(dev) && \ 35 netif_running(dev) && \
@@ -131,6 +129,7 @@ struct bond_params {
131 int lacp_fast; 129 int lacp_fast;
132 int ad_select; 130 int ad_select;
133 char primary[IFNAMSIZ]; 131 char primary[IFNAMSIZ];
132 int primary_reselect;
134 __be32 arp_targets[BOND_MAX_ARP_TARGETS]; 133 __be32 arp_targets[BOND_MAX_ARP_TARGETS];
135}; 134};
136 135
@@ -190,6 +189,7 @@ struct bonding {
190 struct slave *curr_active_slave; 189 struct slave *curr_active_slave;
191 struct slave *current_arp_slave; 190 struct slave *current_arp_slave;
192 struct slave *primary_slave; 191 struct slave *primary_slave;
192 bool force_primary;
193 s32 slave_cnt; /* never change this value outside the attach/detach wrappers */ 193 s32 slave_cnt; /* never change this value outside the attach/detach wrappers */
194 rwlock_t lock; 194 rwlock_t lock;
195 rwlock_t curr_slave_lock; 195 rwlock_t curr_slave_lock;
@@ -204,7 +204,7 @@ struct bonding {
204#endif /* CONFIG_PROC_FS */ 204#endif /* CONFIG_PROC_FS */
205 struct list_head bond_list; 205 struct list_head bond_list;
206 struct dev_mc_list *mc_list; 206 struct dev_mc_list *mc_list;
207 int (*xmit_hash_policy)(struct sk_buff *, struct net_device *, int); 207 int (*xmit_hash_policy)(struct sk_buff *, int);
208 __be32 master_ip; 208 __be32 master_ip;
209 u16 flags; 209 u16 flags;
210 u16 rr_tx_counter; 210 u16 rr_tx_counter;
@@ -258,6 +258,10 @@ static inline bool bond_is_lb(const struct bonding *bond)
258 || bond->params.mode == BOND_MODE_ALB; 258 || bond->params.mode == BOND_MODE_ALB;
259} 259}
260 260
261#define BOND_PRI_RESELECT_ALWAYS 0
262#define BOND_PRI_RESELECT_BETTER 1
263#define BOND_PRI_RESELECT_FAILURE 2
264
261#define BOND_FOM_NONE 0 265#define BOND_FOM_NONE 0
262#define BOND_FOM_ACTIVE 1 266#define BOND_FOM_ACTIVE 1
263#define BOND_FOM_FOLLOW 2 267#define BOND_FOM_FOLLOW 2
@@ -321,12 +325,11 @@ static inline void bond_unset_master_alb_flags(struct bonding *bond)
321 325
322struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr); 326struct vlan_entry *bond_next_vlan(struct bonding *bond, struct vlan_entry *curr);
323int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev); 327int bond_dev_queue_xmit(struct bonding *bond, struct sk_buff *skb, struct net_device *slave_dev);
324int bond_create(const char *name); 328int bond_create(struct net *net, const char *name);
325int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev); 329int bond_release_and_destroy(struct net_device *bond_dev, struct net_device *slave_dev);
326int bond_create_sysfs(void); 330int bond_create_sysfs(void);
327void bond_destroy_sysfs(void); 331void bond_destroy_sysfs(void);
328void bond_destroy_sysfs_entry(struct bonding *bond); 332void bond_prepare_sysfs_group(struct bonding *bond);
329int bond_create_sysfs_entry(struct bonding *bond);
330int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave); 333int bond_create_slave_symlinks(struct net_device *master, struct net_device *slave);
331void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave); 334void bond_destroy_slave_symlinks(struct net_device *master, struct net_device *slave);
332int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev); 335int bond_enslave(struct net_device *bond_dev, struct net_device *slave_dev);
@@ -341,13 +344,22 @@ void bond_change_active_slave(struct bonding *bond, struct slave *new_active);
341void bond_register_arp(struct bonding *); 344void bond_register_arp(struct bonding *);
342void bond_unregister_arp(struct bonding *); 345void bond_unregister_arp(struct bonding *);
343 346
347struct bond_net {
348 struct net * net; /* Associated network namespace */
349 struct list_head dev_list;
350#ifdef CONFIG_PROC_FS
351 struct proc_dir_entry * proc_dir;
352#endif
353};
354
344/* exported from bond_main.c */ 355/* exported from bond_main.c */
345extern struct list_head bond_dev_list; 356extern int bond_net_id;
346extern const struct bond_parm_tbl bond_lacp_tbl[]; 357extern const struct bond_parm_tbl bond_lacp_tbl[];
347extern const struct bond_parm_tbl bond_mode_tbl[]; 358extern const struct bond_parm_tbl bond_mode_tbl[];
348extern const struct bond_parm_tbl xmit_hashtype_tbl[]; 359extern const struct bond_parm_tbl xmit_hashtype_tbl[];
349extern const struct bond_parm_tbl arp_validate_tbl[]; 360extern const struct bond_parm_tbl arp_validate_tbl[];
350extern const struct bond_parm_tbl fail_over_mac_tbl[]; 361extern const struct bond_parm_tbl fail_over_mac_tbl[];
362extern const struct bond_parm_tbl pri_reselect_tbl[];
351extern struct bond_parm_tbl ad_select_tbl[]; 363extern struct bond_parm_tbl ad_select_tbl[];
352 364
353#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE) 365#if defined(CONFIG_IPV6) || defined(CONFIG_IPV6_MODULE)
@@ -370,4 +382,3 @@ static inline void bond_unregister_ipv6_notifier(void)
370#endif 382#endif
371 383
372#endif /* _LINUX_BONDING_H */ 384#endif /* _LINUX_BONDING_H */
373
diff --git a/drivers/net/can/Kconfig b/drivers/net/can/Kconfig
index 772f6d2489ce..732b093e0815 100644
--- a/drivers/net/can/Kconfig
+++ b/drivers/net/can/Kconfig
@@ -41,6 +41,38 @@ config CAN_AT91
41 ---help--- 41 ---help---
42 This is a driver for the SoC CAN controller in Atmel's AT91SAM9263. 42 This is a driver for the SoC CAN controller in Atmel's AT91SAM9263.
43 43
44config CAN_TI_HECC
45 depends on CAN_DEV && ARCH_OMAP3
46 tristate "TI High End CAN Controller"
47 ---help---
48 Driver for TI HECC (High End CAN Controller) module found on many
49 TI devices. The device specifications are available from www.ti.com
50
51config CAN_MCP251X
52 tristate "Microchip MCP251x SPI CAN controllers"
53 depends on CAN_DEV && SPI
54 ---help---
55 Driver for the Microchip MCP251x SPI CAN controllers.
56
57config CAN_MSCAN
58 depends on CAN_DEV && (PPC || M68K || M68KNOMMU)
59 tristate "Support for Freescale MSCAN based chips"
60 ---help---
61 The Motorola Scalable Controller Area Network (MSCAN) definition
62 is based on the MSCAN12 definition which is the specific
63 implementation of the Motorola Scalable CAN concept targeted for
64 the Motorola MC68HC12 Microcontroller Family.
65
66config CAN_MPC52XX
67 tristate "Freescale MPC5xxx onboard CAN controller"
68 depends on CAN_MSCAN && PPC_MPC52xx
69 ---help---
70 If you say yes here you get support for Freescale's MPC52xx
71 onboard dualCAN controller.
72
73 This driver can also be built as a module. If so, the module
74 will be called mpc5xxx_can.
75
44source "drivers/net/can/sja1000/Kconfig" 76source "drivers/net/can/sja1000/Kconfig"
45 77
46source "drivers/net/can/usb/Kconfig" 78source "drivers/net/can/usb/Kconfig"
diff --git a/drivers/net/can/Makefile b/drivers/net/can/Makefile
index 0dea62721f2f..56899fef1c6a 100644
--- a/drivers/net/can/Makefile
+++ b/drivers/net/can/Makefile
@@ -10,6 +10,9 @@ can-dev-y := dev.o
10obj-y += usb/ 10obj-y += usb/
11 11
12obj-$(CONFIG_CAN_SJA1000) += sja1000/ 12obj-$(CONFIG_CAN_SJA1000) += sja1000/
13obj-$(CONFIG_CAN_MSCAN) += mscan/
13obj-$(CONFIG_CAN_AT91) += at91_can.o 14obj-$(CONFIG_CAN_AT91) += at91_can.o
15obj-$(CONFIG_CAN_TI_HECC) += ti_hecc.o
16obj-$(CONFIG_CAN_MCP251X) += mcp251x.o
14 17
15ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG 18ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/at91_can.c b/drivers/net/can/at91_can.c
index f67ae285a35a..cbe3fce53e3b 100644
--- a/drivers/net/can/at91_can.c
+++ b/drivers/net/can/at91_can.c
@@ -221,38 +221,6 @@ static inline void set_mb_mode(const struct at91_priv *priv, unsigned int mb,
221 set_mb_mode_prio(priv, mb, mode, 0); 221 set_mb_mode_prio(priv, mb, mode, 0);
222} 222}
223 223
224static struct sk_buff *alloc_can_skb(struct net_device *dev,
225 struct can_frame **cf)
226{
227 struct sk_buff *skb;
228
229 skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
230 if (unlikely(!skb))
231 return NULL;
232
233 skb->protocol = htons(ETH_P_CAN);
234 skb->ip_summed = CHECKSUM_UNNECESSARY;
235 *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
236
237 return skb;
238}
239
240static struct sk_buff *alloc_can_err_skb(struct net_device *dev,
241 struct can_frame **cf)
242{
243 struct sk_buff *skb;
244
245 skb = alloc_can_skb(dev, cf);
246 if (unlikely(!skb))
247 return NULL;
248
249 memset(*cf, 0, sizeof(struct can_frame));
250 (*cf)->can_id = CAN_ERR_FLAG;
251 (*cf)->can_dlc = CAN_ERR_DLC;
252
253 return skb;
254}
255
256/* 224/*
257 * Swtich transceiver on or off 225 * Swtich transceiver on or off
258 */ 226 */
@@ -1087,7 +1055,7 @@ static int __init at91_can_probe(struct platform_device *pdev)
1087 goto exit_release; 1055 goto exit_release;
1088 } 1056 }
1089 1057
1090 dev = alloc_candev(sizeof(struct at91_priv)); 1058 dev = alloc_candev(sizeof(struct at91_priv), AT91_MB_TX_NUM);
1091 if (!dev) { 1059 if (!dev) {
1092 err = -ENOMEM; 1060 err = -ENOMEM;
1093 goto exit_iounmap; 1061 goto exit_iounmap;
diff --git a/drivers/net/can/dev.c b/drivers/net/can/dev.c
index 2868fe842a41..c1bb29f0322b 100644
--- a/drivers/net/can/dev.c
+++ b/drivers/net/can/dev.c
@@ -245,7 +245,7 @@ static void can_flush_echo_skb(struct net_device *dev)
245 struct net_device_stats *stats = &dev->stats; 245 struct net_device_stats *stats = &dev->stats;
246 int i; 246 int i;
247 247
248 for (i = 0; i < CAN_ECHO_SKB_MAX; i++) { 248 for (i = 0; i < priv->echo_skb_max; i++) {
249 if (priv->echo_skb[i]) { 249 if (priv->echo_skb[i]) {
250 kfree_skb(priv->echo_skb[i]); 250 kfree_skb(priv->echo_skb[i]);
251 priv->echo_skb[i] = NULL; 251 priv->echo_skb[i] = NULL;
@@ -262,10 +262,13 @@ static void can_flush_echo_skb(struct net_device *dev)
262 * of the device driver. The driver must protect access to 262 * of the device driver. The driver must protect access to
263 * priv->echo_skb, if necessary. 263 * priv->echo_skb, if necessary.
264 */ 264 */
265void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev, int idx) 265void can_put_echo_skb(struct sk_buff *skb, struct net_device *dev,
266 unsigned int idx)
266{ 267{
267 struct can_priv *priv = netdev_priv(dev); 268 struct can_priv *priv = netdev_priv(dev);
268 269
270 BUG_ON(idx >= priv->echo_skb_max);
271
269 /* check flag whether this packet has to be looped back */ 272 /* check flag whether this packet has to be looped back */
270 if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK) { 273 if (!(dev->flags & IFF_ECHO) || skb->pkt_type != PACKET_LOOPBACK) {
271 kfree_skb(skb); 274 kfree_skb(skb);
@@ -311,10 +314,12 @@ EXPORT_SYMBOL_GPL(can_put_echo_skb);
311 * is handled in the device driver. The driver must protect 314 * is handled in the device driver. The driver must protect
312 * access to priv->echo_skb, if necessary. 315 * access to priv->echo_skb, if necessary.
313 */ 316 */
314void can_get_echo_skb(struct net_device *dev, int idx) 317void can_get_echo_skb(struct net_device *dev, unsigned int idx)
315{ 318{
316 struct can_priv *priv = netdev_priv(dev); 319 struct can_priv *priv = netdev_priv(dev);
317 320
321 BUG_ON(idx >= priv->echo_skb_max);
322
318 if (priv->echo_skb[idx]) { 323 if (priv->echo_skb[idx]) {
319 netif_rx(priv->echo_skb[idx]); 324 netif_rx(priv->echo_skb[idx]);
320 priv->echo_skb[idx] = NULL; 325 priv->echo_skb[idx] = NULL;
@@ -327,10 +332,12 @@ EXPORT_SYMBOL_GPL(can_get_echo_skb);
327 * 332 *
328 * The function is typically called when TX failed. 333 * The function is typically called when TX failed.
329 */ 334 */
330void can_free_echo_skb(struct net_device *dev, int idx) 335void can_free_echo_skb(struct net_device *dev, unsigned int idx)
331{ 336{
332 struct can_priv *priv = netdev_priv(dev); 337 struct can_priv *priv = netdev_priv(dev);
333 338
339 BUG_ON(idx >= priv->echo_skb_max);
340
334 if (priv->echo_skb[idx]) { 341 if (priv->echo_skb[idx]) {
335 kfree_skb(priv->echo_skb[idx]); 342 kfree_skb(priv->echo_skb[idx]);
336 priv->echo_skb[idx] = NULL; 343 priv->echo_skb[idx] = NULL;
@@ -359,17 +366,12 @@ void can_restart(unsigned long data)
359 can_flush_echo_skb(dev); 366 can_flush_echo_skb(dev);
360 367
361 /* send restart message upstream */ 368 /* send restart message upstream */
362 skb = dev_alloc_skb(sizeof(struct can_frame)); 369 skb = alloc_can_err_skb(dev, &cf);
363 if (skb == NULL) { 370 if (skb == NULL) {
364 err = -ENOMEM; 371 err = -ENOMEM;
365 goto restart; 372 goto restart;
366 } 373 }
367 skb->dev = dev; 374 cf->can_id |= CAN_ERR_RESTARTED;
368 skb->protocol = htons(ETH_P_CAN);
369 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
370 memset(cf, 0, sizeof(struct can_frame));
371 cf->can_id = CAN_ERR_FLAG | CAN_ERR_RESTARTED;
372 cf->can_dlc = CAN_ERR_DLC;
373 375
374 netif_rx(skb); 376 netif_rx(skb);
375 377
@@ -442,20 +444,66 @@ static void can_setup(struct net_device *dev)
442 dev->features = NETIF_F_NO_CSUM; 444 dev->features = NETIF_F_NO_CSUM;
443} 445}
444 446
447struct sk_buff *alloc_can_skb(struct net_device *dev, struct can_frame **cf)
448{
449 struct sk_buff *skb;
450
451 skb = netdev_alloc_skb(dev, sizeof(struct can_frame));
452 if (unlikely(!skb))
453 return NULL;
454
455 skb->protocol = htons(ETH_P_CAN);
456 skb->pkt_type = PACKET_BROADCAST;
457 skb->ip_summed = CHECKSUM_UNNECESSARY;
458 *cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
459 memset(*cf, 0, sizeof(struct can_frame));
460
461 return skb;
462}
463EXPORT_SYMBOL_GPL(alloc_can_skb);
464
465struct sk_buff *alloc_can_err_skb(struct net_device *dev, struct can_frame **cf)
466{
467 struct sk_buff *skb;
468
469 skb = alloc_can_skb(dev, cf);
470 if (unlikely(!skb))
471 return NULL;
472
473 (*cf)->can_id = CAN_ERR_FLAG;
474 (*cf)->can_dlc = CAN_ERR_DLC;
475
476 return skb;
477}
478EXPORT_SYMBOL_GPL(alloc_can_err_skb);
479
445/* 480/*
446 * Allocate and setup space for the CAN network device 481 * Allocate and setup space for the CAN network device
447 */ 482 */
448struct net_device *alloc_candev(int sizeof_priv) 483struct net_device *alloc_candev(int sizeof_priv, unsigned int echo_skb_max)
449{ 484{
450 struct net_device *dev; 485 struct net_device *dev;
451 struct can_priv *priv; 486 struct can_priv *priv;
487 int size;
452 488
453 dev = alloc_netdev(sizeof_priv, "can%d", can_setup); 489 if (echo_skb_max)
490 size = ALIGN(sizeof_priv, sizeof(struct sk_buff *)) +
491 echo_skb_max * sizeof(struct sk_buff *);
492 else
493 size = sizeof_priv;
494
495 dev = alloc_netdev(size, "can%d", can_setup);
454 if (!dev) 496 if (!dev)
455 return NULL; 497 return NULL;
456 498
457 priv = netdev_priv(dev); 499 priv = netdev_priv(dev);
458 500
501 if (echo_skb_max) {
502 priv->echo_skb_max = echo_skb_max;
503 priv->echo_skb = (void *)priv +
504 ALIGN(sizeof_priv, sizeof(struct sk_buff *));
505 }
506
459 priv->state = CAN_STATE_STOPPED; 507 priv->state = CAN_STATE_STOPPED;
460 508
461 init_timer(&priv->restart_timer); 509 init_timer(&priv->restart_timer);
@@ -647,7 +695,7 @@ nla_put_failure:
647 return -EMSGSIZE; 695 return -EMSGSIZE;
648} 696}
649 697
650static int can_newlink(struct net_device *dev, 698static int can_newlink(struct net *src_net, struct net_device *dev,
651 struct nlattr *tb[], struct nlattr *data[]) 699 struct nlattr *tb[], struct nlattr *data[])
652{ 700{
653 return -EOPNOTSUPP; 701 return -EOPNOTSUPP;
diff --git a/drivers/net/can/mcp251x.c b/drivers/net/can/mcp251x.c
new file mode 100644
index 000000000000..8f48f4b50b7c
--- /dev/null
+++ b/drivers/net/can/mcp251x.c
@@ -0,0 +1,1164 @@
1/*
2 * CAN bus driver for Microchip 251x CAN Controller with SPI Interface
3 *
4 * MCP2510 support and bug fixes by Christian Pellegrin
5 * <chripell@evolware.org>
6 *
7 * Copyright 2009 Christian Pellegrin EVOL S.r.l.
8 *
9 * Copyright 2007 Raymarine UK, Ltd. All Rights Reserved.
10 * Written under contract by:
11 * Chris Elston, Katalix Systems, Ltd.
12 *
13 * Based on Microchip MCP251x CAN controller driver written by
14 * David Vrabel, Copyright 2006 Arcom Control Systems Ltd.
15 *
16 * Based on CAN bus driver for the CCAN controller written by
17 * - Sascha Hauer, Marc Kleine-Budde, Pengutronix
18 * - Simon Kallweit, intefo AG
19 * Copyright 2007
20 *
21 * This program is free software; you can redistribute it and/or modify
22 * it under the terms of the version 2 of the GNU General Public License
23 * as published by the Free Software Foundation
24 *
25 * This program is distributed in the hope that it will be useful,
26 * but WITHOUT ANY WARRANTY; without even the implied warranty of
27 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
28 * GNU General Public License for more details.
29 *
30 * You should have received a copy of the GNU General Public License
31 * along with this program; if not, write to the Free Software
32 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
33 *
34 *
35 *
36 * Your platform definition file should specify something like:
37 *
38 * static struct mcp251x_platform_data mcp251x_info = {
39 * .oscillator_frequency = 8000000,
40 * .board_specific_setup = &mcp251x_setup,
41 * .model = CAN_MCP251X_MCP2510,
42 * .power_enable = mcp251x_power_enable,
43 * .transceiver_enable = NULL,
44 * };
45 *
46 * static struct spi_board_info spi_board_info[] = {
47 * {
48 * .modalias = "mcp251x",
49 * .platform_data = &mcp251x_info,
50 * .irq = IRQ_EINT13,
51 * .max_speed_hz = 2*1000*1000,
52 * .chip_select = 2,
53 * },
54 * };
55 *
56 * Please see mcp251x.h for a description of the fields in
57 * struct mcp251x_platform_data.
58 *
59 */
60
61#include <linux/can.h>
62#include <linux/can/core.h>
63#include <linux/can/dev.h>
64#include <linux/can/platform/mcp251x.h>
65#include <linux/completion.h>
66#include <linux/delay.h>
67#include <linux/device.h>
68#include <linux/dma-mapping.h>
69#include <linux/freezer.h>
70#include <linux/interrupt.h>
71#include <linux/io.h>
72#include <linux/kernel.h>
73#include <linux/module.h>
74#include <linux/netdevice.h>
75#include <linux/platform_device.h>
76#include <linux/spi/spi.h>
77#include <linux/uaccess.h>
78
79/* SPI interface instruction set */
80#define INSTRUCTION_WRITE 0x02
81#define INSTRUCTION_READ 0x03
82#define INSTRUCTION_BIT_MODIFY 0x05
83#define INSTRUCTION_LOAD_TXB(n) (0x40 + 2 * (n))
84#define INSTRUCTION_READ_RXB(n) (((n) == 0) ? 0x90 : 0x94)
85#define INSTRUCTION_RESET 0xC0
86
87/* MPC251x registers */
88#define CANSTAT 0x0e
89#define CANCTRL 0x0f
90# define CANCTRL_REQOP_MASK 0xe0
91# define CANCTRL_REQOP_CONF 0x80
92# define CANCTRL_REQOP_LISTEN_ONLY 0x60
93# define CANCTRL_REQOP_LOOPBACK 0x40
94# define CANCTRL_REQOP_SLEEP 0x20
95# define CANCTRL_REQOP_NORMAL 0x00
96# define CANCTRL_OSM 0x08
97# define CANCTRL_ABAT 0x10
98#define TEC 0x1c
99#define REC 0x1d
100#define CNF1 0x2a
101# define CNF1_SJW_SHIFT 6
102#define CNF2 0x29
103# define CNF2_BTLMODE 0x80
104# define CNF2_SAM 0x40
105# define CNF2_PS1_SHIFT 3
106#define CNF3 0x28
107# define CNF3_SOF 0x08
108# define CNF3_WAKFIL 0x04
109# define CNF3_PHSEG2_MASK 0x07
110#define CANINTE 0x2b
111# define CANINTE_MERRE 0x80
112# define CANINTE_WAKIE 0x40
113# define CANINTE_ERRIE 0x20
114# define CANINTE_TX2IE 0x10
115# define CANINTE_TX1IE 0x08
116# define CANINTE_TX0IE 0x04
117# define CANINTE_RX1IE 0x02
118# define CANINTE_RX0IE 0x01
119#define CANINTF 0x2c
120# define CANINTF_MERRF 0x80
121# define CANINTF_WAKIF 0x40
122# define CANINTF_ERRIF 0x20
123# define CANINTF_TX2IF 0x10
124# define CANINTF_TX1IF 0x08
125# define CANINTF_TX0IF 0x04
126# define CANINTF_RX1IF 0x02
127# define CANINTF_RX0IF 0x01
128#define EFLG 0x2d
129# define EFLG_EWARN 0x01
130# define EFLG_RXWAR 0x02
131# define EFLG_TXWAR 0x04
132# define EFLG_RXEP 0x08
133# define EFLG_TXEP 0x10
134# define EFLG_TXBO 0x20
135# define EFLG_RX0OVR 0x40
136# define EFLG_RX1OVR 0x80
137#define TXBCTRL(n) (((n) * 0x10) + 0x30 + TXBCTRL_OFF)
138# define TXBCTRL_ABTF 0x40
139# define TXBCTRL_MLOA 0x20
140# define TXBCTRL_TXERR 0x10
141# define TXBCTRL_TXREQ 0x08
142#define TXBSIDH(n) (((n) * 0x10) + 0x30 + TXBSIDH_OFF)
143# define SIDH_SHIFT 3
144#define TXBSIDL(n) (((n) * 0x10) + 0x30 + TXBSIDL_OFF)
145# define SIDL_SID_MASK 7
146# define SIDL_SID_SHIFT 5
147# define SIDL_EXIDE_SHIFT 3
148# define SIDL_EID_SHIFT 16
149# define SIDL_EID_MASK 3
150#define TXBEID8(n) (((n) * 0x10) + 0x30 + TXBEID8_OFF)
151#define TXBEID0(n) (((n) * 0x10) + 0x30 + TXBEID0_OFF)
152#define TXBDLC(n) (((n) * 0x10) + 0x30 + TXBDLC_OFF)
153# define DLC_RTR_SHIFT 6
154#define TXBCTRL_OFF 0
155#define TXBSIDH_OFF 1
156#define TXBSIDL_OFF 2
157#define TXBEID8_OFF 3
158#define TXBEID0_OFF 4
159#define TXBDLC_OFF 5
160#define TXBDAT_OFF 6
161#define RXBCTRL(n) (((n) * 0x10) + 0x60 + RXBCTRL_OFF)
162# define RXBCTRL_BUKT 0x04
163# define RXBCTRL_RXM0 0x20
164# define RXBCTRL_RXM1 0x40
165#define RXBSIDH(n) (((n) * 0x10) + 0x60 + RXBSIDH_OFF)
166# define RXBSIDH_SHIFT 3
167#define RXBSIDL(n) (((n) * 0x10) + 0x60 + RXBSIDL_OFF)
168# define RXBSIDL_IDE 0x08
169# define RXBSIDL_EID 3
170# define RXBSIDL_SHIFT 5
171#define RXBEID8(n) (((n) * 0x10) + 0x60 + RXBEID8_OFF)
172#define RXBEID0(n) (((n) * 0x10) + 0x60 + RXBEID0_OFF)
173#define RXBDLC(n) (((n) * 0x10) + 0x60 + RXBDLC_OFF)
174# define RXBDLC_LEN_MASK 0x0f
175# define RXBDLC_RTR 0x40
176#define RXBCTRL_OFF 0
177#define RXBSIDH_OFF 1
178#define RXBSIDL_OFF 2
179#define RXBEID8_OFF 3
180#define RXBEID0_OFF 4
181#define RXBDLC_OFF 5
182#define RXBDAT_OFF 6
183
184#define GET_BYTE(val, byte) \
185 (((val) >> ((byte) * 8)) & 0xff)
186#define SET_BYTE(val, byte) \
187 (((val) & 0xff) << ((byte) * 8))
188
189/*
190 * Buffer size required for the largest SPI transfer (i.e., reading a
191 * frame)
192 */
193#define CAN_FRAME_MAX_DATA_LEN 8
194#define SPI_TRANSFER_BUF_LEN (6 + CAN_FRAME_MAX_DATA_LEN)
195#define CAN_FRAME_MAX_BITS 128
196
197#define TX_ECHO_SKB_MAX 1
198
199#define DEVICE_NAME "mcp251x"
200
201static int mcp251x_enable_dma; /* Enable SPI DMA. Default: 0 (Off) */
202module_param(mcp251x_enable_dma, int, S_IRUGO);
203MODULE_PARM_DESC(mcp251x_enable_dma, "Enable SPI DMA. Default: 0 (Off)");
204
205static struct can_bittiming_const mcp251x_bittiming_const = {
206 .name = DEVICE_NAME,
207 .tseg1_min = 3,
208 .tseg1_max = 16,
209 .tseg2_min = 2,
210 .tseg2_max = 8,
211 .sjw_max = 4,
212 .brp_min = 1,
213 .brp_max = 64,
214 .brp_inc = 1,
215};
216
217struct mcp251x_priv {
218 struct can_priv can;
219 struct net_device *net;
220 struct spi_device *spi;
221
222 struct mutex spi_lock; /* SPI buffer lock */
223 u8 *spi_tx_buf;
224 u8 *spi_rx_buf;
225 dma_addr_t spi_tx_dma;
226 dma_addr_t spi_rx_dma;
227
228 struct sk_buff *tx_skb;
229 int tx_len;
230 struct workqueue_struct *wq;
231 struct work_struct tx_work;
232 struct work_struct irq_work;
233 struct completion awake;
234 int wake;
235 int force_quit;
236 int after_suspend;
237#define AFTER_SUSPEND_UP 1
238#define AFTER_SUSPEND_DOWN 2
239#define AFTER_SUSPEND_POWER 4
240#define AFTER_SUSPEND_RESTART 8
241 int restart_tx;
242};
243
244static void mcp251x_clean(struct net_device *net)
245{
246 struct mcp251x_priv *priv = netdev_priv(net);
247
248 net->stats.tx_errors++;
249 if (priv->tx_skb)
250 dev_kfree_skb(priv->tx_skb);
251 if (priv->tx_len)
252 can_free_echo_skb(priv->net, 0);
253 priv->tx_skb = NULL;
254 priv->tx_len = 0;
255}
256
257/*
258 * Note about handling of error return of mcp251x_spi_trans: accessing
259 * registers via SPI is not really different conceptually than using
260 * normal I/O assembler instructions, although it's much more
261 * complicated from a practical POV. So it's not advisable to always
262 * check the return value of this function. Imagine that every
263 * read{b,l}, write{b,l} and friends would be bracketed in "if ( < 0)
264 * error();", it would be a great mess (well there are some situation
265 * when exception handling C++ like could be useful after all). So we
266 * just check that transfers are OK at the beginning of our
267 * conversation with the chip and to avoid doing really nasty things
268 * (like injecting bogus packets in the network stack).
269 */
270static int mcp251x_spi_trans(struct spi_device *spi, int len)
271{
272 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
273 struct spi_transfer t = {
274 .tx_buf = priv->spi_tx_buf,
275 .rx_buf = priv->spi_rx_buf,
276 .len = len,
277 .cs_change = 0,
278 };
279 struct spi_message m;
280 int ret;
281
282 spi_message_init(&m);
283
284 if (mcp251x_enable_dma) {
285 t.tx_dma = priv->spi_tx_dma;
286 t.rx_dma = priv->spi_rx_dma;
287 m.is_dma_mapped = 1;
288 }
289
290 spi_message_add_tail(&t, &m);
291
292 ret = spi_sync(spi, &m);
293 if (ret)
294 dev_err(&spi->dev, "spi transfer failed: ret = %d\n", ret);
295 return ret;
296}
297
298static u8 mcp251x_read_reg(struct spi_device *spi, uint8_t reg)
299{
300 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
301 u8 val = 0;
302
303 mutex_lock(&priv->spi_lock);
304
305 priv->spi_tx_buf[0] = INSTRUCTION_READ;
306 priv->spi_tx_buf[1] = reg;
307
308 mcp251x_spi_trans(spi, 3);
309 val = priv->spi_rx_buf[2];
310
311 mutex_unlock(&priv->spi_lock);
312
313 return val;
314}
315
316static void mcp251x_write_reg(struct spi_device *spi, u8 reg, uint8_t val)
317{
318 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
319
320 mutex_lock(&priv->spi_lock);
321
322 priv->spi_tx_buf[0] = INSTRUCTION_WRITE;
323 priv->spi_tx_buf[1] = reg;
324 priv->spi_tx_buf[2] = val;
325
326 mcp251x_spi_trans(spi, 3);
327
328 mutex_unlock(&priv->spi_lock);
329}
330
331static void mcp251x_write_bits(struct spi_device *spi, u8 reg,
332 u8 mask, uint8_t val)
333{
334 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
335
336 mutex_lock(&priv->spi_lock);
337
338 priv->spi_tx_buf[0] = INSTRUCTION_BIT_MODIFY;
339 priv->spi_tx_buf[1] = reg;
340 priv->spi_tx_buf[2] = mask;
341 priv->spi_tx_buf[3] = val;
342
343 mcp251x_spi_trans(spi, 4);
344
345 mutex_unlock(&priv->spi_lock);
346}
347
348static void mcp251x_hw_tx_frame(struct spi_device *spi, u8 *buf,
349 int len, int tx_buf_idx)
350{
351 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
352 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
353
354 if (pdata->model == CAN_MCP251X_MCP2510) {
355 int i;
356
357 for (i = 1; i < TXBDAT_OFF + len; i++)
358 mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx) + i,
359 buf[i]);
360 } else {
361 mutex_lock(&priv->spi_lock);
362 memcpy(priv->spi_tx_buf, buf, TXBDAT_OFF + len);
363 mcp251x_spi_trans(spi, TXBDAT_OFF + len);
364 mutex_unlock(&priv->spi_lock);
365 }
366}
367
368static void mcp251x_hw_tx(struct spi_device *spi, struct can_frame *frame,
369 int tx_buf_idx)
370{
371 u32 sid, eid, exide, rtr;
372 u8 buf[SPI_TRANSFER_BUF_LEN];
373
374 exide = (frame->can_id & CAN_EFF_FLAG) ? 1 : 0; /* Extended ID Enable */
375 if (exide)
376 sid = (frame->can_id & CAN_EFF_MASK) >> 18;
377 else
378 sid = frame->can_id & CAN_SFF_MASK; /* Standard ID */
379 eid = frame->can_id & CAN_EFF_MASK; /* Extended ID */
380 rtr = (frame->can_id & CAN_RTR_FLAG) ? 1 : 0; /* Remote transmission */
381
382 buf[TXBCTRL_OFF] = INSTRUCTION_LOAD_TXB(tx_buf_idx);
383 buf[TXBSIDH_OFF] = sid >> SIDH_SHIFT;
384 buf[TXBSIDL_OFF] = ((sid & SIDL_SID_MASK) << SIDL_SID_SHIFT) |
385 (exide << SIDL_EXIDE_SHIFT) |
386 ((eid >> SIDL_EID_SHIFT) & SIDL_EID_MASK);
387 buf[TXBEID8_OFF] = GET_BYTE(eid, 1);
388 buf[TXBEID0_OFF] = GET_BYTE(eid, 0);
389 buf[TXBDLC_OFF] = (rtr << DLC_RTR_SHIFT) | frame->can_dlc;
390 memcpy(buf + TXBDAT_OFF, frame->data, frame->can_dlc);
391 mcp251x_hw_tx_frame(spi, buf, frame->can_dlc, tx_buf_idx);
392 mcp251x_write_reg(spi, TXBCTRL(tx_buf_idx), TXBCTRL_TXREQ);
393}
394
395static void mcp251x_hw_rx_frame(struct spi_device *spi, u8 *buf,
396 int buf_idx)
397{
398 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
399 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
400
401 if (pdata->model == CAN_MCP251X_MCP2510) {
402 int i, len;
403
404 for (i = 1; i < RXBDAT_OFF; i++)
405 buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
406 len = buf[RXBDLC_OFF] & RXBDLC_LEN_MASK;
407 if (len > 8)
408 len = 8;
409 for (; i < (RXBDAT_OFF + len); i++)
410 buf[i] = mcp251x_read_reg(spi, RXBCTRL(buf_idx) + i);
411 } else {
412 mutex_lock(&priv->spi_lock);
413
414 priv->spi_tx_buf[RXBCTRL_OFF] = INSTRUCTION_READ_RXB(buf_idx);
415 mcp251x_spi_trans(spi, SPI_TRANSFER_BUF_LEN);
416 memcpy(buf, priv->spi_rx_buf, SPI_TRANSFER_BUF_LEN);
417
418 mutex_unlock(&priv->spi_lock);
419 }
420}
421
422static void mcp251x_hw_rx(struct spi_device *spi, int buf_idx)
423{
424 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
425 struct sk_buff *skb;
426 struct can_frame *frame;
427 u8 buf[SPI_TRANSFER_BUF_LEN];
428
429 skb = alloc_can_skb(priv->net, &frame);
430 if (!skb) {
431 dev_err(&spi->dev, "cannot allocate RX skb\n");
432 priv->net->stats.rx_dropped++;
433 return;
434 }
435
436 mcp251x_hw_rx_frame(spi, buf, buf_idx);
437 if (buf[RXBSIDL_OFF] & RXBSIDL_IDE) {
438 /* Extended ID format */
439 frame->can_id = CAN_EFF_FLAG;
440 frame->can_id |=
441 /* Extended ID part */
442 SET_BYTE(buf[RXBSIDL_OFF] & RXBSIDL_EID, 2) |
443 SET_BYTE(buf[RXBEID8_OFF], 1) |
444 SET_BYTE(buf[RXBEID0_OFF], 0) |
445 /* Standard ID part */
446 (((buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
447 (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT)) << 18);
448 /* Remote transmission request */
449 if (buf[RXBDLC_OFF] & RXBDLC_RTR)
450 frame->can_id |= CAN_RTR_FLAG;
451 } else {
452 /* Standard ID format */
453 frame->can_id =
454 (buf[RXBSIDH_OFF] << RXBSIDH_SHIFT) |
455 (buf[RXBSIDL_OFF] >> RXBSIDL_SHIFT);
456 }
457 /* Data length */
458 frame->can_dlc = buf[RXBDLC_OFF] & RXBDLC_LEN_MASK;
459 if (frame->can_dlc > 8) {
460 dev_warn(&spi->dev, "invalid frame recevied\n");
461 priv->net->stats.rx_errors++;
462 dev_kfree_skb(skb);
463 return;
464 }
465 memcpy(frame->data, buf + RXBDAT_OFF, frame->can_dlc);
466
467 priv->net->stats.rx_packets++;
468 priv->net->stats.rx_bytes += frame->can_dlc;
469 netif_rx(skb);
470}
471
472static void mcp251x_hw_sleep(struct spi_device *spi)
473{
474 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_SLEEP);
475}
476
477static void mcp251x_hw_wakeup(struct spi_device *spi)
478{
479 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
480
481 priv->wake = 1;
482
483 /* Can only wake up by generating a wake-up interrupt. */
484 mcp251x_write_bits(spi, CANINTE, CANINTE_WAKIE, CANINTE_WAKIE);
485 mcp251x_write_bits(spi, CANINTF, CANINTF_WAKIF, CANINTF_WAKIF);
486
487 /* Wait until the device is awake */
488 if (!wait_for_completion_timeout(&priv->awake, HZ))
489 dev_err(&spi->dev, "MCP251x didn't wake-up\n");
490}
491
492static netdev_tx_t mcp251x_hard_start_xmit(struct sk_buff *skb,
493 struct net_device *net)
494{
495 struct mcp251x_priv *priv = netdev_priv(net);
496 struct spi_device *spi = priv->spi;
497
498 if (priv->tx_skb || priv->tx_len) {
499 dev_warn(&spi->dev, "hard_xmit called while tx busy\n");
500 netif_stop_queue(net);
501 return NETDEV_TX_BUSY;
502 }
503
504 if (skb->len != sizeof(struct can_frame)) {
505 dev_err(&spi->dev, "dropping packet - bad length\n");
506 dev_kfree_skb(skb);
507 net->stats.tx_dropped++;
508 return NETDEV_TX_OK;
509 }
510
511 netif_stop_queue(net);
512 priv->tx_skb = skb;
513 net->trans_start = jiffies;
514 queue_work(priv->wq, &priv->tx_work);
515
516 return NETDEV_TX_OK;
517}
518
519static int mcp251x_do_set_mode(struct net_device *net, enum can_mode mode)
520{
521 struct mcp251x_priv *priv = netdev_priv(net);
522
523 switch (mode) {
524 case CAN_MODE_START:
525 /* We have to delay work since SPI I/O may sleep */
526 priv->can.state = CAN_STATE_ERROR_ACTIVE;
527 priv->restart_tx = 1;
528 if (priv->can.restart_ms == 0)
529 priv->after_suspend = AFTER_SUSPEND_RESTART;
530 queue_work(priv->wq, &priv->irq_work);
531 break;
532 default:
533 return -EOPNOTSUPP;
534 }
535
536 return 0;
537}
538
539static void mcp251x_set_normal_mode(struct spi_device *spi)
540{
541 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
542 unsigned long timeout;
543
544 /* Enable interrupts */
545 mcp251x_write_reg(spi, CANINTE,
546 CANINTE_ERRIE | CANINTE_TX2IE | CANINTE_TX1IE |
547 CANINTE_TX0IE | CANINTE_RX1IE | CANINTE_RX0IE |
548 CANINTF_MERRF);
549
550 if (priv->can.ctrlmode & CAN_CTRLMODE_LOOPBACK) {
551 /* Put device into loopback mode */
552 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_LOOPBACK);
553 } else {
554 /* Put device into normal mode */
555 mcp251x_write_reg(spi, CANCTRL, CANCTRL_REQOP_NORMAL);
556
557 /* Wait for the device to enter normal mode */
558 timeout = jiffies + HZ;
559 while (mcp251x_read_reg(spi, CANSTAT) & CANCTRL_REQOP_MASK) {
560 schedule();
561 if (time_after(jiffies, timeout)) {
562 dev_err(&spi->dev, "MCP251x didn't"
563 " enter in normal mode\n");
564 return;
565 }
566 }
567 }
568 priv->can.state = CAN_STATE_ERROR_ACTIVE;
569}
570
571static int mcp251x_do_set_bittiming(struct net_device *net)
572{
573 struct mcp251x_priv *priv = netdev_priv(net);
574 struct can_bittiming *bt = &priv->can.bittiming;
575 struct spi_device *spi = priv->spi;
576
577 mcp251x_write_reg(spi, CNF1, ((bt->sjw - 1) << CNF1_SJW_SHIFT) |
578 (bt->brp - 1));
579 mcp251x_write_reg(spi, CNF2, CNF2_BTLMODE |
580 (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES ?
581 CNF2_SAM : 0) |
582 ((bt->phase_seg1 - 1) << CNF2_PS1_SHIFT) |
583 (bt->prop_seg - 1));
584 mcp251x_write_bits(spi, CNF3, CNF3_PHSEG2_MASK,
585 (bt->phase_seg2 - 1));
586 dev_info(&spi->dev, "CNF: 0x%02x 0x%02x 0x%02x\n",
587 mcp251x_read_reg(spi, CNF1),
588 mcp251x_read_reg(spi, CNF2),
589 mcp251x_read_reg(spi, CNF3));
590
591 return 0;
592}
593
594static int mcp251x_setup(struct net_device *net, struct mcp251x_priv *priv,
595 struct spi_device *spi)
596{
597 int ret;
598
599 ret = open_candev(net);
600 if (ret) {
601 dev_err(&spi->dev, "unable to set initial baudrate!\n");
602 return ret;
603 }
604
605 /* Enable RX0->RX1 buffer roll over and disable filters */
606 mcp251x_write_bits(spi, RXBCTRL(0),
607 RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1,
608 RXBCTRL_BUKT | RXBCTRL_RXM0 | RXBCTRL_RXM1);
609 mcp251x_write_bits(spi, RXBCTRL(1),
610 RXBCTRL_RXM0 | RXBCTRL_RXM1,
611 RXBCTRL_RXM0 | RXBCTRL_RXM1);
612 return 0;
613}
614
615static void mcp251x_hw_reset(struct spi_device *spi)
616{
617 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
618 int ret;
619
620 mutex_lock(&priv->spi_lock);
621
622 priv->spi_tx_buf[0] = INSTRUCTION_RESET;
623
624 ret = spi_write(spi, priv->spi_tx_buf, 1);
625
626 mutex_unlock(&priv->spi_lock);
627
628 if (ret)
629 dev_err(&spi->dev, "reset failed: ret = %d\n", ret);
630 /* Wait for reset to finish */
631 mdelay(10);
632}
633
634static int mcp251x_hw_probe(struct spi_device *spi)
635{
636 int st1, st2;
637
638 mcp251x_hw_reset(spi);
639
640 /*
641 * Please note that these are "magic values" based on after
642 * reset defaults taken from data sheet which allows us to see
643 * if we really have a chip on the bus (we avoid common all
644 * zeroes or all ones situations)
645 */
646 st1 = mcp251x_read_reg(spi, CANSTAT) & 0xEE;
647 st2 = mcp251x_read_reg(spi, CANCTRL) & 0x17;
648
649 dev_dbg(&spi->dev, "CANSTAT 0x%02x CANCTRL 0x%02x\n", st1, st2);
650
651 /* Check for power up default values */
652 return (st1 == 0x80 && st2 == 0x07) ? 1 : 0;
653}
654
655static irqreturn_t mcp251x_can_isr(int irq, void *dev_id)
656{
657 struct net_device *net = (struct net_device *)dev_id;
658 struct mcp251x_priv *priv = netdev_priv(net);
659
660 /* Schedule bottom half */
661 if (!work_pending(&priv->irq_work))
662 queue_work(priv->wq, &priv->irq_work);
663
664 return IRQ_HANDLED;
665}
666
/*
 * ndo_open: power up the transceiver, grab the interrupt line, wake and
 * reset the chip, program bit timing/RX buffers and switch to normal mode.
 * Returns 0 on success or a negative errno; undoes the transceiver and IRQ
 * setup on every failure path.
 */
static int mcp251x_open(struct net_device *net)
{
	struct mcp251x_priv *priv = netdev_priv(net);
	struct spi_device *spi = priv->spi;
	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
	int ret;

	/* Optional board hook to power the CAN transceiver. */
	if (pdata->transceiver_enable)
		pdata->transceiver_enable(1);

	/* Reset software TX state before the IRQ can fire. */
	priv->force_quit = 0;
	priv->tx_skb = NULL;
	priv->tx_len = 0;

	/*
	 * NOTE(review): the IRQ is requested falling-edge triggered;
	 * assumes the platform wires the chip's INT line accordingly —
	 * confirm against board setup.
	 */
	ret = request_irq(spi->irq, mcp251x_can_isr,
			  IRQF_TRIGGER_FALLING, DEVICE_NAME, net);
	if (ret) {
		dev_err(&spi->dev, "failed to acquire irq %d\n", spi->irq);
		if (pdata->transceiver_enable)
			pdata->transceiver_enable(0);
		return ret;
	}

	/* Wake the chip from sleep, reset it, then apply bit timing. */
	mcp251x_hw_wakeup(spi);
	mcp251x_hw_reset(spi);
	ret = mcp251x_setup(net, priv, spi);
	if (ret) {
		free_irq(spi->irq, net);
		if (pdata->transceiver_enable)
			pdata->transceiver_enable(0);
		return ret;
	}
	mcp251x_set_normal_mode(spi);
	netif_wake_queue(net);

	return 0;
}
704
/*
 * ndo_stop: quiesce the chip and tear down in an order that avoids racing
 * with the interrupt bottom half — interrupts are masked on-chip first,
 * then force_quit stops the work loop, the IRQ is released and any queued
 * work is flushed before the TX state is cleaned up.
 */
static int mcp251x_stop(struct net_device *net)
{
	struct mcp251x_priv *priv = netdev_priv(net);
	struct spi_device *spi = priv->spi;
	struct mcp251x_platform_data *pdata = spi->dev.platform_data;

	close_candev(net);

	/* Disable and clear pending interrupts */
	mcp251x_write_reg(spi, CANINTE, 0x00);
	mcp251x_write_reg(spi, CANINTF, 0x00);

	/* Tell the work handlers to bail out, then wait for them. */
	priv->force_quit = 1;
	free_irq(spi->irq, net);
	flush_workqueue(priv->wq);

	/* Abort any transmission still pending in TX buffer 0. */
	mcp251x_write_reg(spi, TXBCTRL(0), 0);
	if (priv->tx_skb || priv->tx_len)
		mcp251x_clean(net);

	mcp251x_hw_sleep(spi);

	if (pdata->transceiver_enable)
		pdata->transceiver_enable(0);

	priv->can.state = CAN_STATE_STOPPED;

	return 0;
}
734
735static void mcp251x_tx_work_handler(struct work_struct *ws)
736{
737 struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
738 tx_work);
739 struct spi_device *spi = priv->spi;
740 struct net_device *net = priv->net;
741 struct can_frame *frame;
742
743 if (priv->tx_skb) {
744 frame = (struct can_frame *)priv->tx_skb->data;
745
746 if (priv->can.state == CAN_STATE_BUS_OFF) {
747 mcp251x_clean(net);
748 netif_wake_queue(net);
749 return;
750 }
751 if (frame->can_dlc > CAN_FRAME_MAX_DATA_LEN)
752 frame->can_dlc = CAN_FRAME_MAX_DATA_LEN;
753 mcp251x_hw_tx(spi, frame, 0);
754 priv->tx_len = 1 + frame->can_dlc;
755 can_put_echo_skb(priv->tx_skb, net, 0);
756 priv->tx_skb = NULL;
757 }
758}
759
/*
 * Workqueue bottom half for the chip interrupt: handles post-suspend
 * re-initialization, then loops reading EFLG/CANINTF, translating the
 * controller error flags into CAN core state and error frames, completing
 * TX echoes and draining the two RX buffers, until no interrupt source
 * remains or the device is being torn down.
 */
static void mcp251x_irq_work_handler(struct work_struct *ws)
{
	struct mcp251x_priv *priv = container_of(ws, struct mcp251x_priv,
						 irq_work);
	struct spi_device *spi = priv->spi;
	struct net_device *net = priv->net;
	u8 txbnctrl;
	u8 intf;
	enum can_state new_state;

	/* Re-initialize the chip after resume; it lost its registers. */
	if (priv->after_suspend) {
		mdelay(10);
		mcp251x_hw_reset(spi);
		mcp251x_setup(net, priv, spi);
		if (priv->after_suspend & AFTER_SUSPEND_RESTART) {
			mcp251x_set_normal_mode(spi);
		} else if (priv->after_suspend & AFTER_SUSPEND_UP) {
			netif_device_attach(net);
			/* Clean since we lost tx buffer */
			if (priv->tx_skb || priv->tx_len) {
				mcp251x_clean(net);
				netif_wake_queue(net);
			}
			mcp251x_set_normal_mode(spi);
		} else {
			mcp251x_hw_sleep(spi);
		}
		priv->after_suspend = 0;
	}

	/* Bus-off without automatic restart: nothing more to service. */
	if (priv->can.restart_ms == 0 && priv->can.state == CAN_STATE_BUS_OFF)
		return;

	while (!priv->force_quit && !freezing(current)) {
		u8 eflag = mcp251x_read_reg(spi, EFLG);
		int can_id = 0, data1 = 0;

		mcp251x_write_reg(spi, EFLG, 0x00);

		/* Restart requested by the CAN core after bus-off. */
		if (priv->restart_tx) {
			priv->restart_tx = 0;
			mcp251x_write_reg(spi, TXBCTRL(0), 0);
			if (priv->tx_skb || priv->tx_len)
				mcp251x_clean(net);
			netif_wake_queue(net);
			can_id |= CAN_ERR_RESTARTED;
		}

		if (priv->wake) {
			/* Wait whilst the device wakes up */
			mdelay(10);
			priv->wake = 0;
		}

		/* Read and acknowledge all pending interrupt flags. */
		intf = mcp251x_read_reg(spi, CANINTF);
		mcp251x_write_bits(spi, CANINTF, intf, 0x00);

		/* Update can state */
		if (eflag & EFLG_TXBO) {
			new_state = CAN_STATE_BUS_OFF;
			can_id |= CAN_ERR_BUSOFF;
		} else if (eflag & EFLG_TXEP) {
			new_state = CAN_STATE_ERROR_PASSIVE;
			can_id |= CAN_ERR_CRTL;
			data1 |= CAN_ERR_CRTL_TX_PASSIVE;
		} else if (eflag & EFLG_RXEP) {
			new_state = CAN_STATE_ERROR_PASSIVE;
			can_id |= CAN_ERR_CRTL;
			data1 |= CAN_ERR_CRTL_RX_PASSIVE;
		} else if (eflag & EFLG_TXWAR) {
			new_state = CAN_STATE_ERROR_WARNING;
			can_id |= CAN_ERR_CRTL;
			data1 |= CAN_ERR_CRTL_TX_WARNING;
		} else if (eflag & EFLG_RXWAR) {
			new_state = CAN_STATE_ERROR_WARNING;
			can_id |= CAN_ERR_CRTL;
			data1 |= CAN_ERR_CRTL_RX_WARNING;
		} else {
			new_state = CAN_STATE_ERROR_ACTIVE;
		}

		/* Update can state statistics */
		/* ERROR_ACTIVE deliberately falls through into the
		 * ERROR_WARNING case: a jump straight to passive/bus-off
		 * must bump both counters. */
		switch (priv->can.state) {
		case CAN_STATE_ERROR_ACTIVE:
			if (new_state >= CAN_STATE_ERROR_WARNING &&
			    new_state <= CAN_STATE_BUS_OFF)
				priv->can.can_stats.error_warning++;
		case CAN_STATE_ERROR_WARNING:	/* fallthrough */
			if (new_state >= CAN_STATE_ERROR_PASSIVE &&
			    new_state <= CAN_STATE_BUS_OFF)
				priv->can.can_stats.error_passive++;
			break;
		default:
			break;
		}
		priv->can.state = new_state;

		if ((intf & CANINTF_ERRIF) || (can_id & CAN_ERR_RESTARTED)) {
			struct sk_buff *skb;
			struct can_frame *frame;

			/* Create error frame */
			skb = alloc_can_err_skb(net, &frame);
			if (skb) {
				/* Set error frame flags based on bus state */
				frame->can_id = can_id;
				frame->data[1] = data1;

				/* Update net stats for overflows */
				if (eflag & (EFLG_RX0OVR | EFLG_RX1OVR)) {
					if (eflag & EFLG_RX0OVR)
						net->stats.rx_over_errors++;
					if (eflag & EFLG_RX1OVR)
						net->stats.rx_over_errors++;
					frame->can_id |= CAN_ERR_CRTL;
					frame->data[1] |=
						CAN_ERR_CRTL_RX_OVERFLOW;
				}

				netif_rx(skb);
			} else {
				dev_info(&spi->dev,
					 "cannot allocate error skb\n");
			}
		}

		/* Bus-off with no auto-restart: park the chip and stop. */
		if (priv->can.state == CAN_STATE_BUS_OFF) {
			if (priv->can.restart_ms == 0) {
				can_bus_off(net);
				mcp251x_hw_sleep(spi);
				return;
			}
		}

		if (intf == 0)
			break;

		if (intf & CANINTF_WAKIF)
			complete(&priv->awake);

		if (intf & CANINTF_MERRF) {
			/* If there are pending Tx buffers, restart queue */
			txbnctrl = mcp251x_read_reg(spi, TXBCTRL(0));
			if (!(txbnctrl & TXBCTRL_TXREQ)) {
				if (priv->tx_skb || priv->tx_len)
					mcp251x_clean(net);
				netif_wake_queue(net);
			}
		}

		if (intf & (CANINTF_TX2IF | CANINTF_TX1IF | CANINTF_TX0IF)) {
			/* NOTE(review): tx_packets/tx_bytes are bumped even
			 * when tx_len is 0 (no frame in flight) — confirm
			 * whether a TXnIF can fire in that situation. */
			net->stats.tx_packets++;
			net->stats.tx_bytes += priv->tx_len - 1;
			if (priv->tx_len) {
				can_get_echo_skb(net, 0);
				priv->tx_len = 0;
			}
			netif_wake_queue(net);
		}

		if (intf & CANINTF_RX0IF)
			mcp251x_hw_rx(spi, 0);

		if (intf & CANINTF_RX1IF)
			mcp251x_hw_rx(spi, 1);
	}
}
927
/* net_device callbacks implemented by this driver. */
static const struct net_device_ops mcp251x_netdev_ops = {
	.ndo_open = mcp251x_open,
	.ndo_stop = mcp251x_stop,
	.ndo_start_xmit = mcp251x_hard_start_xmit,
};
933
934static int __devinit mcp251x_can_probe(struct spi_device *spi)
935{
936 struct net_device *net;
937 struct mcp251x_priv *priv;
938 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
939 int ret = -ENODEV;
940
941 if (!pdata)
942 /* Platform data is required for osc freq */
943 goto error_out;
944
945 /* Allocate can/net device */
946 net = alloc_candev(sizeof(struct mcp251x_priv), TX_ECHO_SKB_MAX);
947 if (!net) {
948 ret = -ENOMEM;
949 goto error_alloc;
950 }
951
952 net->netdev_ops = &mcp251x_netdev_ops;
953 net->flags |= IFF_ECHO;
954
955 priv = netdev_priv(net);
956 priv->can.bittiming_const = &mcp251x_bittiming_const;
957 priv->can.do_set_mode = mcp251x_do_set_mode;
958 priv->can.clock.freq = pdata->oscillator_frequency / 2;
959 priv->can.do_set_bittiming = mcp251x_do_set_bittiming;
960 priv->net = net;
961 dev_set_drvdata(&spi->dev, priv);
962
963 priv->spi = spi;
964 mutex_init(&priv->spi_lock);
965
966 /* If requested, allocate DMA buffers */
967 if (mcp251x_enable_dma) {
968 spi->dev.coherent_dma_mask = ~0;
969
970 /*
971 * Minimum coherent DMA allocation is PAGE_SIZE, so allocate
972 * that much and share it between Tx and Rx DMA buffers.
973 */
974 priv->spi_tx_buf = dma_alloc_coherent(&spi->dev,
975 PAGE_SIZE,
976 &priv->spi_tx_dma,
977 GFP_DMA);
978
979 if (priv->spi_tx_buf) {
980 priv->spi_rx_buf = (u8 *)(priv->spi_tx_buf +
981 (PAGE_SIZE / 2));
982 priv->spi_rx_dma = (dma_addr_t)(priv->spi_tx_dma +
983 (PAGE_SIZE / 2));
984 } else {
985 /* Fall back to non-DMA */
986 mcp251x_enable_dma = 0;
987 }
988 }
989
990 /* Allocate non-DMA buffers */
991 if (!mcp251x_enable_dma) {
992 priv->spi_tx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
993 if (!priv->spi_tx_buf) {
994 ret = -ENOMEM;
995 goto error_tx_buf;
996 }
997 priv->spi_rx_buf = kmalloc(SPI_TRANSFER_BUF_LEN, GFP_KERNEL);
998 if (!priv->spi_tx_buf) {
999 ret = -ENOMEM;
1000 goto error_rx_buf;
1001 }
1002 }
1003
1004 if (pdata->power_enable)
1005 pdata->power_enable(1);
1006
1007 /* Call out to platform specific setup */
1008 if (pdata->board_specific_setup)
1009 pdata->board_specific_setup(spi);
1010
1011 SET_NETDEV_DEV(net, &spi->dev);
1012
1013 priv->wq = create_freezeable_workqueue("mcp251x_wq");
1014
1015 INIT_WORK(&priv->tx_work, mcp251x_tx_work_handler);
1016 INIT_WORK(&priv->irq_work, mcp251x_irq_work_handler);
1017
1018 init_completion(&priv->awake);
1019
1020 /* Configure the SPI bus */
1021 spi->mode = SPI_MODE_0;
1022 spi->bits_per_word = 8;
1023 spi_setup(spi);
1024
1025 if (!mcp251x_hw_probe(spi)) {
1026 dev_info(&spi->dev, "Probe failed\n");
1027 goto error_probe;
1028 }
1029 mcp251x_hw_sleep(spi);
1030
1031 if (pdata->transceiver_enable)
1032 pdata->transceiver_enable(0);
1033
1034 ret = register_candev(net);
1035 if (!ret) {
1036 dev_info(&spi->dev, "probed\n");
1037 return ret;
1038 }
1039error_probe:
1040 if (!mcp251x_enable_dma)
1041 kfree(priv->spi_rx_buf);
1042error_rx_buf:
1043 if (!mcp251x_enable_dma)
1044 kfree(priv->spi_tx_buf);
1045error_tx_buf:
1046 free_candev(net);
1047 if (mcp251x_enable_dma)
1048 dma_free_coherent(&spi->dev, PAGE_SIZE,
1049 priv->spi_tx_buf, priv->spi_tx_dma);
1050error_alloc:
1051 if (pdata->power_enable)
1052 pdata->power_enable(0);
1053 dev_err(&spi->dev, "probe failed\n");
1054error_out:
1055 return ret;
1056}
1057
1058static int __devexit mcp251x_can_remove(struct spi_device *spi)
1059{
1060 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
1061 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
1062 struct net_device *net = priv->net;
1063
1064 unregister_candev(net);
1065 free_candev(net);
1066
1067 priv->force_quit = 1;
1068 flush_workqueue(priv->wq);
1069 destroy_workqueue(priv->wq);
1070
1071 if (mcp251x_enable_dma) {
1072 dma_free_coherent(&spi->dev, PAGE_SIZE,
1073 priv->spi_tx_buf, priv->spi_tx_dma);
1074 } else {
1075 kfree(priv->spi_tx_buf);
1076 kfree(priv->spi_rx_buf);
1077 }
1078
1079 if (pdata->power_enable)
1080 pdata->power_enable(0);
1081
1082 return 0;
1083}
1084
1085#ifdef CONFIG_PM
/*
 * Legacy SPI suspend hook: put the chip (and optionally the transceiver
 * and board power) to sleep, recording in priv->after_suspend what the
 * resume path / irq work handler must undo.
 */
static int mcp251x_can_suspend(struct spi_device *spi, pm_message_t state)
{
	struct mcp251x_platform_data *pdata = spi->dev.platform_data;
	struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
	struct net_device *net = priv->net;

	if (netif_running(net)) {
		netif_device_detach(net);

		/* Enter the chip's low-power sleep mode. */
		mcp251x_hw_sleep(spi);
		if (pdata->transceiver_enable)
			pdata->transceiver_enable(0);
		/* Resume must re-attach and restart the interface. */
		priv->after_suspend = AFTER_SUSPEND_UP;
	} else {
		priv->after_suspend = AFTER_SUSPEND_DOWN;
	}

	if (pdata->power_enable) {
		/* Board power removed: chip loses all register state. */
		pdata->power_enable(0);
		priv->after_suspend |= AFTER_SUSPEND_POWER;
	}

	return 0;
}
1110
1111static int mcp251x_can_resume(struct spi_device *spi)
1112{
1113 struct mcp251x_platform_data *pdata = spi->dev.platform_data;
1114 struct mcp251x_priv *priv = dev_get_drvdata(&spi->dev);
1115
1116 if (priv->after_suspend & AFTER_SUSPEND_POWER) {
1117 pdata->power_enable(1);
1118 queue_work(priv->wq, &priv->irq_work);
1119 } else {
1120 if (priv->after_suspend & AFTER_SUSPEND_UP) {
1121 if (pdata->transceiver_enable)
1122 pdata->transceiver_enable(1);
1123 queue_work(priv->wq, &priv->irq_work);
1124 } else {
1125 priv->after_suspend = 0;
1126 }
1127 }
1128 return 0;
1129}
1130#else
1131#define mcp251x_can_suspend NULL
1132#define mcp251x_can_resume NULL
1133#endif
1134
/* SPI driver glue; suspend/resume compile to NULL without CONFIG_PM. */
static struct spi_driver mcp251x_can_driver = {
	.driver = {
		.name = DEVICE_NAME,
		/* NOTE(review): spi_register_driver() sets driver.bus
		 * itself, so this assignment looks redundant — confirm. */
		.bus = &spi_bus_type,
		.owner = THIS_MODULE,
	},

	.probe = mcp251x_can_probe,
	.remove = __devexit_p(mcp251x_can_remove),
	.suspend = mcp251x_can_suspend,
	.resume = mcp251x_can_resume,
};
1147
/* Module entry point: register the SPI driver with the SPI core. */
static int __init mcp251x_can_init(void)
{
	return spi_register_driver(&mcp251x_can_driver);
}
1152
/* Module exit point: unregister the SPI driver. */
static void __exit mcp251x_can_exit(void)
{
	spi_unregister_driver(&mcp251x_can_driver);
}
1157
1158module_init(mcp251x_can_init);
1159module_exit(mcp251x_can_exit);
1160
1161MODULE_AUTHOR("Chris Elston <celston@katalix.com>, "
1162 "Christian Pellegrin <chripell@evolware.org>");
1163MODULE_DESCRIPTION("Microchip 251x CAN driver");
1164MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/mscan/Makefile b/drivers/net/can/mscan/Makefile
new file mode 100644
index 000000000000..2bd9f04c7908
--- /dev/null
+++ b/drivers/net/can/mscan/Makefile
@@ -0,0 +1,5 @@
1
2obj-$(CONFIG_CAN_MPC52XX) += mscan-mpc52xx.o
3mscan-mpc52xx-objs := mscan.o mpc52xx_can.o
4
5ccflags-$(CONFIG_CAN_DEBUG_DEVICES) := -DDEBUG
diff --git a/drivers/net/can/mscan/mpc52xx_can.c b/drivers/net/can/mscan/mpc52xx_can.c
new file mode 100644
index 000000000000..4707a82f1ae0
--- /dev/null
+++ b/drivers/net/can/mscan/mpc52xx_can.c
@@ -0,0 +1,279 @@
1/*
2 * CAN bus driver for the Freescale MPC5xxx embedded CPU.
3 *
4 * Copyright (C) 2004-2005 Andrey Volkov <avolkov@varma-el.com>,
5 * Varma Electronics Oy
6 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
7 * Copyright (C) 2009 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the version 2 of the GNU General Public License
11 * as published by the Free Software Foundation
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/platform_device.h>
27#include <linux/netdevice.h>
28#include <linux/can.h>
29#include <linux/can/dev.h>
30#include <linux/of_platform.h>
31#include <sysdev/fsl_soc.h>
32#include <linux/io.h>
33#include <asm/mpc52xx.h>
34
35#include "mscan.h"
36
37
38#define DRV_NAME "mpc5xxx_can"
39
/* Clock Distribution Module nodes searched for the SYS_XTAL_IN settings. */
static struct of_device_id mpc52xx_cdm_ids[] __devinitdata = {
	{ .compatible = "fsl,mpc5200-cdm", },
	{ .compatible = "fsl,mpc5200b-cdm", },
	{}
};
45
46/*
47 * Get the frequency of the external oscillator clock connected
48 * to the SYS_XTAL_IN pin, or return 0 if it cannot be determined.
49 */
50static unsigned int __devinit mpc52xx_can_xtal_freq(struct of_device *of)
51{
52 struct mpc52xx_cdm __iomem *cdm;
53 struct device_node *np_cdm;
54 unsigned int freq;
55 u32 val;
56
57 freq = mpc5xxx_get_bus_frequency(of->node);
58 if (!freq)
59 return 0;
60
61 /*
62 * Determine SYS_XTAL_IN frequency from the clock domain settings
63 */
64 np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids);
65 if (!np_cdm) {
66 dev_err(&of->dev, "can't get clock node!\n");
67 return 0;
68 }
69 cdm = of_iomap(np_cdm, 0);
70 of_node_put(np_cdm);
71
72 if (in_8(&cdm->ipb_clk_sel) & 0x1)
73 freq *= 2;
74 val = in_be32(&cdm->rstcfg);
75 if (val & (1 << 5))
76 freq *= 8;
77 else
78 freq *= 4;
79 if (val & (1 << 6))
80 freq /= 12;
81 else
82 freq /= 16;
83
84 iounmap(cdm);
85
86 return freq;
87}
88
/*
 * Get frequency of the MSCAN clock source
 *
 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock (IP_CLK)
 * can be selected. According to the MPC5200 user's manual, the oscillator
 * clock is the better choice as it has less jitter but due to a hardware
 * bug, it can not be selected for the old MPC5200 Rev. A chips.
 */

static unsigned int __devinit mpc52xx_can_clock_freq(struct of_device *of,
						     int clock_src)
{
	unsigned int pvr;

	pvr = mfspr(SPRN_PVR);

	/* PVR 0x80822011 forces the bus clock (NOTE(review): presumably the
	 * MPC5200 Rev. A affected by the hardware bug above — confirm). */
	if (clock_src == MSCAN_CLKSRC_BUS || pvr == 0x80822011)
		return mpc5xxx_get_bus_frequency(of->node);

	return mpc52xx_can_xtal_freq(of);
}
110
/*
 * OF probe: map the MSCAN registers, resolve the interrupt, allocate the
 * mscan netdev, determine the clock source/frequency from the device tree
 * and register the device.  Resources are released via the goto chain in
 * reverse order of acquisition.
 */
static int __devinit mpc5xxx_can_probe(struct of_device *ofdev,
				       const struct of_device_id *id)
{
	struct device_node *np = ofdev->node;
	struct net_device *dev;
	struct mscan_priv *priv;
	void __iomem *base;
	const char *clk_src;
	int err, irq, clock_src;

	base = of_iomap(ofdev->node, 0);
	if (!base) {
		dev_err(&ofdev->dev, "couldn't ioremap\n");
		err = -ENOMEM;
		goto exit_release_mem;
	}

	irq = irq_of_parse_and_map(np, 0);
	if (!irq) {
		dev_err(&ofdev->dev, "no irq found\n");
		err = -ENODEV;
		goto exit_unmap_mem;
	}

	dev = alloc_mscandev();
	if (!dev) {
		err = -ENOMEM;
		goto exit_dispose_irq;
	}

	priv = netdev_priv(dev);
	priv->reg_base = base;
	dev->irq = irq;

	/*
	 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock
	 * (IP_CLK) can be selected as MSCAN clock source. According to
	 * the MPC5200 user's manual, the oscillator clock is the better
	 * choice as it has less jitter. For this reason, it is selected
	 * by default.
	 */
	clk_src = of_get_property(np, "fsl,mscan-clk-src", NULL);
	if (clk_src && strcmp(clk_src, "ip") == 0)
		clock_src = MSCAN_CLKSRC_BUS;
	else
		clock_src = MSCAN_CLKSRC_XTAL;
	priv->can.clock.freq = mpc52xx_can_clock_freq(ofdev, clock_src);
	if (!priv->can.clock.freq) {
		dev_err(&ofdev->dev, "couldn't get MSCAN clock frequency\n");
		err = -ENODEV;
		goto exit_free_mscan;
	}

	SET_NETDEV_DEV(dev, &ofdev->dev);

	err = register_mscandev(dev, clock_src);
	if (err) {
		dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
			DRV_NAME, err);
		goto exit_free_mscan;
	}

	dev_set_drvdata(&ofdev->dev, dev);

	dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
		 priv->reg_base, dev->irq, priv->can.clock.freq);

	return 0;

exit_free_mscan:
	free_candev(dev);
exit_dispose_irq:
	irq_dispose_mapping(irq);
exit_unmap_mem:
	iounmap(base);
exit_release_mem:
	return err;
}
189
190static int __devexit mpc5xxx_can_remove(struct of_device *ofdev)
191{
192 struct net_device *dev = dev_get_drvdata(&ofdev->dev);
193 struct mscan_priv *priv = netdev_priv(dev);
194
195 dev_set_drvdata(&ofdev->dev, NULL);
196
197 unregister_mscandev(dev);
198 iounmap(priv->reg_base);
199 irq_dispose_mapping(dev->irq);
200 free_candev(dev);
201
202 return 0;
203}
204
205#ifdef CONFIG_PM
/* Snapshot of the whole MSCAN register file taken at suspend time. */
static struct mscan_regs saved_regs;
/* Save all controller registers; the chip loses them over suspend. */
static int mpc5xxx_can_suspend(struct of_device *ofdev, pm_message_t state)
{
	struct net_device *dev = dev_get_drvdata(&ofdev->dev);
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;

	_memcpy_fromio(&saved_regs, regs, sizeof(*regs));

	return 0;
}
217
218static int mpc5xxx_can_resume(struct of_device *ofdev)
219{
220 struct net_device *dev = dev_get_drvdata(&ofdev->dev);
221 struct mscan_priv *priv = netdev_priv(dev);
222 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
223
224 regs->canctl0 |= MSCAN_INITRQ;
225 while ((regs->canctl1 & MSCAN_INITAK) == 0)
226 udelay(10);
227
228 regs->canctl1 = saved_regs.canctl1;
229 regs->canbtr0 = saved_regs.canbtr0;
230 regs->canbtr1 = saved_regs.canbtr1;
231 regs->canidac = saved_regs.canidac;
232
233 /* restore masks, buffers etc. */
234 _memcpy_toio(&regs->canidar1_0, (void *)&saved_regs.canidar1_0,
235 sizeof(*regs) - offsetof(struct mscan_regs, canidar1_0));
236
237 regs->canctl0 &= ~MSCAN_INITRQ;
238 regs->cantbsel = saved_regs.cantbsel;
239 regs->canrier = saved_regs.canrier;
240 regs->cantier = saved_regs.cantier;
241 regs->canctl0 = saved_regs.canctl0;
242
243 return 0;
244}
245#endif
246
/* Device tree compatibles handled by this driver. */
static struct of_device_id __devinitdata mpc5xxx_can_table[] = {
	{.compatible = "fsl,mpc5200-mscan"},
	{.compatible = "fsl,mpc5200b-mscan"},
	{},
};
252
/* OF platform driver; PM callbacks are only built with CONFIG_PM. */
static struct of_platform_driver mpc5xxx_can_driver = {
	.owner = THIS_MODULE,
	.name = "mpc5xxx_can",
	.probe = mpc5xxx_can_probe,
	.remove = __devexit_p(mpc5xxx_can_remove),
#ifdef CONFIG_PM
	.suspend = mpc5xxx_can_suspend,
	.resume = mpc5xxx_can_resume,
#endif
	.match_table = mpc5xxx_can_table,
};
264
/* Module entry point: register the OF platform driver. */
static int __init mpc5xxx_can_init(void)
{
	return of_register_platform_driver(&mpc5xxx_can_driver);
}
module_init(mpc5xxx_can_init);
270
271static void __exit mpc5xxx_can_exit(void)
272{
273 return of_unregister_platform_driver(&mpc5xxx_can_driver);
274};
275module_exit(mpc5xxx_can_exit);
276
277MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
278MODULE_DESCRIPTION("Freescale MPC5200 CAN driver");
279MODULE_LICENSE("GPL v2");
diff --git a/drivers/net/can/mscan/mscan.c b/drivers/net/can/mscan/mscan.c
new file mode 100644
index 000000000000..49542cab9df4
--- /dev/null
+++ b/drivers/net/can/mscan/mscan.c
@@ -0,0 +1,699 @@
1/*
 * CAN bus driver for the stand-alone, generic (as far as possible) MSCAN controller.
3 *
4 * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
5 * Varma Electronics Oy
6 * Copyright (C) 2008-2009 Wolfgang Grandegger <wg@grandegger.com>
 * Copyright (C) 2008-2009 Pengutronix <kernel@pengutronix.de>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the version 2 of the GNU General Public License
11 * as published by the Free Software Foundation
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
21 */
22
23#include <linux/kernel.h>
24#include <linux/module.h>
25#include <linux/interrupt.h>
26#include <linux/delay.h>
27#include <linux/netdevice.h>
28#include <linux/if_arp.h>
29#include <linux/if_ether.h>
30#include <linux/list.h>
31#include <linux/can.h>
32#include <linux/can/dev.h>
33#include <linux/can/error.h>
34#include <linux/io.h>
35
36#include "mscan.h"
37
38#define MSCAN_NORMAL_MODE 0
39#define MSCAN_SLEEP_MODE MSCAN_SLPRQ
40#define MSCAN_INIT_MODE (MSCAN_INITRQ | MSCAN_SLPRQ)
41#define MSCAN_POWEROFF_MODE (MSCAN_CSWAI | MSCAN_SLPRQ)
42#define MSCAN_SET_MODE_RETRIES 255
43#define MSCAN_ECHO_SKB_MAX 3
44
45#define BTR0_BRP_MASK 0x3f
46#define BTR0_SJW_SHIFT 6
47#define BTR0_SJW_MASK (0x3 << BTR0_SJW_SHIFT)
48
49#define BTR1_TSEG1_MASK 0xf
50#define BTR1_TSEG2_SHIFT 4
51#define BTR1_TSEG2_MASK (0x7 << BTR1_TSEG2_SHIFT)
52#define BTR1_SAM_SHIFT 7
53
54#define BTR0_SET_BRP(brp) (((brp) - 1) & BTR0_BRP_MASK)
55#define BTR0_SET_SJW(sjw) ((((sjw) - 1) << BTR0_SJW_SHIFT) & \
56 BTR0_SJW_MASK)
57
58#define BTR1_SET_TSEG1(tseg1) (((tseg1) - 1) & BTR1_TSEG1_MASK)
59#define BTR1_SET_TSEG2(tseg2) ((((tseg2) - 1) << BTR1_TSEG2_SHIFT) & \
60 BTR1_TSEG2_MASK)
61#define BTR1_SET_SAM(sam) ((sam) ? 1 << BTR1_SAM_SHIFT : 0)
62
/* Bit-timing parameter limits for the MSCAN controller (BTR0/BTR1). */
static struct can_bittiming_const mscan_bittiming_const = {
	.name = "mscan",
	.tseg1_min = 4,
	.tseg1_max = 16,
	.tseg2_min = 2,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 64,
	.brp_inc = 1,
};
74
/* Snapshot of controller mode and interrupt-enable registers. */
struct mscan_state {
	u8 mode;
	u8 canrier;
	u8 cantier;
};

/* Bit positions in mscan_priv::flags tracking RX/TX activity. */
#define F_RX_PROGRESS 0
#define F_TX_PROGRESS 1
#define F_TX_WAIT_ALL 2

/* Indexed by max(MSCAN_STATE_RX, MSCAN_STATE_TX) from CANRFLG. */
static enum can_state state_map[] = {
	CAN_STATE_ERROR_ACTIVE,
	CAN_STATE_ERROR_WARNING,
	CAN_STATE_ERROR_PASSIVE,
	CAN_STATE_BUS_OFF
};
91
92static int mscan_set_mode(struct net_device *dev, u8 mode)
93{
94 struct mscan_priv *priv = netdev_priv(dev);
95 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
96 int ret = 0;
97 int i;
98 u8 canctl1;
99
100 if (mode != MSCAN_NORMAL_MODE) {
101
102 if (priv->tx_active) {
103 /* Abort transfers before going to sleep */#
104 out_8(&regs->cantarq, priv->tx_active);
105 /* Suppress TX done interrupts */
106 out_8(&regs->cantier, 0);
107 }
108
109 canctl1 = in_8(&regs->canctl1);
110 if ((mode & MSCAN_SLPRQ) && (canctl1 & MSCAN_SLPAK) == 0) {
111 out_8(&regs->canctl0,
112 in_8(&regs->canctl0) | MSCAN_SLPRQ);
113 for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
114 if (in_8(&regs->canctl1) & MSCAN_SLPAK)
115 break;
116 udelay(100);
117 }
118 /*
119 * The mscan controller will fail to enter sleep mode,
120 * while there are irregular activities on bus, like
121 * somebody keeps retransmitting. This behavior is
122 * undocumented and seems to differ between mscan built
123 * in mpc5200b and mpc5200. We proceed in that case,
124 * since otherwise the slprq will be kept set and the
125 * controller will get stuck. NOTE: INITRQ or CSWAI
126 * will abort all active transmit actions, if still
127 * any, at once.
128 */
129 if (i >= MSCAN_SET_MODE_RETRIES)
130 dev_dbg(dev->dev.parent,
131 "device failed to enter sleep mode. "
132 "We proceed anyhow.\n");
133 else
134 priv->can.state = CAN_STATE_SLEEPING;
135 }
136
137 if ((mode & MSCAN_INITRQ) && (canctl1 & MSCAN_INITAK) == 0) {
138 out_8(&regs->canctl0,
139 in_8(&regs->canctl0) | MSCAN_INITRQ);
140 for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
141 if (in_8(&regs->canctl1) & MSCAN_INITAK)
142 break;
143 }
144 if (i >= MSCAN_SET_MODE_RETRIES)
145 ret = -ENODEV;
146 }
147 if (!ret)
148 priv->can.state = CAN_STATE_STOPPED;
149
150 if (mode & MSCAN_CSWAI)
151 out_8(&regs->canctl0,
152 in_8(&regs->canctl0) | MSCAN_CSWAI);
153
154 } else {
155 canctl1 = in_8(&regs->canctl1);
156 if (canctl1 & (MSCAN_SLPAK | MSCAN_INITAK)) {
157 out_8(&regs->canctl0, in_8(&regs->canctl0) &
158 ~(MSCAN_SLPRQ | MSCAN_INITRQ));
159 for (i = 0; i < MSCAN_SET_MODE_RETRIES; i++) {
160 canctl1 = in_8(&regs->canctl1);
161 if (!(canctl1 & (MSCAN_INITAK | MSCAN_SLPAK)))
162 break;
163 }
164 if (i >= MSCAN_SET_MODE_RETRIES)
165 ret = -ENODEV;
166 else
167 priv->can.state = CAN_STATE_ERROR_ACTIVE;
168 }
169 }
170 return ret;
171}
172
/*
 * Bring the controller into normal operating mode: reset the driver's
 * TX bookkeeping, leave init/sleep mode, derive the initial CAN state
 * from the hardware status bits and enable the receive-side interrupts.
 */
static int mscan_start(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	u8 canrflg;
	int err;

	/* Mask receive interrupts while (re)initializing. */
	out_8(&regs->canrier, 0);

	INIT_LIST_HEAD(&priv->tx_head);
	priv->prev_buf_id = 0;
	priv->cur_pri = 0;
	priv->tx_active = 0;
	priv->shadow_canrier = 0;
	priv->flags = 0;

	err = mscan_set_mode(dev, MSCAN_NORMAL_MODE);
	if (err)
		return err;

	/* Initial CAN state follows the worst of the RX/TX status fields. */
	canrflg = in_8(&regs->canrflg);
	priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
	priv->can.state = state_map[max(MSCAN_STATE_RX(canrflg),
					MSCAN_STATE_TX(canrflg))];
	out_8(&regs->cantier, 0);

	/* Enable receive interrupts. */
	out_8(&regs->canrier, MSCAN_OVRIE | MSCAN_RXFIE | MSCAN_CSCIE |
	      MSCAN_RSTATE1 | MSCAN_RSTATE0 | MSCAN_TSTATE1 | MSCAN_TSTATE0);

	return 0;
}
205
/*
 * ndo_start_xmit: pick a free hardware TX buffer (lower buffer id means
 * higher on-wire priority), encode the CAN id into the IDR registers,
 * copy the payload and kick off transmission.  The echo skb is stored
 * under the chosen buffer id for completion in the ISR path.
 */
static netdev_tx_t mscan_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct can_frame *frame = (struct can_frame *)skb->data;
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	int i, rtr, buf_id;
	u32 can_id;

	/* NOTE(review): returning -EINVAL from ndo_start_xmit leaks the skb
	 * and is not a NETDEV_TX_* code — confirm intended handling. */
	if (frame->can_dlc > 8)
		return -EINVAL;

	out_8(&regs->cantier, 0);

	/* i = bitmask of empty TX buffers; buf_id = lowest empty one. */
	i = ~priv->tx_active & MSCAN_TXE;
	buf_id = ffs(i) - 1;
	switch (hweight8(i)) {
	case 0:
		netif_stop_queue(dev);
		dev_err(dev->dev.parent, "Tx Ring full when queue awake!\n");
		return NETDEV_TX_BUSY;
	case 1:
		/*
		 * if buf_id < 3, then current frame will be send out of order,
		 * since buffer with lower id have higher priority (hell..)
		 */
		netif_stop_queue(dev);
		/* fall through */
	case 2:
		if (buf_id < priv->prev_buf_id) {
			priv->cur_pri++;
			if (priv->cur_pri == 0xff) {
				/* Priority counter wrapped: drain all TX. */
				set_bit(F_TX_WAIT_ALL, &priv->flags);
				netif_stop_queue(dev);
			}
		}
		set_bit(F_TX_PROGRESS, &priv->flags);
		break;
	}
	priv->prev_buf_id = buf_id;
	out_8(&regs->cantbsel, i);

	rtr = frame->can_id & CAN_RTR_FLAG;

	/* Encode the CAN id into the hardware IDR register layout. */
	if (frame->can_id & CAN_EFF_FLAG) {
		can_id = (frame->can_id & CAN_EFF_MASK) << 1;
		if (rtr)
			can_id |= 1;
		out_be16(&regs->tx.idr3_2, can_id);

		can_id >>= 16;
		can_id = (can_id & 0x7) | ((can_id << 2) & 0xffe0) | (3 << 3);
	} else {
		can_id = (frame->can_id & CAN_SFF_MASK) << 5;
		if (rtr)
			can_id |= 1 << 4;
	}
	out_be16(&regs->tx.idr1_0, can_id);

	if (!rtr) {
		void __iomem *data = &regs->tx.dsr1_0;
		u16 *payload = (u16 *) frame->data;
		/* It is safe to write into dsr[dlc+1] */
		for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
			out_be16(data, *payload++);
			data += 2 + _MSCAN_RESERVED_DSR_SIZE;
		}
	}

	out_8(&regs->tx.dlr, frame->can_dlc);
	out_8(&regs->tx.tbpr, priv->cur_pri);

	/* Start transmission. */
	out_8(&regs->cantflg, 1 << buf_id);

	if (!test_bit(F_TX_PROGRESS, &priv->flags))
		dev->trans_start = jiffies;

	list_add_tail(&priv->tx_queue[buf_id].list, &priv->tx_head);

	can_put_echo_skb(skb, dev, buf_id);

	/* Enable interrupt. */
	priv->tx_active |= 1 << buf_id;
	out_8(&regs->cantier, priv->tx_active);

	return NETDEV_TX_OK;
}
292
293/* This function returns the old state to see where we came from */
294static enum can_state check_set_state(struct net_device *dev, u8 canrflg)
295{
296 struct mscan_priv *priv = netdev_priv(dev);
297 enum can_state state, old_state = priv->can.state;
298
299 if (canrflg & MSCAN_CSCIF && old_state <= CAN_STATE_BUS_OFF) {
300 state = state_map[max(MSCAN_STATE_RX(canrflg),
301 MSCAN_STATE_TX(canrflg))];
302 priv->can.state = state;
303 }
304 return old_state;
305}
306
/*
 * Read one received frame out of the RX buffer registers into *frame,
 * decoding the hardware IDR layout (bit 3 of idr1_0 distinguishes
 * extended from standard ids) and acknowledging the RX full flag.
 */
static void mscan_get_rx_frame(struct net_device *dev, struct can_frame *frame)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	u32 can_id;
	int i;

	can_id = in_be16(&regs->rx.idr1_0);
	if (can_id & (1 << 3)) {
		/* Extended (29-bit) id: reassemble from idr1_0 and idr3_2. */
		frame->can_id = CAN_EFF_FLAG;
		can_id = ((can_id << 16) | in_be16(&regs->rx.idr3_2));
		can_id = ((can_id & 0xffe00000) |
			  ((can_id & 0x7ffff) << 2)) >> 2;
	} else {
		/* Standard (11-bit) id. */
		can_id >>= 4;
		frame->can_id = 0;
	}

	/* Lowest bit carries the remote-request flag. */
	frame->can_id |= can_id >> 1;
	if (can_id & 1)
		frame->can_id |= CAN_RTR_FLAG;
	frame->can_dlc = in_8(&regs->rx.dlr) & 0xf;

	if (!(frame->can_id & CAN_RTR_FLAG)) {
		void __iomem *data = &regs->rx.dsr1_0;
		u16 *payload = (u16 *) frame->data;
		for (i = 0; i < (frame->can_dlc + 1) / 2; i++) {
			*payload++ = in_be16(data);
			data += 2 + _MSCAN_RESERVED_DSR_SIZE;
		}
	}

	/* Release the RX buffer for the next frame. */
	out_8(&regs->canrflg, MSCAN_RXF);
}
341
/*
 * Build a CAN error frame in *frame from the CANRFLG error/status bits:
 * record RX overflows, translate state changes into the matching CRTL
 * flags and, on bus-off, quiesce the chip (it would otherwise recover
 * automatically).  Acknowledges the error interrupt flags when done.
 */
static void mscan_get_err_frame(struct net_device *dev, struct can_frame *frame,
				u8 canrflg)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	struct net_device_stats *stats = &dev->stats;
	enum can_state old_state;

	dev_dbg(dev->dev.parent, "error interrupt (canrflg=%#x)\n", canrflg);
	frame->can_id = CAN_ERR_FLAG;

	if (canrflg & MSCAN_OVRIF) {
		frame->can_id |= CAN_ERR_CRTL;
		frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
		stats->rx_over_errors++;
		stats->rx_errors++;
	} else
		frame->data[1] = 0;

	old_state = check_set_state(dev, canrflg);
	/* State changed */
	if (old_state != priv->can.state) {
		switch (priv->can.state) {
		case CAN_STATE_ERROR_WARNING:
			frame->can_id |= CAN_ERR_CRTL;
			priv->can.can_stats.error_warning++;
			/* Compare against the previous shadow to tell
			 * whether RX or TX crossed the warning level. */
			if ((priv->shadow_statflg & MSCAN_RSTAT_MSK) <
			    (canrflg & MSCAN_RSTAT_MSK))
				frame->data[1] |= CAN_ERR_CRTL_RX_WARNING;

			if ((priv->shadow_statflg & MSCAN_TSTAT_MSK) <
			    (canrflg & MSCAN_TSTAT_MSK))
				frame->data[1] |= CAN_ERR_CRTL_TX_WARNING;
			break;
		case CAN_STATE_ERROR_PASSIVE:
			frame->can_id |= CAN_ERR_CRTL;
			priv->can.can_stats.error_passive++;
			frame->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
			break;
		case CAN_STATE_BUS_OFF:
			frame->can_id |= CAN_ERR_BUSOFF;
			/*
			 * The MSCAN on the MPC5200 does recover from bus-off
			 * automatically. To avoid that we stop the chip doing
			 * a light-weight stop (we are in irq-context).
			 */
			out_8(&regs->cantier, 0);
			out_8(&regs->canrier, 0);
			out_8(&regs->canctl0, in_8(&regs->canctl0) |
			      MSCAN_SLPRQ | MSCAN_INITRQ);
			can_bus_off(dev);
			break;
		default:
			break;
		}
	}
	priv->shadow_statflg = canrflg & MSCAN_STAT_MSK;
	frame->can_dlc = CAN_ERR_DLC;
	out_8(&regs->canrflg, MSCAN_ERR_IF);
}
402
/*
 * NAPI poll callback: drain received frames and error events from the
 * controller, handing at most @quota frames to the stack.
 *
 * Returns 1 while events may remain pending (stay on the poll list);
 * returns 0 once CANRFLG shows no RX/error condition, after completing
 * NAPI and restoring the receiver interrupt mask that mscan_isr()
 * saved in priv->shadow_canrier (skipped when bus-off).
 */
static int mscan_rx_poll(struct napi_struct *napi, int quota)
{
	struct mscan_priv *priv = container_of(napi, struct mscan_priv, napi);
	struct net_device *dev = napi->dev;
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	struct net_device_stats *stats = &dev->stats;
	int npackets = 0;
	int ret = 1;
	struct sk_buff *skb;
	struct can_frame *frame;
	u8 canrflg;

	while (npackets < quota && ((canrflg = in_8(&regs->canrflg)) &
				    (MSCAN_RXF | MSCAN_ERR_IF))) {

		skb = alloc_can_skb(dev, &frame);
		if (!skb) {
			if (printk_ratelimit())
				dev_notice(dev->dev.parent, "packet dropped\n");
			stats->rx_dropped++;
			/* still acknowledge the event so the loop progresses */
			out_8(&regs->canrflg, canrflg);
			continue;
		}

		if (canrflg & MSCAN_RXF)
			mscan_get_rx_frame(dev, frame);
		else if (canrflg & MSCAN_ERR_IF)
			mscan_get_err_frame(dev, frame, canrflg);

		stats->rx_packets++;
		stats->rx_bytes += frame->can_dlc;
		npackets++;
		netif_receive_skb(skb);
	}

	if (!(in_8(&regs->canrflg) & (MSCAN_RXF | MSCAN_ERR_IF))) {
		napi_complete(&priv->napi);
		clear_bit(F_RX_PROGRESS, &priv->flags);
		/* re-enable the receiver interrupts saved by mscan_isr() */
		if (priv->can.state < CAN_STATE_BUS_OFF)
			out_8(&regs->canrier, priv->shadow_canrier);
		ret = 0;
	}
	return ret;
}
447
448static irqreturn_t mscan_isr(int irq, void *dev_id)
449{
450 struct net_device *dev = (struct net_device *)dev_id;
451 struct mscan_priv *priv = netdev_priv(dev);
452 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
453 struct net_device_stats *stats = &dev->stats;
454 u8 cantier, cantflg, canrflg;
455 irqreturn_t ret = IRQ_NONE;
456
457 cantier = in_8(&regs->cantier) & MSCAN_TXE;
458 cantflg = in_8(&regs->cantflg) & cantier;
459
460 if (cantier && cantflg) {
461
462 struct list_head *tmp, *pos;
463
464 list_for_each_safe(pos, tmp, &priv->tx_head) {
465 struct tx_queue_entry *entry =
466 list_entry(pos, struct tx_queue_entry, list);
467 u8 mask = entry->mask;
468
469 if (!(cantflg & mask))
470 continue;
471
472 out_8(&regs->cantbsel, mask);
473 stats->tx_bytes += in_8(&regs->tx.dlr);
474 stats->tx_packets++;
475 can_get_echo_skb(dev, entry->id);
476 priv->tx_active &= ~mask;
477 list_del(pos);
478 }
479
480 if (list_empty(&priv->tx_head)) {
481 clear_bit(F_TX_WAIT_ALL, &priv->flags);
482 clear_bit(F_TX_PROGRESS, &priv->flags);
483 priv->cur_pri = 0;
484 } else
485 dev->trans_start = jiffies;
486
487 if (!test_bit(F_TX_WAIT_ALL, &priv->flags))
488 netif_wake_queue(dev);
489
490 out_8(&regs->cantier, priv->tx_active);
491 ret = IRQ_HANDLED;
492 }
493
494 canrflg = in_8(&regs->canrflg);
495 if ((canrflg & ~MSCAN_STAT_MSK) &&
496 !test_and_set_bit(F_RX_PROGRESS, &priv->flags)) {
497 if (canrflg & ~MSCAN_STAT_MSK) {
498 priv->shadow_canrier = in_8(&regs->canrier);
499 out_8(&regs->canrier, 0);
500 napi_schedule(&priv->napi);
501 ret = IRQ_HANDLED;
502 } else
503 clear_bit(F_RX_PROGRESS, &priv->flags);
504 }
505 return ret;
506}
507
508static int mscan_do_set_mode(struct net_device *dev, enum can_mode mode)
509{
510
511 struct mscan_priv *priv = netdev_priv(dev);
512 int ret = 0;
513
514 if (!priv->open_time)
515 return -EINVAL;
516
517 switch (mode) {
518 case CAN_MODE_SLEEP:
519 case CAN_MODE_STOP:
520 netif_stop_queue(dev);
521 mscan_set_mode(dev,
522 (mode ==
523 CAN_MODE_STOP) ? MSCAN_INIT_MODE :
524 MSCAN_SLEEP_MODE);
525 break;
526 case CAN_MODE_START:
527 if (priv->can.state <= CAN_STATE_BUS_OFF)
528 mscan_set_mode(dev, MSCAN_INIT_MODE);
529 ret = mscan_start(dev);
530 if (ret)
531 break;
532 if (netif_queue_stopped(dev))
533 netif_wake_queue(dev);
534 break;
535
536 default:
537 ret = -EOPNOTSUPP;
538 break;
539 }
540 return ret;
541}
542
543static int mscan_do_set_bittiming(struct net_device *dev)
544{
545 struct mscan_priv *priv = netdev_priv(dev);
546 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
547 struct can_bittiming *bt = &priv->can.bittiming;
548 u8 btr0, btr1;
549
550 btr0 = BTR0_SET_BRP(bt->brp) | BTR0_SET_SJW(bt->sjw);
551 btr1 = (BTR1_SET_TSEG1(bt->prop_seg + bt->phase_seg1) |
552 BTR1_SET_TSEG2(bt->phase_seg2) |
553 BTR1_SET_SAM(priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES));
554
555 dev_info(dev->dev.parent, "setting BTR0=0x%02x BTR1=0x%02x\n",
556 btr0, btr1);
557
558 out_8(&regs->canbtr0, btr0);
559 out_8(&regs->canbtr1, btr1);
560
561 return 0;
562}
563
564static int mscan_open(struct net_device *dev)
565{
566 int ret;
567 struct mscan_priv *priv = netdev_priv(dev);
568 struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
569
570 /* common open */
571 ret = open_candev(dev);
572 if (ret)
573 return ret;
574
575 napi_enable(&priv->napi);
576
577 ret = request_irq(dev->irq, mscan_isr, 0, dev->name, dev);
578 if (ret < 0) {
579 napi_disable(&priv->napi);
580 printk(KERN_ERR "%s - failed to attach interrupt\n",
581 dev->name);
582 return ret;
583 }
584
585 priv->open_time = jiffies;
586
587 out_8(&regs->canctl1, in_8(&regs->canctl1) & ~MSCAN_LISTEN);
588
589 ret = mscan_start(dev);
590 if (ret)
591 return ret;
592
593 netif_start_queue(dev);
594
595 return 0;
596}
597
/*
 * ndo_stop callback: stop the controller and release everything that
 * mscan_open() acquired.
 *
 * Ordering: queue and NAPI are stopped first, then all chip interrupt
 * sources are masked before entering init mode and freeing the IRQ.
 */
static int mscan_close(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);

	out_8(&regs->cantier, 0);	/* mask TX interrupts */
	out_8(&regs->canrier, 0);	/* mask RX/error interrupts */
	mscan_set_mode(dev, MSCAN_INIT_MODE);
	close_candev(dev);
	free_irq(dev->irq, dev);
	priv->open_time = 0;		/* mark the device as closed */

	return 0;
}
615
/* Netdev callbacks implemented by this driver. */
static const struct net_device_ops mscan_netdev_ops = {
	.ndo_open	= mscan_open,
	.ndo_stop	= mscan_close,
	.ndo_start_xmit	= mscan_start_xmit,
};
621
/*
 * Enable the MSCAN block and register it with the CAN device core.
 *
 * @clock_src: 1 = clock the MSCAN from the on-chip bus clock,
 *             0 = clock it from the oscillator (see mscan.h).
 *
 * The acceptance filter is configured to accept every frame (all mask
 * bits set, two 32-bit filters) and the chip is left in init mode;
 * mscan_open()/mscan_start() bring it online later.
 */
int register_mscandev(struct net_device *dev, int clock_src)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	u8 ctl1;

	ctl1 = in_8(&regs->canctl1);
	if (clock_src)
		ctl1 |= MSCAN_CLKSRC;
	else
		ctl1 &= ~MSCAN_CLKSRC;

	ctl1 |= MSCAN_CANE;	/* enable the MSCAN module */
	out_8(&regs->canctl1, ctl1);
	udelay(100);		/* short delay after enabling the module */

	/* acceptance mask/acceptance code (accept everything) */
	out_be16(&regs->canidar1_0, 0);
	out_be16(&regs->canidar3_2, 0);
	out_be16(&regs->canidar5_4, 0);
	out_be16(&regs->canidar7_6, 0);

	out_be16(&regs->canidmr1_0, 0xffff);
	out_be16(&regs->canidmr3_2, 0xffff);
	out_be16(&regs->canidmr5_4, 0xffff);
	out_be16(&regs->canidmr7_6, 0xffff);
	/* Two 32 bit Acceptance Filters */
	out_8(&regs->canidac, MSCAN_AF_32BIT);

	mscan_set_mode(dev, MSCAN_INIT_MODE);

	return register_candev(dev);
}
EXPORT_SYMBOL_GPL(register_mscandev);
656
/*
 * Counterpart of register_mscandev(): put the chip back into init
 * mode, disable the MSCAN module (clear CANE) and unregister the
 * device from the CAN core.
 */
void unregister_mscandev(struct net_device *dev)
{
	struct mscan_priv *priv = netdev_priv(dev);
	struct mscan_regs *regs = (struct mscan_regs *)priv->reg_base;
	mscan_set_mode(dev, MSCAN_INIT_MODE);
	out_8(&regs->canctl1, in_8(&regs->canctl1) & ~MSCAN_CANE);
	unregister_candev(dev);
}
EXPORT_SYMBOL_GPL(unregister_mscandev);
666
667struct net_device *alloc_mscandev(void)
668{
669 struct net_device *dev;
670 struct mscan_priv *priv;
671 int i;
672
673 dev = alloc_candev(sizeof(struct mscan_priv), MSCAN_ECHO_SKB_MAX);
674 if (!dev)
675 return NULL;
676 priv = netdev_priv(dev);
677
678 dev->netdev_ops = &mscan_netdev_ops;
679
680 dev->flags |= IFF_ECHO; /* we support local echo */
681
682 netif_napi_add(dev, &priv->napi, mscan_rx_poll, 8);
683
684 priv->can.bittiming_const = &mscan_bittiming_const;
685 priv->can.do_set_bittiming = mscan_do_set_bittiming;
686 priv->can.do_set_mode = mscan_do_set_mode;
687
688 for (i = 0; i < TX_QUEUE_SIZE; i++) {
689 priv->tx_queue[i].id = i;
690 priv->tx_queue[i].mask = 1 << i;
691 }
692
693 return dev;
694}
695EXPORT_SYMBOL_GPL(alloc_mscandev);
696
697MODULE_AUTHOR("Andrey Volkov <avolkov@varma-el.com>");
698MODULE_LICENSE("GPL v2");
699MODULE_DESCRIPTION("CAN port driver for a MSCAN based chips");
diff --git a/drivers/net/can/mscan/mscan.h b/drivers/net/can/mscan/mscan.h
new file mode 100644
index 000000000000..57820f5fb817
--- /dev/null
+++ b/drivers/net/can/mscan/mscan.h
@@ -0,0 +1,262 @@
1/*
2 * Definitions of consts/structs to drive the Freescale MSCAN.
3 *
4 * Copyright (C) 2005-2006 Andrey Volkov <avolkov@varma-el.com>,
5 * Varma Electronics Oy
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the version 2 of the GNU General Public License
9 * as published by the Free Software Foundation
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20
21#ifndef __MSCAN_H__
22#define __MSCAN_H__
23
24#include <linux/types.h>
25
26/* MSCAN control register 0 (CANCTL0) bits */
27#define MSCAN_RXFRM 0x80
28#define MSCAN_RXACT 0x40
29#define MSCAN_CSWAI 0x20
30#define MSCAN_SYNCH 0x10
31#define MSCAN_TIME 0x08
32#define MSCAN_WUPE 0x04
33#define MSCAN_SLPRQ 0x02
34#define MSCAN_INITRQ 0x01
35
36/* MSCAN control register 1 (CANCTL1) bits */
37#define MSCAN_CANE 0x80
38#define MSCAN_CLKSRC 0x40
39#define MSCAN_LOOPB 0x20
40#define MSCAN_LISTEN 0x10
41#define MSCAN_WUPM 0x04
42#define MSCAN_SLPAK 0x02
43#define MSCAN_INITAK 0x01
44
45/* Use the MPC5200 MSCAN variant? */
46#ifdef CONFIG_PPC
47#define MSCAN_FOR_MPC5200
48#endif
49
50#ifdef MSCAN_FOR_MPC5200
51#define MSCAN_CLKSRC_BUS 0
52#define MSCAN_CLKSRC_XTAL MSCAN_CLKSRC
53#else
54#define MSCAN_CLKSRC_BUS MSCAN_CLKSRC
55#define MSCAN_CLKSRC_XTAL 0
56#endif
57
58/* MSCAN receiver flag register (CANRFLG) bits */
59#define MSCAN_WUPIF 0x80
60#define MSCAN_CSCIF 0x40
61#define MSCAN_RSTAT1 0x20
62#define MSCAN_RSTAT0 0x10
63#define MSCAN_TSTAT1 0x08
64#define MSCAN_TSTAT0 0x04
65#define MSCAN_OVRIF 0x02
66#define MSCAN_RXF 0x01
67#define MSCAN_ERR_IF (MSCAN_OVRIF | MSCAN_CSCIF)
68#define MSCAN_RSTAT_MSK (MSCAN_RSTAT1 | MSCAN_RSTAT0)
69#define MSCAN_TSTAT_MSK (MSCAN_TSTAT1 | MSCAN_TSTAT0)
70#define MSCAN_STAT_MSK (MSCAN_RSTAT_MSK | MSCAN_TSTAT_MSK)
71
72#define MSCAN_STATE_BUS_OFF (MSCAN_RSTAT1 | MSCAN_RSTAT0 | \
73 MSCAN_TSTAT1 | MSCAN_TSTAT0)
74#define MSCAN_STATE_TX(canrflg) (((canrflg)&MSCAN_TSTAT_MSK)>>2)
75#define MSCAN_STATE_RX(canrflg) (((canrflg)&MSCAN_RSTAT_MSK)>>4)
76#define MSCAN_STATE_ACTIVE 0
77#define MSCAN_STATE_WARNING 1
78#define MSCAN_STATE_PASSIVE 2
79#define MSCAN_STATE_BUSOFF 3
80
81/* MSCAN receiver interrupt enable register (CANRIER) bits */
82#define MSCAN_WUPIE 0x80
83#define MSCAN_CSCIE 0x40
84#define MSCAN_RSTATE1 0x20
85#define MSCAN_RSTATE0 0x10
86#define MSCAN_TSTATE1 0x08
87#define MSCAN_TSTATE0 0x04
88#define MSCAN_OVRIE 0x02
89#define MSCAN_RXFIE 0x01
90
91/* MSCAN transmitter flag register (CANTFLG) bits */
92#define MSCAN_TXE2 0x04
93#define MSCAN_TXE1 0x02
94#define MSCAN_TXE0 0x01
95#define MSCAN_TXE (MSCAN_TXE2 | MSCAN_TXE1 | MSCAN_TXE0)
96
97/* MSCAN transmitter interrupt enable register (CANTIER) bits */
98#define MSCAN_TXIE2 0x04
99#define MSCAN_TXIE1 0x02
100#define MSCAN_TXIE0 0x01
101#define MSCAN_TXIE (MSCAN_TXIE2 | MSCAN_TXIE1 | MSCAN_TXIE0)
102
103/* MSCAN transmitter message abort request (CANTARQ) bits */
104#define MSCAN_ABTRQ2 0x04
105#define MSCAN_ABTRQ1 0x02
106#define MSCAN_ABTRQ0 0x01
107
108/* MSCAN transmitter message abort ack (CANTAAK) bits */
109#define MSCAN_ABTAK2 0x04
110#define MSCAN_ABTAK1 0x02
111#define MSCAN_ABTAK0 0x01
112
113/* MSCAN transmit buffer selection (CANTBSEL) bits */
114#define MSCAN_TX2 0x04
115#define MSCAN_TX1 0x02
116#define MSCAN_TX0 0x01
117
118/* MSCAN ID acceptance control register (CANIDAC) bits */
119#define MSCAN_IDAM1 0x20
120#define MSCAN_IDAM0 0x10
121#define MSCAN_IDHIT2 0x04
122#define MSCAN_IDHIT1 0x02
123#define MSCAN_IDHIT0 0x01
124
125#define MSCAN_AF_32BIT 0x00
126#define MSCAN_AF_16BIT MSCAN_IDAM0
127#define MSCAN_AF_8BIT MSCAN_IDAM1
128#define MSCAN_AF_CLOSED (MSCAN_IDAM0|MSCAN_IDAM1)
129#define MSCAN_AF_MASK (~(MSCAN_IDAM0|MSCAN_IDAM1))
130
131/* MSCAN Miscellaneous Register (CANMISC) bits */
132#define MSCAN_BOHOLD 0x01
133
134#ifdef MSCAN_FOR_MPC5200
135#define _MSCAN_RESERVED_(n, num) u8 _res##n[num]
136#define _MSCAN_RESERVED_DSR_SIZE 2
137#else
138#define _MSCAN_RESERVED_(n, num)
139#define _MSCAN_RESERVED_DSR_SIZE 0
140#endif
141
/*
 * Structure of the hardware registers.
 *
 * Two offset columns are documented per field: the MPC5200 layout
 * (with reserved gaps, enabled via _MSCAN_RESERVED_) and the plain
 * MSCAN layout.  Field order must match the hardware exactly; the
 * struct is packed for that reason.
 */
struct mscan_regs {
	/* (see doc S12MSCANV3/D)		  MPC5200    MSCAN */
	u8 canctl0;				/* + 0x00     0x00 */
	u8 canctl1;				/* + 0x01     0x01 */
	_MSCAN_RESERVED_(1, 2);			/* + 0x02          */
	u8 canbtr0;				/* + 0x04     0x02 */
	u8 canbtr1;				/* + 0x05     0x03 */
	_MSCAN_RESERVED_(2, 2);			/* + 0x06          */
	u8 canrflg;				/* + 0x08     0x04 */
	u8 canrier;				/* + 0x09     0x05 */
	_MSCAN_RESERVED_(3, 2);			/* + 0x0a          */
	u8 cantflg;				/* + 0x0c     0x06 */
	u8 cantier;				/* + 0x0d     0x07 */
	_MSCAN_RESERVED_(4, 2);			/* + 0x0e          */
	u8 cantarq;				/* + 0x10     0x08 */
	u8 cantaak;				/* + 0x11     0x09 */
	_MSCAN_RESERVED_(5, 2);			/* + 0x12          */
	u8 cantbsel;				/* + 0x14     0x0a */
	u8 canidac;				/* + 0x15     0x0b */
	u8 reserved;				/* + 0x16     0x0c */
	_MSCAN_RESERVED_(6, 5);			/* + 0x17          */
#ifndef MSCAN_FOR_MPC5200
	u8 canmisc;				/*            0x0d */
#endif
	u8 canrxerr;				/* + 0x1c     0x0e */
	u8 cantxerr;				/* + 0x1d     0x0f */
	_MSCAN_RESERVED_(7, 2);			/* + 0x1e          */
	u16 canidar1_0;				/* + 0x20     0x10 */
	_MSCAN_RESERVED_(8, 2);			/* + 0x22          */
	u16 canidar3_2;				/* + 0x24     0x12 */
	_MSCAN_RESERVED_(9, 2);			/* + 0x26          */
	u16 canidmr1_0;				/* + 0x28     0x14 */
	_MSCAN_RESERVED_(10, 2);		/* + 0x2a          */
	u16 canidmr3_2;				/* + 0x2c     0x16 */
	_MSCAN_RESERVED_(11, 2);		/* + 0x2e          */
	u16 canidar5_4;				/* + 0x30     0x18 */
	_MSCAN_RESERVED_(12, 2);		/* + 0x32          */
	u16 canidar7_6;				/* + 0x34     0x1a */
	_MSCAN_RESERVED_(13, 2);		/* + 0x36          */
	u16 canidmr5_4;				/* + 0x38     0x1c */
	_MSCAN_RESERVED_(14, 2);		/* + 0x3a          */
	u16 canidmr7_6;				/* + 0x3c     0x1e */
	_MSCAN_RESERVED_(15, 2);		/* + 0x3e          */
	/* receive buffer window */
	struct {
		u16 idr1_0;			/* + 0x40     0x20 */
		_MSCAN_RESERVED_(16, 2);	/* + 0x42          */
		u16 idr3_2;			/* + 0x44     0x22 */
		_MSCAN_RESERVED_(17, 2);	/* + 0x46          */
		u16 dsr1_0;			/* + 0x48     0x24 */
		_MSCAN_RESERVED_(18, 2);	/* + 0x4a          */
		u16 dsr3_2;			/* + 0x4c     0x26 */
		_MSCAN_RESERVED_(19, 2);	/* + 0x4e          */
		u16 dsr5_4;			/* + 0x50     0x28 */
		_MSCAN_RESERVED_(20, 2);	/* + 0x52          */
		u16 dsr7_6;			/* + 0x54     0x2a */
		_MSCAN_RESERVED_(21, 2);	/* + 0x56          */
		u8 dlr;				/* + 0x58     0x2c */
		u8:8;				/* + 0x59     0x2d */
		_MSCAN_RESERVED_(22, 2);	/* + 0x5a          */
		u16 time;			/* + 0x5c     0x2e */
	} rx;
	_MSCAN_RESERVED_(23, 2);		/* + 0x5e          */
	/* transmit buffer window (selected via cantbsel) */
	struct {
		u16 idr1_0;			/* + 0x60     0x30 */
		_MSCAN_RESERVED_(24, 2);	/* + 0x62          */
		u16 idr3_2;			/* + 0x64     0x32 */
		_MSCAN_RESERVED_(25, 2);	/* + 0x66          */
		u16 dsr1_0;			/* + 0x68     0x34 */
		_MSCAN_RESERVED_(26, 2);	/* + 0x6a          */
		u16 dsr3_2;			/* + 0x6c     0x36 */
		_MSCAN_RESERVED_(27, 2);	/* + 0x6e          */
		u16 dsr5_4;			/* + 0x70     0x38 */
		_MSCAN_RESERVED_(28, 2);	/* + 0x72          */
		u16 dsr7_6;			/* + 0x74     0x3a */
		_MSCAN_RESERVED_(29, 2);	/* + 0x76          */
		u8 dlr;				/* + 0x78     0x3c */
		u8 tbpr;			/* + 0x79     0x3d */
		_MSCAN_RESERVED_(30, 2);	/* + 0x7a          */
		u16 time;			/* + 0x7c     0x3e */
	} tx;
	_MSCAN_RESERVED_(31, 2);		/* + 0x7e          */
} __attribute__ ((packed));
225
#undef _MSCAN_RESERVED_
/*
 * Size of the register block.  Fix: the original read
 * "sizeof(struct mscan)", but no such type exists in this header —
 * the register structure is declared as "struct mscan_regs" above,
 * so any use of MSCAN_REGION would fail to compile.
 */
#define MSCAN_REGION	sizeof(struct mscan_regs)
228
229#define TX_QUEUE_SIZE 3
230
/* Bookkeeping for one hardware TX buffer queued for transmission. */
struct tx_queue_entry {
	struct list_head list;	/* link in mscan_priv.tx_head */
	u8 mask;		/* buffer bit mask (1 << id) */
	u8 id;			/* buffer index, also the echo skb index */
};
236
/* Per-device private state of the MSCAN driver. */
struct mscan_priv {
	struct can_priv can;	/* must be the first member */
	long open_time;		/* jiffies at open; 0 while closed */
	unsigned long flags;	/* F_TX_PROGRESS / F_TX_WAIT_ALL / F_RX_PROGRESS */
	void __iomem *reg_base;	/* ioremap'ed address to registers */
	u8 shadow_statflg;	/* last seen RSTAT/TSTAT levels */
	u8 shadow_canrier;	/* RX irq mask saved while NAPI polls */
	u8 cur_pri;		/* priority for the next queued TX frame */
	u8 prev_buf_id;		/* last hardware TX buffer used */
	u8 tx_active;		/* bit mask of in-flight TX buffers */

	struct list_head tx_head;	/* in-flight TX entries, FIFO order */
	struct tx_queue_entry tx_queue[TX_QUEUE_SIZE];
	struct napi_struct napi;
};
252
253struct net_device *alloc_mscandev(void);
254/*
255 * clock_src:
256 * 1 = The MSCAN clock source is the onchip Bus Clock.
257 * 0 = The MSCAN clock source is the chip Oscillator Clock.
258 */
259extern int register_mscandev(struct net_device *dev, int clock_src);
260extern void unregister_mscandev(struct net_device *dev);
261
262#endif /* __MSCAN_H__ */
diff --git a/drivers/net/can/sja1000/sja1000.c b/drivers/net/can/sja1000/sja1000.c
index 16d2ecd2a3b7..782a47fabf2c 100644
--- a/drivers/net/can/sja1000/sja1000.c
+++ b/drivers/net/can/sja1000/sja1000.c
@@ -296,11 +296,9 @@ static void sja1000_rx(struct net_device *dev)
296 uint8_t dlc; 296 uint8_t dlc;
297 int i; 297 int i;
298 298
299 skb = dev_alloc_skb(sizeof(struct can_frame)); 299 skb = alloc_can_skb(dev, &cf);
300 if (skb == NULL) 300 if (skb == NULL)
301 return; 301 return;
302 skb->dev = dev;
303 skb->protocol = htons(ETH_P_CAN);
304 302
305 fi = priv->read_reg(priv, REG_FI); 303 fi = priv->read_reg(priv, REG_FI);
306 dlc = fi & 0x0F; 304 dlc = fi & 0x0F;
@@ -323,8 +321,6 @@ static void sja1000_rx(struct net_device *dev)
323 if (fi & FI_RTR) 321 if (fi & FI_RTR)
324 id |= CAN_RTR_FLAG; 322 id |= CAN_RTR_FLAG;
325 323
326 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
327 memset(cf, 0, sizeof(struct can_frame));
328 cf->can_id = id; 324 cf->can_id = id;
329 cf->can_dlc = dlc; 325 cf->can_dlc = dlc;
330 for (i = 0; i < dlc; i++) 326 for (i = 0; i < dlc; i++)
@@ -351,15 +347,9 @@ static int sja1000_err(struct net_device *dev, uint8_t isrc, uint8_t status)
351 enum can_state state = priv->can.state; 347 enum can_state state = priv->can.state;
352 uint8_t ecc, alc; 348 uint8_t ecc, alc;
353 349
354 skb = dev_alloc_skb(sizeof(struct can_frame)); 350 skb = alloc_can_err_skb(dev, &cf);
355 if (skb == NULL) 351 if (skb == NULL)
356 return -ENOMEM; 352 return -ENOMEM;
357 skb->dev = dev;
358 skb->protocol = htons(ETH_P_CAN);
359 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
360 memset(cf, 0, sizeof(struct can_frame));
361 cf->can_id = CAN_ERR_FLAG;
362 cf->can_dlc = CAN_ERR_DLC;
363 353
364 if (isrc & IRQ_DOI) { 354 if (isrc & IRQ_DOI) {
365 /* data overrun interrupt */ 355 /* data overrun interrupt */
@@ -565,7 +555,8 @@ struct net_device *alloc_sja1000dev(int sizeof_priv)
565 struct net_device *dev; 555 struct net_device *dev;
566 struct sja1000_priv *priv; 556 struct sja1000_priv *priv;
567 557
568 dev = alloc_candev(sizeof(struct sja1000_priv) + sizeof_priv); 558 dev = alloc_candev(sizeof(struct sja1000_priv) + sizeof_priv,
559 SJA1000_ECHO_SKB_MAX);
569 if (!dev) 560 if (!dev)
570 return NULL; 561 return NULL;
571 562
diff --git a/drivers/net/can/sja1000/sja1000.h b/drivers/net/can/sja1000/sja1000.h
index 302d2c763ad7..97a622b9302f 100644
--- a/drivers/net/can/sja1000/sja1000.h
+++ b/drivers/net/can/sja1000/sja1000.h
@@ -50,6 +50,8 @@
50#include <linux/can/dev.h> 50#include <linux/can/dev.h>
51#include <linux/can/platform/sja1000.h> 51#include <linux/can/platform/sja1000.h>
52 52
53#define SJA1000_ECHO_SKB_MAX 1 /* the SJA1000 has one TX buffer object */
54
53#define SJA1000_MAX_IRQ 20 /* max. number of interrupts handled in ISR */ 55#define SJA1000_MAX_IRQ 20 /* max. number of interrupts handled in ISR */
54 56
55/* SJA1000 registers - manual section 6.4 (Pelican Mode) */ 57/* SJA1000 registers - manual section 6.4 (Pelican Mode) */
diff --git a/drivers/net/can/ti_hecc.c b/drivers/net/can/ti_hecc.c
new file mode 100644
index 000000000000..07e8016b17ec
--- /dev/null
+++ b/drivers/net/can/ti_hecc.c
@@ -0,0 +1,993 @@
1/*
2 * TI HECC (CAN) device driver
3 *
4 * This driver supports TI's HECC (High End CAN Controller module) and the
5 * specs for the same is available at <http://www.ti.com>
6 *
7 * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
8 *
9 * This program is free software; you can redistribute it and/or
10 * modify it under the terms of the GNU General Public License as
11 * published by the Free Software Foundation version 2.
12 *
13 * This program is distributed as is WITHOUT ANY WARRANTY of any
14 * kind, whether express or implied; without even the implied warranty
15 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 */
19
20/*
21 * Your platform definitions should specify module ram offsets and interrupt
22 * number to use as follows:
23 *
24 * static struct ti_hecc_platform_data am3517_evm_hecc_pdata = {
25 * .scc_hecc_offset = 0,
26 * .scc_ram_offset = 0x3000,
27 * .hecc_ram_offset = 0x3000,
28 * .mbx_offset = 0x2000,
29 * .int_line = 0,
30 * .revision = 1,
31 * };
32 *
33 * Please see include/can/platform/ti_hecc.h for description of above fields
34 *
35 */
36
37#include <linux/module.h>
38#include <linux/init.h>
39#include <linux/kernel.h>
40#include <linux/types.h>
41#include <linux/interrupt.h>
42#include <linux/errno.h>
43#include <linux/netdevice.h>
44#include <linux/skbuff.h>
45#include <linux/platform_device.h>
46#include <linux/clk.h>
47
48#include <linux/can.h>
49#include <linux/can/dev.h>
50#include <linux/can/error.h>
51#include <linux/can/platform/ti_hecc.h>
52
53#define DRV_NAME "ti_hecc"
54#define HECC_MODULE_VERSION "0.7"
55MODULE_VERSION(HECC_MODULE_VERSION);
56#define DRV_DESC "TI High End CAN Controller Driver " HECC_MODULE_VERSION
57
58/* TX / RX Mailbox Configuration */
59#define HECC_MAX_MAILBOXES 32 /* hardware mailboxes - do not change */
60#define MAX_TX_PRIO 0x3F /* hardware value - do not change */
61
62/*
63 * Important Note: TX mailbox configuration
64 * TX mailboxes should be restricted to the number of SKB buffers to avoid
65 * maintaining SKB buffers separately. TX mailboxes should be a power of 2
66 * for the mailbox logic to work. Top mailbox numbers are reserved for RX
67 * and lower mailboxes for TX.
68 *
69 * HECC_MAX_TX_MBOX HECC_MB_TX_SHIFT
70 * 4 (default) 2
71 * 8 3
72 * 16 4
73 */
74#define HECC_MB_TX_SHIFT 2 /* as per table above */
75#define HECC_MAX_TX_MBOX BIT(HECC_MB_TX_SHIFT)
76
77#define HECC_TX_PRIO_SHIFT (HECC_MB_TX_SHIFT)
78#define HECC_TX_PRIO_MASK (MAX_TX_PRIO << HECC_MB_TX_SHIFT)
79#define HECC_TX_MB_MASK (HECC_MAX_TX_MBOX - 1)
80#define HECC_TX_MASK ((HECC_MAX_TX_MBOX - 1) | HECC_TX_PRIO_MASK)
81#define HECC_TX_MBOX_MASK (~(BIT(HECC_MAX_TX_MBOX) - 1))
82#define HECC_DEF_NAPI_WEIGHT HECC_MAX_RX_MBOX
83
84/*
85 * Important Note: RX mailbox configuration
86 * RX mailboxes are further logically split into two - main and buffer
87 * mailboxes. The goal is to get all packets into main mailboxes as
88 * driven by mailbox number and receive priority (higher to lower) and
89 * buffer mailboxes are used to receive pkts while main mailboxes are being
90 * processed. This ensures in-order packet reception.
91 *
92 * Here are the recommended values for buffer mailbox. Note that RX mailboxes
93 * start after TX mailboxes:
94 *
95 * HECC_MAX_RX_MBOX HECC_RX_BUFFER_MBOX No of buffer mailboxes
96 * 28 12 8
97 * 16 20 4
98 */
99
100#define HECC_MAX_RX_MBOX (HECC_MAX_MAILBOXES - HECC_MAX_TX_MBOX)
101#define HECC_RX_BUFFER_MBOX 12 /* as per table above */
102#define HECC_RX_FIRST_MBOX (HECC_MAX_MAILBOXES - 1)
103#define HECC_RX_HIGH_MBOX_MASK (~(BIT(HECC_RX_BUFFER_MBOX) - 1))
104
105/* TI HECC module registers */
106#define HECC_CANME 0x0 /* Mailbox enable */
107#define HECC_CANMD 0x4 /* Mailbox direction */
108#define HECC_CANTRS 0x8 /* Transmit request set */
109#define HECC_CANTRR 0xC /* Transmit request */
110#define HECC_CANTA 0x10 /* Transmission acknowledge */
111#define HECC_CANAA 0x14 /* Abort acknowledge */
112#define HECC_CANRMP 0x18 /* Receive message pending */
113#define HECC_CANRML 0x1C /* Remote message lost */
114#define HECC_CANRFP 0x20 /* Remote frame pending */
115#define HECC_CANGAM 0x24 /* SECC only:Global acceptance mask */
116#define HECC_CANMC 0x28 /* Master control */
117#define HECC_CANBTC 0x2C /* Bit timing configuration */
118#define HECC_CANES 0x30 /* Error and status */
119#define HECC_CANTEC 0x34 /* Transmit error counter */
120#define HECC_CANREC 0x38 /* Receive error counter */
121#define HECC_CANGIF0 0x3C /* Global interrupt flag 0 */
122#define HECC_CANGIM 0x40 /* Global interrupt mask */
123#define HECC_CANGIF1 0x44 /* Global interrupt flag 1 */
124#define HECC_CANMIM 0x48 /* Mailbox interrupt mask */
125#define HECC_CANMIL 0x4C /* Mailbox interrupt level */
126#define HECC_CANOPC 0x50 /* Overwrite protection control */
127#define HECC_CANTIOC 0x54 /* Transmit I/O control */
128#define HECC_CANRIOC 0x58 /* Receive I/O control */
129#define HECC_CANLNT 0x5C /* HECC only: Local network time */
130#define HECC_CANTOC 0x60 /* HECC only: Time-out control */
131#define HECC_CANTOS 0x64 /* HECC only: Time-out status */
132#define HECC_CANTIOCE 0x68 /* SCC only:Enhanced TX I/O control */
133#define HECC_CANRIOCE 0x6C /* SCC only:Enhanced RX I/O control */
134
135/* Mailbox registers */
136#define HECC_CANMID 0x0
137#define HECC_CANMCF 0x4
138#define HECC_CANMDL 0x8
139#define HECC_CANMDH 0xC
140
141#define HECC_SET_REG 0xFFFFFFFF
142#define HECC_CANID_MASK 0x3FF /* 18 bits mask for extended id's */
143#define HECC_CCE_WAIT_COUNT 100 /* Wait for ~1 sec for CCE bit */
144
145#define HECC_CANMC_SCM BIT(13) /* SCC compat mode */
146#define HECC_CANMC_CCR BIT(12) /* Change config request */
147#define HECC_CANMC_PDR BIT(11) /* Local Power down - for sleep mode */
148#define HECC_CANMC_ABO BIT(7) /* Auto Bus On */
149#define HECC_CANMC_STM BIT(6) /* Self test mode - loopback */
150#define HECC_CANMC_SRES BIT(5) /* Software reset */
151
152#define HECC_CANTIOC_EN BIT(3) /* Enable CAN TX I/O pin */
153#define HECC_CANRIOC_EN BIT(3) /* Enable CAN RX I/O pin */
154
155#define HECC_CANMID_IDE BIT(31) /* Extended frame format */
156#define HECC_CANMID_AME BIT(30) /* Acceptance mask enable */
157#define HECC_CANMID_AAM BIT(29) /* Auto answer mode */
158
159#define HECC_CANES_FE BIT(24) /* form error */
160#define HECC_CANES_BE BIT(23) /* bit error */
161#define HECC_CANES_SA1 BIT(22) /* stuck at dominant error */
162#define HECC_CANES_CRCE BIT(21) /* CRC error */
163#define HECC_CANES_SE BIT(20) /* stuff bit error */
164#define HECC_CANES_ACKE BIT(19) /* ack error */
165#define HECC_CANES_BO BIT(18) /* Bus off status */
166#define HECC_CANES_EP BIT(17) /* Error passive status */
167#define HECC_CANES_EW BIT(16) /* Error warning status */
168#define HECC_CANES_SMA BIT(5) /* suspend mode ack */
169#define HECC_CANES_CCE BIT(4) /* Change config enabled */
170#define HECC_CANES_PDA BIT(3) /* Power down mode ack */
171
172#define HECC_CANBTC_SAM BIT(7) /* sample points */
173
174#define HECC_BUS_ERROR (HECC_CANES_FE | HECC_CANES_BE |\
175 HECC_CANES_CRCE | HECC_CANES_SE |\
176 HECC_CANES_ACKE)
177
178#define HECC_CANMCF_RTR BIT(4) /* Remote transmit request */
179
180#define HECC_CANGIF_MAIF BIT(17) /* Message alarm interrupt */
181#define HECC_CANGIF_TCOIF BIT(16) /* Timer counter overflow int */
182#define HECC_CANGIF_GMIF BIT(15) /* Global mailbox interrupt */
183#define HECC_CANGIF_AAIF BIT(14) /* Abort ack interrupt */
184#define HECC_CANGIF_WDIF BIT(13) /* Write denied interrupt */
185#define HECC_CANGIF_WUIF BIT(12) /* Wake up interrupt */
186#define HECC_CANGIF_RMLIF BIT(11) /* Receive message lost interrupt */
187#define HECC_CANGIF_BOIF BIT(10) /* Bus off interrupt */
188#define HECC_CANGIF_EPIF BIT(9) /* Error passive interrupt */
189#define HECC_CANGIF_WLIF BIT(8) /* Warning level interrupt */
190#define HECC_CANGIF_MBOX_MASK 0x1F /* Mailbox number mask */
191#define HECC_CANGIM_I1EN BIT(1) /* Int line 1 enable */
192#define HECC_CANGIM_I0EN BIT(0) /* Int line 0 enable */
193#define HECC_CANGIM_DEF_MASK 0x700 /* only busoff/warning/passive */
194#define HECC_CANGIM_SIL BIT(2) /* system interrupts to int line 1 */
195
/* CAN Bittiming constants as per HECC specs */
static struct can_bittiming_const ti_hecc_bittiming_const = {
	.name = DRV_NAME,
	.tseg1_min = 1,
	.tseg1_max = 16,
	.tseg2_min = 1,
	.tseg2_max = 8,
	.sjw_max = 4,
	.brp_min = 1,
	.brp_max = 256,
	.brp_inc = 1,
};
208
/* Per-device private state of the TI HECC driver. */
struct ti_hecc_priv {
	struct can_priv can;	/* MUST be first member/field */
	struct napi_struct napi;
	struct net_device *ndev;
	struct clk *clk;	/* module functional clock */
	void __iomem *base;	/* mapped module base address */
	u32 scc_ram_offset;	/* offsets from platform data, see header */
	u32 hecc_ram_offset;
	u32 mbx_offset;
	u32 int_line;		/* which of the two interrupt lines to use */
	spinlock_t mbx_lock;	/* CANME register needs protection */
	u32 tx_head;		/* encodes both priority and mailbox number */
	u32 tx_tail;
	u32 rx_next;		/* next RX mailbox to service */
};
224
225static inline int get_tx_head_mb(struct ti_hecc_priv *priv)
226{
227 return priv->tx_head & HECC_TX_MB_MASK;
228}
229
230static inline int get_tx_tail_mb(struct ti_hecc_priv *priv)
231{
232 return priv->tx_tail & HECC_TX_MB_MASK;
233}
234
235static inline int get_tx_head_prio(struct ti_hecc_priv *priv)
236{
237 return (priv->tx_head >> HECC_TX_PRIO_SHIFT) & MAX_TX_PRIO;
238}
239
/* Write the local acceptance mask for mailbox @mbxno (HECC RAM area). */
static inline void hecc_write_lam(struct ti_hecc_priv *priv, u32 mbxno, u32 val)
{
	__raw_writel(val, priv->base + priv->hecc_ram_offset + mbxno * 4);
}
244
/* Write register @reg of mailbox @mbxno (each mailbox spans 0x10 bytes). */
static inline void hecc_write_mbx(struct ti_hecc_priv *priv, u32 mbxno,
	u32 reg, u32 val)
{
	__raw_writel(val, priv->base + priv->mbx_offset + mbxno * 0x10 +
			reg);
}
251
/* Read register @reg of mailbox @mbxno (each mailbox spans 0x10 bytes). */
static inline u32 hecc_read_mbx(struct ti_hecc_priv *priv, u32 mbxno, u32 reg)
{
	return __raw_readl(priv->base + priv->mbx_offset + mbxno * 0x10 +
			reg);
}
257
/* Write a 32-bit HECC module register at offset @reg. */
static inline void hecc_write(struct ti_hecc_priv *priv, u32 reg, u32 val)
{
	__raw_writel(val, priv->base + reg);
}
262
/* Read a 32-bit HECC module register at offset @reg. */
static inline u32 hecc_read(struct ti_hecc_priv *priv, int reg)
{
	return __raw_readl(priv->base + reg);
}
267
/* Read-modify-write: set the bits in @bit_mask in register @reg. */
static inline void hecc_set_bit(struct ti_hecc_priv *priv, int reg,
	u32 bit_mask)
{
	hecc_write(priv, reg, hecc_read(priv, reg) | bit_mask);
}
273
/* Read-modify-write: clear the bits in @bit_mask in register @reg. */
static inline void hecc_clear_bit(struct ti_hecc_priv *priv, int reg,
	u32 bit_mask)
{
	hecc_write(priv, reg, hecc_read(priv, reg) & ~bit_mask);
}
279
280static inline u32 hecc_get_bit(struct ti_hecc_priv *priv, int reg, u32 bit_mask)
281{
282 return (hecc_read(priv, reg) & bit_mask) ? 1 : 0;
283}
284
285static int ti_hecc_get_state(const struct net_device *ndev,
286 enum can_state *state)
287{
288 struct ti_hecc_priv *priv = netdev_priv(ndev);
289
290 *state = priv->can.state;
291 return 0;
292}
293
294static int ti_hecc_set_btc(struct ti_hecc_priv *priv)
295{
296 struct can_bittiming *bit_timing = &priv->can.bittiming;
297 u32 can_btc;
298
299 can_btc = (bit_timing->phase_seg2 - 1) & 0x7;
300 can_btc |= ((bit_timing->phase_seg1 + bit_timing->prop_seg - 1)
301 & 0xF) << 3;
302 if (priv->can.ctrlmode & CAN_CTRLMODE_3_SAMPLES) {
303 if (bit_timing->brp > 4)
304 can_btc |= HECC_CANBTC_SAM;
305 else
306 dev_warn(priv->ndev->dev.parent, "WARN: Triple" \
307 "sampling not set due to h/w limitations");
308 }
309 can_btc |= ((bit_timing->sjw - 1) & 0x3) << 8;
310 can_btc |= ((bit_timing->brp - 1) & 0xFF) << 16;
311
312 /* ERM being set to 0 by default meaning resync at falling edge */
313
314 hecc_write(priv, HECC_CANBTC, can_btc);
315 dev_info(priv->ndev->dev.parent, "setting CANBTC=%#x\n", can_btc);
316
317 return 0;
318}
319
/*
 * ti_hecc_reset - soft-reset the controller and bring it through the
 * initialization-mode sequence: enter config mode (CCR/CCE handshake),
 * program bit timing, leave config mode, then clear all mailbox and
 * interrupt-flag registers for a clean start.
 *
 * NOTE(review): the exact register write order follows the h/w spec;
 * do not reorder.
 */
static void ti_hecc_reset(struct net_device *ndev)
{
	u32 cnt;
	struct ti_hecc_priv *priv = netdev_priv(ndev);

	dev_dbg(ndev->dev.parent, "resetting hecc ...\n");
	/* Software reset of the module */
	hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_SRES);

	/* Set change control request and wait till enabled */
	hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_CCR);

	/*
	 * INFO: It has been observed that at times CCE bit may not be
	 * set and hw seems to be ok even if this bit is not set so
	 * timing out with a timing of 1ms to respect the specs
	 */
	cnt = HECC_CCE_WAIT_COUNT;
	while (!hecc_get_bit(priv, HECC_CANES, HECC_CANES_CCE) && cnt != 0) {
		--cnt;
		udelay(10);
	}

	/*
	 * Note: On HECC, BTC can be programmed only in initialization mode, so
	 * it is expected that the can bittiming parameters are set via ip
	 * utility before the device is opened
	 */
	ti_hecc_set_btc(priv);

	/* Clear CCR (and CANMC register) and wait for CCE = 0 enable */
	hecc_write(priv, HECC_CANMC, 0);

	/*
	 * INFO: CAN net stack handles bus off and hence disabling auto-bus-on
	 * hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_ABO);
	 */

	/*
	 * INFO: It has been observed that at times CCE bit may not be
	 * set and hw seems to be ok even if this bit is not set so
	 */
	cnt = HECC_CCE_WAIT_COUNT;
	while (hecc_get_bit(priv, HECC_CANES, HECC_CANES_CCE) && cnt != 0) {
		--cnt;
		udelay(10);
	}

	/* Enable TX and RX I/O Control pins */
	hecc_write(priv, HECC_CANTIOC, HECC_CANTIOC_EN);
	hecc_write(priv, HECC_CANRIOC, HECC_CANRIOC_EN);

	/* Clear registers for clean operation (write-1-to-clear flags) */
	hecc_write(priv, HECC_CANTA, HECC_SET_REG);
	hecc_write(priv, HECC_CANRMP, HECC_SET_REG);
	hecc_write(priv, HECC_CANGIF0, HECC_SET_REG);
	hecc_write(priv, HECC_CANGIF1, HECC_SET_REG);
	hecc_write(priv, HECC_CANME, 0);
	hecc_write(priv, HECC_CANMD, 0);

	/* SCC compat mode NOT supported (and not needed too) */
	hecc_set_bit(priv, HECC_CANMC, HECC_CANMC_SCM);
}
382
/*
 * ti_hecc_start - reset the controller, initialize the TX/RX mailbox
 * bookkeeping, configure all RX mailboxes to accept any frame, and
 * enable interrupts on the configured interrupt line.
 */
static void ti_hecc_start(struct net_device *ndev)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	u32 cnt, mbxno, mbx_mask;

	/* put HECC in initialization mode and set btc */
	ti_hecc_reset(ndev);

	/* TX counters start at the top: highest mailbox, highest priority */
	priv->tx_head = priv->tx_tail = HECC_TX_MASK;
	priv->rx_next = HECC_RX_FIRST_MBOX;

	/* Enable local and global acceptance mask registers */
	hecc_write(priv, HECC_CANGAM, HECC_SET_REG);

	/* Prepare configured mailboxes to receive messages */
	for (cnt = 0; cnt < HECC_MAX_RX_MBOX; cnt++) {
		/* RX mailboxes occupy the highest-numbered slots */
		mbxno = HECC_MAX_MAILBOXES - 1 - cnt;
		mbx_mask = BIT(mbxno);
		/* mailbox must be disabled while its ID/LAM are programmed */
		hecc_clear_bit(priv, HECC_CANME, mbx_mask);
		hecc_write_mbx(priv, mbxno, HECC_CANMID, HECC_CANMID_AME);
		hecc_write_lam(priv, mbxno, HECC_SET_REG);
		hecc_set_bit(priv, HECC_CANMD, mbx_mask);
		hecc_set_bit(priv, HECC_CANME, mbx_mask);
		hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
	}

	/* Prevent message over-write & Enable interrupts */
	hecc_write(priv, HECC_CANOPC, HECC_SET_REG);
	if (priv->int_line) {
		/* route all mailbox interrupts to line 1 */
		hecc_write(priv, HECC_CANMIL, HECC_SET_REG);
		hecc_write(priv, HECC_CANGIM, HECC_CANGIM_DEF_MASK |
			HECC_CANGIM_I1EN | HECC_CANGIM_SIL);
	} else {
		/* route all mailbox interrupts to line 0 */
		hecc_write(priv, HECC_CANMIL, 0);
		hecc_write(priv, HECC_CANGIM,
			HECC_CANGIM_DEF_MASK | HECC_CANGIM_I0EN);
	}
	priv->can.state = CAN_STATE_ERROR_ACTIVE;
}
422
423static void ti_hecc_stop(struct net_device *ndev)
424{
425 struct ti_hecc_priv *priv = netdev_priv(ndev);
426
427 /* Disable interrupts and disable mailboxes */
428 hecc_write(priv, HECC_CANGIM, 0);
429 hecc_write(priv, HECC_CANMIM, 0);
430 hecc_write(priv, HECC_CANME, 0);
431 priv->can.state = CAN_STATE_STOPPED;
432}
433
434static int ti_hecc_do_set_mode(struct net_device *ndev, enum can_mode mode)
435{
436 int ret = 0;
437
438 switch (mode) {
439 case CAN_MODE_START:
440 ti_hecc_start(ndev);
441 netif_wake_queue(ndev);
442 break;
443 default:
444 ret = -EOPNOTSUPP;
445 break;
446 }
447
448 return ret;
449}
450
451/*
452 * ti_hecc_xmit: HECC Transmit
453 *
454 * The transmit mailboxes start from 0 to HECC_MAX_TX_MBOX. In HECC the
455 * priority of the mailbox for tranmission is dependent upon priority setting
456 * field in mailbox registers. The mailbox with highest value in priority field
457 * is transmitted first. Only when two mailboxes have the same value in
458 * priority field the highest numbered mailbox is transmitted first.
459 *
460 * To utilize the HECC priority feature as described above we start with the
461 * highest numbered mailbox with highest priority level and move on to the next
462 * mailbox with the same priority level and so on. Once we loop through all the
463 * transmit mailboxes we choose the next priority level (lower) and so on
464 * until we reach the lowest priority level on the lowest numbered mailbox
465 * when we stop transmission until all mailboxes are transmitted and then
466 * restart at highest numbered mailbox with highest priority.
467 *
468 * Two counters (head and tail) are used to track the next mailbox to transmit
469 * and to track the echo buffer for already transmitted mailbox. The queue
470 * is stopped when all the mailboxes are busy or when there is a priority
471 * value roll-over happens.
472 */
static netdev_tx_t ti_hecc_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	struct can_frame *cf = (struct can_frame *)skb->data;
	u32 mbxno, mbx_mask, data;
	unsigned long flags;

	/* mbx_lock protects CANME against the IRQ handler and rx path */
	mbxno = get_tx_head_mb(priv);
	mbx_mask = BIT(mbxno);
	spin_lock_irqsave(&priv->mbx_lock, flags);
	if (unlikely(hecc_read(priv, HECC_CANME) & mbx_mask)) {
		/* head mailbox still enabled => previous TX not reaped yet;
		 * should not happen since the queue is stopped when full */
		spin_unlock_irqrestore(&priv->mbx_lock, flags);
		netif_stop_queue(ndev);
		dev_err(priv->ndev->dev.parent,
			"BUG: TX mbx not ready tx_head=%08X, tx_tail=%08X\n",
			priv->tx_head, priv->tx_tail);
		return NETDEV_TX_BUSY;
	}
	spin_unlock_irqrestore(&priv->mbx_lock, flags);

	/* Prepare mailbox for transmission: DLC, RTR flag and TX priority */
	data = min_t(u8, cf->can_dlc, 8);
	if (cf->can_id & CAN_RTR_FLAG) /* Remote transmission request */
		data |= HECC_CANMCF_RTR;
	data |= get_tx_head_prio(priv) << 8;
	hecc_write_mbx(priv, mbxno, HECC_CANMCF, data);

	if (cf->can_id & CAN_EFF_FLAG) /* Extended frame format */
		data = (cf->can_id & CAN_EFF_MASK) | HECC_CANMID_IDE;
	else /* Standard frame format */
		data = (cf->can_id & CAN_SFF_MASK) << 18;
	hecc_write_mbx(priv, mbxno, HECC_CANMID, data);
	/* payload is written as two big-endian 32-bit words */
	hecc_write_mbx(priv, mbxno, HECC_CANMDL,
		be32_to_cpu(*(u32 *)(cf->data)));
	if (cf->can_dlc > 4)
		hecc_write_mbx(priv, mbxno, HECC_CANMDH,
			be32_to_cpu(*(u32 *)(cf->data + 4)));
	else
		*(u32 *)(cf->data + 4) = 0;
	/* keep the skb for TX-complete echo; slot index == mailbox number */
	can_put_echo_skb(skb, ndev, mbxno);

	spin_lock_irqsave(&priv->mbx_lock, flags);
	/* decrementing tx_head walks mailbox numbers and priority levels
	 * downwards (see the comment block above this function) */
	--priv->tx_head;
	if ((hecc_read(priv, HECC_CANME) & BIT(get_tx_head_mb(priv))) ||
		(priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK) {
		/* next mailbox busy, or priority roll-over: stall the queue */
		netif_stop_queue(ndev);
	}
	hecc_set_bit(priv, HECC_CANME, mbx_mask);
	spin_unlock_irqrestore(&priv->mbx_lock, flags);

	/* arm the mailbox: data direction TX, interrupt on, request send */
	hecc_clear_bit(priv, HECC_CANMD, mbx_mask);
	hecc_set_bit(priv, HECC_CANMIM, mbx_mask);
	hecc_write(priv, HECC_CANTRS, mbx_mask);

	return NETDEV_TX_OK;
}
529
/*
 * ti_hecc_rx_pkt - read one received frame out of mailbox @mbxno,
 * hand it to the network stack, and acknowledge/re-arm the mailbox.
 *
 * Returns 0 on success, -ENOMEM if no skb could be allocated (the
 * mailbox is then left pending for a later poll).
 */
static int ti_hecc_rx_pkt(struct ti_hecc_priv *priv, int mbxno)
{
	struct net_device_stats *stats = &priv->ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;
	u32 data, mbx_mask;
	unsigned long flags;

	skb = alloc_can_skb(priv->ndev, &cf);
	if (!skb) {
		if (printk_ratelimit())
			dev_err(priv->ndev->dev.parent,
				"ti_hecc_rx_pkt: alloc_can_skb() failed\n");
		return -ENOMEM;
	}

	/* decode CAN ID: IDE bit selects extended vs standard format */
	mbx_mask = BIT(mbxno);
	data = hecc_read_mbx(priv, mbxno, HECC_CANMID);
	if (data & HECC_CANMID_IDE)
		cf->can_id = (data & CAN_EFF_MASK) | CAN_EFF_FLAG;
	else
		cf->can_id = (data >> 18) & CAN_SFF_MASK;
	data = hecc_read_mbx(priv, mbxno, HECC_CANMCF);
	if (data & HECC_CANMCF_RTR)
		cf->can_id |= CAN_RTR_FLAG;
	cf->can_dlc = data & 0xF;
	/* payload arrives as two big-endian 32-bit words */
	data = hecc_read_mbx(priv, mbxno, HECC_CANMDL);
	*(u32 *)(cf->data) = cpu_to_be32(data);
	if (cf->can_dlc > 4) {
		data = hecc_read_mbx(priv, mbxno, HECC_CANMDH);
		*(u32 *)(cf->data + 4) = cpu_to_be32(data);
	} else {
		*(u32 *)(cf->data + 4) = 0;
	}
	/* ack the mailbox under mbx_lock (CANME is shared with xmit/irq) */
	spin_lock_irqsave(&priv->mbx_lock, flags);
	hecc_clear_bit(priv, HECC_CANME, mbx_mask);
	hecc_write(priv, HECC_CANRMP, mbx_mask);
	/* enable mailbox only if it is part of rx buffer mailboxes */
	if (priv->rx_next < HECC_RX_BUFFER_MBOX)
		hecc_set_bit(priv, HECC_CANME, mbx_mask);
	spin_unlock_irqrestore(&priv->mbx_lock, flags);

	stats->rx_bytes += cf->can_dlc;
	netif_receive_skb(skb);
	stats->rx_packets++;

	return 0;
}
578
579/*
580 * ti_hecc_rx_poll - HECC receive pkts
581 *
582 * The receive mailboxes start from highest numbered mailbox till last xmit
583 * mailbox. On CAN frame reception the hardware places the data into highest
584 * numbered mailbox that matches the CAN ID filter. Since all receive mailboxes
585 * have same filtering (ALL CAN frames) packets will arrive in the highest
586 * available RX mailbox and we need to ensure in-order packet reception.
587 *
588 * To ensure the packets are received in the right order we logically divide
589 * the RX mailboxes into main and buffer mailboxes. Packets are received as per
590 * mailbox priotity (higher to lower) in the main bank and once it is full we
591 * disable further reception into main mailboxes. While the main mailboxes are
592 * processed in NAPI, further packets are received in buffer mailboxes.
593 *
594 * We maintain a RX next mailbox counter to process packets and once all main
595 * mailboxe packets are passed to the upper stack we enable all of them but
596 * continue to process packets received in buffer mailboxes. With each packet
597 * received from buffer mailbox we enable it immediately so as to handle the
598 * overflow from higher mailboxes.
599 */
static int ti_hecc_rx_poll(struct napi_struct *napi, int quota)
{
	struct net_device *ndev = napi->dev;
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	u32 num_pkts = 0;
	u32 mbx_mask;
	unsigned long pending_pkts, flags;

	if (!netif_running(ndev))
		return 0;

	/* drain pending mailboxes in rx_next order until quota is spent */
	while ((pending_pkts = hecc_read(priv, HECC_CANRMP)) &&
		num_pkts < quota) {
		mbx_mask = BIT(priv->rx_next); /* next rx mailbox to process */
		if (mbx_mask & pending_pkts) {
			/* on -ENOMEM, bail out; mailbox stays pending */
			if (ti_hecc_rx_pkt(priv, priv->rx_next) < 0)
				return num_pkts;
			++num_pkts;
		} else if (priv->rx_next > HECC_RX_BUFFER_MBOX) {
			break; /* pkt not received yet */
		}
		--priv->rx_next;
		if (priv->rx_next == HECC_RX_BUFFER_MBOX) {
			/* enable high bank mailboxes */
			spin_lock_irqsave(&priv->mbx_lock, flags);
			mbx_mask = hecc_read(priv, HECC_CANME);
			mbx_mask |= HECC_RX_HIGH_MBOX_MASK;
			hecc_write(priv, HECC_CANME, mbx_mask);
			spin_unlock_irqrestore(&priv->mbx_lock, flags);
		} else if (priv->rx_next == HECC_MAX_TX_MBOX - 1) {
			/* wrapped past the buffer bank; restart from the top */
			priv->rx_next = HECC_RX_FIRST_MBOX;
			break;
		}
	}

	/* Enable packet interrupt if all pkts are handled */
	if (hecc_read(priv, HECC_CANRMP) == 0) {
		napi_complete(napi);
		/* Re-enable RX mailbox interrupts */
		mbx_mask = hecc_read(priv, HECC_CANMIM);
		mbx_mask |= HECC_TX_MBOX_MASK;
		hecc_write(priv, HECC_CANMIM, mbx_mask);
	}

	return num_pkts;
}
646
/*
 * ti_hecc_error - translate controller warning/passive/bus-off and bus
 * error conditions into a CAN error frame for the stack, update the
 * can_state, and acknowledge the corresponding CANES bits.
 *
 * @int_status: snapshot of CANGIF0/1 taken in the IRQ handler
 * @err_status: snapshot of CANES taken in the IRQ handler
 *
 * Returns 0 on success, -ENOMEM if the error skb could not be allocated.
 */
static int ti_hecc_error(struct net_device *ndev, int int_status,
	int err_status)
{
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	struct can_frame *cf;
	struct sk_buff *skb;

	/* propogate the error condition to the can stack */
	skb = alloc_can_err_skb(ndev, &cf);
	if (!skb) {
		if (printk_ratelimit())
			dev_err(priv->ndev->dev.parent,
				"ti_hecc_error: alloc_can_err_skb() failed\n");
		return -ENOMEM;
	}

	if (int_status & HECC_CANGIF_WLIF) { /* warning level int */
		/* bus-off supersedes warning; skip state change then */
		if ((int_status & HECC_CANGIF_BOIF) == 0) {
			priv->can.state = CAN_STATE_ERROR_WARNING;
			++priv->can.can_stats.error_warning;
			cf->can_id |= CAN_ERR_CRTL;
			/* TEC/REC > 96 distinguishes TX vs RX warning */
			if (hecc_read(priv, HECC_CANTEC) > 96)
				cf->data[1] |= CAN_ERR_CRTL_TX_WARNING;
			if (hecc_read(priv, HECC_CANREC) > 96)
				cf->data[1] |= CAN_ERR_CRTL_RX_WARNING;
		}
		hecc_set_bit(priv, HECC_CANES, HECC_CANES_EW);
		dev_dbg(priv->ndev->dev.parent, "Error Warning interrupt\n");
		hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
	}

	if (int_status & HECC_CANGIF_EPIF) { /* error passive int */
		if ((int_status & HECC_CANGIF_BOIF) == 0) {
			priv->can.state = CAN_STATE_ERROR_PASSIVE;
			++priv->can.can_stats.error_passive;
			cf->can_id |= CAN_ERR_CRTL;
			/* TEC/REC > 127 distinguishes TX vs RX passive */
			if (hecc_read(priv, HECC_CANTEC) > 127)
				cf->data[1] |= CAN_ERR_CRTL_TX_PASSIVE;
			if (hecc_read(priv, HECC_CANREC) > 127)
				cf->data[1] |= CAN_ERR_CRTL_RX_PASSIVE;
		}
		hecc_set_bit(priv, HECC_CANES, HECC_CANES_EP);
		dev_dbg(priv->ndev->dev.parent, "Error passive interrupt\n");
		hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
	}

	/*
	 * Need to check busoff condition in error status register too to
	 * ensure warning interrupts don't hog the system
	 */
	if ((int_status & HECC_CANGIF_BOIF) || (err_status & HECC_CANES_BO)) {
		priv->can.state = CAN_STATE_BUS_OFF;
		cf->can_id |= CAN_ERR_BUSOFF;
		hecc_set_bit(priv, HECC_CANES, HECC_CANES_BO);
		hecc_clear_bit(priv, HECC_CANMC, HECC_CANMC_CCR);
		/* Disable all interrupts in bus-off to avoid int hog */
		hecc_write(priv, HECC_CANGIM, 0);
		can_bus_off(ndev);
	}

	if (err_status & HECC_BUS_ERROR) {
		/* map each CANES protocol-error flag to the CAN_ERR code */
		++priv->can.can_stats.bus_error;
		cf->can_id |= CAN_ERR_BUSERROR | CAN_ERR_PROT;
		cf->data[2] |= CAN_ERR_PROT_UNSPEC;
		if (err_status & HECC_CANES_FE) {
			hecc_set_bit(priv, HECC_CANES, HECC_CANES_FE);
			cf->data[2] |= CAN_ERR_PROT_FORM;
		}
		if (err_status & HECC_CANES_BE) {
			hecc_set_bit(priv, HECC_CANES, HECC_CANES_BE);
			cf->data[2] |= CAN_ERR_PROT_BIT;
		}
		if (err_status & HECC_CANES_SE) {
			hecc_set_bit(priv, HECC_CANES, HECC_CANES_SE);
			cf->data[2] |= CAN_ERR_PROT_STUFF;
		}
		if (err_status & HECC_CANES_CRCE) {
			hecc_set_bit(priv, HECC_CANES, HECC_CANES_CRCE);
			cf->data[2] |= CAN_ERR_PROT_LOC_CRC_SEQ |
					CAN_ERR_PROT_LOC_CRC_DEL;
		}
		if (err_status & HECC_CANES_ACKE) {
			hecc_set_bit(priv, HECC_CANES, HECC_CANES_ACKE);
			cf->data[2] |= CAN_ERR_PROT_LOC_ACK |
					CAN_ERR_PROT_LOC_ACK_DEL;
		}
	}

	netif_receive_skb(skb);
	stats->rx_packets++;
	stats->rx_bytes += cf->can_dlc;
	return 0;
}
741
/*
 * ti_hecc_interrupt - top-half handler shared across both HECC
 * interrupt lines. Dispatches error handling, reaps completed TX
 * mailboxes (echo + stats), and defers RX processing to NAPI.
 */
static irqreturn_t ti_hecc_interrupt(int irq, void *dev_id)
{
	struct net_device *ndev = (struct net_device *)dev_id;
	struct ti_hecc_priv *priv = netdev_priv(ndev);
	struct net_device_stats *stats = &ndev->stats;
	u32 mbxno, mbx_mask, int_status, err_status;
	unsigned long ack, flags;

	/* read the global interrupt flags for our configured line */
	int_status = hecc_read(priv,
		(priv->int_line) ? HECC_CANGIF1 : HECC_CANGIF0);

	if (!int_status)
		return IRQ_NONE;

	err_status = hecc_read(priv, HECC_CANES);
	if (err_status & (HECC_BUS_ERROR | HECC_CANES_BO |
		HECC_CANES_EP | HECC_CANES_EW))
		ti_hecc_error(ndev, int_status, err_status);

	if (int_status & HECC_CANGIF_GMIF) {
		/* reap completed TX mailboxes in tail order until one that
		 * has not transmitted yet (CANTA bit clear) is found */
		while (priv->tx_tail - priv->tx_head > 0) {
			mbxno = get_tx_tail_mb(priv);
			mbx_mask = BIT(mbxno);
			if (!(mbx_mask & hecc_read(priv, HECC_CANTA)))
				break;
			hecc_clear_bit(priv, HECC_CANMIM, mbx_mask);
			hecc_write(priv, HECC_CANTA, mbx_mask);
			spin_lock_irqsave(&priv->mbx_lock, flags);
			hecc_clear_bit(priv, HECC_CANME, mbx_mask);
			spin_unlock_irqrestore(&priv->mbx_lock, flags);
			/* DLC lives in the low nibble of CANMCF */
			stats->tx_bytes += hecc_read_mbx(priv, mbxno,
						HECC_CANMCF) & 0xF;
			stats->tx_packets++;
			can_get_echo_skb(ndev, mbxno);
			--priv->tx_tail;
		}

		/* restart queue if wrap-up or if queue stalled on last pkt */
		if (((priv->tx_head == priv->tx_tail) &&
				((priv->tx_head & HECC_TX_MASK) != HECC_TX_MASK)) ||
			(((priv->tx_tail & HECC_TX_MASK) == HECC_TX_MASK) &&
				((priv->tx_head & HECC_TX_MASK) == HECC_TX_MASK)))
			netif_wake_queue(ndev);

		/* Disable RX mailbox interrupts and let NAPI reenable them */
		if (hecc_read(priv, HECC_CANRMP)) {
			ack = hecc_read(priv, HECC_CANMIM);
			ack &= BIT(HECC_MAX_TX_MBOX) - 1;
			hecc_write(priv, HECC_CANMIM, ack);
			napi_schedule(&priv->napi);
		}
	}

	/* clear all interrupt conditions - read back to avoid spurious ints */
	if (priv->int_line) {
		hecc_write(priv, HECC_CANGIF1, HECC_SET_REG);
		int_status = hecc_read(priv, HECC_CANGIF1);
	} else {
		hecc_write(priv, HECC_CANGIF0, HECC_SET_REG);
		int_status = hecc_read(priv, HECC_CANGIF0);
	}

	return IRQ_HANDLED;
}
806
807static int ti_hecc_open(struct net_device *ndev)
808{
809 struct ti_hecc_priv *priv = netdev_priv(ndev);
810 int err;
811
812 err = request_irq(ndev->irq, ti_hecc_interrupt, IRQF_SHARED,
813 ndev->name, ndev);
814 if (err) {
815 dev_err(ndev->dev.parent, "error requesting interrupt\n");
816 return err;
817 }
818
819 /* Open common can device */
820 err = open_candev(ndev);
821 if (err) {
822 dev_err(ndev->dev.parent, "open_candev() failed %d\n", err);
823 free_irq(ndev->irq, ndev);
824 return err;
825 }
826
827 clk_enable(priv->clk);
828 ti_hecc_start(ndev);
829 napi_enable(&priv->napi);
830 netif_start_queue(ndev);
831
832 return 0;
833}
834
835static int ti_hecc_close(struct net_device *ndev)
836{
837 struct ti_hecc_priv *priv = netdev_priv(ndev);
838
839 netif_stop_queue(ndev);
840 napi_disable(&priv->napi);
841 ti_hecc_stop(ndev);
842 free_irq(ndev->irq, ndev);
843 clk_disable(priv->clk);
844 close_candev(ndev);
845
846 return 0;
847}
848
/* net_device callbacks implemented by this driver */
static const struct net_device_ops ti_hecc_netdev_ops = {
	.ndo_open		= ti_hecc_open,
	.ndo_stop		= ti_hecc_close,
	.ndo_start_xmit		= ti_hecc_xmit,
};
854
855static int ti_hecc_probe(struct platform_device *pdev)
856{
857 struct net_device *ndev = (struct net_device *)0;
858 struct ti_hecc_priv *priv;
859 struct ti_hecc_platform_data *pdata;
860 struct resource *mem, *irq;
861 void __iomem *addr;
862 int err = -ENODEV;
863
864 pdata = pdev->dev.platform_data;
865 if (!pdata) {
866 dev_err(&pdev->dev, "No platform data\n");
867 goto probe_exit;
868 }
869
870 mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
871 if (!mem) {
872 dev_err(&pdev->dev, "No mem resources\n");
873 goto probe_exit;
874 }
875 irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
876 if (!irq) {
877 dev_err(&pdev->dev, "No irq resource\n");
878 goto probe_exit;
879 }
880 if (!request_mem_region(mem->start, resource_size(mem), pdev->name)) {
881 dev_err(&pdev->dev, "HECC region already claimed\n");
882 err = -EBUSY;
883 goto probe_exit;
884 }
885 addr = ioremap(mem->start, resource_size(mem));
886 if (!addr) {
887 dev_err(&pdev->dev, "ioremap failed\n");
888 err = -ENOMEM;
889 goto probe_exit_free_region;
890 }
891
892 ndev = alloc_candev(sizeof(struct ti_hecc_priv), HECC_MAX_TX_MBOX);
893 if (!ndev) {
894 dev_err(&pdev->dev, "alloc_candev failed\n");
895 err = -ENOMEM;
896 goto probe_exit_iounmap;
897 }
898
899 priv = netdev_priv(ndev);
900 priv->ndev = ndev;
901 priv->base = addr;
902 priv->scc_ram_offset = pdata->scc_ram_offset;
903 priv->hecc_ram_offset = pdata->hecc_ram_offset;
904 priv->mbx_offset = pdata->mbx_offset;
905 priv->int_line = pdata->int_line;
906
907 priv->can.bittiming_const = &ti_hecc_bittiming_const;
908 priv->can.do_set_mode = ti_hecc_do_set_mode;
909 priv->can.do_get_state = ti_hecc_get_state;
910
911 ndev->irq = irq->start;
912 ndev->flags |= IFF_ECHO;
913 platform_set_drvdata(pdev, ndev);
914 SET_NETDEV_DEV(ndev, &pdev->dev);
915 ndev->netdev_ops = &ti_hecc_netdev_ops;
916
917 priv->clk = clk_get(&pdev->dev, "hecc_ck");
918 if (IS_ERR(priv->clk)) {
919 dev_err(&pdev->dev, "No clock available\n");
920 err = PTR_ERR(priv->clk);
921 priv->clk = NULL;
922 goto probe_exit_candev;
923 }
924 priv->can.clock.freq = clk_get_rate(priv->clk);
925 netif_napi_add(ndev, &priv->napi, ti_hecc_rx_poll,
926 HECC_DEF_NAPI_WEIGHT);
927
928 err = register_candev(ndev);
929 if (err) {
930 dev_err(&pdev->dev, "register_candev() failed\n");
931 goto probe_exit_clk;
932 }
933 dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%u)\n",
934 priv->base, (u32) ndev->irq);
935
936 return 0;
937
938probe_exit_clk:
939 clk_put(priv->clk);
940probe_exit_candev:
941 free_candev(ndev);
942probe_exit_iounmap:
943 iounmap(addr);
944probe_exit_free_region:
945 release_mem_region(mem->start, resource_size(mem));
946probe_exit:
947 return err;
948}
949
950static int __devexit ti_hecc_remove(struct platform_device *pdev)
951{
952 struct resource *res;
953 struct net_device *ndev = platform_get_drvdata(pdev);
954 struct ti_hecc_priv *priv = netdev_priv(ndev);
955
956 clk_put(priv->clk);
957 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
958 iounmap(priv->base);
959 release_mem_region(res->start, resource_size(res));
960 unregister_candev(ndev);
961 free_candev(ndev);
962 platform_set_drvdata(pdev, NULL);
963
964 return 0;
965}
966
/* TI HECC netdevice driver: platform driver structure */
static struct platform_driver ti_hecc_driver = {
	.driver = {
		.name    = DRV_NAME,
		.owner   = THIS_MODULE,
	},
	.probe = ti_hecc_probe,
	/* __devexit_p: compiled out when hotplug removal is impossible */
	.remove = __devexit_p(ti_hecc_remove),
};
976
/* Module init: announce the driver and register it with the platform bus. */
static int __init ti_hecc_init_driver(void)
{
	printk(KERN_INFO DRV_DESC "\n");
	return platform_driver_register(&ti_hecc_driver);
}
module_init(ti_hecc_init_driver);
982module_init(ti_hecc_init_driver);
983
/* Module exit: announce unload and unregister from the platform bus. */
static void __exit ti_hecc_exit_driver(void)
{
	printk(KERN_INFO DRV_DESC " unloaded\n");
	platform_driver_unregister(&ti_hecc_driver);
}
module_exit(ti_hecc_exit_driver);
989module_exit(ti_hecc_exit_driver);
990
991MODULE_AUTHOR("Anant Gole <anantgole@ti.com>");
992MODULE_LICENSE("GPL v2");
993MODULE_DESCRIPTION(DRV_DESC);
diff --git a/drivers/net/can/usb/ems_usb.c b/drivers/net/can/usb/ems_usb.c
index abdbd9c2b788..3e4419054c81 100644
--- a/drivers/net/can/usb/ems_usb.c
+++ b/drivers/net/can/usb/ems_usb.c
@@ -232,7 +232,7 @@ MODULE_DEVICE_TABLE(usb, ems_usb_table);
232#define INTR_IN_BUFFER_SIZE 4 232#define INTR_IN_BUFFER_SIZE 4
233 233
234#define MAX_RX_URBS 10 234#define MAX_RX_URBS 10
235#define MAX_TX_URBS CAN_ECHO_SKB_MAX 235#define MAX_TX_URBS 10
236 236
237struct ems_usb; 237struct ems_usb;
238 238
@@ -311,14 +311,10 @@ static void ems_usb_rx_can_msg(struct ems_usb *dev, struct ems_cpc_msg *msg)
311 int i; 311 int i;
312 struct net_device_stats *stats = &dev->netdev->stats; 312 struct net_device_stats *stats = &dev->netdev->stats;
313 313
314 skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame)); 314 skb = alloc_can_skb(dev->netdev, &cf);
315 if (skb == NULL) 315 if (skb == NULL)
316 return; 316 return;
317 317
318 skb->protocol = htons(ETH_P_CAN);
319
320 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
321
322 cf->can_id = le32_to_cpu(msg->msg.can_msg.id); 318 cf->can_id = le32_to_cpu(msg->msg.can_msg.id);
323 cf->can_dlc = min_t(u8, msg->msg.can_msg.length, 8); 319 cf->can_dlc = min_t(u8, msg->msg.can_msg.length, 8);
324 320
@@ -346,18 +342,10 @@ static void ems_usb_rx_err(struct ems_usb *dev, struct ems_cpc_msg *msg)
346 struct sk_buff *skb; 342 struct sk_buff *skb;
347 struct net_device_stats *stats = &dev->netdev->stats; 343 struct net_device_stats *stats = &dev->netdev->stats;
348 344
349 skb = netdev_alloc_skb(dev->netdev, sizeof(struct can_frame)); 345 skb = alloc_can_err_skb(dev->netdev, &cf);
350 if (skb == NULL) 346 if (skb == NULL)
351 return; 347 return;
352 348
353 skb->protocol = htons(ETH_P_CAN);
354
355 cf = (struct can_frame *)skb_put(skb, sizeof(struct can_frame));
356 memset(cf, 0, sizeof(struct can_frame));
357
358 cf->can_id = CAN_ERR_FLAG;
359 cf->can_dlc = CAN_ERR_DLC;
360
361 if (msg->type == CPC_MSG_TYPE_CAN_STATE) { 349 if (msg->type == CPC_MSG_TYPE_CAN_STATE) {
362 u8 state = msg->msg.can_state; 350 u8 state = msg->msg.can_state;
363 351
@@ -1015,7 +1003,7 @@ static int ems_usb_probe(struct usb_interface *intf,
1015 struct ems_usb *dev; 1003 struct ems_usb *dev;
1016 int i, err = -ENOMEM; 1004 int i, err = -ENOMEM;
1017 1005
1018 netdev = alloc_candev(sizeof(struct ems_usb)); 1006 netdev = alloc_candev(sizeof(struct ems_usb), MAX_TX_URBS);
1019 if (!netdev) { 1007 if (!netdev) {
1020 dev_err(netdev->dev.parent, "Couldn't alloc candev\n"); 1008 dev_err(netdev->dev.parent, "Couldn't alloc candev\n");
1021 return -ENOMEM; 1009 return -ENOMEM;
diff --git a/drivers/net/cnic.c b/drivers/net/cnic.c
index 3bf1b04f2cab..e503384e2a54 100644
--- a/drivers/net/cnic.c
+++ b/drivers/net/cnic.c
@@ -33,10 +33,16 @@
33#include <net/route.h> 33#include <net/route.h>
34#include <net/ipv6.h> 34#include <net/ipv6.h>
35#include <net/ip6_route.h> 35#include <net/ip6_route.h>
36#include <net/ip6_checksum.h>
36#include <scsi/iscsi_if.h> 37#include <scsi/iscsi_if.h>
37 38
38#include "cnic_if.h" 39#include "cnic_if.h"
39#include "bnx2.h" 40#include "bnx2.h"
41#include "bnx2x_reg.h"
42#include "bnx2x_fw_defs.h"
43#include "bnx2x_hsi.h"
44#include "../scsi/bnx2i/57xx_iscsi_constants.h"
45#include "../scsi/bnx2i/57xx_iscsi_hsi.h"
40#include "cnic.h" 46#include "cnic.h"
41#include "cnic_defs.h" 47#include "cnic_defs.h"
42 48
@@ -59,6 +65,7 @@ static DEFINE_MUTEX(cnic_lock);
59static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE]; 65static struct cnic_ulp_ops *cnic_ulp_tbl[MAX_CNIC_ULP_TYPE];
60 66
61static int cnic_service_bnx2(void *, void *); 67static int cnic_service_bnx2(void *, void *);
68static int cnic_service_bnx2x(void *, void *);
62static int cnic_ctl(void *, struct cnic_ctl_info *); 69static int cnic_ctl(void *, struct cnic_ctl_info *);
63 70
64static struct cnic_ops cnic_bnx2_ops = { 71static struct cnic_ops cnic_bnx2_ops = {
@@ -67,9 +74,14 @@ static struct cnic_ops cnic_bnx2_ops = {
67 .cnic_ctl = cnic_ctl, 74 .cnic_ctl = cnic_ctl,
68}; 75};
69 76
70static void cnic_shutdown_bnx2_rx_ring(struct cnic_dev *); 77static struct cnic_ops cnic_bnx2x_ops = {
71static void cnic_init_bnx2_tx_ring(struct cnic_dev *); 78 .cnic_owner = THIS_MODULE,
72static void cnic_init_bnx2_rx_ring(struct cnic_dev *); 79 .cnic_handler = cnic_service_bnx2x,
80 .cnic_ctl = cnic_ctl,
81};
82
83static void cnic_shutdown_rings(struct cnic_dev *);
84static void cnic_init_rings(struct cnic_dev *);
73static int cnic_cm_set_pg(struct cnic_sock *); 85static int cnic_cm_set_pg(struct cnic_sock *);
74 86
75static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode) 87static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
@@ -83,10 +95,16 @@ static int cnic_uio_open(struct uio_info *uinfo, struct inode *inode)
83 if (cp->uio_dev != -1) 95 if (cp->uio_dev != -1)
84 return -EBUSY; 96 return -EBUSY;
85 97
98 rtnl_lock();
99 if (!test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
100 rtnl_unlock();
101 return -ENODEV;
102 }
103
86 cp->uio_dev = iminor(inode); 104 cp->uio_dev = iminor(inode);
87 105
88 cnic_init_bnx2_tx_ring(dev); 106 cnic_init_rings(dev);
89 cnic_init_bnx2_rx_ring(dev); 107 rtnl_unlock();
90 108
91 return 0; 109 return 0;
92} 110}
@@ -96,7 +114,7 @@ static int cnic_uio_close(struct uio_info *uinfo, struct inode *inode)
96 struct cnic_dev *dev = uinfo->priv; 114 struct cnic_dev *dev = uinfo->priv;
97 struct cnic_local *cp = dev->cnic_priv; 115 struct cnic_local *cp = dev->cnic_priv;
98 116
99 cnic_shutdown_bnx2_rx_ring(dev); 117 cnic_shutdown_rings(dev);
100 118
101 cp->uio_dev = -1; 119 cp->uio_dev = -1;
102 return 0; 120 return 0;
@@ -162,6 +180,36 @@ static void cnic_ctx_wr(struct cnic_dev *dev, u32 cid_addr, u32 off, u32 val)
162 ethdev->drv_ctl(dev->netdev, &info); 180 ethdev->drv_ctl(dev->netdev, &info);
163} 181}
164 182
183static void cnic_ctx_tbl_wr(struct cnic_dev *dev, u32 off, dma_addr_t addr)
184{
185 struct cnic_local *cp = dev->cnic_priv;
186 struct cnic_eth_dev *ethdev = cp->ethdev;
187 struct drv_ctl_info info;
188 struct drv_ctl_io *io = &info.data.io;
189
190 info.cmd = DRV_CTL_CTXTBL_WR_CMD;
191 io->offset = off;
192 io->dma_addr = addr;
193 ethdev->drv_ctl(dev->netdev, &info);
194}
195
196static void cnic_ring_ctl(struct cnic_dev *dev, u32 cid, u32 cl_id, int start)
197{
198 struct cnic_local *cp = dev->cnic_priv;
199 struct cnic_eth_dev *ethdev = cp->ethdev;
200 struct drv_ctl_info info;
201 struct drv_ctl_l2_ring *ring = &info.data.ring;
202
203 if (start)
204 info.cmd = DRV_CTL_START_L2_CMD;
205 else
206 info.cmd = DRV_CTL_STOP_L2_CMD;
207
208 ring->cid = cid;
209 ring->client_id = cl_id;
210 ethdev->drv_ctl(dev->netdev, &info);
211}
212
165static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val) 213static void cnic_reg_wr_ind(struct cnic_dev *dev, u32 off, u32 val)
166{ 214{
167 struct cnic_local *cp = dev->cnic_priv; 215 struct cnic_local *cp = dev->cnic_priv;
@@ -204,6 +252,19 @@ static void cnic_kwq_completion(struct cnic_dev *dev, u32 count)
204 ethdev->drv_ctl(dev->netdev, &info); 252 ethdev->drv_ctl(dev->netdev, &info);
205} 253}
206 254
255static int cnic_get_l5_cid(struct cnic_local *cp, u32 cid, u32 *l5_cid)
256{
257 u32 i;
258
259 for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
260 if (cp->ctx_tbl[i].cid == cid) {
261 *l5_cid = i;
262 return 0;
263 }
264 }
265 return -EINVAL;
266}
267
207static int cnic_send_nlmsg(struct cnic_local *cp, u32 type, 268static int cnic_send_nlmsg(struct cnic_local *cp, u32 type,
208 struct cnic_sock *csk) 269 struct cnic_sock *csk)
209{ 270{
@@ -347,7 +408,7 @@ int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops)
347{ 408{
348 struct cnic_dev *dev; 409 struct cnic_dev *dev;
349 410
350 if (ulp_type >= MAX_CNIC_ULP_TYPE) { 411 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
351 printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n", 412 printk(KERN_ERR PFX "cnic_register_driver: Bad type %d\n",
352 ulp_type); 413 ulp_type);
353 return -EINVAL; 414 return -EINVAL;
@@ -393,7 +454,7 @@ int cnic_unregister_driver(int ulp_type)
393 struct cnic_ulp_ops *ulp_ops; 454 struct cnic_ulp_ops *ulp_ops;
394 int i = 0; 455 int i = 0;
395 456
396 if (ulp_type >= MAX_CNIC_ULP_TYPE) { 457 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
397 printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n", 458 printk(KERN_ERR PFX "cnic_unregister_driver: Bad type %d\n",
398 ulp_type); 459 ulp_type);
399 return -EINVAL; 460 return -EINVAL;
@@ -449,7 +510,7 @@ static int cnic_register_device(struct cnic_dev *dev, int ulp_type,
449 struct cnic_local *cp = dev->cnic_priv; 510 struct cnic_local *cp = dev->cnic_priv;
450 struct cnic_ulp_ops *ulp_ops; 511 struct cnic_ulp_ops *ulp_ops;
451 512
452 if (ulp_type >= MAX_CNIC_ULP_TYPE) { 513 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
453 printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n", 514 printk(KERN_ERR PFX "cnic_register_device: Bad type %d\n",
454 ulp_type); 515 ulp_type);
455 return -EINVAL; 516 return -EINVAL;
@@ -490,7 +551,7 @@ static int cnic_unregister_device(struct cnic_dev *dev, int ulp_type)
490 struct cnic_local *cp = dev->cnic_priv; 551 struct cnic_local *cp = dev->cnic_priv;
491 int i = 0; 552 int i = 0;
492 553
493 if (ulp_type >= MAX_CNIC_ULP_TYPE) { 554 if (ulp_type < 0 || ulp_type >= MAX_CNIC_ULP_TYPE) {
494 printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n", 555 printk(KERN_ERR PFX "cnic_unregister_device: Bad type %d\n",
495 ulp_type); 556 ulp_type);
496 return -EINVAL; 557 return -EINVAL;
@@ -635,6 +696,20 @@ static void cnic_setup_page_tbl(struct cnic_dev *dev, struct cnic_dma *dma)
635 } 696 }
636} 697}
637 698
699static void cnic_setup_page_tbl_le(struct cnic_dev *dev, struct cnic_dma *dma)
700{
701 int i;
702 u32 *page_table = dma->pgtbl;
703
704 for (i = 0; i < dma->num_pages; i++) {
705 /* Each entry needs to be in little endian format. */
706 *page_table = dma->pg_map_arr[i] & 0xffffffff;
707 page_table++;
708 *page_table = (u32) ((u64) dma->pg_map_arr[i] >> 32);
709 page_table++;
710 }
711}
712
638static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma, 713static int cnic_alloc_dma(struct cnic_dev *dev, struct cnic_dma *dma,
639 int pages, int use_pg_tbl) 714 int pages, int use_pg_tbl)
640{ 715{
@@ -675,6 +750,21 @@ error:
675 return -ENOMEM; 750 return -ENOMEM;
676} 751}
677 752
753static void cnic_free_context(struct cnic_dev *dev)
754{
755 struct cnic_local *cp = dev->cnic_priv;
756 int i;
757
758 for (i = 0; i < cp->ctx_blks; i++) {
759 if (cp->ctx_arr[i].ctx) {
760 pci_free_consistent(dev->pcidev, cp->ctx_blk_size,
761 cp->ctx_arr[i].ctx,
762 cp->ctx_arr[i].mapping);
763 cp->ctx_arr[i].ctx = NULL;
764 }
765 }
766}
767
678static void cnic_free_resc(struct cnic_dev *dev) 768static void cnic_free_resc(struct cnic_dev *dev)
679{ 769{
680 struct cnic_local *cp = dev->cnic_priv; 770 struct cnic_local *cp = dev->cnic_priv;
@@ -702,14 +792,7 @@ static void cnic_free_resc(struct cnic_dev *dev)
702 cp->l2_ring = NULL; 792 cp->l2_ring = NULL;
703 } 793 }
704 794
705 for (i = 0; i < cp->ctx_blks; i++) { 795 cnic_free_context(dev);
706 if (cp->ctx_arr[i].ctx) {
707 pci_free_consistent(dev->pcidev, cp->ctx_blk_size,
708 cp->ctx_arr[i].ctx,
709 cp->ctx_arr[i].mapping);
710 cp->ctx_arr[i].ctx = NULL;
711 }
712 }
713 kfree(cp->ctx_arr); 796 kfree(cp->ctx_arr);
714 cp->ctx_arr = NULL; 797 cp->ctx_arr = NULL;
715 cp->ctx_blks = 0; 798 cp->ctx_blks = 0;
@@ -717,6 +800,7 @@ static void cnic_free_resc(struct cnic_dev *dev)
717 cnic_free_dma(dev, &cp->gbl_buf_info); 800 cnic_free_dma(dev, &cp->gbl_buf_info);
718 cnic_free_dma(dev, &cp->conn_buf_info); 801 cnic_free_dma(dev, &cp->conn_buf_info);
719 cnic_free_dma(dev, &cp->kwq_info); 802 cnic_free_dma(dev, &cp->kwq_info);
803 cnic_free_dma(dev, &cp->kwq_16_data_info);
720 cnic_free_dma(dev, &cp->kcq_info); 804 cnic_free_dma(dev, &cp->kcq_info);
721 kfree(cp->iscsi_tbl); 805 kfree(cp->iscsi_tbl);
722 cp->iscsi_tbl = NULL; 806 cp->iscsi_tbl = NULL;
@@ -808,14 +892,20 @@ static int cnic_alloc_uio(struct cnic_dev *dev) {
808 uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start; 892 uinfo->mem[0].size = dev->netdev->mem_end - dev->netdev->mem_start;
809 uinfo->mem[0].memtype = UIO_MEM_PHYS; 893 uinfo->mem[0].memtype = UIO_MEM_PHYS;
810 894
811 uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
812 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) { 895 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
896 uinfo->mem[1].addr = (unsigned long) cp->status_blk & PAGE_MASK;
813 if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) 897 if (cp->ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX)
814 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9; 898 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE * 9;
815 else 899 else
816 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE; 900 uinfo->mem[1].size = BNX2_SBLK_MSIX_ALIGN_SIZE;
817 901
818 uinfo->name = "bnx2_cnic"; 902 uinfo->name = "bnx2_cnic";
903 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
904 uinfo->mem[1].addr = (unsigned long) cp->bnx2x_def_status_blk &
905 PAGE_MASK;
906 uinfo->mem[1].size = sizeof(struct host_def_status_block);
907
908 uinfo->name = "bnx2x_cnic";
819 } 909 }
820 910
821 uinfo->mem[1].memtype = UIO_MEM_LOGICAL; 911 uinfo->mem[1].memtype = UIO_MEM_LOGICAL;
@@ -880,6 +970,151 @@ error:
880 return ret; 970 return ret;
881} 971}
882 972
/* Allocate the CDU context memory blocks used by bnx2x iSCSI
 * connections.  Sizes the allocation from the ethdev-reported context
 * block size and the cid span, then allocates each block with
 * pci_alloc_consistent.  On E1 (non-E1H) chips the blocks must be
 * naturally aligned; misaligned mappings trigger a full retry with a
 * padded block size.  Returns 0 on success, -EINVAL/-ENOMEM on failure
 * (caller is expected to clean up via cnic_free_resc).
 */
static int cnic_alloc_bnx2x_context(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	int ctx_blk_size = cp->ethdev->ctx_blk_size;
	int total_mem, blks, i, cid_space;

	/* iSCSI cids must lie at or above the hardware's starting cid. */
	if (BNX2X_ISCSI_START_CID < ethdev->starting_cid)
		return -EINVAL;

	/* Context space covers the gap below the iSCSI start cid too. */
	cid_space = MAX_ISCSI_TBL_SZ +
		(BNX2X_ISCSI_START_CID - ethdev->starting_cid);

	total_mem = BNX2X_CONTEXT_MEM_SIZE * cid_space;
	blks = total_mem / ctx_blk_size;
	if (total_mem % ctx_blk_size)
		blks++;	/* round up for the partial last block */

	if (blks > cp->ethdev->ctx_tbl_len)
		return -ENOMEM;

	cp->ctx_arr = kzalloc(blks * sizeof(struct cnic_ctx), GFP_KERNEL);
	if (cp->ctx_arr == NULL)
		return -ENOMEM;

	cp->ctx_blks = blks;
	cp->ctx_blk_size = ctx_blk_size;
	/* E1H does not need natural alignment of context blocks. */
	if (BNX2X_CHIP_IS_E1H(cp->chip_id))
		cp->ctx_align = 0;
	else
		cp->ctx_align = ctx_blk_size;

	cp->cids_per_blk = ctx_blk_size / BNX2X_CONTEXT_MEM_SIZE;

	for (i = 0; i < blks; i++) {
		cp->ctx_arr[i].ctx =
			pci_alloc_consistent(dev->pcidev, cp->ctx_blk_size,
					     &cp->ctx_arr[i].mapping);
		if (cp->ctx_arr[i].ctx == NULL)
			return -ENOMEM;

		/* Alignment retry only applies on the first pass (before
		 * ctx_blk_size was padded): free everything, grow the
		 * block size by one alignment unit, restart the loop.
		 * After padding, cnic_get_bnx2x_ctx() compensates with a
		 * runtime alignment offset instead. */
		if (cp->ctx_align && cp->ctx_blk_size == ctx_blk_size) {
			if (cp->ctx_arr[i].mapping & (cp->ctx_align - 1)) {
				cnic_free_context(dev);
				cp->ctx_blk_size += cp->ctx_align;
				i = -1;	/* i++ makes this 0: full restart */
				continue;
			}
		}
	}
	return 0;
}
1025
1026static int cnic_alloc_bnx2x_resc(struct cnic_dev *dev)
1027{
1028 struct cnic_local *cp = dev->cnic_priv;
1029 int i, j, n, ret, pages;
1030 struct cnic_dma *kwq_16_dma = &cp->kwq_16_data_info;
1031
1032 cp->iscsi_tbl = kzalloc(sizeof(struct cnic_iscsi) * MAX_ISCSI_TBL_SZ,
1033 GFP_KERNEL);
1034 if (!cp->iscsi_tbl)
1035 goto error;
1036
1037 cp->ctx_tbl = kzalloc(sizeof(struct cnic_context) *
1038 MAX_CNIC_L5_CONTEXT, GFP_KERNEL);
1039 if (!cp->ctx_tbl)
1040 goto error;
1041
1042 for (i = 0; i < MAX_ISCSI_TBL_SZ; i++) {
1043 cp->ctx_tbl[i].proto.iscsi = &cp->iscsi_tbl[i];
1044 cp->ctx_tbl[i].ulp_proto_id = CNIC_ULP_ISCSI;
1045 }
1046
1047 pages = PAGE_ALIGN(MAX_CNIC_L5_CONTEXT * CNIC_KWQ16_DATA_SIZE) /
1048 PAGE_SIZE;
1049
1050 ret = cnic_alloc_dma(dev, kwq_16_dma, pages, 0);
1051 if (ret)
1052 return -ENOMEM;
1053
1054 n = PAGE_SIZE / CNIC_KWQ16_DATA_SIZE;
1055 for (i = 0, j = 0; i < MAX_ISCSI_TBL_SZ; i++) {
1056 long off = CNIC_KWQ16_DATA_SIZE * (i % n);
1057
1058 cp->ctx_tbl[i].kwqe_data = kwq_16_dma->pg_arr[j] + off;
1059 cp->ctx_tbl[i].kwqe_data_mapping = kwq_16_dma->pg_map_arr[j] +
1060 off;
1061
1062 if ((i % n) == (n - 1))
1063 j++;
1064 }
1065
1066 ret = cnic_alloc_dma(dev, &cp->kcq_info, KCQ_PAGE_CNT, 0);
1067 if (ret)
1068 goto error;
1069 cp->kcq = (struct kcqe **) cp->kcq_info.pg_arr;
1070
1071 for (i = 0; i < KCQ_PAGE_CNT; i++) {
1072 struct bnx2x_bd_chain_next *next =
1073 (struct bnx2x_bd_chain_next *)
1074 &cp->kcq[i][MAX_KCQE_CNT];
1075 int j = i + 1;
1076
1077 if (j >= KCQ_PAGE_CNT)
1078 j = 0;
1079 next->addr_hi = (u64) cp->kcq_info.pg_map_arr[j] >> 32;
1080 next->addr_lo = cp->kcq_info.pg_map_arr[j] & 0xffffffff;
1081 }
1082
1083 pages = PAGE_ALIGN(BNX2X_ISCSI_NUM_CONNECTIONS *
1084 BNX2X_ISCSI_CONN_BUF_SIZE) / PAGE_SIZE;
1085 ret = cnic_alloc_dma(dev, &cp->conn_buf_info, pages, 1);
1086 if (ret)
1087 goto error;
1088
1089 pages = PAGE_ALIGN(BNX2X_ISCSI_GLB_BUF_SIZE) / PAGE_SIZE;
1090 ret = cnic_alloc_dma(dev, &cp->gbl_buf_info, pages, 0);
1091 if (ret)
1092 goto error;
1093
1094 ret = cnic_alloc_bnx2x_context(dev);
1095 if (ret)
1096 goto error;
1097
1098 cp->bnx2x_status_blk = cp->status_blk;
1099 cp->bnx2x_def_status_blk = cp->ethdev->irq_arr[1].status_blk;
1100
1101 cp->l2_rx_ring_size = 15;
1102
1103 ret = cnic_alloc_l2_rings(dev, 4);
1104 if (ret)
1105 goto error;
1106
1107 ret = cnic_alloc_uio(dev);
1108 if (ret)
1109 goto error;
1110
1111 return 0;
1112
1113error:
1114 cnic_free_resc(dev);
1115 return -ENOMEM;
1116}
1117
883static inline u32 cnic_kwq_avail(struct cnic_local *cp) 1118static inline u32 cnic_kwq_avail(struct cnic_local *cp)
884{ 1119{
885 return cp->max_kwq_idx - 1120 return cp->max_kwq_idx -
@@ -921,6 +1156,880 @@ static int cnic_submit_bnx2_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
921 return 0; 1156 return 0;
922} 1157}
923 1158
1159static void *cnic_get_kwqe_16_data(struct cnic_local *cp, u32 l5_cid,
1160 union l5cm_specific_data *l5_data)
1161{
1162 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1163 dma_addr_t map;
1164
1165 map = ctx->kwqe_data_mapping;
1166 l5_data->phy_address.lo = (u64) map & 0xffffffff;
1167 l5_data->phy_address.hi = (u64) map >> 32;
1168 return ctx->kwqe_data;
1169}
1170
1171static int cnic_submit_kwqe_16(struct cnic_dev *dev, u32 cmd, u32 cid,
1172 u32 type, union l5cm_specific_data *l5_data)
1173{
1174 struct cnic_local *cp = dev->cnic_priv;
1175 struct l5cm_spe kwqe;
1176 struct kwqe_16 *kwq[1];
1177 int ret;
1178
1179 kwqe.hdr.conn_and_cmd_data =
1180 cpu_to_le32(((cmd << SPE_HDR_CMD_ID_SHIFT) |
1181 BNX2X_HW_CID(cid, cp->func)));
1182 kwqe.hdr.type = cpu_to_le16(type);
1183 kwqe.hdr.reserved = 0;
1184 kwqe.data.phy_address.lo = cpu_to_le32(l5_data->phy_address.lo);
1185 kwqe.data.phy_address.hi = cpu_to_le32(l5_data->phy_address.hi);
1186
1187 kwq[0] = (struct kwqe_16 *) &kwqe;
1188
1189 spin_lock_bh(&cp->cnic_ulp_lock);
1190 ret = cp->ethdev->drv_submit_kwqes_16(dev->netdev, kwq, 1);
1191 spin_unlock_bh(&cp->cnic_ulp_lock);
1192
1193 if (ret == 1)
1194 return 0;
1195
1196 return -EBUSY;
1197}
1198
1199static void cnic_reply_bnx2x_kcqes(struct cnic_dev *dev, int ulp_type,
1200 struct kcqe *cqes[], u32 num_cqes)
1201{
1202 struct cnic_local *cp = dev->cnic_priv;
1203 struct cnic_ulp_ops *ulp_ops;
1204
1205 rcu_read_lock();
1206 ulp_ops = rcu_dereference(cp->ulp_ops[ulp_type]);
1207 if (likely(ulp_ops)) {
1208 ulp_ops->indicate_kcqes(cp->ulp_handle[ulp_type],
1209 cqes, num_cqes);
1210 }
1211 rcu_read_unlock();
1212}
1213
/* Handle the iSCSI INIT1 work request: cache the negotiated global
 * sizing parameters in the driver state, then program the per-function
 * initialization values into the T/U/X/C storm processors' internal
 * RAM.  The RAM writes are skipped when the device exposes no iSCSI
 * connections.  Always returns 0.
 */
static int cnic_bnx2x_iscsi_init1(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_init1 *req1 = (struct iscsi_kwqe_init1 *) kwqe;
	int func = cp->func, pages;
	int hq_bds;

	/* Cache sizing so later offload requests can size the
	 * per-connection task array, R2T queue and HQ. */
	cp->num_iscsi_tasks = req1->num_tasks_per_conn;
	cp->num_ccells = req1->num_ccells_per_conn;
	cp->task_array_size = BNX2X_ISCSI_TASK_CONTEXT_SIZE *
			      cp->num_iscsi_tasks;
	cp->r2tq_size = cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS *
			BNX2X_ISCSI_R2TQE_SIZE;
	cp->hq_size = cp->num_ccells * BNX2X_ISCSI_HQ_BD_SIZE;
	/* HQ BD count is rounded up to whole pages. */
	pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
	hq_bds = pages * (PAGE_SIZE / BNX2X_ISCSI_HQ_BD_SIZE);
	cp->num_cqs = req1->num_cqs;

	if (!dev->max_iscsi_conn)
		return 0;

	/* init Tstorm RAM */
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_RQ_SIZE_OFFSET(func),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM + TSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
		  TSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
		  req1->num_tasks_per_conn);

	/* init Ustorm RAM */
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_RQ_BUFFER_SIZE_OFFSET(func),
		  req1->rq_buffer_size);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_PAGE_SIZE_OFFSET(func),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_USTRORM_INTMEM +
		 USTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_RQ_SIZE_OFFSET(func),
		  req1->rq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_CQ_SIZE_OFFSET(func),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_USTRORM_INTMEM + USTORM_ISCSI_R2TQ_SIZE_OFFSET(func),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Xstorm RAM */
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_HQ_SIZE_OFFSET(func),
		  hq_bds);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_SQ_SIZE_OFFSET(func),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_XSTRORM_INTMEM + XSTORM_ISCSI_R2TQ_SIZE_OFFSET(func),
		  cp->num_iscsi_tasks * BNX2X_ISCSI_MAX_PENDING_R2TS);

	/* init Cstorm RAM */
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_PAGE_SIZE_OFFSET(func),
		  PAGE_SIZE);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		 CSTORM_ISCSI_PAGE_SIZE_LOG_OFFSET(func), PAGE_SHIFT);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_NUM_OF_TASKS_OFFSET(func),
		  req1->num_tasks_per_conn);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_CQ_SIZE_OFFSET(func),
		  req1->cq_num_wqes);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM + CSTORM_ISCSI_HQ_SIZE_OFFSET(func),
		  hq_bds);

	return 0;
}
1294
/* Handle the iSCSI INIT2 work request: program the error bitmaps and
 * CQ sequence-number window into storm RAM, then post an INIT
 * completion KCQE back to the ULP.  When the device has no iSCSI
 * connections the KCQE reports NOT_SUPPORTED instead.  Always
 * returns 0 — the outcome is conveyed in the KCQE status.
 */
static int cnic_bnx2x_iscsi_init2(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct iscsi_kwqe_init2 *req2 = (struct iscsi_kwqe_init2 *) kwqe;
	struct cnic_local *cp = dev->cnic_priv;
	int func = cp->func;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	memset(&kcqe, 0, sizeof(kcqe));
	if (!dev->max_iscsi_conn) {
		kcqe.completion_status =
			ISCSI_KCQE_COMPLETION_STATUS_ISCSI_NOT_SUPPORTED;
		goto done;
	}

	/* 64-bit error bitmap is written as two 32-bit halves. */
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		TSTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_USTRORM_INTMEM +
		  USTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(func), req2->error_bit_map[0]);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_ERROR_BITMAP_OFFSET(func) + 4,
		req2->error_bit_map[1]);

	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_ISCSI_CQ_SQN_SIZE_OFFSET(func), req2->max_cq_sqn);

	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;

done:
	kcqe.op_code = ISCSI_KCQE_OPCODE_INIT;
	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return 0;
}
1336
1337static void cnic_free_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1338{
1339 struct cnic_local *cp = dev->cnic_priv;
1340 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1341
1342 if (ctx->ulp_proto_id == CNIC_ULP_ISCSI) {
1343 struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1344
1345 cnic_free_dma(dev, &iscsi->hq_info);
1346 cnic_free_dma(dev, &iscsi->r2tq_info);
1347 cnic_free_dma(dev, &iscsi->task_array_info);
1348 }
1349 cnic_free_id(&cp->cid_tbl, ctx->cid);
1350 ctx->cid = 0;
1351}
1352
1353static int cnic_alloc_bnx2x_conn_resc(struct cnic_dev *dev, u32 l5_cid)
1354{
1355 u32 cid;
1356 int ret, pages;
1357 struct cnic_local *cp = dev->cnic_priv;
1358 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
1359 struct cnic_iscsi *iscsi = ctx->proto.iscsi;
1360
1361 cid = cnic_alloc_new_id(&cp->cid_tbl);
1362 if (cid == -1) {
1363 ret = -ENOMEM;
1364 goto error;
1365 }
1366
1367 ctx->cid = cid;
1368 pages = PAGE_ALIGN(cp->task_array_size) / PAGE_SIZE;
1369
1370 ret = cnic_alloc_dma(dev, &iscsi->task_array_info, pages, 1);
1371 if (ret)
1372 goto error;
1373
1374 pages = PAGE_ALIGN(cp->r2tq_size) / PAGE_SIZE;
1375 ret = cnic_alloc_dma(dev, &iscsi->r2tq_info, pages, 1);
1376 if (ret)
1377 goto error;
1378
1379 pages = PAGE_ALIGN(cp->hq_size) / PAGE_SIZE;
1380 ret = cnic_alloc_dma(dev, &iscsi->hq_info, pages, 1);
1381 if (ret)
1382 goto error;
1383
1384 return 0;
1385
1386error:
1387 cnic_free_bnx2x_conn_resc(dev, l5_cid);
1388 return ret;
1389}
1390
/* Return a CPU pointer to the CDU context slot for hardware cid @cid
 * and report its DMA address through @ctx_addr as a lo/hi pair.  When
 * @init is non-zero the slot is zeroed first.  If the containing block
 * was not naturally aligned, the leading padding (reserved by
 * cnic_alloc_bnx2x_context growing ctx_blk_size) is skipped so the
 * usable region starts at an aligned address.
 */
static void *cnic_get_bnx2x_ctx(struct cnic_dev *dev, u32 cid, int init,
				struct regpair *ctx_addr)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_eth_dev *ethdev = cp->ethdev;
	/* Locate which block holds this cid and its slot within it. */
	int blk = (cid - ethdev->starting_cid) / cp->cids_per_blk;
	int off = (cid - ethdev->starting_cid) % cp->cids_per_blk;
	unsigned long align_off = 0;
	dma_addr_t ctx_map;
	void *ctx;

	if (cp->ctx_align) {
		unsigned long mask = cp->ctx_align - 1;

		/* Offset past the misaligned head of the block. */
		if (cp->ctx_arr[blk].mapping & mask)
			align_off = cp->ctx_align -
				(cp->ctx_arr[blk].mapping & mask);
	}
	ctx_map = cp->ctx_arr[blk].mapping + align_off +
		(off * BNX2X_CONTEXT_MEM_SIZE);
	ctx = cp->ctx_arr[blk].ctx + align_off +
	      (off * BNX2X_CONTEXT_MEM_SIZE);
	if (init)
		memset(ctx, 0, BNX2X_CONTEXT_MEM_SIZE);

	ctx_addr->lo = ctx_map & 0xffffffff;
	ctx_addr->hi = (u64) ctx_map >> 32;
	return ctx;
}
1420
/* Fill in the CDU iSCSI context for the connection described by the
 * offload KWQE chain in @wqes (offload1, offload2, then one or more
 * offload3 WQEs carrying additional CQ PTEs).  Initializes the
 * X/T/U/C storm sections of the context with the SQ/HQ/R2TQ/RQ/CQ
 * page-table addresses and default TCP/iSCSI parameters.
 * Returns 0 on success, -EINVAL when no additional WQEs were supplied,
 * -ENOMEM when the context slot cannot be obtained.
 */
static int cnic_setup_bnx2x_ctx(struct cnic_dev *dev, struct kwqe *wqes[],
				u32 num)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_offload1 *req1 =
		(struct iscsi_kwqe_conn_offload1 *) wqes[0];
	struct iscsi_kwqe_conn_offload2 *req2 =
		(struct iscsi_kwqe_conn_offload2 *) wqes[1];
	struct iscsi_kwqe_conn_offload3 *req3;
	struct cnic_context *ctx = &cp->ctx_tbl[req1->iscsi_conn_id];
	struct cnic_iscsi *iscsi = ctx->proto.iscsi;
	u32 cid = ctx->cid;
	u32 hw_cid = BNX2X_HW_CID(cid, cp->func);
	struct iscsi_context *ictx;
	struct regpair context_addr;
	/* n walks the WQE array (first offload3 is wqes[2]). */
	int i, j, n = 2, n_max;

	ctx->ctx_flags = 0;
	if (!req2->num_additional_wqes)
		return -EINVAL;

	n_max = req2->num_additional_wqes + 2;

	ictx = cnic_get_bnx2x_ctx(dev, cid, 1, &context_addr);
	if (ictx == NULL)
		return -ENOMEM;

	req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];

	ictx->xstorm_ag_context.hq_prod = 1;

	ictx->xstorm_st_context.iscsi.first_burst_length =
		ISCSI_DEF_FIRST_BURST_LEN;
	ictx->xstorm_st_context.iscsi.max_send_pdu_length =
		ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.lo =
		req1->sq_page_table_addr_lo;
	ictx->xstorm_st_context.iscsi.sq_pbl_base.hi =
		req1->sq_page_table_addr_hi;
	/* NOTE(review): PTE hi/lo appear deliberately swapped into
	 * curr_pbe lo/hi here and below — presumably the PTE encoding
	 * differs from the regpair layout; confirm against the HSI. */
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.lo = req2->sq_first_pte.hi;
	ictx->xstorm_st_context.iscsi.sq_curr_pbe.hi = req2->sq_first_pte.lo;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.lo =
		iscsi->hq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.hq_curr_pbe_base.hi =
		iscsi->hq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.r2tq_pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->xstorm_st_context.iscsi.r2tq_curr_pbe_base.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->xstorm_st_context.iscsi.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->xstorm_st_context.iscsi.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->xstorm_st_context.iscsi.task_pbl_cache_idx =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA;
	ictx->xstorm_st_context.iscsi.flags.flags |=
		XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T;

	ictx->tstorm_st_context.iscsi.hdr_bytes_2_fetch = ISCSI_HEADER_SIZE;
	/* TSTORM requires the base address of RQ DB & not PTE */
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.lo =
		req2->rq_page_table_addr_lo & PAGE_MASK;
	ictx->tstorm_st_context.iscsi.rq_db_phy_addr.hi =
		req2->rq_page_table_addr_hi;
	ictx->tstorm_st_context.iscsi.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->tstorm_st_context.tcp.cwnd = 0x5A8;
	ictx->tstorm_st_context.tcp.flags2 |=
		TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN;

	ictx->timers_context.flags |= ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG;

	/* NOTE(review): rq_page_table_addr_lo/hi look like two 32-bit
	 * halves, so "& 0xffffffff" and ">> 32" here may be no-ops —
	 * verify the intended field widths against the HSI. */
	ictx->ustorm_st_context.ring.rq.pbl_base.lo =
		req2->rq_page_table_addr_lo & 0xffffffff;
	ictx->ustorm_st_context.ring.rq.pbl_base.hi =
		(u64) req2->rq_page_table_addr_hi >> 32;
	ictx->ustorm_st_context.ring.rq.curr_pbe.lo = req3->qp_first_pte[0].hi;
	ictx->ustorm_st_context.ring.rq.curr_pbe.hi = req3->qp_first_pte[0].lo;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.lo =
		iscsi->r2tq_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.ring.r2tq.pbl_base.hi =
		(u64) iscsi->r2tq_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.lo =
		iscsi->r2tq_info.pgtbl[0];
	ictx->ustorm_st_context.ring.r2tq.curr_pbe.hi =
		iscsi->r2tq_info.pgtbl[1];
	ictx->ustorm_st_context.ring.cq_pbl_base.lo =
		req1->cq_page_table_addr_lo;
	ictx->ustorm_st_context.ring.cq_pbl_base.hi =
		req1->cq_page_table_addr_hi;
	ictx->ustorm_st_context.ring.cq[0].cq_sn = ISCSI_INITIAL_SN;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.lo = req2->cq_first_pte.hi;
	ictx->ustorm_st_context.ring.cq[0].curr_pbe.hi = req2->cq_first_pte.lo;
	ictx->ustorm_st_context.task_pbe_cache_index =
		BNX2X_ISCSI_PBL_NOT_CACHED;
	ictx->ustorm_st_context.task_pdu_cache_index =
		BNX2X_ISCSI_PDU_HEADER_NOT_CACHED;

	/* Remaining CQ first-PTEs come 3 per offload3 WQE; advance to
	 * the next WQE each time j wraps, stopping at n_max. */
	for (i = 1, j = 1; i < cp->num_cqs; i++, j++) {
		if (j == 3) {
			if (n >= n_max)
				break;
			req3 = (struct iscsi_kwqe_conn_offload3 *) wqes[n++];
			j = 0;
		}
		ictx->ustorm_st_context.ring.cq[i].cq_sn = ISCSI_INITIAL_SN;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.lo =
			req3->qp_first_pte[j].hi;
		ictx->ustorm_st_context.ring.cq[i].curr_pbe.hi =
			req3->qp_first_pte[j].lo;
	}

	ictx->ustorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->ustorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	ictx->ustorm_st_context.tce_phy_addr.lo =
		iscsi->task_array_info.pgtbl[0];
	ictx->ustorm_st_context.tce_phy_addr.hi =
		iscsi->task_array_info.pgtbl[1];
	ictx->ustorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->ustorm_st_context.num_cqs = cp->num_cqs;
	ictx->ustorm_st_context.negotiated_rx |= ISCSI_DEF_MAX_RECV_SEG_LEN;
	ictx->ustorm_st_context.negotiated_rx_and_flags |=
		ISCSI_DEF_MAX_BURST_LEN;
	ictx->ustorm_st_context.negotiated_rx |=
		ISCSI_DEFAULT_MAX_OUTSTANDING_R2T <<
		USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT;

	ictx->cstorm_st_context.hq_pbl_base.lo =
		iscsi->hq_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.hq_pbl_base.hi =
		(u64) iscsi->hq_info.pgtbl_map >> 32;
	ictx->cstorm_st_context.hq_curr_pbe.lo = iscsi->hq_info.pgtbl[0];
	ictx->cstorm_st_context.hq_curr_pbe.hi = iscsi->hq_info.pgtbl[1];
	ictx->cstorm_st_context.task_pbl_base.lo =
		iscsi->task_array_info.pgtbl_map & 0xffffffff;
	ictx->cstorm_st_context.task_pbl_base.hi =
		(u64) iscsi->task_array_info.pgtbl_map >> 32;
	/* CSTORM and USTORM initialization is different, CSTORM requires
	 * CQ DB base & not PTE addr */
	ictx->cstorm_st_context.cq_db_base.lo =
		req1->cq_page_table_addr_lo & PAGE_MASK;
	ictx->cstorm_st_context.cq_db_base.hi = req1->cq_page_table_addr_hi;
	ictx->cstorm_st_context.iscsi_conn_id = req1->iscsi_conn_id;
	ictx->cstorm_st_context.cq_proc_en_bit_map = (1 << cp->num_cqs) - 1;
	for (i = 0; i < cp->num_cqs; i++) {
		ictx->cstorm_st_context.cq_c_prod_sqn_arr.sqn[i] =
			ISCSI_INITIAL_SN;
		ictx->cstorm_st_context.cq_c_sqn_2_notify_arr.sqn[i] =
			ISCSI_INITIAL_SN;
	}

	ictx->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_XCM_AG,
				       ISCSI_CONNECTION_TYPE);
	ictx->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(hw_cid, CDU_REGION_NUMBER_UCM_AG,
				       ISCSI_CONNECTION_TYPE);
	return 0;

}
1592
1593static int cnic_bnx2x_iscsi_ofld1(struct cnic_dev *dev, struct kwqe *wqes[],
1594 u32 num, int *work)
1595{
1596 struct iscsi_kwqe_conn_offload1 *req1;
1597 struct iscsi_kwqe_conn_offload2 *req2;
1598 struct cnic_local *cp = dev->cnic_priv;
1599 struct iscsi_kcqe kcqe;
1600 struct kcqe *cqes[1];
1601 u32 l5_cid;
1602 int ret;
1603
1604 if (num < 2) {
1605 *work = num;
1606 return -EINVAL;
1607 }
1608
1609 req1 = (struct iscsi_kwqe_conn_offload1 *) wqes[0];
1610 req2 = (struct iscsi_kwqe_conn_offload2 *) wqes[1];
1611 if ((num - 2) < req2->num_additional_wqes) {
1612 *work = num;
1613 return -EINVAL;
1614 }
1615 *work = 2 + req2->num_additional_wqes;;
1616
1617 l5_cid = req1->iscsi_conn_id;
1618 if (l5_cid >= MAX_ISCSI_TBL_SZ)
1619 return -EINVAL;
1620
1621 memset(&kcqe, 0, sizeof(kcqe));
1622 kcqe.op_code = ISCSI_KCQE_OPCODE_OFFLOAD_CONN;
1623 kcqe.iscsi_conn_id = l5_cid;
1624 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE;
1625
1626 if (atomic_inc_return(&cp->iscsi_conn) > dev->max_iscsi_conn) {
1627 atomic_dec(&cp->iscsi_conn);
1628 ret = 0;
1629 goto done;
1630 }
1631 ret = cnic_alloc_bnx2x_conn_resc(dev, l5_cid);
1632 if (ret) {
1633 atomic_dec(&cp->iscsi_conn);
1634 ret = 0;
1635 goto done;
1636 }
1637 ret = cnic_setup_bnx2x_ctx(dev, wqes, num);
1638 if (ret < 0) {
1639 cnic_free_bnx2x_conn_resc(dev, l5_cid);
1640 atomic_dec(&cp->iscsi_conn);
1641 goto done;
1642 }
1643
1644 kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
1645 kcqe.iscsi_conn_context_id = BNX2X_HW_CID(cp->ctx_tbl[l5_cid].cid,
1646 cp->func);
1647
1648done:
1649 cqes[0] = (struct kcqe *) &kcqe;
1650 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);
1651 return ret;
1652}
1653
1654
1655static int cnic_bnx2x_iscsi_update(struct cnic_dev *dev, struct kwqe *kwqe)
1656{
1657 struct cnic_local *cp = dev->cnic_priv;
1658 struct iscsi_kwqe_conn_update *req =
1659 (struct iscsi_kwqe_conn_update *) kwqe;
1660 void *data;
1661 union l5cm_specific_data l5_data;
1662 u32 l5_cid, cid = BNX2X_SW_CID(req->context_id);
1663 int ret;
1664
1665 if (cnic_get_l5_cid(cp, cid, &l5_cid) != 0)
1666 return -EINVAL;
1667
1668 data = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
1669 if (!data)
1670 return -ENOMEM;
1671
1672 memcpy(data, kwqe, sizeof(struct kwqe));
1673
1674 ret = cnic_submit_kwqe_16(dev, ISCSI_RAMROD_CMD_ID_UPDATE_CONN,
1675 req->context_id, ISCSI_CONNECTION_TYPE, &l5_data);
1676 return ret;
1677}
1678
/* Handle the iSCSI DESTROY_CONN request: if the connection was ever
 * offload-started, wait out a 2-second quiesce window since the last
 * recorded activity, issue a CFC delete ramrod and block until its
 * completion wakes ctx->waitq.  Then free the per-connection resources
 * and report a DESTROY_CONN completion KCQE (always SUCCESS) to the
 * ULP.  Returns the ramrod submission status (0 if skipped).
 */
static int cnic_bnx2x_iscsi_destroy(struct cnic_dev *dev, struct kwqe *kwqe)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct iscsi_kwqe_conn_destroy *req =
		(struct iscsi_kwqe_conn_destroy *) kwqe;
	union l5cm_specific_data l5_data;
	/* NOTE(review): l5_cid comes from req->reserved0 and indexes
	 * ctx_tbl without a range check here — presumably validated by
	 * the ULP before submission; confirm against the caller. */
	u32 l5_cid = req->reserved0;
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret = 0;
	struct iscsi_kcqe kcqe;
	struct kcqe *cqes[1];

	if (!(ctx->ctx_flags & CTX_FL_OFFLD_START))
		goto skip_cfc_delete;

	/* Let at least 2 seconds elapse since the last activity
	 * timestamp before deleting the CFC entry. */
	while (!time_after(jiffies, ctx->timestamp + (2 * HZ)))
		msleep(250);

	init_waitqueue_head(&ctx->waitq);
	ctx->wait_cond = 0;
	memset(&l5_data, 0, sizeof(l5_data));
	ret = cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CFC_DEL,
				  req->context_id,
				  ETH_CONNECTION_TYPE |
				  (1 << SPE_HDR_COMMON_RAMROD_SHIFT),
				  &l5_data);
	/* Block until the CFC-delete completion sets wait_cond. */
	if (ret == 0)
		wait_event(ctx->waitq, ctx->wait_cond);

skip_cfc_delete:
	cnic_free_bnx2x_conn_resc(dev, l5_cid);

	atomic_dec(&cp->iscsi_conn);

	memset(&kcqe, 0, sizeof(kcqe));
	kcqe.op_code = ISCSI_KCQE_OPCODE_DESTROY_CONN;
	kcqe.iscsi_conn_id = l5_cid;
	kcqe.completion_status = ISCSI_KCQE_COMPLETION_STATUS_SUCCESS;
	kcqe.iscsi_conn_context_id = req->context_id;

	cqes[0] = (struct kcqe *) &kcqe;
	cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_ISCSI, cqes, 1);

	return ret;
}
1724
/* Fill in the Xstorm/Tstorm connection buffers used by the L5CM
 * connect ramrod: context address, MSS/receive-buffer parameters,
 * a precomputed IPv6 pseudo-header checksum for the address pair,
 * and the TCP option settings (Nagle, delayed ACK, keepalive) taken
 * from the connect request WQEs.
 */
static void cnic_init_storm_conn_bufs(struct cnic_dev *dev,
				      struct l4_kwq_connect_req1 *kwqe1,
				      struct l4_kwq_connect_req3 *kwqe3,
				      struct l5cm_active_conn_buffer *conn_buf)
{
	struct l5cm_conn_addr_params *conn_addr = &conn_buf->conn_addr_buf;
	struct l5cm_xstorm_conn_buffer *xstorm_buf =
		&conn_buf->xstorm_conn_buffer;
	struct l5cm_tstorm_conn_buffer *tstorm_buf =
		&conn_buf->tstorm_conn_buffer;
	struct regpair context_addr;
	u32 cid = BNX2X_SW_CID(kwqe1->cid);
	struct in6_addr src_ip, dst_ip;
	int i;
	u32 *addrp;

	/* Convert the stored addresses to network byte order; an IPv4
	 * address occupies the first words of the in6_addr. */
	addrp = (u32 *) &conn_addr->local_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		src_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	addrp = (u32 *) &conn_addr->remote_ip_addr;
	for (i = 0; i < 4; i++, addrp++)
		dst_ip.in6_u.u6_addr32[i] = cpu_to_be32(*addrp);

	cnic_get_bnx2x_ctx(dev, cid, 0, &context_addr);

	xstorm_buf->context_addr.hi = context_addr.hi;
	xstorm_buf->context_addr.lo = context_addr.lo;
	xstorm_buf->mss = 0xffff;
	xstorm_buf->rcv_buf = kwqe3->rcv_buf;
	if (kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NAGLE_ENABLE)
		xstorm_buf->params |= L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE;
	/* Hardware expects the one's-complement pseudo-header checksum
	 * with zero length/payload, byte-swapped. */
	xstorm_buf->pseudo_header_checksum =
		swab16(~csum_ipv6_magic(&src_ip, &dst_ip, 0, IPPROTO_TCP, 0));

	if (!(kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_NO_DELAY_ACK))
		tstorm_buf->params |=
			L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE;
	if (kwqe3->ka_timeout) {
		tstorm_buf->ka_enable = 1;
		tstorm_buf->ka_timeout = kwqe3->ka_timeout;
		tstorm_buf->ka_interval = kwqe3->ka_interval;
		tstorm_buf->ka_max_probe_count = kwqe3->ka_max_probe_count;
	}
	tstorm_buf->rcv_buf = kwqe3->rcv_buf;
	tstorm_buf->snd_buf = kwqe3->snd_buf;
	tstorm_buf->max_rt_time = 0xffffffff;
}
1773
/* Program the device MAC address into storm internal RAM for this
 * function: byte-by-byte into the Xstorm local-MAC registers in
 * natural order, and into the Tstorm TCP-vars LSB/MSB registers in
 * reversed byte order (mac[5]..mac[0]).
 */
static void cnic_init_bnx2x_mac(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int func = CNIC_FUNC(cp);
	u8 *mac = dev->mac_addr;

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR0_OFFSET(func), mac[0]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR1_OFFSET(func), mac[1]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR2_OFFSET(func), mac[2]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR3_OFFSET(func), mac[3]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR4_OFFSET(func), mac[4]);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_LOCAL_MAC_ADDR5_OFFSET(func), mac[5]);

	/* Tstorm stores the MAC reversed: LSB register gets mac[5],
	 * then ascending offsets hold mac[4]..mac[0]. */
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func), mac[5]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_LSB_LOCAL_MAC_ADDR_OFFSET(func) + 1,
		 mac[4]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func), mac[3]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 1,
		 mac[2]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 2,
		 mac[1]);
	CNIC_WR8(dev, BAR_TSTRORM_INTMEM +
		 TSTORM_ISCSI_TCP_VARS_MSB_LOCAL_MAC_ADDR_OFFSET(func) + 3,
		 mac[0]);
}
1810
1811static void cnic_bnx2x_set_tcp_timestamp(struct cnic_dev *dev, int tcp_ts)
1812{
1813 struct cnic_local *cp = dev->cnic_priv;
1814 u8 xstorm_flags = XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN;
1815 u16 tstorm_flags = 0;
1816
1817 if (tcp_ts) {
1818 xstorm_flags |= XSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
1819 tstorm_flags |= TSTORM_L5CM_TCP_FLAGS_TS_ENABLED;
1820 }
1821
1822 CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
1823 XSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), xstorm_flags);
1824
1825 CNIC_WR16(dev, BAR_TSTRORM_INTMEM +
1826 TSTORM_ISCSI_TCP_VARS_FLAGS_OFFSET(cp->func), tstorm_flags);
1827}
1828
/* Handle an L4 CONNECT1 KWQE and its companion WQEs: build an
 * l5cm_active_conn_buffer in the 16-byte KWQE data area and issue the
 * TCP_CONNECT ramrod for the connection.
 *
 * Consumes 2 WQEs for IPv4 (req1 + req3) or 3 for IPv6 (req1 + req2 +
 * req3); *work is set to the number consumed so the caller can advance.
 * Returns 0 on success or a negative errno.
 */
static int cnic_bnx2x_connect(struct cnic_dev *dev, struct kwqe *wqes[],
			      u32 num, int *work)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct l4_kwq_connect_req1 *kwqe1 =
		(struct l4_kwq_connect_req1 *) wqes[0];
	struct l4_kwq_connect_req3 *kwqe3;
	struct l5cm_active_conn_buffer *conn_buf;
	struct l5cm_conn_addr_params *conn_addr;
	union l5cm_specific_data l5_data;
	u32 l5_cid = kwqe1->pg_cid;
	struct cnic_sock *csk = &cp->csk_tbl[l5_cid];
	struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
	int ret;

	if (num < 2) {
		*work = num;
		return -EINVAL;
	}

	/* IPv6 connects carry an extra req2 WQE with the upper three
	 * 32-bit words of each address.
	 */
	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6)
		*work = 3;
	else
		*work = 2;

	if (num < *work) {
		*work = num;
		return -EINVAL;
	}

	/* The connect buffer must fit in the 16-byte KWQE data area. */
	if (sizeof(*conn_buf) > CNIC_KWQ16_DATA_SIZE) {
		printk(KERN_ERR PFX "%s: conn_buf size too big\n",
		       dev->netdev->name);
		return -ENOMEM;
	}
	conn_buf = cnic_get_kwqe_16_data(cp, l5_cid, &l5_data);
	if (!conn_buf)
		return -ENOMEM;

	memset(conn_buf, 0, sizeof(*conn_buf));

	conn_addr = &conn_buf->conn_addr_buf;
	conn_addr->remote_addr_0 = csk->ha[0];
	conn_addr->remote_addr_1 = csk->ha[1];
	conn_addr->remote_addr_2 = csk->ha[2];
	conn_addr->remote_addr_3 = csk->ha[3];
	conn_addr->remote_addr_4 = csk->ha[4];
	conn_addr->remote_addr_5 = csk->ha[5];

	if (kwqe1->conn_flags & L4_KWQ_CONNECT_REQ1_IP_V6) {
		struct l4_kwq_connect_req2 *kwqe2 =
			(struct l4_kwq_connect_req2 *) wqes[1];

		conn_addr->local_ip_addr.ip_addr_hi_hi = kwqe2->src_ip_v6_4;
		conn_addr->local_ip_addr.ip_addr_hi_lo = kwqe2->src_ip_v6_3;
		conn_addr->local_ip_addr.ip_addr_lo_hi = kwqe2->src_ip_v6_2;

		conn_addr->remote_ip_addr.ip_addr_hi_hi = kwqe2->dst_ip_v6_4;
		conn_addr->remote_ip_addr.ip_addr_hi_lo = kwqe2->dst_ip_v6_3;
		conn_addr->remote_ip_addr.ip_addr_lo_hi = kwqe2->dst_ip_v6_2;
		conn_addr->params |= L5CM_CONN_ADDR_PARAMS_IP_VERSION;
	}
	/* req3 is the last WQE regardless of address family. */
	kwqe3 = (struct l4_kwq_connect_req3 *) wqes[*work - 1];

	conn_addr->local_ip_addr.ip_addr_lo_lo = kwqe1->src_ip;
	conn_addr->remote_ip_addr.ip_addr_lo_lo = kwqe1->dst_ip;
	conn_addr->local_tcp_port = kwqe1->src_port;
	conn_addr->remote_tcp_port = kwqe1->dst_port;

	conn_addr->pmtu = kwqe3->pmtu;
	cnic_init_storm_conn_bufs(dev, kwqe1, kwqe3, conn_buf);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(cp->func), csk->vlan_id);

	cnic_bnx2x_set_tcp_timestamp(dev,
		kwqe1->tcp_flags & L4_KWQ_CONNECT_REQ1_TIME_STAMP);

	ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_TCP_CONNECT,
		kwqe1->cid, ISCSI_CONNECTION_TYPE, &l5_data);
	if (!ret)
		/* Mark offload in progress so teardown knows to clean up. */
		ctx->ctx_flags |= CTX_FL_OFFLD_START;

	return ret;
}
1914
1915static int cnic_bnx2x_close(struct cnic_dev *dev, struct kwqe *kwqe)
1916{
1917 struct l4_kwq_close_req *req = (struct l4_kwq_close_req *) kwqe;
1918 union l5cm_specific_data l5_data;
1919 int ret;
1920
1921 memset(&l5_data, 0, sizeof(l5_data));
1922 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_CLOSE,
1923 req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
1924 return ret;
1925}
1926
1927static int cnic_bnx2x_reset(struct cnic_dev *dev, struct kwqe *kwqe)
1928{
1929 struct l4_kwq_reset_req *req = (struct l4_kwq_reset_req *) kwqe;
1930 union l5cm_specific_data l5_data;
1931 int ret;
1932
1933 memset(&l5_data, 0, sizeof(l5_data));
1934 ret = cnic_submit_kwqe_16(dev, L5CM_RAMROD_CMD_ID_ABORT,
1935 req->cid, ISCSI_CONNECTION_TYPE, &l5_data);
1936 return ret;
1937}
1938static int cnic_bnx2x_offload_pg(struct cnic_dev *dev, struct kwqe *kwqe)
1939{
1940 struct l4_kwq_offload_pg *req = (struct l4_kwq_offload_pg *) kwqe;
1941 struct l4_kcq kcqe;
1942 struct kcqe *cqes[1];
1943
1944 memset(&kcqe, 0, sizeof(kcqe));
1945 kcqe.pg_host_opaque = req->host_opaque;
1946 kcqe.pg_cid = req->host_opaque;
1947 kcqe.op_code = L4_KCQE_OPCODE_VALUE_OFFLOAD_PG;
1948 cqes[0] = (struct kcqe *) &kcqe;
1949 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
1950 return 0;
1951}
1952
1953static int cnic_bnx2x_update_pg(struct cnic_dev *dev, struct kwqe *kwqe)
1954{
1955 struct l4_kwq_update_pg *req = (struct l4_kwq_update_pg *) kwqe;
1956 struct l4_kcq kcqe;
1957 struct kcqe *cqes[1];
1958
1959 memset(&kcqe, 0, sizeof(kcqe));
1960 kcqe.pg_host_opaque = req->pg_host_opaque;
1961 kcqe.pg_cid = req->pg_cid;
1962 kcqe.op_code = L4_KCQE_OPCODE_VALUE_UPDATE_PG;
1963 cqes[0] = (struct kcqe *) &kcqe;
1964 cnic_reply_bnx2x_kcqes(dev, CNIC_ULP_L4, cqes, 1);
1965 return 0;
1966}
1967
/* Dispatch an array of kernel work-queue entries to their bnx2x
 * handlers.  Multi-WQE opcodes (OFFLOAD_CONN1, CONNECT1) report how
 * many entries they consumed via 'work'; everything else consumes one.
 * Individual handler failures are logged but do not stop processing.
 */
static int cnic_submit_bnx2x_kwqes(struct cnic_dev *dev, struct kwqe *wqes[],
				   u32 num_wqes)
{
	int i, work, ret;
	u32 opcode;
	struct kwqe *kwqe;

	if (!test_bit(CNIC_F_CNIC_UP, &dev->flags))
		return -EAGAIN;		/* bnx2 is down */

	for (i = 0; i < num_wqes; ) {
		kwqe = wqes[i];
		opcode = KWQE_OPCODE(kwqe->kwqe_op_flag);
		work = 1;

		switch (opcode) {
		case ISCSI_KWQE_OPCODE_INIT1:
			ret = cnic_bnx2x_iscsi_init1(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_INIT2:
			ret = cnic_bnx2x_iscsi_init2(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_OFFLOAD_CONN1:
			ret = cnic_bnx2x_iscsi_ofld1(dev, &wqes[i],
						     num_wqes - i, &work);
			break;
		case ISCSI_KWQE_OPCODE_UPDATE_CONN:
			ret = cnic_bnx2x_iscsi_update(dev, kwqe);
			break;
		case ISCSI_KWQE_OPCODE_DESTROY_CONN:
			ret = cnic_bnx2x_iscsi_destroy(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_CONNECT1:
			ret = cnic_bnx2x_connect(dev, &wqes[i], num_wqes - i,
						 &work);
			break;
		case L4_KWQE_OPCODE_VALUE_CLOSE:
			ret = cnic_bnx2x_close(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_RESET:
			ret = cnic_bnx2x_reset(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_OFFLOAD_PG:
			ret = cnic_bnx2x_offload_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPDATE_PG:
			ret = cnic_bnx2x_update_pg(dev, kwqe);
			break;
		case L4_KWQE_OPCODE_VALUE_UPLOAD_PG:
			/* No action needed; silently accepted. */
			ret = 0;
			break;
		default:
			ret = 0;
			printk(KERN_ERR PFX "%s: Unknown type of KWQE(0x%x)\n",
			       dev->netdev->name, opcode);
			break;
		}
		if (ret < 0)
			printk(KERN_ERR PFX "%s: KWQE(0x%x) failed\n",
			       dev->netdev->name, opcode);
		i += work;
	}
	return 0;
}
2032
924static void service_kcqes(struct cnic_dev *dev, int num_cqes) 2033static void service_kcqes(struct cnic_dev *dev, int num_cqes)
925{ 2034{
926 struct cnic_local *cp = dev->cnic_priv; 2035 struct cnic_local *cp = dev->cnic_priv;
@@ -987,6 +2096,22 @@ static u16 cnic_bnx2_hw_idx(u16 idx)
987 return idx; 2096 return idx;
988} 2097}
989 2098
2099static u16 cnic_bnx2x_next_idx(u16 idx)
2100{
2101 idx++;
2102 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
2103 idx++;
2104
2105 return idx;
2106}
2107
2108static u16 cnic_bnx2x_hw_idx(u16 idx)
2109{
2110 if ((idx & MAX_KCQE_CNT) == MAX_KCQE_CNT)
2111 idx++;
2112 return idx;
2113}
2114
990static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod) 2115static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
991{ 2116{
992 struct cnic_local *cp = dev->cnic_priv; 2117 struct cnic_local *cp = dev->cnic_priv;
@@ -1012,7 +2137,7 @@ static int cnic_get_kcqes(struct cnic_dev *dev, u16 hw_prod, u16 *sw_prod)
1012 return last_cnt; 2137 return last_cnt;
1013} 2138}
1014 2139
1015static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp) 2140static void cnic_chk_pkt_rings(struct cnic_local *cp)
1016{ 2141{
1017 u16 rx_cons = *cp->rx_cons_ptr; 2142 u16 rx_cons = *cp->rx_cons_ptr;
1018 u16 tx_cons = *cp->tx_cons_ptr; 2143 u16 tx_cons = *cp->tx_cons_ptr;
@@ -1020,6 +2145,7 @@ static void cnic_chk_bnx2_pkt_rings(struct cnic_local *cp)
1020 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) { 2145 if (cp->tx_cons != tx_cons || cp->rx_cons != rx_cons) {
1021 cp->tx_cons = tx_cons; 2146 cp->tx_cons = tx_cons;
1022 cp->rx_cons = rx_cons; 2147 cp->rx_cons = rx_cons;
2148
1023 uio_event_notify(cp->cnic_uinfo); 2149 uio_event_notify(cp->cnic_uinfo);
1024 } 2150 }
1025} 2151}
@@ -1062,7 +2188,7 @@ done:
1062 2188
1063 cp->kcq_prod_idx = sw_prod; 2189 cp->kcq_prod_idx = sw_prod;
1064 2190
1065 cnic_chk_bnx2_pkt_rings(cp); 2191 cnic_chk_pkt_rings(cp);
1066 return status_idx; 2192 return status_idx;
1067} 2193}
1068 2194
@@ -1100,7 +2226,7 @@ done:
1100 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod); 2226 CNIC_WR16(dev, cp->kcq_io_addr, sw_prod);
1101 cp->kcq_prod_idx = sw_prod; 2227 cp->kcq_prod_idx = sw_prod;
1102 2228
1103 cnic_chk_bnx2_pkt_rings(cp); 2229 cnic_chk_pkt_rings(cp);
1104 2230
1105 cp->last_status_idx = status_idx; 2231 cp->last_status_idx = status_idx;
1106 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num | 2232 CNIC_WR(dev, BNX2_PCICFG_INT_ACK_CMD, cp->int_num |
@@ -1125,6 +2251,91 @@ static irqreturn_t cnic_irq(int irq, void *dev_instance)
1125 return IRQ_HANDLED; 2251 return IRQ_HANDLED;
1126} 2252}
1127 2253
/* Acknowledge a status-block index to the IGU by assembling an
 * igu_ack_register and writing it to the per-port interrupt-ack
 * command register.  'op' selects the interrupt mode (e.g. enable or
 * disable) and 'update' asks the IGU to latch the new index.
 */
static inline void cnic_ack_bnx2x_int(struct cnic_dev *dev, u8 id, u8 storm,
				      u16 index, u8 op, u8 update)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 hc_addr = (HC_REG_COMMAND_REG + CNIC_PORT(cp) * 32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
		((id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
		 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
		 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
		 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	/* The whole register image is written as one 32-bit value. */
	CNIC_WR(dev, hc_addr, (*(u32 *)&igu_ack));
}
2271
2272static void cnic_ack_bnx2x_msix(struct cnic_dev *dev)
2273{
2274 struct cnic_local *cp = dev->cnic_priv;
2275
2276 cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID, 0,
2277 IGU_INT_DISABLE, 0);
2278}
2279
/* Tasklet bottom half for bnx2x: drain completed KCQEs from the iSCSI
 * event queue until the software producer catches up with the hardware
 * producer, then write back the consumer index and re-enable the
 * interrupt via the IGU ack.
 */
static void cnic_service_bnx2x_bh(unsigned long data)
{
	struct cnic_dev *dev = (struct cnic_dev *) data;
	struct cnic_local *cp = dev->cnic_priv;
	u16 hw_prod, sw_prod;
	struct cstorm_status_block_c *sblk =
		&cp->bnx2x_status_blk->c_status_block;
	u32 status_idx = sblk->status_block_index;
	int kcqe_cnt;

	if (unlikely(!test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		return;

	hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS];
	hw_prod = cp->hw_idx(hw_prod);	/* skip the "next page" slot */
	sw_prod = cp->kcq_prod_idx;
	while (sw_prod != hw_prod) {
		kcqe_cnt = cnic_get_kcqes(dev, hw_prod, &sw_prod);
		if (kcqe_cnt == 0)
			goto done;

		service_kcqes(dev, kcqe_cnt);

		/* Tell compiler that sblk fields can change. */
		barrier();
		/* No new completions arrived while we were servicing. */
		if (status_idx == sblk->status_block_index)
			break;

		/* Re-read the producer and loop for the new work. */
		status_idx = sblk->status_block_index;
		hw_prod = sblk->index_values[HC_INDEX_C_ISCSI_EQ_CONS];
		hw_prod = cp->hw_idx(hw_prod);
	}

done:
	/* Publish the new consumer index and re-enable the interrupt. */
	CNIC_WR16(dev, cp->kcq_io_addr, sw_prod + MAX_KCQ_IDX);
	cnic_ack_bnx2x_int(dev, cp->status_blk_num, CSTORM_ID,
			   status_idx, IGU_INT_ENABLE, 1);

	cp->kcq_prod_idx = sw_prod;
	return;
}
2321
/* Interrupt-context service entry point for bnx2x (called by the
 * ethernet driver).  Prefetches the status block and next KCQE, defers
 * the actual event-queue processing to the tasklet, and checks the L2
 * rings for uio notification.
 */
static int cnic_service_bnx2x(void *data, void *status_blk)
{
	struct cnic_dev *dev = data;
	struct cnic_local *cp = dev->cnic_priv;
	u16 prod = cp->kcq_prod_idx & MAX_KCQ_IDX;

	prefetch(cp->status_blk);
	prefetch(&cp->kcq[KCQ_PG(prod)][KCQ_IDX(prod)]);

	if (likely(test_bit(CNIC_F_CNIC_UP, &dev->flags)))
		tasklet_schedule(&cp->cnic_irq_task);

	cnic_chk_pkt_rings(cp);

	return 0;
}
2338
1128static void cnic_ulp_stop(struct cnic_dev *dev) 2339static void cnic_ulp_stop(struct cnic_dev *dev)
1129{ 2340{
1130 struct cnic_local *cp = dev->cnic_priv; 2341 struct cnic_local *cp = dev->cnic_priv;
@@ -1197,6 +2408,19 @@ static int cnic_ctl(void *data, struct cnic_ctl_info *info)
1197 2408
1198 cnic_put(dev); 2409 cnic_put(dev);
1199 break; 2410 break;
2411 case CNIC_CTL_COMPLETION_CMD: {
2412 u32 cid = BNX2X_SW_CID(info->data.comp.cid);
2413 u32 l5_cid;
2414 struct cnic_local *cp = dev->cnic_priv;
2415
2416 if (cnic_get_l5_cid(cp, cid, &l5_cid) == 0) {
2417 struct cnic_context *ctx = &cp->ctx_tbl[l5_cid];
2418
2419 ctx->wait_cond = 1;
2420 wake_up(&ctx->waitq);
2421 }
2422 break;
2423 }
1200 default: 2424 default:
1201 return -EINVAL; 2425 return -EINVAL;
1202 } 2426 }
@@ -1872,6 +3096,8 @@ static void cnic_cm_process_kcqe(struct cnic_dev *dev, struct kcqe *kcqe)
1872 /* fall through */ 3096 /* fall through */
1873 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP: 3097 case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
1874 case L4_KCQE_OPCODE_VALUE_RESET_COMP: 3098 case L4_KCQE_OPCODE_VALUE_RESET_COMP:
3099 case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
3100 case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
1875 cp->close_conn(csk, opcode); 3101 cp->close_conn(csk, opcode);
1876 break; 3102 break;
1877 3103
@@ -1957,6 +3183,76 @@ static int cnic_cm_init_bnx2_hw(struct cnic_dev *dev)
1957 return 0; 3183 return 0;
1958} 3184}
1959 3185
/* Drive the multi-step bnx2x connection teardown state machine.  Each
 * completion opcode triggers the next ramrod in the sequence:
 * close/reset completion -> SEARCHER_DELETE -> TERMINATE_OFFLOAD ->
 * final local cleanup and upcall to the ULP.
 */
static void cnic_close_bnx2x_conn(struct cnic_sock *csk, u32 opcode)
{
	struct cnic_dev *dev = csk->dev;
	struct cnic_local *cp = dev->cnic_priv;
	struct cnic_context *ctx = &cp->ctx_tbl[csk->l5_cid];
	union l5cm_specific_data l5_data;
	u32 cmd = 0;
	int close_complete = 0;

	switch (opcode) {
	case L4_KCQE_OPCODE_VALUE_RESET_RECEIVED:
	case L4_KCQE_OPCODE_VALUE_CLOSE_COMP:
	case L4_KCQE_OPCODE_VALUE_RESET_COMP:
		if (cnic_ready_to_close(csk, opcode))
			cmd = L5CM_RAMROD_CMD_ID_SEARCHER_DELETE;
		break;
	case L5CM_RAMROD_CMD_ID_SEARCHER_DELETE:
		cmd = L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD;
		break;
	case L5CM_RAMROD_CMD_ID_TERMINATE_OFFLOAD:
		close_complete = 1;
		break;
	}
	if (cmd) {
		/* Advance the teardown by firing the next ramrod. */
		memset(&l5_data, 0, sizeof(l5_data));

		cnic_submit_kwqe_16(dev, cmd, csk->cid, ISCSI_CONNECTION_TYPE,
				    &l5_data);
	} else if (close_complete) {
		/* Teardown finished: record time, free, notify the ULP. */
		ctx->timestamp = jiffies;
		cnic_close_conn(csk);
		cnic_cm_upcall(cp, csk, csk->state);
	}
}
3220
/* Intentionally empty: no bnx2x-specific connection-manager teardown
 * is performed (counterpart to cnic_cm_init_bnx2x_hw()).
 */
static void cnic_cm_stop_bnx2x_hw(struct cnic_dev *dev)
{
}
3224
/* One-time connection-manager setup for bnx2x: program the local MAC,
 * default TCP options (timestamps on, VLAN 0), delayed-ack parameters,
 * TTL/TOS/window-scale defaults, the SWS timer and the max congestion
 * window into the per-function storm internal memories.
 */
static int cnic_cm_init_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int func = CNIC_FUNC(cp);

	cnic_init_bnx2x_mac(dev);
	cnic_bnx2x_set_tcp_timestamp(dev, 1);

	CNIC_WR16(dev, BAR_XSTRORM_INTMEM +
		  XSTORM_ISCSI_LOCAL_VLAN_OFFSET(func), 0);

	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_ENABLED_OFFSET(func), 1);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_GLOBAL_DEL_ACK_COUNTER_MAX_COUNT_OFFSET(func),
		DEF_MAX_DA_COUNT);

	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TTL_OFFSET(func), DEF_TTL);
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_TOS_OFFSET(func), DEF_TOS);
	/* Advertised window scale of 2. */
	CNIC_WR8(dev, BAR_XSTRORM_INTMEM +
		 XSTORM_ISCSI_TCP_VARS_ADV_WND_SCL_OFFSET(func), 2);
	CNIC_WR(dev, BAR_XSTRORM_INTMEM +
		XSTORM_TCP_TX_SWS_TIMER_VAL_OFFSET(func), DEF_SWS_TIMER);

	CNIC_WR(dev, BAR_TSTRORM_INTMEM + TSTORM_TCP_MAX_CWND_OFFSET(func),
		DEF_MAX_CWND);
	return 0;
}
3255
1960static int cnic_cm_open(struct cnic_dev *dev) 3256static int cnic_cm_open(struct cnic_dev *dev)
1961{ 3257{
1962 struct cnic_local *cp = dev->cnic_priv; 3258 struct cnic_local *cp = dev->cnic_priv;
@@ -2464,6 +3760,417 @@ static int cnic_start_bnx2_hw(struct cnic_dev *dev)
2464 return 0; 3760 return 0;
2465} 3761}
2466 3762
3763static void cnic_setup_bnx2x_context(struct cnic_dev *dev)
3764{
3765 struct cnic_local *cp = dev->cnic_priv;
3766 struct cnic_eth_dev *ethdev = cp->ethdev;
3767 u32 start_offset = ethdev->ctx_tbl_offset;
3768 int i;
3769
3770 for (i = 0; i < cp->ctx_blks; i++) {
3771 struct cnic_ctx *ctx = &cp->ctx_arr[i];
3772 dma_addr_t map = ctx->mapping;
3773
3774 if (cp->ctx_align) {
3775 unsigned long mask = cp->ctx_align - 1;
3776
3777 map = (map + mask) & ~mask;
3778 }
3779
3780 cnic_ctx_tbl_wr(dev, start_offset + i, map);
3781 }
3782}
3783
3784static int cnic_init_bnx2x_irq(struct cnic_dev *dev)
3785{
3786 struct cnic_local *cp = dev->cnic_priv;
3787 struct cnic_eth_dev *ethdev = cp->ethdev;
3788 int err = 0;
3789
3790 tasklet_init(&cp->cnic_irq_task, &cnic_service_bnx2x_bh,
3791 (unsigned long) dev);
3792 if (ethdev->drv_state & CNIC_DRV_STATE_USING_MSIX) {
3793 err = request_irq(ethdev->irq_arr[0].vector, cnic_irq, 0,
3794 "cnic", dev);
3795 if (err)
3796 tasklet_disable(&cp->cnic_irq_task);
3797 }
3798 return err;
3799}
3800
/* Enable host coalescing for the iSCSI EQ consumer index on our status
 * block: set its HC timeout and clear the disable flag.
 */
static void cnic_enable_bnx2x_int(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 sb_id = cp->status_blk_num;
	int port = CNIC_PORT(cp);

	/* 64 / 12 — presumably a ~64us timeout in 12us HC ticks;
	 * TODO confirm against bnx2x HC documentation.
	 */
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
			CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
			HC_INDEX_C_ISCSI_EQ_CONS),
			64 / 12);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
			HC_INDEX_C_ISCSI_EQ_CONS), 0);
}
3815
/* Intentionally empty: no synchronous interrupt-disable step is
 * performed for bnx2x (bnx2 counterpart does real work).
 */
static void cnic_disable_bnx2x_int_sync(struct cnic_dev *dev)
{
}
3819
/* Initialize the iSCSI L2 TX ring for bnx2x: pre-build the BD chain in
 * the first page of cp->l2_ring (3 BDs per packet, 16 bytes each from
 * the shared l2 buffer), program the ring base into the ETH context,
 * clear the XSTORM per-client statistics, and point tx_cons_ptr at the
 * default status block's iSCSI CQ consumer index.
 */
static void cnic_init_bnx2x_tx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	union eth_tx_bd_types *txbd = (union eth_tx_bd_types *) cp->l2_ring;
	struct eth_context *context;
	struct regpair context_addr;
	dma_addr_t buf_map;
	int func = CNIC_FUNC(cp);
	int port = CNIC_PORT(cp);
	int i;
	int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
	u32 val;

	memset(txbd, 0, BCM_PAGE_SIZE);

	buf_map = cp->l2_buf_map;
	for (i = 0; i < MAX_TX_DESC_CNT; i += 3, txbd += 3) {
		struct eth_tx_start_bd *start_bd = &txbd->start_bd;
		struct eth_tx_bd *reg_bd = &((txbd + 2)->reg_bd);

		start_bd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		start_bd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
		reg_bd->addr_hi = start_bd->addr_hi;
		/* Regular BD covers the 16 bytes after the start BD's. */
		reg_bd->addr_lo = start_bd->addr_lo + 0x10;
		start_bd->nbytes = cpu_to_le16(0x10);
		start_bd->nbd = cpu_to_le16(3);
		start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
		start_bd->general_data = (UNICAST_ADDRESS <<
			ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
		start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);

	}
	context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 1, &context_addr);

	/* Chain the ring back to itself and program its base address. */
	val = (u64) cp->l2_ring_map >> 32;
	txbd->next_bd.addr_hi = cpu_to_le32(val);

	context->xstorm_st_context.tx_bd_page_base_hi = val;

	val = (u64) cp->l2_ring_map & 0xffffffff;
	txbd->next_bd.addr_lo = cpu_to_le32(val);

	context->xstorm_st_context.tx_bd_page_base_lo = val;

	context->cstorm_st_context.sb_index_number =
		HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS;
	context->cstorm_st_context.status_block_id = BNX2X_DEF_SB_ID;

	context->xstorm_st_context.statistics_data = (cli |
				XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);

	context->xstorm_ag_context.cdu_reserved =
		CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func),
					CDU_REGION_NUMBER_XCM_AG,
					ETH_CONNECTION_TYPE);

	/* reset xstorm per client statistics */
	val = BAR_XSTRORM_INTMEM +
	      XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
	for (i = 0; i < sizeof(struct xstorm_per_client_stats) / 4; i++)
		CNIC_WR(dev, val + i * 4, 0);

	cp->tx_cons_ptr =
		&cp->bnx2x_def_status_blk->c_def_status_block.index_values[
			HC_INDEX_DEF_C_ETH_ISCSI_CQ_CONS];
}
3886
/* Initialize the iSCSI L2 RX ring for bnx2x: fill the RX BD page
 * (second page of cp->l2_ring) with buffer addresses, program the BD
 * and CQE ring bases into the ETH context and USTORM, configure the
 * TSTORM client parameters, clear per-client statistics, and point
 * rx_cons_ptr at the default status block's RX CQ consumer index.
 */
static void cnic_init_bnx2x_rx_ring(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	struct eth_rx_bd *rxbd = (struct eth_rx_bd *) (cp->l2_ring +
				BCM_PAGE_SIZE);
	struct eth_rx_cqe_next_page *rxcqe = (struct eth_rx_cqe_next_page *)
				(cp->l2_ring + (2 * BCM_PAGE_SIZE));
	struct eth_context *context;
	struct regpair context_addr;
	int i;
	int port = CNIC_PORT(cp);
	int func = CNIC_FUNC(cp);
	int cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
	u32 val;
	struct tstorm_eth_client_config tstorm_client = {0};

	for (i = 0; i < BNX2X_MAX_RX_DESC_CNT; i++, rxbd++) {
		dma_addr_t buf_map;
		/* Cycle through the l2_rx_ring_size buffers, skipping
		 * buffer 0 (reserved for TX).
		 */
		int n = (i % cp->l2_rx_ring_size) + 1;

		buf_map = cp->l2_buf_map + (n * cp->l2_single_buf_size);
		rxbd->addr_hi = cpu_to_le32((u64) buf_map >> 32);
		rxbd->addr_lo = cpu_to_le32(buf_map & 0xffffffff);
	}
	context = cnic_get_bnx2x_ctx(dev, BNX2X_ISCSI_L2_CID, 0, &context_addr);

	/* Chain the BD ring back to itself and program its base. */
	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) >> 32;
	rxbd->addr_hi = cpu_to_le32(val);

	context->ustorm_st_context.common.bd_page_base_hi = val;

	val = (u64) (cp->l2_ring_map + BCM_PAGE_SIZE) & 0xffffffff;
	rxbd->addr_lo = cpu_to_le32(val);

	context->ustorm_st_context.common.bd_page_base_lo = val;

	context->ustorm_st_context.common.sb_index_numbers =
						BNX2X_ISCSI_RX_SB_INDEX_NUM;
	context->ustorm_st_context.common.clientId = cli;
	context->ustorm_st_context.common.status_block_id = BNX2X_DEF_SB_ID;
	context->ustorm_st_context.common.flags =
		USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS;
	context->ustorm_st_context.common.statistics_counter_id = cli;
	context->ustorm_st_context.common.mc_alignment_log_size = 0;
	context->ustorm_st_context.common.bd_buff_size =
						cp->l2_single_buf_size;

	context->ustorm_ag_context.cdu_usage =
		CDU_RSRVD_VALUE_TYPE_A(BNX2X_HW_CID(BNX2X_ISCSI_L2_CID, func),
					CDU_REGION_NUMBER_UCM_AG,
					ETH_CONNECTION_TYPE);

	/* Chain the CQE ring (third page) back to itself too. */
	rxcqe += BNX2X_MAX_RCQ_DESC_CNT;
	val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) >> 32;
	rxcqe->addr_hi = cpu_to_le32(val);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_CQE_PAGE_BASE_OFFSET(port, cli) + 4, val);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_CQE_PAGE_NEXT_OFFSET(port, cli) + 4, val);

	val = (u64) (cp->l2_ring_map + (2 * BCM_PAGE_SIZE)) & 0xffffffff;
	rxcqe->addr_lo = cpu_to_le32(val);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_CQE_PAGE_BASE_OFFSET(port, cli), val);

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_CQE_PAGE_NEXT_OFFSET(port, cli), val);

	/* client tstorm info */
	tstorm_client.mtu = cp->l2_single_buf_size - 14;
	tstorm_client.config_flags =
			(TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE |
			TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE);
	tstorm_client.statistics_counter_id = cli;

	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		   TSTORM_CLIENT_CONFIG_OFFSET(port, cli),
		   ((u32 *)&tstorm_client)[0]);
	CNIC_WR(dev, BAR_TSTRORM_INTMEM +
		   TSTORM_CLIENT_CONFIG_OFFSET(port, cli) + 4,
		   ((u32 *)&tstorm_client)[1]);

	/* reset tstorm per client statistics */
	val = BAR_TSTRORM_INTMEM +
	      TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
	for (i = 0; i < sizeof(struct tstorm_per_client_stats) / 4; i++)
		CNIC_WR(dev, val + i * 4, 0);

	/* reset ustorm per client statistics */
	val = BAR_USTRORM_INTMEM +
	      USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cli);
	for (i = 0; i < sizeof(struct ustorm_per_client_stats) / 4; i++)
		CNIC_WR(dev, val + i * 4, 0);

	cp->rx_cons_ptr =
		&cp->bnx2x_def_status_blk->u_def_status_block.index_values[
			HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS];
}
3988
/* Read the iSCSI MAC address and the licensed maximum iSCSI connection
 * count out of the bnx2x shared memory region.  On E1H multi-function
 * devices, zero max_iscsi_conn unless this function is configured for
 * the iSCSI protocol.
 */
static void cnic_get_bnx2x_iscsi_info(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u32 base, addr, val;
	int port = CNIC_PORT(cp);

	dev->max_iscsi_conn = 0;
	base = CNIC_RD(dev, MISC_REG_SHARED_MEM_ADDR);
	/* Sanity-check the shared memory base before dereferencing. */
	if (base < 0xa0000 || base >= 0xc0000)
		return;

	val = BNX2X_SHMEM_ADDR(base,
		dev_info.port_hw_config[port].iscsi_mac_upper);

	dev->mac_addr[0] = (u8) (val >> 8);
	dev->mac_addr[1] = (u8) val;

	val = BNX2X_SHMEM_ADDR(base,
		dev_info.port_hw_config[port].iscsi_mac_lower);

	dev->mac_addr[2] = (u8) (val >> 24);
	dev->mac_addr[3] = (u8) (val >> 16);
	dev->mac_addr[4] = (u8) (val >> 8);
	dev->mac_addr[5] = (u8) val;

	addr = BNX2X_SHMEM_ADDR(base, validity_map[port]);
	val = CNIC_RD(dev, addr);

	if (!(val & SHR_MEM_VALIDITY_LIC_NO_KEY_IN_EFFECT)) {
		u16 val16;

		addr = BNX2X_SHMEM_ADDR(base,
				drv_lic_key[port].max_iscsi_init_conn);
		val16 = CNIC_RD16(dev, addr);

		/* License values are stored XOR-obfuscated. */
		if (val16)
			val16 ^= 0x1e1e;
		dev->max_iscsi_conn = val16;
	}
	if (BNX2X_CHIP_IS_E1H(cp->chip_id)) {
		int func = CNIC_FUNC(cp);

		addr = BNX2X_SHMEM_ADDR(base,
				mf_cfg.func_mf_config[func].e1hov_tag);
		val = CNIC_RD(dev, addr);
		val &= FUNC_MF_CFG_E1HOV_TAG_MASK;
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
			addr = BNX2X_SHMEM_ADDR(base,
				mf_cfg.func_mf_config[func].config);
			val = CNIC_RD(dev, addr);
			val &= FUNC_MF_CFG_PROTOCOL_MASK;
			if (val != FUNC_MF_CFG_PROTOCOL_ISCSI)
				dev->max_iscsi_conn = 0;
		}
	}
}
4045
/* Bring up the bnx2x-specific cnic hardware state: allocate the cid
 * table, program the single iSCSI event queue (producer/consumer,
 * page-chain addresses, status-block binding), the connection-buffer
 * page table and global buffer address, install the context blocks,
 * verify the EQ consumer starts at 0, hook up the IRQ, and initialize
 * the L2 TX/RX rings.  Returns 0 or a negative errno.
 */
static int cnic_start_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	int func = CNIC_FUNC(cp), ret, i;
	int port = CNIC_PORT(cp);
	u16 eq_idx;
	u8 sb_id = cp->status_blk_num;

	ret = cnic_init_id_tbl(&cp->cid_tbl, MAX_ISCSI_TBL_SZ,
			       BNX2X_ISCSI_START_CID);

	if (ret)
		return -ENOMEM;

	cp->kcq_io_addr = BAR_CSTRORM_INTMEM +
			  CSTORM_ISCSI_EQ_PROD_OFFSET(func, 0);
	cp->kcq_prod_idx = 0;

	cnic_get_bnx2x_iscsi_info(dev);

	/* Only 1 EQ */
	CNIC_WR16(dev, cp->kcq_io_addr, MAX_KCQ_IDX);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_CONS_OFFSET(func, 0), 0);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0),
		cp->kcq_info.pg_map_arr[1] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_OFFSET(func, 0) + 4,
		(u64) cp->kcq_info.pg_map_arr[1] >> 32);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0),
		cp->kcq_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_EQE_ADDR_OFFSET(func, 0) + 4,
		(u64) cp->kcq_info.pg_map_arr[0] >> 32);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_NEXT_PAGE_ADDR_VALID_OFFSET(func, 0), 1);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_NUM_OFFSET(func, 0), cp->status_blk_num);
	CNIC_WR8(dev, BAR_CSTRORM_INTMEM +
		CSTORM_ISCSI_EQ_SB_INDEX_OFFSET(func, 0),
		HC_INDEX_C_ISCSI_EQ_CONS);

	/* Program the connection-buffer page table (64-bit entries). */
	for (i = 0; i < cp->conn_buf_info.num_pages; i++) {
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i),
			cp->conn_buf_info.pgtbl[2 * i]);
		CNIC_WR(dev, BAR_TSTRORM_INTMEM +
			TSTORM_ISCSI_CONN_BUF_PBL_OFFSET(func, i) + 4,
			cp->conn_buf_info.pgtbl[(2 * i) + 1]);
	}

	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func),
		cp->gbl_buf_info.pg_map_arr[0] & 0xffffffff);
	CNIC_WR(dev, BAR_USTRORM_INTMEM +
		USTORM_ISCSI_GLOBAL_BUF_PHYS_ADDR_OFFSET(func) + 4,
		(u64) cp->gbl_buf_info.pg_map_arr[0] >> 32);

	cnic_setup_bnx2x_context(dev);

	/* The EQ consumer must start clean or something else owns it. */
	eq_idx = CNIC_RD16(dev, BAR_CSTRORM_INTMEM +
			   CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) +
			   offsetof(struct cstorm_status_block_c,
				    index_values[HC_INDEX_C_ISCSI_EQ_CONS]));
	if (eq_idx != 0) {
		printk(KERN_ERR PFX "%s: EQ cons index %x != 0\n",
		       dev->netdev->name, eq_idx);
		return -EBUSY;
	}
	ret = cnic_init_bnx2x_irq(dev);
	if (ret)
		return ret;

	cnic_init_bnx2x_tx_ring(dev);
	cnic_init_bnx2x_rx_ring(dev);

	return 0;
}
4126
/* Initialize the L2 rings for whichever chip class this device is.
 * For bnx2x this also publishes the initial RX producers through the
 * USTORM doorbell and fires the CLIENT_SETUP ramrod before enabling
 * the ring.
 */
static void cnic_init_rings(struct cnic_dev *dev)
{
	if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
		cnic_init_bnx2_tx_ring(dev);
		cnic_init_bnx2_rx_ring(dev);
	} else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
		struct cnic_local *cp = dev->cnic_priv;
		struct cnic_eth_dev *ethdev = cp->ethdev;
		u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
		union l5cm_specific_data l5_data;
		struct ustorm_eth_rx_producers rx_prods = {0};
		void __iomem *doorbell;
		int i;

		rx_prods.bd_prod = 0;
		rx_prods.cqe_prod = BNX2X_MAX_RCQ_DESC_CNT;
		/* Make sure rx_prods is fully written before the
		 * doorbell writes below make it visible to hardware.
		 */
		barrier();

		doorbell = ethdev->io_base2 + BAR_USTRORM_INTMEM +
			USTORM_RX_PRODS_OFFSET(CNIC_PORT(cp), cli);

		for (i = 0; i < sizeof(struct ustorm_eth_rx_producers) / 4; i++)
			writel(((u32 *) &rx_prods)[i], doorbell + i * 4);

		cnic_init_bnx2x_tx_ring(dev);
		cnic_init_bnx2x_rx_ring(dev);

		l5_data.phy_address.lo = cli;
		l5_data.phy_address.hi = 0;
		cnic_submit_kwqe_16(dev, RAMROD_CMD_ID_ETH_CLIENT_SETUP,
			BNX2X_ISCSI_L2_CID, ETH_CONNECTION_TYPE, &l5_data);
		cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 1);
	}
}
4161
4162static void cnic_shutdown_rings(struct cnic_dev *dev)
4163{
4164 if (test_bit(CNIC_F_BNX2_CLASS, &dev->flags)) {
4165 cnic_shutdown_bnx2_rx_ring(dev);
4166 } else if (test_bit(CNIC_F_BNX2X_CLASS, &dev->flags)) {
4167 struct cnic_local *cp = dev->cnic_priv;
4168 u32 cli = BNX2X_ISCSI_CL_ID(CNIC_E1HVN(cp));
4169
4170 cnic_ring_ctl(dev, BNX2X_ISCSI_L2_CID, cli, 0);
4171 }
4172}
4173
2467static int cnic_register_netdev(struct cnic_dev *dev) 4174static int cnic_register_netdev(struct cnic_dev *dev)
2468{ 4175{
2469 struct cnic_local *cp = dev->cnic_priv; 4176 struct cnic_local *cp = dev->cnic_priv;
@@ -2554,6 +4261,22 @@ static void cnic_stop_bnx2_hw(struct cnic_dev *dev)
2554 cnic_free_resc(dev); 4261 cnic_free_resc(dev);
2555} 4262}
2556 4263
4264
/* Tear down the bnx2x-specific cnic hardware state: release the IRQ,
 * zero the iSCSI EQ consumer index in our status block, and free all
 * cnic resources.
 */
static void cnic_stop_bnx2x_hw(struct cnic_dev *dev)
{
	struct cnic_local *cp = dev->cnic_priv;
	u8 sb_id = cp->status_blk_num;
	int port = CNIC_PORT(cp);

	cnic_free_irq(dev);
	CNIC_WR16(dev, BAR_CSTRORM_INTMEM +
		  CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id) +
		  offsetof(struct cstorm_status_block_c,
			   index_values[HC_INDEX_C_ISCSI_EQ_CONS]),
		  0);
	cnic_free_resc(dev);
}
4279
2557static void cnic_stop_hw(struct cnic_dev *dev) 4280static void cnic_stop_hw(struct cnic_dev *dev)
2558{ 4281{
2559 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) { 4282 if (test_bit(CNIC_F_CNIC_UP, &dev->flags)) {
@@ -2685,6 +4408,57 @@ cnic_err:
2685 return NULL; 4408 return NULL;
2686} 4409}
2687 4410
/* Create a cnic_dev for a bnx2x netdev: probe the bnx2x driver via
 * symbol_get for its cnic_eth_dev, allocate the cnic device, and wire
 * up all bnx2x-specific operation callbacks.  Returns the new device
 * (holding a reference on the netdev) or NULL on any failure.
 */
static struct cnic_dev *init_bnx2x_cnic(struct net_device *dev)
{
	struct pci_dev *pdev;
	struct cnic_dev *cdev;
	struct cnic_local *cp;
	struct cnic_eth_dev *ethdev = NULL;
	struct cnic_eth_dev *(*probe)(struct net_device *) = NULL;

	/* symbol_get keeps this working whether bnx2x is built in,
	 * a loaded module, or absent.
	 */
	probe = symbol_get(bnx2x_cnic_probe);
	if (probe) {
		ethdev = (*probe)(dev);
		symbol_put(bnx2x_cnic_probe);
	}
	if (!ethdev)
		return NULL;

	pdev = ethdev->pdev;
	if (!pdev)
		return NULL;

	dev_hold(dev);
	cdev = cnic_alloc_dev(dev, pdev);
	if (cdev == NULL) {
		dev_put(dev);
		return NULL;
	}

	set_bit(CNIC_F_BNX2X_CLASS, &cdev->flags);
	cdev->submit_kwqes = cnic_submit_bnx2x_kwqes;

	cp = cdev->cnic_priv;
	cp->ethdev = ethdev;
	cdev->pcidev = pdev;

	cp->cnic_ops = &cnic_bnx2x_ops;
	cp->start_hw = cnic_start_bnx2x_hw;
	cp->stop_hw = cnic_stop_bnx2x_hw;
	cp->setup_pgtbl = cnic_setup_page_tbl_le;
	cp->alloc_resc = cnic_alloc_bnx2x_resc;
	cp->free_resc = cnic_free_resc;
	cp->start_cm = cnic_cm_init_bnx2x_hw;
	cp->stop_cm = cnic_cm_stop_bnx2x_hw;
	cp->enable_int = cnic_enable_bnx2x_int;
	cp->disable_int_sync = cnic_disable_bnx2x_int_sync;
	cp->ack_int = cnic_ack_bnx2x_msix;
	cp->close_conn = cnic_close_bnx2x_conn;
	cp->next_idx = cnic_bnx2x_next_idx;
	cp->hw_idx = cnic_bnx2x_hw_idx;
	return cdev;
}
4461
2688static struct cnic_dev *is_cnic_dev(struct net_device *dev) 4462static struct cnic_dev *is_cnic_dev(struct net_device *dev)
2689{ 4463{
2690 struct ethtool_drvinfo drvinfo; 4464 struct ethtool_drvinfo drvinfo;
@@ -2696,6 +4470,8 @@ static struct cnic_dev *is_cnic_dev(struct net_device *dev)
2696 4470
2697 if (!strcmp(drvinfo.driver, "bnx2")) 4471 if (!strcmp(drvinfo.driver, "bnx2"))
2698 cdev = init_bnx2_cnic(dev); 4472 cdev = init_bnx2_cnic(dev);
4473 if (!strcmp(drvinfo.driver, "bnx2x"))
4474 cdev = init_bnx2x_cnic(dev);
2699 if (cdev) { 4475 if (cdev) {
2700 write_lock(&cnic_dev_lock); 4476 write_lock(&cnic_dev_lock);
2701 list_add(&cdev->list, &cnic_dev_list); 4477 list_add(&cdev->list, &cnic_dev_list);
diff --git a/drivers/net/cnic.h b/drivers/net/cnic.h
index a94b302bb464..241d09acc0d4 100644
--- a/drivers/net/cnic.h
+++ b/drivers/net/cnic.h
@@ -227,6 +227,7 @@ struct cnic_local {
227 void *status_blk; 227 void *status_blk;
228 struct status_block_msix *bnx2_status_blk; 228 struct status_block_msix *bnx2_status_blk;
229 struct host_status_block *bnx2x_status_blk; 229 struct host_status_block *bnx2x_status_blk;
230 struct host_def_status_block *bnx2x_def_status_blk;
230 231
231 u32 status_blk_num; 232 u32 status_blk_num;
232 u32 int_num; 233 u32 int_num;
@@ -258,6 +259,7 @@ struct cnic_local {
258 struct cnic_ctx *ctx_arr; 259 struct cnic_ctx *ctx_arr;
259 int ctx_blks; 260 int ctx_blks;
260 int ctx_blk_size; 261 int ctx_blk_size;
262 unsigned long ctx_align;
261 int cids_per_blk; 263 int cids_per_blk;
262 264
263 u32 chip_id; 265 u32 chip_id;
@@ -290,11 +292,73 @@ struct bnx2x_bd_chain_next {
290 u8 reserved[8]; 292 u8 reserved[8];
291}; 293};
292 294
295#define ISCSI_DEFAULT_MAX_OUTSTANDING_R2T (1)
296
293#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN (ISCSI_KCQE_OPCODE_UPDATE_CONN) 297#define ISCSI_RAMROD_CMD_ID_UPDATE_CONN (ISCSI_KCQE_OPCODE_UPDATE_CONN)
294#define ISCSI_RAMROD_CMD_ID_INIT (ISCSI_KCQE_OPCODE_INIT) 298#define ISCSI_RAMROD_CMD_ID_INIT (ISCSI_KCQE_OPCODE_INIT)
295 299
296#define CDU_REGION_NUMBER_XCM_AG 2 300#define CDU_REGION_NUMBER_XCM_AG 2
297#define CDU_REGION_NUMBER_UCM_AG 4 301#define CDU_REGION_NUMBER_UCM_AG 4
298 302
303#define CDU_VALID_DATA(_cid, _region, _type) \
304 (((_cid) << 8) | (((_region)&0xf)<<4) | (((_type)&0xf)))
305
306#define CDU_CRC8(_cid, _region, _type) \
307 (calc_crc8(CDU_VALID_DATA(_cid, _region, _type), 0xff))
308
309#define CDU_RSRVD_VALUE_TYPE_A(_cid, _region, _type) \
310 (0x80 | ((CDU_CRC8(_cid, _region, _type)) & 0x7f))
311
312#define BNX2X_CONTEXT_MEM_SIZE 1024
313#define BNX2X_FCOE_CID 16
314
315/* iSCSI client IDs are 17, 19, 21, 23 */
316#define BNX2X_ISCSI_BASE_CL_ID 17
317#define BNX2X_ISCSI_CL_ID(vn) (BNX2X_ISCSI_BASE_CL_ID + ((vn) << 1))
318
319#define BNX2X_ISCSI_L2_CID 17
320#define BNX2X_ISCSI_START_CID 18
321#define BNX2X_ISCSI_NUM_CONNECTIONS 128
322#define BNX2X_ISCSI_TASK_CONTEXT_SIZE 128
323#define BNX2X_ISCSI_MAX_PENDING_R2TS 4
324#define BNX2X_ISCSI_R2TQE_SIZE 8
325#define BNX2X_ISCSI_HQ_BD_SIZE 64
326#define BNX2X_ISCSI_CONN_BUF_SIZE 64
327#define BNX2X_ISCSI_GLB_BUF_SIZE 64
328#define BNX2X_ISCSI_PBL_NOT_CACHED 0xff
329#define BNX2X_ISCSI_PDU_HEADER_NOT_CACHED 0xff
330#define BNX2X_HW_CID(x, func) ((x) | (((func) % PORT_MAX) << 23) | \
331 (((func) >> 1) << 17))
332#define BNX2X_SW_CID(x) (x & 0x1ffff)
333#define BNX2X_CHIP_NUM_57711 0x164f
334#define BNX2X_CHIP_NUM_57711E 0x1650
335#define BNX2X_CHIP_NUM(x) (x >> 16)
336#define BNX2X_CHIP_IS_57711(x) \
337 (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711)
338#define BNX2X_CHIP_IS_57711E(x) \
339 (BNX2X_CHIP_NUM(x) == BNX2X_CHIP_NUM_57711E)
340#define BNX2X_CHIP_IS_E1H(x) \
341 (BNX2X_CHIP_IS_57711(x) || BNX2X_CHIP_IS_57711E(x))
342#define IS_E1H_OFFSET BNX2X_CHIP_IS_E1H(cp->chip_id)
343
344#define BNX2X_RX_DESC_CNT (BCM_PAGE_SIZE / sizeof(struct eth_rx_bd))
345#define BNX2X_MAX_RX_DESC_CNT (BNX2X_RX_DESC_CNT - 2)
346#define BNX2X_RCQ_DESC_CNT (BCM_PAGE_SIZE / sizeof(union eth_rx_cqe))
347#define BNX2X_MAX_RCQ_DESC_CNT (BNX2X_RCQ_DESC_CNT - 1)
348
349#define BNX2X_DEF_SB_ID 16
350
351#define BNX2X_ISCSI_RX_SB_INDEX_NUM \
352 ((HC_INDEX_DEF_U_ETH_ISCSI_RX_CQ_CONS << \
353 USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER_SHIFT) & \
354 USTORM_ETH_ST_CONTEXT_CONFIG_CQE_SB_INDEX_NUMBER)
355
356#define BNX2X_SHMEM_ADDR(base, field) (base + \
357 offsetof(struct shmem_region, field))
358
359#define CNIC_PORT(cp) ((cp)->func % PORT_MAX)
360#define CNIC_FUNC(cp) ((cp)->func)
361#define CNIC_E1HVN(cp) ((cp)->func >> 1)
362
299#endif 363#endif
300 364
diff --git a/drivers/net/cnic_defs.h b/drivers/net/cnic_defs.h
index cee80f694457..9827b278dc7c 100644
--- a/drivers/net/cnic_defs.h
+++ b/drivers/net/cnic_defs.h
@@ -51,6 +51,9 @@
51#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0) 51#define L4_KCQE_COMPLETION_STATUS_SUCCESS (0)
52#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93) 52#define L4_KCQE_COMPLETION_STATUS_TIMEOUT (0x93)
53 53
54#define L4_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAIL (0x83)
55#define L4_KCQE_COMPLETION_STATUS_OFFLOADED_PG (0x89)
56
54#define L4_LAYER_CODE (4) 57#define L4_LAYER_CODE (4)
55#define L2_LAYER_CODE (2) 58#define L2_LAYER_CODE (2)
56 59
@@ -577,4 +580,1918 @@ struct l4_kwq_upload {
577 u32 reserved2[6]; 580 u32 reserved2[6];
578}; 581};
579 582
583/*
584 * bnx2x structures
585 */
586
587/*
588 * iSCSI context region, used only in iSCSI
589 */
590struct ustorm_iscsi_rq_db {
591 struct regpair pbl_base;
592 struct regpair curr_pbe;
593};
594
595/*
596 * iSCSI context region, used only in iSCSI
597 */
598struct ustorm_iscsi_r2tq_db {
599 struct regpair pbl_base;
600 struct regpair curr_pbe;
601};
602
603/*
604 * iSCSI context region, used only in iSCSI
605 */
606struct ustorm_iscsi_cq_db {
607#if defined(__BIG_ENDIAN)
608 u16 cq_sn;
609 u16 prod;
610#elif defined(__LITTLE_ENDIAN)
611 u16 prod;
612 u16 cq_sn;
613#endif
614 struct regpair curr_pbe;
615};
616
617/*
618 * iSCSI context region, used only in iSCSI
619 */
620struct rings_db {
621 struct ustorm_iscsi_rq_db rq;
622 struct ustorm_iscsi_r2tq_db r2tq;
623 struct ustorm_iscsi_cq_db cq[8];
624#if defined(__BIG_ENDIAN)
625 u16 rq_prod;
626 u16 r2tq_prod;
627#elif defined(__LITTLE_ENDIAN)
628 u16 r2tq_prod;
629 u16 rq_prod;
630#endif
631 struct regpair cq_pbl_base;
632};
633
634/*
635 * iSCSI context region, used only in iSCSI
636 */
637struct ustorm_iscsi_placement_db {
638 u32 sgl_base_lo;
639 u32 sgl_base_hi;
640 u32 local_sge_0_address_hi;
641 u32 local_sge_0_address_lo;
642#if defined(__BIG_ENDIAN)
643 u16 curr_sge_offset;
644 u16 local_sge_0_size;
645#elif defined(__LITTLE_ENDIAN)
646 u16 local_sge_0_size;
647 u16 curr_sge_offset;
648#endif
649 u32 local_sge_1_address_hi;
650 u32 local_sge_1_address_lo;
651#if defined(__BIG_ENDIAN)
652 u16 reserved6;
653 u16 local_sge_1_size;
654#elif defined(__LITTLE_ENDIAN)
655 u16 local_sge_1_size;
656 u16 reserved6;
657#endif
658#if defined(__BIG_ENDIAN)
659 u8 sgl_size;
660 u8 local_sge_index_2b;
661 u16 reserved7;
662#elif defined(__LITTLE_ENDIAN)
663 u16 reserved7;
664 u8 local_sge_index_2b;
665 u8 sgl_size;
666#endif
667 u32 rem_pdu;
668 u32 place_db_bitfield_1;
669#define USTORM_ISCSI_PLACEMENT_DB_REM_PDU_PAYLOAD (0xFFFFFF<<0)
670#define USTORM_ISCSI_PLACEMENT_DB_REM_PDU_PAYLOAD_SHIFT 0
671#define USTORM_ISCSI_PLACEMENT_DB_CQ_ID (0xFF<<24)
672#define USTORM_ISCSI_PLACEMENT_DB_CQ_ID_SHIFT 24
673 u32 place_db_bitfield_2;
674#define USTORM_ISCSI_PLACEMENT_DB_BYTES_2_TRUNCATE (0xFFFFFF<<0)
675#define USTORM_ISCSI_PLACEMENT_DB_BYTES_2_TRUNCATE_SHIFT 0
676#define USTORM_ISCSI_PLACEMENT_DB_HOST_SGE_INDEX (0xFF<<24)
677#define USTORM_ISCSI_PLACEMENT_DB_HOST_SGE_INDEX_SHIFT 24
678 u32 nal;
679#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE (0xFFFFFF<<0)
680#define USTORM_ISCSI_PLACEMENT_DB_REM_SGE_SIZE_SHIFT 0
681#define USTORM_ISCSI_PLACEMENT_DB_EXP_PADDING_2B (0x3<<24)
682#define USTORM_ISCSI_PLACEMENT_DB_EXP_PADDING_2B_SHIFT 24
683#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B (0x7<<26)
684#define USTORM_ISCSI_PLACEMENT_DB_EXP_DIGEST_3B_SHIFT 26
685#define USTORM_ISCSI_PLACEMENT_DB_NAL_LEN_3B (0x7<<29)
686#define USTORM_ISCSI_PLACEMENT_DB_NAL_LEN_3B_SHIFT 29
687};
688
689/*
690 * Ustorm iSCSI Storm Context
691 */
692struct ustorm_iscsi_st_context {
693 u32 exp_stat_sn;
694 u32 exp_data_sn;
695 struct rings_db ring;
696 struct regpair task_pbl_base;
697 struct regpair tce_phy_addr;
698 struct ustorm_iscsi_placement_db place_db;
699 u32 data_rcv_seq;
700 u32 rem_rcv_len;
701#if defined(__BIG_ENDIAN)
702 u16 hdr_itt;
703 u16 iscsi_conn_id;
704#elif defined(__LITTLE_ENDIAN)
705 u16 iscsi_conn_id;
706 u16 hdr_itt;
707#endif
708 u32 nal_bytes;
709#if defined(__BIG_ENDIAN)
710 u8 hdr_second_byte_union;
711 u8 bitfield_0;
712#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU (0x1<<0)
713#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
714#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
715#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
716#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x3F<<2)
717#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 2
718 u8 task_pdu_cache_index;
719 u8 task_pbe_cache_index;
720#elif defined(__LITTLE_ENDIAN)
721 u8 task_pbe_cache_index;
722 u8 task_pdu_cache_index;
723 u8 bitfield_0;
724#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU (0x1<<0)
725#define USTORM_ISCSI_ST_CONTEXT_BMIDDLEOFPDU_SHIFT 0
726#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE (0x1<<1)
727#define USTORM_ISCSI_ST_CONTEXT_BFENCECQE_SHIFT 1
728#define USTORM_ISCSI_ST_CONTEXT_RESERVED1 (0x3F<<2)
729#define USTORM_ISCSI_ST_CONTEXT_RESERVED1_SHIFT 2
730 u8 hdr_second_byte_union;
731#endif
732#if defined(__BIG_ENDIAN)
733 u16 reserved3;
734 u8 reserved2;
735 u8 acDecrement;
736#elif defined(__LITTLE_ENDIAN)
737 u8 acDecrement;
738 u8 reserved2;
739 u16 reserved3;
740#endif
741 u32 task_stat;
742#if defined(__BIG_ENDIAN)
743 u8 hdr_opcode;
744 u8 num_cqs;
745 u16 reserved5;
746#elif defined(__LITTLE_ENDIAN)
747 u16 reserved5;
748 u8 num_cqs;
749 u8 hdr_opcode;
750#endif
751 u32 negotiated_rx;
752#define USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH (0xFFFFFF<<0)
753#define USTORM_ISCSI_ST_CONTEXT_MAX_RECV_PDU_LENGTH_SHIFT 0
754#define USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS (0xFF<<24)
755#define USTORM_ISCSI_ST_CONTEXT_MAX_OUTSTANDING_R2TS_SHIFT 24
756 u32 negotiated_rx_and_flags;
757#define USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH (0xFFFFFF<<0)
758#define USTORM_ISCSI_ST_CONTEXT_MAX_BURST_LENGTH_SHIFT 0
759#define USTORM_ISCSI_ST_CONTEXT_B_CQE_POSTED_OR_HEADER_CACHED (0x1<<24)
760#define USTORM_ISCSI_ST_CONTEXT_B_CQE_POSTED_OR_HEADER_CACHED_SHIFT 24
761#define USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN (0x1<<25)
762#define USTORM_ISCSI_ST_CONTEXT_B_HDR_DIGEST_EN_SHIFT 25
763#define USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN (0x1<<26)
764#define USTORM_ISCSI_ST_CONTEXT_B_DATA_DIGEST_EN_SHIFT 26
765#define USTORM_ISCSI_ST_CONTEXT_B_PROTOCOL_ERROR (0x1<<27)
766#define USTORM_ISCSI_ST_CONTEXT_B_PROTOCOL_ERROR_SHIFT 27
767#define USTORM_ISCSI_ST_CONTEXT_B_TASK_VALID (0x1<<28)
768#define USTORM_ISCSI_ST_CONTEXT_B_TASK_VALID_SHIFT 28
769#define USTORM_ISCSI_ST_CONTEXT_TASK_TYPE (0x3<<29)
770#define USTORM_ISCSI_ST_CONTEXT_TASK_TYPE_SHIFT 29
771#define USTORM_ISCSI_ST_CONTEXT_B_ALL_DATA_ACKED (0x1<<31)
772#define USTORM_ISCSI_ST_CONTEXT_B_ALL_DATA_ACKED_SHIFT 31
773};
774
775/*
776 * TCP context region, shared in TOE, RDMA and ISCSI
777 */
778struct tstorm_tcp_st_context_section {
779 u32 flags1;
780#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_20B (0xFFFFFF<<0)
781#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_SRTT_20B_SHIFT 0
782#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID (0x1<<24)
783#define TSTORM_TCP_ST_CONTEXT_SECTION_PAWS_INVALID_SHIFT 24
784#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS (0x1<<25)
785#define TSTORM_TCP_ST_CONTEXT_SECTION_TIMESTAMP_EXISTS_SHIFT 25
786#define TSTORM_TCP_ST_CONTEXT_SECTION_ISLE_EXISTS (0x1<<26)
787#define TSTORM_TCP_ST_CONTEXT_SECTION_ISLE_EXISTS_SHIFT 26
788#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD (0x1<<27)
789#define TSTORM_TCP_ST_CONTEXT_SECTION_STOP_RX_PAYLOAD_SHIFT 27
790#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED (0x1<<28)
791#define TSTORM_TCP_ST_CONTEXT_SECTION_KA_ENABLED_SHIFT 28
792#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE (0x1<<29)
793#define TSTORM_TCP_ST_CONTEXT_SECTION_FIRST_RTO_ESTIMATE_SHIFT 29
794#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN (0x1<<30)
795#define TSTORM_TCP_ST_CONTEXT_SECTION_MAX_SEG_RETRANSMIT_EN_SHIFT 30
796#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED3 (0x1<<31)
797#define TSTORM_TCP_ST_CONTEXT_SECTION_RESERVED3_SHIFT 31
798 u32 flags2;
799#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_20B (0xFFFFFF<<0)
800#define TSTORM_TCP_ST_CONTEXT_SECTION_RTT_VARIATION_20B_SHIFT 0
801#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN (0x1<<24)
802#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_EN_SHIFT 24
803#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN (0x1<<25)
804#define TSTORM_TCP_ST_CONTEXT_SECTION_DA_COUNTER_EN_SHIFT 25
805#define __TSTORM_TCP_ST_CONTEXT_SECTION_KA_PROBE_SENT (0x1<<26)
806#define __TSTORM_TCP_ST_CONTEXT_SECTION_KA_PROBE_SENT_SHIFT 26
807#define __TSTORM_TCP_ST_CONTEXT_SECTION_PERSIST_PROBE_SENT (0x1<<27)
808#define __TSTORM_TCP_ST_CONTEXT_SECTION_PERSIST_PROBE_SENT_SHIFT 27
809#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<28)
810#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 28
811#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<29)
812#define TSTORM_TCP_ST_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 29
813#define __TSTORM_TCP_ST_CONTEXT_SECTION_SECOND_ISLE_DROPPED (0x1<<30)
814#define __TSTORM_TCP_ST_CONTEXT_SECTION_SECOND_ISLE_DROPPED_SHIFT 30
815#define __TSTORM_TCP_ST_CONTEXT_SECTION_DONT_SUPPORT_OOO (0x1<<31)
816#define __TSTORM_TCP_ST_CONTEXT_SECTION_DONT_SUPPORT_OOO_SHIFT 31
817#if defined(__BIG_ENDIAN)
818 u16 reserved_slowpath;
819 u8 tcp_sm_state_3b;
820 u8 rto_exp_3b;
821#elif defined(__LITTLE_ENDIAN)
822 u8 rto_exp_3b;
823 u8 tcp_sm_state_3b;
824 u16 reserved_slowpath;
825#endif
826 u32 rcv_nxt;
827 u32 timestamp_recent;
828 u32 timestamp_recent_time;
829 u32 cwnd;
830 u32 ss_thresh;
831 u32 cwnd_accum;
832 u32 prev_seg_seq;
833 u32 expected_rel_seq;
834 u32 recover;
835#if defined(__BIG_ENDIAN)
836 u8 retransmit_count;
837 u8 ka_max_probe_count;
838 u8 persist_probe_count;
839 u8 ka_probe_count;
840#elif defined(__LITTLE_ENDIAN)
841 u8 ka_probe_count;
842 u8 persist_probe_count;
843 u8 ka_max_probe_count;
844 u8 retransmit_count;
845#endif
846#if defined(__BIG_ENDIAN)
847 u8 statistics_counter_id;
848 u8 ooo_support_mode;
849 u8 snd_wnd_scale_4b;
850 u8 dup_ack_count;
851#elif defined(__LITTLE_ENDIAN)
852 u8 dup_ack_count;
853 u8 snd_wnd_scale_4b;
854 u8 ooo_support_mode;
855 u8 statistics_counter_id;
856#endif
857 u32 retransmit_start_time;
858 u32 ka_timeout;
859 u32 ka_interval;
860 u32 isle_start_seq;
861 u32 isle_end_seq;
862#if defined(__BIG_ENDIAN)
863 u16 mss;
864 u16 recent_seg_wnd;
865#elif defined(__LITTLE_ENDIAN)
866 u16 recent_seg_wnd;
867 u16 mss;
868#endif
869 u32 reserved4;
870 u32 max_rt_time;
871#if defined(__BIG_ENDIAN)
872 u16 lsb_mac_address;
873 u16 vlan_id;
874#elif defined(__LITTLE_ENDIAN)
875 u16 vlan_id;
876 u16 lsb_mac_address;
877#endif
878 u32 msb_mac_address;
879 u32 reserved2;
880};
881
882/*
883 * Termination variables
884 */
885struct iscsi_term_vars {
886 u8 BitMap;
887#define ISCSI_TERM_VARS_TCP_STATE (0xF<<0)
888#define ISCSI_TERM_VARS_TCP_STATE_SHIFT 0
889#define ISCSI_TERM_VARS_FIN_RECEIVED_SBIT (0x1<<4)
890#define ISCSI_TERM_VARS_FIN_RECEIVED_SBIT_SHIFT 4
891#define ISCSI_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT (0x1<<5)
892#define ISCSI_TERM_VARS_ACK_ON_FIN_RECEIVED_SBIT_SHIFT 5
893#define ISCSI_TERM_VARS_TERM_ON_CHIP (0x1<<6)
894#define ISCSI_TERM_VARS_TERM_ON_CHIP_SHIFT 6
895#define ISCSI_TERM_VARS_RSRV (0x1<<7)
896#define ISCSI_TERM_VARS_RSRV_SHIFT 7
897};
898
899/*
900 * iSCSI context region, used only in iSCSI
901 */
902struct tstorm_iscsi_st_context_section {
903#if defined(__BIG_ENDIAN)
904 u16 rem_tcp_data_len;
905 u16 brb_offset;
906#elif defined(__LITTLE_ENDIAN)
907 u16 brb_offset;
908 u16 rem_tcp_data_len;
909#endif
910 u32 b2nh;
911#if defined(__BIG_ENDIAN)
912 u16 rq_cons;
913 u8 flags;
914#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN (0x1<<0)
915#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN_SHIFT 0
916#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN (0x1<<1)
917#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN_SHIFT 1
918#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER (0x1<<2)
919#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER_SHIFT 2
920#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE (0x1<<3)
921#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3
922#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4)
923#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4
924#define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV (0x7<<5)
925#define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV_SHIFT 5
926 u8 hdr_bytes_2_fetch;
927#elif defined(__LITTLE_ENDIAN)
928 u8 hdr_bytes_2_fetch;
929 u8 flags;
930#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN (0x1<<0)
931#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_HDR_DIGEST_EN_SHIFT 0
932#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN (0x1<<1)
933#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DATA_DIGEST_EN_SHIFT 1
934#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER (0x1<<2)
935#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_PARTIAL_HEADER_SHIFT 2
936#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE (0x1<<3)
937#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_FULL_FEATURE_SHIFT 3
938#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS (0x1<<4)
939#define TSTORM_ISCSI_ST_CONTEXT_SECTION_B_DROP_ALL_PDUS_SHIFT 4
940#define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV (0x7<<5)
941#define TSTORM_ISCSI_ST_CONTEXT_SECTION_FLAGS_RSRV_SHIFT 5
942 u16 rq_cons;
943#endif
944 struct regpair rq_db_phy_addr;
945#if defined(__BIG_ENDIAN)
946 struct iscsi_term_vars term_vars;
947 u8 scratchpad_idx;
948 u16 iscsi_conn_id;
949#elif defined(__LITTLE_ENDIAN)
950 u16 iscsi_conn_id;
951 u8 scratchpad_idx;
952 struct iscsi_term_vars term_vars;
953#endif
954 u32 reserved2;
955};
956
957/*
958 * The iSCSI non-aggregative context of Tstorm
959 */
960struct tstorm_iscsi_st_context {
961 struct tstorm_tcp_st_context_section tcp;
962 struct tstorm_iscsi_st_context_section iscsi;
963};
964
965/*
966 * The tcp aggregative context section of Xstorm
967 */
968struct xstorm_tcp_tcp_ag_context_section {
969#if defined(__BIG_ENDIAN)
970 u8 __tcp_agg_vars1;
971 u8 __da_cnt;
972 u16 mss;
973#elif defined(__LITTLE_ENDIAN)
974 u16 mss;
975 u8 __da_cnt;
976 u8 __tcp_agg_vars1;
977#endif
978 u32 snd_nxt;
979 u32 tx_wnd;
980 u32 snd_una;
981 u32 local_adv_wnd;
982#if defined(__BIG_ENDIAN)
983 u8 __agg_val8_th;
984 u8 __agg_val8;
985 u16 tcp_agg_vars2;
986#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0)
987#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0
988#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1)
989#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1
990#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2)
991#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2
992#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
993#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
994#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
995#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
996#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5)
997#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5
998#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6)
999#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6
1000#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN (0x1<<7)
1001#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN_SHIFT 7
1002#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8)
1003#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8
1004#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
1005#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
1006#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
1007#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
1008#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
1009#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
1010#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF (0x3<<14)
1011#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_SHIFT 14
1012#elif defined(__LITTLE_ENDIAN)
1013 u16 tcp_agg_vars2;
1014#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG (0x1<<0)
1015#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_SHIFT 0
1016#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED (0x1<<1)
1017#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_UNBLOCKED_SHIFT 1
1018#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE (0x1<<2)
1019#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_TIMER_ACTIVE_SHIFT 2
1020#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG (0x1<<3)
1021#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_FLAG_SHIFT 3
1022#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG (0x1<<4)
1023#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX4_FLAG_SHIFT 4
1024#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE (0x1<<5)
1025#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DA_ENABLE_SHIFT 5
1026#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN (0x1<<6)
1027#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ACK_TO_FE_UPDATED_EN_SHIFT 6
1028#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN (0x1<<7)
1029#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX3_CF_EN_SHIFT 7
1030#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN (0x1<<8)
1031#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_FIN_FLAG_EN_SHIFT 8
1032#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<9)
1033#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 9
1034#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF (0x3<<10)
1035#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_SET_RTO_CF_SHIFT 10
1036#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF (0x3<<12)
1037#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_UPDATED_CF_SHIFT 12
1038#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF (0x3<<14)
1039#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_SHIFT 14
1040 u8 __agg_val8;
1041 u8 __agg_val8_th;
1042#endif
1043 u32 ack_to_far_end;
1044 u32 rto_timer;
1045 u32 ka_timer;
1046 u32 ts_to_echo;
1047#if defined(__BIG_ENDIAN)
1048 u16 __agg_val7_th;
1049 u16 __agg_val7;
1050#elif defined(__LITTLE_ENDIAN)
1051 u16 __agg_val7;
1052 u16 __agg_val7_th;
1053#endif
1054#if defined(__BIG_ENDIAN)
1055 u8 __tcp_agg_vars5;
1056 u8 __tcp_agg_vars4;
1057 u8 __tcp_agg_vars3;
1058 u8 __force_pure_ack_cnt;
1059#elif defined(__LITTLE_ENDIAN)
1060 u8 __force_pure_ack_cnt;
1061 u8 __tcp_agg_vars3;
1062 u8 __tcp_agg_vars4;
1063 u8 __tcp_agg_vars5;
1064#endif
1065 u32 tcp_agg_vars6;
1066#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN (0x1<<0)
1067#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TS_TO_ECHO_CF_EN_SHIFT 0
1068#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_EN (0x1<<1)
1069#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX8_CF_EN_SHIFT 1
1070#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN (0x1<<2)
1071#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_EN_SHIFT 2
1072#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<3)
1073#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 3
1074#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG (0x1<<4)
1075#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX6_FLAG_SHIFT 4
1076#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG (0x1<<5)
1077#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX7_FLAG_SHIFT 5
1078#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF (0x3<<6)
1079#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX5_CF_SHIFT 6
1080#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF (0x3<<8)
1081#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX9_CF_SHIFT 8
1082#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF (0x3<<10)
1083#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_SHIFT 10
1084#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF (0x3<<12)
1085#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_SHIFT 12
1086#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF (0x3<<14)
1087#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_SHIFT 14
1088#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF (0x3<<16)
1089#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX13_CF_SHIFT 16
1090#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF (0x3<<18)
1091#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX14_CF_SHIFT 18
1092#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF (0x3<<20)
1093#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX15_CF_SHIFT 20
1094#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF (0x3<<22)
1095#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX16_CF_SHIFT 22
1096#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF (0x3<<24)
1097#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX17_CF_SHIFT 24
1098#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG (0x1<<26)
1099#define XSTORM_TCP_TCP_AG_CONTEXT_SECTION_ECE_FLAG_SHIFT 26
1100#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71 (0x1<<27)
1101#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED71_SHIFT 27
1102#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY (0x1<<28)
1103#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_FORCE_PURE_ACK_CNT_DIRTY_SHIFT 28
1104#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG (0x1<<29)
1105#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_TCP_AUTO_STOP_FLAG_SHIFT 29
1106#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG (0x1<<30)
1107#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_DO_TS_UPDATE_FLAG_SHIFT 30
1108#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG (0x1<<31)
1109#define __XSTORM_TCP_TCP_AG_CONTEXT_SECTION_CANCEL_RETRANSMIT_FLAG_SHIFT 31
1110#if defined(__BIG_ENDIAN)
1111 u16 __agg_misc6;
1112 u16 __tcp_agg_vars7;
1113#elif defined(__LITTLE_ENDIAN)
1114 u16 __tcp_agg_vars7;
1115 u16 __agg_misc6;
1116#endif
1117 u32 __agg_val10;
1118 u32 __agg_val10_th;
1119#if defined(__BIG_ENDIAN)
1120 u16 __reserved3;
1121 u8 __reserved2;
1122 u8 __da_only_cnt;
1123#elif defined(__LITTLE_ENDIAN)
1124 u8 __da_only_cnt;
1125 u8 __reserved2;
1126 u16 __reserved3;
1127#endif
1128};
1129
1130/*
1131 * The iscsi aggregative context of Xstorm
1132 */
1133struct xstorm_iscsi_ag_context {
1134#if defined(__BIG_ENDIAN)
1135 u16 agg_val1;
1136 u8 agg_vars1;
1137#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
1138#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
1139#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
1140#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
1141#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
1142#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
1143#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
1144#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
1145#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
1146#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
1147#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5)
1148#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5
1149#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
1150#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
1151#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
1152#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
1153 u8 state;
1154#elif defined(__LITTLE_ENDIAN)
1155 u8 state;
1156 u8 agg_vars1;
1157#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
1158#define __XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
1159#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
1160#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
1161#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
1162#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
1163#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
1164#define XSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
1165#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN (0x1<<4)
1166#define __XSTORM_ISCSI_AG_CONTEXT_MORE_TO_SEND_EN_SHIFT 4
1167#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN (0x1<<5)
1168#define XSTORM_ISCSI_AG_CONTEXT_NAGLE_EN_SHIFT 5
1169#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG (0x1<<6)
1170#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_SHIFT 6
1171#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN (0x1<<7)
1172#define __XSTORM_ISCSI_AG_CONTEXT_UNA_GT_NXT_EN_SHIFT 7
1173 u16 agg_val1;
1174#endif
1175#if defined(__BIG_ENDIAN)
1176 u8 cdu_reserved;
1177 u8 agg_vars4;
1178#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF (0x3<<0)
1179#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_SHIFT 0
1180#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF (0x3<<2)
1181#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_SHIFT 2
1182#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN (0x1<<4)
1183#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN_SHIFT 4
1184#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN (0x1<<5)
1185#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN_SHIFT 5
1186#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN (0x1<<6)
1187#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN_SHIFT 6
1188#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN (0x1<<7)
1189#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN_SHIFT 7
1190 u8 agg_vars3;
1191#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
1192#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
1193#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF (0x3<<6)
1194#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_SHIFT 6
1195 u8 agg_vars2;
1196#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0)
1197#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0
1198#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
1199#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
1200#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3)
1201#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3
1202#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4)
1203#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4
1204#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
1205#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5
1206#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
1207#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
1208#elif defined(__LITTLE_ENDIAN)
1209 u8 agg_vars2;
1210#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF (0x3<<0)
1211#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_SHIFT 0
1212#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN (0x1<<2)
1213#define __XSTORM_ISCSI_AG_CONTEXT_DQ_SPARE_FLAG_EN_SHIFT 2
1214#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG (0x1<<3)
1215#define __XSTORM_ISCSI_AG_CONTEXT_AUX8_FLAG_SHIFT 3
1216#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG (0x1<<4)
1217#define __XSTORM_ISCSI_AG_CONTEXT_AUX9_FLAG_SHIFT 4
1218#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1 (0x3<<5)
1219#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE1_SHIFT 5
1220#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
1221#define __XSTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
1222 u8 agg_vars3;
1223#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2 (0x3F<<0)
1224#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM2_SHIFT 0
1225#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF (0x3<<6)
1226#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_SHIFT 6
1227 u8 agg_vars4;
1228#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF (0x3<<0)
1229#define XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_SHIFT 0
1230#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF (0x3<<2)
1231#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_SHIFT 2
1232#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN (0x1<<4)
1233#define __XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_EN_SHIFT 4
1234#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN (0x1<<5)
1235#define __XSTORM_ISCSI_AG_CONTEXT_AUX19_CF_EN_SHIFT 5
1236#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN (0x1<<6)
1237#define __XSTORM_ISCSI_AG_CONTEXT_R2TQ_PROD_CF_EN_SHIFT 6
1238#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN (0x1<<7)
1239#define __XSTORM_ISCSI_AG_CONTEXT_AUX21_CF_EN_SHIFT 7
1240 u8 cdu_reserved;
1241#endif
1242 u32 more_to_send;
1243#if defined(__BIG_ENDIAN)
1244 u16 agg_vars5;
1245#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
1246#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0
1247#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
1248#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
1249#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
1250#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
1251#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
1252#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14
1253 u16 sq_cons;
1254#elif defined(__LITTLE_ENDIAN)
1255 u16 sq_cons;
1256 u16 agg_vars5;
1257#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5 (0x3<<0)
1258#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE5_SHIFT 0
1259#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0 (0x3F<<2)
1260#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM0_SHIFT 2
1261#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1 (0x3F<<8)
1262#define XSTORM_ISCSI_AG_CONTEXT_PHYSICAL_QUEUE_NUM1_SHIFT 8
1263#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2 (0x3<<14)
1264#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE2_SHIFT 14
1265#endif
1266 struct xstorm_tcp_tcp_ag_context_section tcp;
1267#if defined(__BIG_ENDIAN)
1268 u16 agg_vars7;
1269#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
1270#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
1271#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
1272#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
1273#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF (0x3<<4)
1274#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_SHIFT 4
1275#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
1276#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
1277#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
1278#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8
1279#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
1280#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
1281#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
1282#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
1283#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12)
1284#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12
1285#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13)
1286#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
1287#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
1288#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
1289#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG (0x1<<15)
1290#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG_SHIFT 15
1291 u8 agg_val3_th;
1292 u8 agg_vars6;
1293#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
1294#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0
1295#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
1296#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3
1297#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
1298#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6
1299#elif defined(__LITTLE_ENDIAN)
1300 u8 agg_vars6;
1301#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6 (0x7<<0)
1302#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE6_SHIFT 0
1303#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7 (0x7<<3)
1304#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE7_SHIFT 3
1305#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4 (0x3<<6)
1306#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE4_SHIFT 6
1307 u8 agg_val3_th;
1308 u16 agg_vars7;
1309#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE (0x7<<0)
1310#define __XSTORM_ISCSI_AG_CONTEXT_AGG_VAL11_DECISION_RULE_SHIFT 0
1311#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG (0x1<<3)
1312#define __XSTORM_ISCSI_AG_CONTEXT_AUX13_FLAG_SHIFT 3
1313#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF (0x3<<4)
1314#define XSTORM_ISCSI_AG_CONTEXT_AUX18_CF_SHIFT 4
1315#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3 (0x3<<6)
1316#define XSTORM_ISCSI_AG_CONTEXT_DECISION_RULE3_SHIFT 6
1317#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF (0x3<<8)
1318#define XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_SHIFT 8
1319#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK (0x1<<10)
1320#define __XSTORM_ISCSI_AG_CONTEXT_COMPLETION_SEQ_DECISION_MASK_SHIFT 10
1321#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN (0x1<<11)
1322#define __XSTORM_ISCSI_AG_CONTEXT_AUX1_CF_EN_SHIFT 11
1323#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG (0x1<<12)
1324#define __XSTORM_ISCSI_AG_CONTEXT_AUX10_FLAG_SHIFT 12
1325#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG (0x1<<13)
1326#define __XSTORM_ISCSI_AG_CONTEXT_AUX11_FLAG_SHIFT 13
1327#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG (0x1<<14)
1328#define __XSTORM_ISCSI_AG_CONTEXT_AUX12_FLAG_SHIFT 14
1329#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG (0x1<<15)
1330#define __XSTORM_ISCSI_AG_CONTEXT_AUX2_FLAG_SHIFT 15
1331#endif
1332#if defined(__BIG_ENDIAN)
1333 u16 __agg_val11_th;
1334 u16 __agg_val11;
1335#elif defined(__LITTLE_ENDIAN)
1336 u16 __agg_val11;
1337 u16 __agg_val11_th;
1338#endif
1339#if defined(__BIG_ENDIAN)
1340 u8 __reserved1;
1341 u8 __agg_val6_th;
1342 u16 __agg_val9;
1343#elif defined(__LITTLE_ENDIAN)
1344 u16 __agg_val9;
1345 u8 __agg_val6_th;
1346 u8 __reserved1;
1347#endif
1348#if defined(__BIG_ENDIAN)
1349 u16 hq_prod;
1350 u16 hq_cons;
1351#elif defined(__LITTLE_ENDIAN)
1352 u16 hq_cons;
1353 u16 hq_prod;
1354#endif
1355 u32 agg_vars8;
1356#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2 (0xFFFFFF<<0)
1357#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC2_SHIFT 0
1358#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3 (0xFF<<24)
1359#define XSTORM_ISCSI_AG_CONTEXT_AGG_MISC3_SHIFT 24
1360#if defined(__BIG_ENDIAN)
1361 u16 r2tq_prod;
1362 u16 sq_prod;
1363#elif defined(__LITTLE_ENDIAN)
1364 u16 sq_prod;
1365 u16 r2tq_prod;
1366#endif
1367#if defined(__BIG_ENDIAN)
1368 u8 agg_val3;
1369 u8 agg_val6;
1370 u8 agg_val5_th;
1371 u8 agg_val5;
1372#elif defined(__LITTLE_ENDIAN)
1373 u8 agg_val5;
1374 u8 agg_val5_th;
1375 u8 agg_val6;
1376 u8 agg_val3;
1377#endif
1378#if defined(__BIG_ENDIAN)
1379 u16 __agg_misc1;
1380 u16 agg_limit1;
1381#elif defined(__LITTLE_ENDIAN)
1382 u16 agg_limit1;
1383 u16 __agg_misc1;
1384#endif
1385 u32 hq_cons_tcp_seq;
1386 u32 exp_stat_sn;
1387 u32 agg_misc5;
1388};
1389
1390/*
1391 * The tcp aggregative context section of Tstorm
 *
 * Host/firmware shared (HSI) structure: field order, widths and bit
 * positions are fixed by the device firmware - do not reorder or resize
 * any member.  Each #define pair describes a bit-field inside the
 * preceding integer member as a (MASK<<SHIFT) mask plus a matching
 * _SHIFT constant, for manual packing/unpacking by the driver.
 * The __BIG_ENDIAN/__LITTLE_ENDIAN branches reverse field order within
 * each 32-bit word so the in-memory byte layout seen by the firmware is
 * identical on either host endianness.
 * NOTE(review): "__"-prefixed members appear to be firmware-internal or
 * reserved (the host does not interpret them) - confirm against the
 * HSI documentation.
1392 */
1393struct tstorm_tcp_tcp_ag_context_section {
1394	u32 __agg_val1;
1395#if defined(__BIG_ENDIAN)
1396	u8 __tcp_agg_vars2;
1397	u8 __agg_val3;
1398	u16 __agg_val2;
1399#elif defined(__LITTLE_ENDIAN)
1400	u16 __agg_val2;
1401	u8 __agg_val3;
1402	u8 __tcp_agg_vars2;
1403#endif
1404#if defined(__BIG_ENDIAN)
1405	u16 __agg_val5;
1406	u8 __agg_val6;
1407	u8 __tcp_agg_vars3;
1408#elif defined(__LITTLE_ENDIAN)
1409	u8 __tcp_agg_vars3;
1410	u8 __agg_val6;
1411	u16 __agg_val5;
1412#endif
	/* TCP sequence-tracking state shared with the firmware. */
1413	u32 snd_nxt;
1414	u32 rtt_seq;
1415	u32 rtt_time;
1416	u32 __reserved66;
1417	u32 wnd_right_edge;
	/* Packed flag/counter word; sub-fields defined below. */
1418	u32 tcp_agg_vars1;
1419#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<0)
1420#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 0
1421#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG (0x1<<1)
1422#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_LAST_PACKET_FIN_FLAG_SHIFT 1
1423#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF (0x3<<2)
1424#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_SHIFT 2
1425#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF (0x3<<4)
1426#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_SHIFT 4
1427#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN (0x1<<6)
1428#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_WND_UPD_CF_EN_SHIFT 6
1429#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN (0x1<<7)
1430#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TIMEOUT_CF_EN_SHIFT 7
1431#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN (0x1<<8)
1432#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_SEQ_EN_SHIFT 8
1433#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN (0x1<<9)
1434#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_SND_NXT_EN_SHIFT 9
1435#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG (0x1<<10)
1436#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_FLAG_SHIFT 10
1437#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG (0x1<<11)
1438#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_FLAG_SHIFT 11
1439#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN (0x1<<12)
1440#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_EN_SHIFT 12
1441#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN (0x1<<13)
1442#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_EN_SHIFT 13
1443#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF (0x3<<14)
1444#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX1_CF_SHIFT 14
1445#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF (0x3<<16)
1446#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX2_CF_SHIFT 16
1447#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED (0x1<<18)
1448#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_TX_BLOCKED_SHIFT 18
1449#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN (0x1<<19)
1450#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX10_CF_EN_SHIFT 19
1451#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN (0x1<<20)
1452#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX11_CF_EN_SHIFT 20
1453#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN (0x1<<21)
1454#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_AUX12_CF_EN_SHIFT 21
1455#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1 (0x3<<22)
1456#define __TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RESERVED1_SHIFT 22
1457#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ (0xF<<24)
1458#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_PEND_SEQ_SHIFT 24
1459#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ (0xF<<28)
1460#define TSTORM_TCP_TCP_AG_CONTEXT_SECTION_RETRANSMIT_DONE_SEQ_SHIFT 28
1461	u32 snd_max;
1462	u32 snd_una;
1463	u32 __reserved2;
1464};
1465
1466/*
1467 * The iscsi aggregative context of Tstorm
1468 */
1469struct tstorm_iscsi_ag_context {
1470#if defined(__BIG_ENDIAN)
1471 u16 ulp_credit;
1472 u8 agg_vars1;
1473#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
1474#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
1475#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
1476#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
1477#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
1478#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
1479#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
1480#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
1481#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF (0x3<<4)
1482#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_SHIFT 4
1483#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
1484#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
1485#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG (0x1<<7)
1486#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG_SHIFT 7
1487 u8 state;
1488#elif defined(__LITTLE_ENDIAN)
1489 u8 state;
1490 u8 agg_vars1;
1491#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
1492#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
1493#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
1494#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
1495#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
1496#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
1497#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
1498#define TSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
1499#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF (0x3<<4)
1500#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_SHIFT 4
1501#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG (0x1<<6)
1502#define __TSTORM_ISCSI_AG_CONTEXT_AUX3_FLAG_SHIFT 6
1503#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG (0x1<<7)
1504#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_FLAG_SHIFT 7
1505 u16 ulp_credit;
1506#endif
1507#if defined(__BIG_ENDIAN)
1508 u16 __agg_val4;
1509 u16 agg_vars2;
1510#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG (0x1<<0)
1511#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG_SHIFT 0
1512#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG (0x1<<1)
1513#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG_SHIFT 1
1514#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF (0x3<<2)
1515#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_SHIFT 2
1516#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF (0x3<<4)
1517#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_SHIFT 4
1518#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
1519#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
1520#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
1521#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
1522#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
1523#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
1524#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<11)
1525#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 11
1526#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
1527#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
1528#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
1529#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
1530#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
1531#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
1532#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
1533#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
1534#elif defined(__LITTLE_ENDIAN)
1535 u16 agg_vars2;
1536#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG (0x1<<0)
1537#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_FLAG_SHIFT 0
1538#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG (0x1<<1)
1539#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_FLAG_SHIFT 1
1540#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF (0x3<<2)
1541#define __TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_SHIFT 2
1542#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF (0x3<<4)
1543#define __TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_SHIFT 4
1544#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF (0x3<<6)
1545#define __TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_SHIFT 6
1546#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF (0x3<<8)
1547#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_SHIFT 8
1548#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG (0x1<<10)
1549#define __TSTORM_ISCSI_AG_CONTEXT_AUX7_FLAG_SHIFT 10
1550#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<11)
1551#define TSTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 11
1552#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN (0x1<<12)
1553#define TSTORM_ISCSI_AG_CONTEXT_AUX4_CF_EN_SHIFT 12
1554#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN (0x1<<13)
1555#define TSTORM_ISCSI_AG_CONTEXT_AUX5_CF_EN_SHIFT 13
1556#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN (0x1<<14)
1557#define TSTORM_ISCSI_AG_CONTEXT_AUX6_CF_EN_SHIFT 14
1558#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN (0x1<<15)
1559#define TSTORM_ISCSI_AG_CONTEXT_AUX7_CF_EN_SHIFT 15
1560 u16 __agg_val4;
1561#endif
1562 struct tstorm_tcp_tcp_ag_context_section tcp;
1563};
1564
1565/*
1566 * The iscsi aggregative context of Cstorm
 *
 * Host/firmware shared (HSI) structure; layout is fixed by the device
 * firmware - do not reorder or resize members.  #define pairs are
 * (MASK<<SHIFT)/_SHIFT bit-field descriptors for the preceding integer
 * member; the endian branches mirror field order within each 32-bit
 * word so the byte layout matches on both host endiannesses.
 * NOTE(review): "__"-prefixed names look firmware-internal/reserved -
 * confirm against the HSI documentation.
1567 */
1568struct cstorm_iscsi_ag_context {
1569	u32 agg_vars1;
1570#define CSTORM_ISCSI_AG_CONTEXT_STATE (0xFF<<0)
1571#define CSTORM_ISCSI_AG_CONTEXT_STATE_SHIFT 0
1572#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<8)
1573#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 8
1574#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<9)
1575#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 9
1576#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<10)
1577#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 10
1578#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<11)
1579#define __CSTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 11
1580#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN (0x1<<12)
1581#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_SE_CF_EN_SHIFT 12
1582#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN (0x1<<13)
1583#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED_ULP_RX_INV_CF_EN_SHIFT 13
1584#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF (0x3<<14)
1585#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_SHIFT 14
1586#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66 (0x3<<16)
1587#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED66_SHIFT 16
1588#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN (0x1<<18)
1589#define __CSTORM_ISCSI_AG_CONTEXT_FIN_RECEIVED_CF_EN_SHIFT 18
1590#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN (0x1<<19)
1591#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION0_CF_EN_SHIFT 19
1592#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN (0x1<<20)
1593#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION1_CF_EN_SHIFT 20
1594#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN (0x1<<21)
1595#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION2_CF_EN_SHIFT 21
1596#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN (0x1<<22)
1597#define __CSTORM_ISCSI_AG_CONTEXT_PENDING_COMPLETION3_CF_EN_SHIFT 22
1598#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE (0x7<<23)
1599#define __CSTORM_ISCSI_AG_CONTEXT_REL_SEQ_RULE_SHIFT 23
1600#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE (0x3<<26)
1601#define CSTORM_ISCSI_AG_CONTEXT_HQ_PROD_RULE_SHIFT 26
1602#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52 (0x3<<28)
1603#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED52_SHIFT 28
1604#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53 (0x3<<30)
1605#define __CSTORM_ISCSI_AG_CONTEXT_RESERVED53_SHIFT 30
1606#if defined(__BIG_ENDIAN)
1607	u8 __aux1_th;
1608	u8 __aux1_val;
1609	u16 __agg_vars2;
1610#elif defined(__LITTLE_ENDIAN)
1611	u16 __agg_vars2;
1612	u8 __aux1_val;
1613	u8 __aux1_th;
1614#endif
1615	u32 rel_seq;
1616	u32 rel_seq_th;
	/* HQ (host queue) producer/consumer indices. */
1617#if defined(__BIG_ENDIAN)
1618	u16 hq_cons;
1619	u16 hq_prod;
1620#elif defined(__LITTLE_ENDIAN)
1621	u16 hq_prod;
1622	u16 hq_cons;
1623#endif
1624#if defined(__BIG_ENDIAN)
1625	u8 __reserved62;
1626	u8 __reserved61;
1627	u8 __reserved60;
1628	u8 __reserved59;
1629#elif defined(__LITTLE_ENDIAN)
1630	u8 __reserved59;
1631	u8 __reserved60;
1632	u8 __reserved61;
1633	u8 __reserved62;
1634#endif
1635#if defined(__BIG_ENDIAN)
1636	u16 __reserved64;
1637	u16 __cq_u_prod0;
1638#elif defined(__LITTLE_ENDIAN)
1639	u16 __cq_u_prod0;
1640	u16 __reserved64;
1641#endif
1642	u32 __cq_u_prod1;
1643#if defined(__BIG_ENDIAN)
1644	u16 __agg_vars3;
1645	u16 __cq_u_prod2;
1646#elif defined(__LITTLE_ENDIAN)
1647	u16 __cq_u_prod2;
1648	u16 __agg_vars3;
1649#endif
1650#if defined(__BIG_ENDIAN)
1651	u16 __aux2_th;
1652	u16 __cq_u_prod3;
1653#elif defined(__LITTLE_ENDIAN)
1654	u16 __cq_u_prod3;
1655	u16 __aux2_th;
1656#endif
1657};
1658
1659/*
1660 * The iscsi aggregative context of Ustorm
 *
 * Host/firmware shared (HSI) structure; layout is fixed by the device
 * firmware - do not reorder or resize members.  #define pairs are
 * (MASK<<SHIFT)/_SHIFT bit-field descriptors for the preceding integer
 * member; the endian branches mirror field order within each 32-bit
 * word so the byte layout matches on both host endiannesses (the
 * bit-field macros are repeated verbatim in each branch so either
 * branch compiles on its own).
 * NOTE(review): "__"-prefixed names look firmware-internal/reserved -
 * confirm against the HSI documentation.
1661 */
1662struct ustorm_iscsi_ag_context {
1663#if defined(__BIG_ENDIAN)
1664	u8 __aux_counter_flags;
1665	u8 agg_vars2;
1666#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0)
1667#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0
1668#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2)
1669#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2
1670#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
1671#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
1672#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
1673#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
1674	u8 agg_vars1;
1675#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
1676#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
1677#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
1678#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
1679#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
1680#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
1681#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
1682#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
1683#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4)
1684#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4
1685#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6)
1686#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6
1687	u8 state;
1688#elif defined(__LITTLE_ENDIAN)
1689	u8 state;
1690	u8 agg_vars1;
1691#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0 (0x1<<0)
1692#define __USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM0_SHIFT 0
1693#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1 (0x1<<1)
1694#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM1_SHIFT 1
1695#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2 (0x1<<2)
1696#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM2_SHIFT 2
1697#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3 (0x1<<3)
1698#define USTORM_ISCSI_AG_CONTEXT_EXISTS_IN_QM3_SHIFT 3
1699#define USTORM_ISCSI_AG_CONTEXT_INV_CF (0x3<<4)
1700#define USTORM_ISCSI_AG_CONTEXT_INV_CF_SHIFT 4
1701#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF (0x3<<6)
1702#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_SHIFT 6
1703	u8 agg_vars2;
1704#define USTORM_ISCSI_AG_CONTEXT_TX_CF (0x3<<0)
1705#define USTORM_ISCSI_AG_CONTEXT_TX_CF_SHIFT 0
1706#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF (0x3<<2)
1707#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_SHIFT 2
1708#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE (0x7<<4)
1709#define USTORM_ISCSI_AG_CONTEXT_AGG_MISC4_RULE_SHIFT 4
1710#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK (0x1<<7)
1711#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_MASK_SHIFT 7
1712	u8 __aux_counter_flags;
1713#endif
1714#if defined(__BIG_ENDIAN)
1715	u8 cdu_usage;
1716	u8 agg_misc2;
1717	u16 __cq_local_comp_itt_val;
1718#elif defined(__LITTLE_ENDIAN)
1719	u16 __cq_local_comp_itt_val;
1720	u8 agg_misc2;
1721	u8 cdu_usage;
1722#endif
1723	u32 agg_misc4;
1724#if defined(__BIG_ENDIAN)
1725	u8 agg_val3_th;
1726	u8 agg_val3;
1727	u16 agg_misc3;
1728#elif defined(__LITTLE_ENDIAN)
1729	u16 agg_misc3;
1730	u8 agg_val3;
1731	u8 agg_val3_th;
1732#endif
1733	u32 agg_val1;
1734	u32 agg_misc4_th;
1735#if defined(__BIG_ENDIAN)
1736	u16 agg_val2_th;
1737	u16 agg_val2;
1738#elif defined(__LITTLE_ENDIAN)
1739	u16 agg_val2;
1740	u16 agg_val2_th;
1741#endif
1742#if defined(__BIG_ENDIAN)
1743	u16 __reserved2;
	/* decision_rules / decision_rule_enable_bits: packed rule selectors
	 * and per-rule enable flags; sub-fields defined below. */
1744	u8 decision_rules;
1745#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0)
1746#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
1747#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
1748#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
1749#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
1750#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
1751#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
1752#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
1753	u8 decision_rule_enable_bits;
1754#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0)
1755#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0
1756#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
1757#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
1758#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2)
1759#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2
1760#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
1761#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
1762#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4)
1763#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4
1764#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5)
1765#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5
1766#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
1767#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
1768#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
1769#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
1770#elif defined(__LITTLE_ENDIAN)
1771	u8 decision_rule_enable_bits;
1772#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN (0x1<<0)
1773#define USTORM_ISCSI_AG_CONTEXT_INV_CF_EN_SHIFT 0
1774#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN (0x1<<1)
1775#define USTORM_ISCSI_AG_CONTEXT_COMPLETION_CF_EN_SHIFT 1
1776#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN (0x1<<2)
1777#define USTORM_ISCSI_AG_CONTEXT_TX_CF_EN_SHIFT 2
1778#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN (0x1<<3)
1779#define __USTORM_ISCSI_AG_CONTEXT_TIMER_CF_EN_SHIFT 3
1780#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN (0x1<<4)
1781#define __USTORM_ISCSI_AG_CONTEXT_CQ_LOCAL_COMP_CF_EN_SHIFT 4
1782#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN (0x1<<5)
1783#define __USTORM_ISCSI_AG_CONTEXT_QUEUES_FLUSH_Q0_CF_EN_SHIFT 5
1784#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN (0x1<<6)
1785#define __USTORM_ISCSI_AG_CONTEXT_AUX3_CF_EN_SHIFT 6
1786#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN (0x1<<7)
1787#define __USTORM_ISCSI_AG_CONTEXT_DQ_CF_EN_SHIFT 7
1788	u8 decision_rules;
1789#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE (0x7<<0)
1790#define USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_RULE_SHIFT 0
1791#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE (0x7<<3)
1792#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL3_RULE_SHIFT 3
1793#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG (0x1<<6)
1794#define __USTORM_ISCSI_AG_CONTEXT_AGG_VAL2_ARM_N_FLAG_SHIFT 6
1795#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1 (0x1<<7)
1796#define __USTORM_ISCSI_AG_CONTEXT_RESERVED1_SHIFT 7
1797	u16 __reserved2;
1798#endif
1799};
1800
1801/*
1802 * Timers connection context
 *
 * Host/firmware shared (HSI) layout; do not reorder or resize members.
 * Only "flags" is interpreted here: a 2-bit active-timer count, a
 * connection-valid flag, and the remaining 29 bits reserved, packed
 * with the (MASK<<SHIFT)/_SHIFT macro convention below.
1803 */
1804struct iscsi_timers_block_context {
1805	u32 __reserved_0;
1806	u32 __reserved_1;
1807	u32 __reserved_2;
1808	u32 flags;
1809#define __ISCSI_TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS (0x3<<0)
1810#define __ISCSI_TIMERS_BLOCK_CONTEXT_NUM_OF_ACTIVE_TIMERS_SHIFT 0
1811#define ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG (0x1<<2)
1812#define ISCSI_TIMERS_BLOCK_CONTEXT_CONN_VALID_FLG_SHIFT 2
1813#define __ISCSI_TIMERS_BLOCK_CONTEXT_RESERVED0 (0x1FFFFFFF<<3)
1814#define __ISCSI_TIMERS_BLOCK_CONTEXT_RESERVED0_SHIFT 3
1815};
1816
1817/*
1818 * Ethernet context section, shared in TOE, RDMA and ISCSI
 *
 * Host/firmware shared (HSI) layout; do not reorder or resize members.
 * MAC addresses are stored byte-by-byte (local_addr_0..5 and
 * remote_addr_0..5); the endian branches mirror byte order within each
 * 32-bit word so the in-memory layout is identical on either host
 * endianness.  "params" packs the 802.1Q VLAN id/CFI/priority fields
 * using the (MASK<<SHIFT)/_SHIFT macros below.
1819 */
1820struct xstorm_eth_context_section {
1821#if defined(__BIG_ENDIAN)
1822	u8 remote_addr_4;
1823	u8 remote_addr_5;
1824	u8 local_addr_0;
1825	u8 local_addr_1;
1826#elif defined(__LITTLE_ENDIAN)
1827	u8 local_addr_1;
1828	u8 local_addr_0;
1829	u8 remote_addr_5;
1830	u8 remote_addr_4;
1831#endif
1832#if defined(__BIG_ENDIAN)
1833	u8 remote_addr_0;
1834	u8 remote_addr_1;
1835	u8 remote_addr_2;
1836	u8 remote_addr_3;
1837#elif defined(__LITTLE_ENDIAN)
1838	u8 remote_addr_3;
1839	u8 remote_addr_2;
1840	u8 remote_addr_1;
1841	u8 remote_addr_0;
1842#endif
1843#if defined(__BIG_ENDIAN)
1844	u16 reserved_vlan_type;
1845	u16 params;
1846#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
1847#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
1848#define XSTORM_ETH_CONTEXT_SECTION_CFI (0x1<<12)
1849#define XSTORM_ETH_CONTEXT_SECTION_CFI_SHIFT 12
1850#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
1851#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
1852#elif defined(__LITTLE_ENDIAN)
1853	u16 params;
1854#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID (0xFFF<<0)
1855#define XSTORM_ETH_CONTEXT_SECTION_VLAN_ID_SHIFT 0
1856#define XSTORM_ETH_CONTEXT_SECTION_CFI (0x1<<12)
1857#define XSTORM_ETH_CONTEXT_SECTION_CFI_SHIFT 12
1858#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY (0x7<<13)
1859#define XSTORM_ETH_CONTEXT_SECTION_PRIORITY_SHIFT 13
1860	u16 reserved_vlan_type;
1861#endif
1862#if defined(__BIG_ENDIAN)
1863	u8 local_addr_2;
1864	u8 local_addr_3;
1865	u8 local_addr_4;
1866	u8 local_addr_5;
1867#elif defined(__LITTLE_ENDIAN)
1868	u8 local_addr_5;
1869	u8 local_addr_4;
1870	u8 local_addr_3;
1871	u8 local_addr_2;
1872#endif
1873};
1874
1875/*
1876 * IpV4 context section, shared in TOE, RDMA and ISCSI
 *
 * Host/firmware shared (HSI) layout; do not reorder or resize members.
 * Carries the IPv4 header fields the host programs (tos, ttl, local and
 * remote addresses); "__pbf_hdr_cmd_rsvd_*" members shadow the
 * remaining header slots.
 * NOTE(review): the __pbf_hdr_cmd_* fields appear to be filled in by
 * the packet-build firmware rather than the host - confirm against the
 * HSI documentation.
1877 */
1878struct xstorm_ip_v4_context_section {
1879#if defined(__BIG_ENDIAN)
1880	u16 __pbf_hdr_cmd_rsvd_id;
1881	u16 __pbf_hdr_cmd_rsvd_flags_offset;
1882#elif defined(__LITTLE_ENDIAN)
1883	u16 __pbf_hdr_cmd_rsvd_flags_offset;
1884	u16 __pbf_hdr_cmd_rsvd_id;
1885#endif
1886#if defined(__BIG_ENDIAN)
1887	u8 __pbf_hdr_cmd_rsvd_ver_ihl;
1888	u8 tos;
1889	u16 __pbf_hdr_cmd_rsvd_length;
1890#elif defined(__LITTLE_ENDIAN)
1891	u16 __pbf_hdr_cmd_rsvd_length;
1892	u8 tos;
1893	u8 __pbf_hdr_cmd_rsvd_ver_ihl;
1894#endif
1895	u32 ip_local_addr;
1896#if defined(__BIG_ENDIAN)
1897	u8 ttl;
1898	u8 __pbf_hdr_cmd_rsvd_protocol;
1899	u16 __pbf_hdr_cmd_rsvd_csum;
1900#elif defined(__LITTLE_ENDIAN)
1901	u16 __pbf_hdr_cmd_rsvd_csum;
1902	u8 __pbf_hdr_cmd_rsvd_protocol;
1903	u8 ttl;
1904#endif
1905	u32 __pbf_hdr_cmd_rsvd_1;
1906	u32 ip_remote_addr;
1907};
1908
1909/*
1910 * context section, shared in TOE, RDMA and ISCSI
 *
 * IPv4 section padded with four reserved u32s so it occupies the same
 * space as the (larger) IPv6 section when placed in the union of IP
 * context types below.
1911 */
1912struct xstorm_padded_ip_v4_context_section {
1913	struct xstorm_ip_v4_context_section ip_v4;
1914	u32 reserved1[4];
1915};
1916
1917/*
1918 * IpV6 context section, shared in TOE, RDMA and ISCSI
 *
 * Host/firmware shared (HSI) layout; do not reorder or resize members.
 * "priority_flow_label" packs the IPv6 flow label, traffic class and a
 * reserved version nibble via the (MASK<<SHIFT)/_SHIFT macros below.
 * The 128-bit addresses are stored as four u32 words each
 * (lo_hi/lo_lo/hi_hi/hi_lo).
1919 */
1920struct xstorm_ip_v6_context_section {
1921#if defined(__BIG_ENDIAN)
1922	u16 pbf_hdr_cmd_rsvd_payload_len;
1923	u8 pbf_hdr_cmd_rsvd_nxt_hdr;
1924	u8 hop_limit;
1925#elif defined(__LITTLE_ENDIAN)
1926	u8 hop_limit;
1927	u8 pbf_hdr_cmd_rsvd_nxt_hdr;
1928	u16 pbf_hdr_cmd_rsvd_payload_len;
1929#endif
1930	u32 priority_flow_label;
1931#define XSTORM_IP_V6_CONTEXT_SECTION_FLOW_LABEL (0xFFFFF<<0)
1932#define XSTORM_IP_V6_CONTEXT_SECTION_FLOW_LABEL_SHIFT 0
1933#define XSTORM_IP_V6_CONTEXT_SECTION_TRAFFIC_CLASS (0xFF<<20)
1934#define XSTORM_IP_V6_CONTEXT_SECTION_TRAFFIC_CLASS_SHIFT 20
1935#define XSTORM_IP_V6_CONTEXT_SECTION_PBF_HDR_CMD_RSVD_VER (0xF<<28)
1936#define XSTORM_IP_V6_CONTEXT_SECTION_PBF_HDR_CMD_RSVD_VER_SHIFT 28
1937	u32 ip_local_addr_lo_hi;
1938	u32 ip_local_addr_lo_lo;
1939	u32 ip_local_addr_hi_hi;
1940	u32 ip_local_addr_hi_lo;
1941	u32 ip_remote_addr_lo_hi;
1942	u32 ip_remote_addr_lo_lo;
1943	u32 ip_remote_addr_hi_hi;
1944	u32 ip_remote_addr_hi_lo;
1945};
1946
/*
 * Either-or overlay of the IP context section: the padded IPv4 variant
 * and the IPv6 variant occupy the same storage, selected at runtime by
 * the connection's IP version (see ip_version_1b in
 * xstorm_common_context_section).
 */
1947union xstorm_ip_context_section_types {
1948	struct xstorm_padded_ip_v4_context_section padded_ip_v4;
1949	struct xstorm_ip_v6_context_section ip_v6;
1950};
1951
1952/*
1953 * TCP context section, shared in TOE, RDMA and ISCSI
 *
 * Host/firmware shared (HSI) layout; do not reorder or resize members.
 * "tcp_params" packs per-connection TCP options/state flags via the
 * (MASK<<SHIFT)/_SHIFT macros below; the endian branches mirror field
 * order within each 32-bit word so the byte layout matches on both
 * host endiannesses.
 * NOTE(review): the "_1b" suffix on ts_enabled_1b/original_nagle_1b
 * suggests a single meaningful bit stored in a full byte - confirm
 * against the HSI documentation.
1954 */
1955struct xstorm_tcp_context_section {
1956	u32 snd_max;
1957#if defined(__BIG_ENDIAN)
1958	u16 remote_port;
1959	u16 local_port;
1960#elif defined(__LITTLE_ENDIAN)
1961	u16 local_port;
1962	u16 remote_port;
1963#endif
1964#if defined(__BIG_ENDIAN)
1965	u8 original_nagle_1b;
1966	u8 ts_enabled_1b;
1967	u16 tcp_params;
1968#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0)
1969#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0
1970#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT (0x1<<8)
1971#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT_SHIFT 8
1972#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED (0x1<<9)
1973#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9
1974#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10)
1975#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10
1976#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE (0x1<<11)
1977#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE_SHIFT 11
1978#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12)
1979#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12
1980#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13)
1981#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13
1982#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14)
1983#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14
1984#elif defined(__LITTLE_ENDIAN)
1985	u16 tcp_params;
1986#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE (0xFF<<0)
1987#define XSTORM_TCP_CONTEXT_SECTION_TOTAL_HEADER_SIZE_SHIFT 0
1988#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT (0x1<<8)
1989#define __XSTORM_TCP_CONTEXT_SECTION_ECT_BIT_SHIFT 8
1990#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED (0x1<<9)
1991#define __XSTORM_TCP_CONTEXT_SECTION_ECN_ENABLED_SHIFT 9
1992#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED (0x1<<10)
1993#define XSTORM_TCP_CONTEXT_SECTION_SACK_ENABLED_SHIFT 10
1994#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE (0x1<<11)
1995#define XSTORM_TCP_CONTEXT_SECTION_KA_STATE_SHIFT 11
1996#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG (0x1<<12)
1997#define XSTORM_TCP_CONTEXT_SECTION_FIN_SENT_FLAG_SHIFT 12
1998#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED (0x1<<13)
1999#define XSTORM_TCP_CONTEXT_SECTION_WINDOW_SATURATED_SHIFT 13
2000#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER (0x3<<14)
2001#define XSTORM_TCP_CONTEXT_SECTION_SLOWPATH_QUEUES_FLUSH_COUNTER_SHIFT 14
2002	u8 ts_enabled_1b;
2003	u8 original_nagle_1b;
2004#endif
2005#if defined(__BIG_ENDIAN)
2006	u16 pseudo_csum;
2007	u16 window_scaling_factor;
2008#elif defined(__LITTLE_ENDIAN)
2009	u16 window_scaling_factor;
2010	u16 pseudo_csum;
2011#endif
2012	u32 reserved2;
2013	u32 ts_time_diff;
2014	u32 __next_timer_expir;
2015};
2016
2017/*
2018 * Common context section, shared in TOE, RDMA and ISCSI
2019 */
2020struct xstorm_common_context_section {
2021 struct xstorm_eth_context_section ethernet;
2022 union xstorm_ip_context_section_types ip_union;
2023 struct xstorm_tcp_context_section tcp;
2024#if defined(__BIG_ENDIAN)
2025 u16 reserved;
2026 u8 statistics_params;
2027#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0)
2028#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0
2029#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1)
2030#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
2031#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2)
2032#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2
2033#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0 (0x1<<7)
2034#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0_SHIFT 7
2035 u8 ip_version_1b;
2036#elif defined(__LITTLE_ENDIAN)
2037 u8 ip_version_1b;
2038 u8 statistics_params;
2039#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS (0x1<<0)
2040#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L2_STATSTICS_SHIFT 0
2041#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS (0x1<<1)
2042#define XSTORM_COMMON_CONTEXT_SECTION_UPDATE_L4_STATSTICS_SHIFT 1
2043#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID (0x1F<<2)
2044#define XSTORM_COMMON_CONTEXT_SECTION_STATISTICS_COUNTER_ID_SHIFT 2
2045#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0 (0x1<<7)
2046#define XSTORM_COMMON_CONTEXT_SECTION_RESERVED0_SHIFT 7
2047 u16 reserved;
2048#endif
2049};
2050
2051/*
2052 * Flags used in ISCSI context section
2053 */
2054struct xstorm_iscsi_context_flags {
2055 u8 flags;
2056#define XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA (0x1<<0)
2057#define XSTORM_ISCSI_CONTEXT_FLAGS_B_IMMEDIATE_DATA_SHIFT 0
2058#define XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T (0x1<<1)
2059#define XSTORM_ISCSI_CONTEXT_FLAGS_B_INITIAL_R2T_SHIFT 1
2060#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_HEADER_DIGEST (0x1<<2)
2061#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_HEADER_DIGEST_SHIFT 2
2062#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_DATA_DIGEST (0x1<<3)
2063#define XSTORM_ISCSI_CONTEXT_FLAGS_B_EN_DATA_DIGEST_SHIFT 3
2064#define XSTORM_ISCSI_CONTEXT_FLAGS_B_HQ_BD_WRITTEN (0x1<<4)
2065#define XSTORM_ISCSI_CONTEXT_FLAGS_B_HQ_BD_WRITTEN_SHIFT 4
2066#define XSTORM_ISCSI_CONTEXT_FLAGS_B_LAST_OP_SQ (0x1<<5)
2067#define XSTORM_ISCSI_CONTEXT_FLAGS_B_LAST_OP_SQ_SHIFT 5
2068#define XSTORM_ISCSI_CONTEXT_FLAGS_B_UPDATE_SND_NXT (0x1<<6)
2069#define XSTORM_ISCSI_CONTEXT_FLAGS_B_UPDATE_SND_NXT_SHIFT 6
2070#define XSTORM_ISCSI_CONTEXT_FLAGS_RESERVED4 (0x1<<7)
2071#define XSTORM_ISCSI_CONTEXT_FLAGS_RESERVED4_SHIFT 7
2072};
2073
2074struct iscsi_task_context_entry_x {
2075 u32 data_out_buffer_offset;
2076 u32 itt;
2077 u32 data_sn;
2078};
2079
2080struct iscsi_task_context_entry_xuc_x_write_only {
2081 u32 tx_r2t_sn;
2082};
2083
2084struct iscsi_task_context_entry_xuc_xu_write_both {
2085 u32 sgl_base_lo;
2086 u32 sgl_base_hi;
2087#if defined(__BIG_ENDIAN)
2088 u8 sgl_size;
2089 u8 sge_index;
2090 u16 sge_offset;
2091#elif defined(__LITTLE_ENDIAN)
2092 u16 sge_offset;
2093 u8 sge_index;
2094 u8 sgl_size;
2095#endif
2096};
2097
2098/*
2099 * iSCSI context section
2100 */
2101struct xstorm_iscsi_context_section {
2102 u32 first_burst_length;
2103 u32 max_send_pdu_length;
2104 struct regpair sq_pbl_base;
2105 struct regpair sq_curr_pbe;
2106 struct regpair hq_pbl_base;
2107 struct regpair hq_curr_pbe_base;
2108 struct regpair r2tq_pbl_base;
2109 struct regpair r2tq_curr_pbe_base;
2110 struct regpair task_pbl_base;
2111#if defined(__BIG_ENDIAN)
2112 u16 data_out_count;
2113 struct xstorm_iscsi_context_flags flags;
2114 u8 task_pbl_cache_idx;
2115#elif defined(__LITTLE_ENDIAN)
2116 u8 task_pbl_cache_idx;
2117 struct xstorm_iscsi_context_flags flags;
2118 u16 data_out_count;
2119#endif
2120 u32 seq_more_2_send;
2121 u32 pdu_more_2_send;
2122 struct iscsi_task_context_entry_x temp_tce_x;
2123 struct iscsi_task_context_entry_xuc_x_write_only temp_tce_x_wr;
2124 struct iscsi_task_context_entry_xuc_xu_write_both temp_tce_xu_wr;
2125 struct regpair lun;
2126 u32 exp_data_transfer_len_ttt;
2127 u32 pdu_data_2_rxmit;
2128 u32 rxmit_bytes_2_dr;
2129#if defined(__BIG_ENDIAN)
2130 u16 rxmit_sge_offset;
2131 u16 hq_rxmit_cons;
2132#elif defined(__LITTLE_ENDIAN)
2133 u16 hq_rxmit_cons;
2134 u16 rxmit_sge_offset;
2135#endif
2136#if defined(__BIG_ENDIAN)
2137 u16 r2tq_cons;
2138 u8 rxmit_flags;
2139#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD (0x1<<0)
2140#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD_SHIFT 0
2141#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR (0x1<<1)
2142#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR_SHIFT 1
2143#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU (0x1<<2)
2144#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU_SHIFT 2
2145#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR (0x1<<3)
2146#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR_SHIFT 3
2147#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR (0x1<<4)
2148#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR_SHIFT 4
2149#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING (0x3<<5)
2150#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING_SHIFT 5
2151#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT (0x1<<7)
2152#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT_SHIFT 7
2153 u8 rxmit_sge_idx;
2154#elif defined(__LITTLE_ENDIAN)
2155 u8 rxmit_sge_idx;
2156 u8 rxmit_flags;
2157#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD (0x1<<0)
2158#define XSTORM_ISCSI_CONTEXT_SECTION_B_NEW_HQ_BD_SHIFT 0
2159#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR (0x1<<1)
2160#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PDU_HDR_SHIFT 1
2161#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU (0x1<<2)
2162#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_END_PDU_SHIFT 2
2163#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR (0x1<<3)
2164#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_DR_SHIFT 3
2165#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR (0x1<<4)
2166#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_START_DR_SHIFT 4
2167#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING (0x3<<5)
2168#define XSTORM_ISCSI_CONTEXT_SECTION_B_RXMIT_PADDING_SHIFT 5
2169#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT (0x1<<7)
2170#define XSTORM_ISCSI_CONTEXT_SECTION_B_ISCSI_CONT_FAST_RXMIT_SHIFT 7
2171 u16 r2tq_cons;
2172#endif
2173 u32 hq_rxmit_tcp_seq;
2174};
2175
2176/*
2177 * Xstorm iSCSI Storm Context
2178 */
2179struct xstorm_iscsi_st_context {
2180 struct xstorm_common_context_section common;
2181 struct xstorm_iscsi_context_section iscsi;
2182};
2183
2184/*
2185 * CQ DB CQ producer and pending completion counter
2186 */
2187struct iscsi_cq_db_prod_pnd_cmpltn_cnt {
2188#if defined(__BIG_ENDIAN)
2189 u16 cntr;
2190 u16 prod;
2191#elif defined(__LITTLE_ENDIAN)
2192 u16 prod;
2193 u16 cntr;
2194#endif
2195};
2196
2197/*
2198 * CQ DB pending completion ITT array
2199 */
2200struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr {
2201 struct iscsi_cq_db_prod_pnd_cmpltn_cnt prod_pend_comp[8];
2202};
2203
2204/*
2205 * Cstorm CQ sequence to notify array, updated by driver
2206 */
2207struct iscsi_cq_db_sqn_2_notify_arr {
2208 u16 sqn[8];
2209};
2210
2211/*
2212 * Cstorm iSCSI Storm Context
2213 */
2214struct cstorm_iscsi_st_context {
2215 struct iscsi_cq_db_prod_pnd_cmpltn_cnt_arr cq_c_prod_pend_comp_ctr_arr;
2216 struct iscsi_cq_db_sqn_2_notify_arr cq_c_prod_sqn_arr;
2217 struct iscsi_cq_db_sqn_2_notify_arr cq_c_sqn_2_notify_arr;
2218 struct regpair hq_pbl_base;
2219 struct regpair hq_curr_pbe;
2220 struct regpair task_pbl_base;
2221 struct regpair cq_db_base;
2222#if defined(__BIG_ENDIAN)
2223 u16 hq_bd_itt;
2224 u16 iscsi_conn_id;
2225#elif defined(__LITTLE_ENDIAN)
2226 u16 iscsi_conn_id;
2227 u16 hq_bd_itt;
2228#endif
2229 u32 hq_bd_data_segment_len;
2230 u32 hq_bd_buffer_offset;
2231#if defined(__BIG_ENDIAN)
2232 u8 timer_entry_idx;
2233 u8 cq_proc_en_bit_map;
2234 u8 cq_pend_comp_itt_valid_bit_map;
2235 u8 hq_bd_opcode;
2236#elif defined(__LITTLE_ENDIAN)
2237 u8 hq_bd_opcode;
2238 u8 cq_pend_comp_itt_valid_bit_map;
2239 u8 cq_proc_en_bit_map;
2240 u8 timer_entry_idx;
2241#endif
2242 u32 hq_tcp_seq;
2243#if defined(__BIG_ENDIAN)
2244 u16 flags;
2245#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0)
2246#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0
2247#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1)
2248#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1
2249#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2)
2250#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2
2251#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3)
2252#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3
2253#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4)
2254#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4
2255#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5)
2256#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5
2257 u16 hq_cons;
2258#elif defined(__LITTLE_ENDIAN)
2259 u16 hq_cons;
2260 u16 flags;
2261#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN (0x1<<0)
2262#define CSTORM_ISCSI_ST_CONTEXT_DATA_DIGEST_EN_SHIFT 0
2263#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN (0x1<<1)
2264#define CSTORM_ISCSI_ST_CONTEXT_HDR_DIGEST_EN_SHIFT 1
2265#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID (0x1<<2)
2266#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_CTXT_VALID_SHIFT 2
2267#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG (0x1<<3)
2268#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_LCL_CMPLN_FLG_SHIFT 3
2269#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK (0x1<<4)
2270#define CSTORM_ISCSI_ST_CONTEXT_HQ_BD_WRITE_TASK_SHIFT 4
2271#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV (0x7FF<<5)
2272#define CSTORM_ISCSI_ST_CONTEXT_CTRL_FLAGS_RSRV_SHIFT 5
2273#endif
2274 struct regpair rsrv1;
2275};
2276
2277/*
2278 * Iscsi connection context
2279 */
2280struct iscsi_context {
2281 struct ustorm_iscsi_st_context ustorm_st_context;
2282 struct tstorm_iscsi_st_context tstorm_st_context;
2283 struct xstorm_iscsi_ag_context xstorm_ag_context;
2284 struct tstorm_iscsi_ag_context tstorm_ag_context;
2285 struct cstorm_iscsi_ag_context cstorm_ag_context;
2286 struct ustorm_iscsi_ag_context ustorm_ag_context;
2287 struct iscsi_timers_block_context timers_context;
2288 struct regpair upb_context;
2289 struct xstorm_iscsi_st_context xstorm_st_context;
2290 struct regpair xpb_context;
2291 struct cstorm_iscsi_st_context cstorm_st_context;
2292};
2293
2294/*
2295 * Buffer per connection, used in Tstorm
2296 */
2297struct iscsi_conn_buf {
2298 struct regpair reserved[8];
2299};
2300
2301/*
2302 * ipv6 structure
2303 */
2304struct ip_v6_addr {
2305 u32 ip_addr_lo_lo;
2306 u32 ip_addr_lo_hi;
2307 u32 ip_addr_hi_lo;
2308 u32 ip_addr_hi_hi;
2309};
2310
2311/*
2312 * l5cm- connection identification params
2313 */
2314struct l5cm_conn_addr_params {
2315 u32 pmtu;
2316#if defined(__BIG_ENDIAN)
2317 u8 remote_addr_3;
2318 u8 remote_addr_2;
2319 u8 remote_addr_1;
2320 u8 remote_addr_0;
2321#elif defined(__LITTLE_ENDIAN)
2322 u8 remote_addr_0;
2323 u8 remote_addr_1;
2324 u8 remote_addr_2;
2325 u8 remote_addr_3;
2326#endif
2327#if defined(__BIG_ENDIAN)
2328 u16 params;
2329#define L5CM_CONN_ADDR_PARAMS_IP_VERSION (0x1<<0)
2330#define L5CM_CONN_ADDR_PARAMS_IP_VERSION_SHIFT 0
2331#define L5CM_CONN_ADDR_PARAMS_RSRV (0x7FFF<<1)
2332#define L5CM_CONN_ADDR_PARAMS_RSRV_SHIFT 1
2333 u8 remote_addr_5;
2334 u8 remote_addr_4;
2335#elif defined(__LITTLE_ENDIAN)
2336 u8 remote_addr_4;
2337 u8 remote_addr_5;
2338 u16 params;
2339#define L5CM_CONN_ADDR_PARAMS_IP_VERSION (0x1<<0)
2340#define L5CM_CONN_ADDR_PARAMS_IP_VERSION_SHIFT 0
2341#define L5CM_CONN_ADDR_PARAMS_RSRV (0x7FFF<<1)
2342#define L5CM_CONN_ADDR_PARAMS_RSRV_SHIFT 1
2343#endif
2344 struct ip_v6_addr local_ip_addr;
2345 struct ip_v6_addr remote_ip_addr;
2346 u32 ipv6_flow_label_20b;
2347 u32 reserved1;
2348#if defined(__BIG_ENDIAN)
2349 u16 remote_tcp_port;
2350 u16 local_tcp_port;
2351#elif defined(__LITTLE_ENDIAN)
2352 u16 local_tcp_port;
2353 u16 remote_tcp_port;
2354#endif
2355};
2356
2357/*
2358 * l5cm-xstorm connection buffer
2359 */
2360struct l5cm_xstorm_conn_buffer {
2361#if defined(__BIG_ENDIAN)
2362 u16 rsrv1;
2363 u16 params;
2364#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE (0x1<<0)
2365#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE_SHIFT 0
2366#define L5CM_XSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
2367#define L5CM_XSTORM_CONN_BUFFER_RSRV_SHIFT 1
2368#elif defined(__LITTLE_ENDIAN)
2369 u16 params;
2370#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE (0x1<<0)
2371#define L5CM_XSTORM_CONN_BUFFER_NAGLE_ENABLE_SHIFT 0
2372#define L5CM_XSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
2373#define L5CM_XSTORM_CONN_BUFFER_RSRV_SHIFT 1
2374 u16 rsrv1;
2375#endif
2376#if defined(__BIG_ENDIAN)
2377 u16 mss;
2378 u16 pseudo_header_checksum;
2379#elif defined(__LITTLE_ENDIAN)
2380 u16 pseudo_header_checksum;
2381 u16 mss;
2382#endif
2383 u32 rcv_buf;
2384 u32 rsrv2;
2385 struct regpair context_addr;
2386};
2387
2388/*
2389 * l5cm-tstorm connection buffer
2390 */
2391struct l5cm_tstorm_conn_buffer {
2392 u32 snd_buf;
2393 u32 rcv_buf;
2394#if defined(__BIG_ENDIAN)
2395 u16 params;
2396#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE (0x1<<0)
2397#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE_SHIFT 0
2398#define L5CM_TSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
2399#define L5CM_TSTORM_CONN_BUFFER_RSRV_SHIFT 1
2400 u8 ka_max_probe_count;
2401 u8 ka_enable;
2402#elif defined(__LITTLE_ENDIAN)
2403 u8 ka_enable;
2404 u8 ka_max_probe_count;
2405 u16 params;
2406#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE (0x1<<0)
2407#define L5CM_TSTORM_CONN_BUFFER_DELAYED_ACK_ENABLE_SHIFT 0
2408#define L5CM_TSTORM_CONN_BUFFER_RSRV (0x7FFF<<1)
2409#define L5CM_TSTORM_CONN_BUFFER_RSRV_SHIFT 1
2410#endif
2411 u32 ka_timeout;
2412 u32 ka_interval;
2413 u32 max_rt_time;
2414};
2415
2416/*
2417 * l5cm connection buffer for active side
2418 */
2419struct l5cm_active_conn_buffer {
2420 struct l5cm_conn_addr_params conn_addr_buf;
2421 struct l5cm_xstorm_conn_buffer xstorm_conn_buffer;
2422 struct l5cm_tstorm_conn_buffer tstorm_conn_buffer;
2423};
2424
2425/*
2426 * l5cm slow path element
2427 */
2428struct l5cm_packet_size {
2429 u32 size;
2430 u32 rsrv;
2431};
2432
2433/*
2434 * l5cm connection parameters
2435 */
2436union l5cm_reduce_param_union {
2437 u32 passive_side_scramble_key;
2438 u32 pcs_id;
2439};
2440
2441/*
2442 * l5cm connection parameters
2443 */
2444struct l5cm_reduce_conn {
2445 union l5cm_reduce_param_union param;
2446 u32 isn;
2447};
2448
2449/*
2450 * l5cm slow path element
2451 */
2452union l5cm_specific_data {
2453 u8 protocol_data[8];
2454 struct regpair phy_address;
2455 struct l5cm_packet_size packet_size;
2456 struct l5cm_reduce_conn reduced_conn;
2457};
2458
2459/*
2460 * l5 slow path element
2461 */
2462struct l5cm_spe {
2463 struct spe_hdr hdr;
2464 union l5cm_specific_data data;
2465};
2466
2467/*
2468 * Tstorm Tcp flags
2469 */
2470struct tstorm_l5cm_tcp_flags {
2471 u16 flags;
2472#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID (0xFFF<<0)
2473#define TSTORM_L5CM_TCP_FLAGS_VLAN_ID_SHIFT 0
2474#define TSTORM_L5CM_TCP_FLAGS_RSRV0 (0x1<<12)
2475#define TSTORM_L5CM_TCP_FLAGS_RSRV0_SHIFT 12
2476#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<13)
2477#define TSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 13
2478#define TSTORM_L5CM_TCP_FLAGS_RSRV1 (0x3<<14)
2479#define TSTORM_L5CM_TCP_FLAGS_RSRV1_SHIFT 14
2480};
2481
2482/*
2483 * Xstorm Tcp flags
2484 */
2485struct xstorm_l5cm_tcp_flags {
2486 u8 flags;
2487#define XSTORM_L5CM_TCP_FLAGS_ENC_ENABLED (0x1<<0)
2488#define XSTORM_L5CM_TCP_FLAGS_ENC_ENABLED_SHIFT 0
2489#define XSTORM_L5CM_TCP_FLAGS_TS_ENABLED (0x1<<1)
2490#define XSTORM_L5CM_TCP_FLAGS_TS_ENABLED_SHIFT 1
2491#define XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN (0x1<<2)
2492#define XSTORM_L5CM_TCP_FLAGS_WND_SCL_EN_SHIFT 2
2493#define XSTORM_L5CM_TCP_FLAGS_RSRV (0x1F<<3)
2494#define XSTORM_L5CM_TCP_FLAGS_RSRV_SHIFT 3
2495};
2496
580#endif /* CNIC_DEFS_H */ 2497#endif /* CNIC_DEFS_H */
diff --git a/drivers/net/cnic_if.h b/drivers/net/cnic_if.h
index d8b09efdcb52..8aaf98bdd4f7 100644
--- a/drivers/net/cnic_if.h
+++ b/drivers/net/cnic_if.h
@@ -12,8 +12,8 @@
12#ifndef CNIC_IF_H 12#ifndef CNIC_IF_H
13#define CNIC_IF_H 13#define CNIC_IF_H
14 14
15#define CNIC_MODULE_VERSION "2.0.1" 15#define CNIC_MODULE_VERSION "2.1.0"
16#define CNIC_MODULE_RELDATE "Oct 01, 2009" 16#define CNIC_MODULE_RELDATE "Oct 10, 2009"
17 17
18#define CNIC_ULP_RDMA 0 18#define CNIC_ULP_RDMA 0
19#define CNIC_ULP_ISCSI 1 19#define CNIC_ULP_ISCSI 1
@@ -81,6 +81,8 @@ struct kcqe {
81#define DRV_CTL_CTX_WR_CMD 0x103 81#define DRV_CTL_CTX_WR_CMD 0x103
82#define DRV_CTL_CTXTBL_WR_CMD 0x104 82#define DRV_CTL_CTXTBL_WR_CMD 0x104
83#define DRV_CTL_COMPLETION_CMD 0x105 83#define DRV_CTL_COMPLETION_CMD 0x105
84#define DRV_CTL_START_L2_CMD 0x106
85#define DRV_CTL_STOP_L2_CMD 0x107
84 86
85struct cnic_ctl_completion { 87struct cnic_ctl_completion {
86 u32 cid; 88 u32 cid;
@@ -105,11 +107,17 @@ struct drv_ctl_io {
105 dma_addr_t dma_addr; 107 dma_addr_t dma_addr;
106}; 108};
107 109
110struct drv_ctl_l2_ring {
111 u32 client_id;
112 u32 cid;
113};
114
108struct drv_ctl_info { 115struct drv_ctl_info {
109 int cmd; 116 int cmd;
110 union { 117 union {
111 struct drv_ctl_completion comp; 118 struct drv_ctl_completion comp;
112 struct drv_ctl_io io; 119 struct drv_ctl_io io;
120 struct drv_ctl_l2_ring ring;
113 char bytes[MAX_DRV_CTL_DATA]; 121 char bytes[MAX_DRV_CTL_DATA];
114 } data; 122 } data;
115}; 123};
@@ -143,6 +151,7 @@ struct cnic_eth_dev {
143 u32 max_kwqe_pending; 151 u32 max_kwqe_pending;
144 struct pci_dev *pdev; 152 struct pci_dev *pdev;
145 void __iomem *io_base; 153 void __iomem *io_base;
154 void __iomem *io_base2;
146 155
147 u32 ctx_tbl_offset; 156 u32 ctx_tbl_offset;
148 u32 ctx_tbl_len; 157 u32 ctx_tbl_len;
@@ -298,5 +307,6 @@ extern int cnic_register_driver(int ulp_type, struct cnic_ulp_ops *ulp_ops);
298extern int cnic_unregister_driver(int ulp_type); 307extern int cnic_unregister_driver(int ulp_type);
299 308
300extern struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev); 309extern struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev);
310extern struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev);
301 311
302#endif 312#endif
diff --git a/drivers/net/cpmac.c b/drivers/net/cpmac.c
index 61f9da2b4943..678222389407 100644
--- a/drivers/net/cpmac.c
+++ b/drivers/net/cpmac.c
@@ -380,9 +380,8 @@ static struct sk_buff *cpmac_rx_one(struct cpmac_priv *priv,
380 return NULL; 380 return NULL;
381 } 381 }
382 382
383 skb = netdev_alloc_skb(priv->dev, CPMAC_SKB_SIZE); 383 skb = netdev_alloc_skb_ip_align(priv->dev, CPMAC_SKB_SIZE);
384 if (likely(skb)) { 384 if (likely(skb)) {
385 skb_reserve(skb, 2);
386 skb_put(desc->skb, desc->datalen); 385 skb_put(desc->skb, desc->datalen);
387 desc->skb->protocol = eth_type_trans(desc->skb, priv->dev); 386 desc->skb->protocol = eth_type_trans(desc->skb, priv->dev);
388 desc->skb->ip_summed = CHECKSUM_NONE; 387 desc->skb->ip_summed = CHECKSUM_NONE;
@@ -991,12 +990,11 @@ static int cpmac_open(struct net_device *dev)
991 990
992 priv->rx_head = &priv->desc_ring[CPMAC_QUEUES]; 991 priv->rx_head = &priv->desc_ring[CPMAC_QUEUES];
993 for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) { 992 for (i = 0, desc = priv->rx_head; i < priv->ring_size; i++, desc++) {
994 skb = netdev_alloc_skb(dev, CPMAC_SKB_SIZE); 993 skb = netdev_alloc_skb_ip_align(dev, CPMAC_SKB_SIZE);
995 if (unlikely(!skb)) { 994 if (unlikely(!skb)) {
996 res = -ENOMEM; 995 res = -ENOMEM;
997 goto fail_desc; 996 goto fail_desc;
998 } 997 }
999 skb_reserve(skb, 2);
1000 desc->skb = skb; 998 desc->skb = skb;
1001 desc->data_mapping = dma_map_single(&dev->dev, skb->data, 999 desc->data_mapping = dma_map_single(&dev->dev, skb->data,
1002 CPMAC_SKB_SIZE, 1000 CPMAC_SKB_SIZE,
diff --git a/drivers/net/cris/eth_v10.c b/drivers/net/cris/eth_v10.c
index 15c0195ebd31..a24be34a3f7a 100644
--- a/drivers/net/cris/eth_v10.c
+++ b/drivers/net/cris/eth_v10.c
@@ -768,10 +768,24 @@ e100_negotiate(struct net_device* dev)
768 768
769 e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data); 769 e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_ADVERTISE, data);
770 770
771 /* Renegotiate with link partner */ 771 data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR);
772 if (autoneg_normal) { 772 if (autoneg_normal) {
773 data = e100_get_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR); 773 /* Renegotiate with link partner */
774 data |= BMCR_ANENABLE | BMCR_ANRESTART; 774 data |= BMCR_ANENABLE | BMCR_ANRESTART;
775 } else {
776 /* Don't negotiate speed or duplex */
777 data &= ~(BMCR_ANENABLE | BMCR_ANRESTART);
778
779 /* Set speed and duplex static */
780 if (current_speed_selection == 10)
781 data &= ~BMCR_SPEED100;
782 else
783 data |= BMCR_SPEED100;
784
785 if (current_duplex != full)
786 data &= ~BMCR_FULLDPLX;
787 else
788 data |= BMCR_FULLDPLX;
775 } 789 }
776 e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data); 790 e100_set_mdio_reg(dev, np->mii_if.phy_id, MII_BMCR, data);
777} 791}
diff --git a/drivers/net/cxgb3/adapter.h b/drivers/net/cxgb3/adapter.h
index 2b1aea6aa558..3e8618b4efbc 100644
--- a/drivers/net/cxgb3/adapter.h
+++ b/drivers/net/cxgb3/adapter.h
@@ -48,12 +48,27 @@
48struct vlan_group; 48struct vlan_group;
49struct adapter; 49struct adapter;
50struct sge_qset; 50struct sge_qset;
51struct port_info;
51 52
52enum { /* rx_offload flags */ 53enum { /* rx_offload flags */
53 T3_RX_CSUM = 1 << 0, 54 T3_RX_CSUM = 1 << 0,
54 T3_LRO = 1 << 1, 55 T3_LRO = 1 << 1,
55}; 56};
56 57
58enum mac_idx_types {
59 LAN_MAC_IDX = 0,
60 SAN_MAC_IDX,
61
62 MAX_MAC_IDX
63};
64
65struct iscsi_config {
66 __u8 mac_addr[ETH_ALEN];
67 __u32 flags;
68 int (*send)(struct port_info *pi, struct sk_buff **skb);
69 int (*recv)(struct port_info *pi, struct sk_buff *skb);
70};
71
57struct port_info { 72struct port_info {
58 struct adapter *adapter; 73 struct adapter *adapter;
59 struct vlan_group *vlan_grp; 74 struct vlan_group *vlan_grp;
@@ -68,6 +83,7 @@ struct port_info {
68 struct net_device_stats netstats; 83 struct net_device_stats netstats;
69 int activity; 84 int activity;
70 __be32 iscsi_ipv4addr; 85 __be32 iscsi_ipv4addr;
86 struct iscsi_config iscsic;
71 87
72 int link_fault; /* link fault was detected */ 88 int link_fault; /* link fault was detected */
73}; 89};
diff --git a/drivers/net/cxgb3/common.h b/drivers/net/cxgb3/common.h
index 1b2c305fb82b..6ff356d4c7ab 100644
--- a/drivers/net/cxgb3/common.h
+++ b/drivers/net/cxgb3/common.h
@@ -125,11 +125,9 @@ enum { /* adapter interrupt-maintained statistics */
125 IRQ_NUM_STATS /* keep last */ 125 IRQ_NUM_STATS /* keep last */
126}; 126};
127 127
128enum { 128#define TP_VERSION_MAJOR 1
129 TP_VERSION_MAJOR = 1, 129#define TP_VERSION_MINOR 1
130 TP_VERSION_MINOR = 1, 130#define TP_VERSION_MICRO 0
131 TP_VERSION_MICRO = 0
132};
133 131
134#define S_TP_VERSION_MAJOR 16 132#define S_TP_VERSION_MAJOR 16
135#define M_TP_VERSION_MAJOR 0xFF 133#define M_TP_VERSION_MAJOR 0xFF
diff --git a/drivers/net/cxgb3/cxgb3_main.c b/drivers/net/cxgb3/cxgb3_main.c
index 34e776c5f06b..b1a5a00a78cd 100644
--- a/drivers/net/cxgb3/cxgb3_main.c
+++ b/drivers/net/cxgb3/cxgb3_main.c
@@ -44,6 +44,7 @@
44#include <linux/rtnetlink.h> 44#include <linux/rtnetlink.h>
45#include <linux/firmware.h> 45#include <linux/firmware.h>
46#include <linux/log2.h> 46#include <linux/log2.h>
47#include <linux/stringify.h>
47#include <asm/uaccess.h> 48#include <asm/uaccess.h>
48 49
49#include "common.h" 50#include "common.h"
@@ -344,8 +345,10 @@ static void link_start(struct net_device *dev)
344 345
345 init_rx_mode(&rm, dev, dev->mc_list); 346 init_rx_mode(&rm, dev, dev->mc_list);
346 t3_mac_reset(mac); 347 t3_mac_reset(mac);
348 t3_mac_set_num_ucast(mac, MAX_MAC_IDX);
347 t3_mac_set_mtu(mac, dev->mtu); 349 t3_mac_set_mtu(mac, dev->mtu);
348 t3_mac_set_address(mac, 0, dev->dev_addr); 350 t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
351 t3_mac_set_address(mac, SAN_MAC_IDX, pi->iscsic.mac_addr);
349 t3_mac_set_rx_mode(mac, &rm); 352 t3_mac_set_rx_mode(mac, &rm);
350 t3_link_start(&pi->phy, mac, &pi->link_config); 353 t3_link_start(&pi->phy, mac, &pi->link_config);
351 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); 354 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
@@ -903,6 +906,7 @@ static inline int offload_tx(struct t3cdev *tdev, struct sk_buff *skb)
903static int write_smt_entry(struct adapter *adapter, int idx) 906static int write_smt_entry(struct adapter *adapter, int idx)
904{ 907{
905 struct cpl_smt_write_req *req; 908 struct cpl_smt_write_req *req;
909 struct port_info *pi = netdev_priv(adapter->port[idx]);
906 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL); 910 struct sk_buff *skb = alloc_skb(sizeof(*req), GFP_KERNEL);
907 911
908 if (!skb) 912 if (!skb)
@@ -913,8 +917,8 @@ static int write_smt_entry(struct adapter *adapter, int idx)
913 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx)); 917 OPCODE_TID(req) = htonl(MK_OPCODE_TID(CPL_SMT_WRITE_REQ, idx));
914 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */ 918 req->mtu_idx = NMTUS - 1; /* should be 0 but there's a T3 bug */
915 req->iff = idx; 919 req->iff = idx;
916 memset(req->src_mac1, 0, sizeof(req->src_mac1));
917 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN); 920 memcpy(req->src_mac0, adapter->port[idx]->dev_addr, ETH_ALEN);
921 memcpy(req->src_mac1, pi->iscsic.mac_addr, ETH_ALEN);
918 skb->priority = 1; 922 skb->priority = 1;
919 offload_tx(&adapter->tdev, skb); 923 offload_tx(&adapter->tdev, skb);
920 return 0; 924 return 0;
@@ -989,11 +993,21 @@ static int bind_qsets(struct adapter *adap)
989 return err; 993 return err;
990} 994}
991 995
992#define FW_FNAME "cxgb3/t3fw-%d.%d.%d.bin" 996#define FW_VERSION __stringify(FW_VERSION_MAJOR) "." \
993#define TPSRAM_NAME "cxgb3/t3%c_psram-%d.%d.%d.bin" 997 __stringify(FW_VERSION_MINOR) "." __stringify(FW_VERSION_MICRO)
998#define FW_FNAME "cxgb3/t3fw-" FW_VERSION ".bin"
999#define TPSRAM_VERSION __stringify(TP_VERSION_MAJOR) "." \
1000 __stringify(TP_VERSION_MINOR) "." __stringify(TP_VERSION_MICRO)
1001#define TPSRAM_NAME "cxgb3/t3%c_psram-" TPSRAM_VERSION ".bin"
994#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin" 1002#define AEL2005_OPT_EDC_NAME "cxgb3/ael2005_opt_edc.bin"
995#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin" 1003#define AEL2005_TWX_EDC_NAME "cxgb3/ael2005_twx_edc.bin"
996#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin" 1004#define AEL2020_TWX_EDC_NAME "cxgb3/ael2020_twx_edc.bin"
1005MODULE_FIRMWARE(FW_FNAME);
1006MODULE_FIRMWARE("cxgb3/t3b_psram-" TPSRAM_VERSION ".bin");
1007MODULE_FIRMWARE("cxgb3/t3c_psram-" TPSRAM_VERSION ".bin");
1008MODULE_FIRMWARE(AEL2005_OPT_EDC_NAME);
1009MODULE_FIRMWARE(AEL2005_TWX_EDC_NAME);
1010MODULE_FIRMWARE(AEL2020_TWX_EDC_NAME);
997 1011
998static inline const char *get_edc_fw_name(int edc_idx) 1012static inline const char *get_edc_fw_name(int edc_idx)
999{ 1013{
@@ -1064,16 +1078,13 @@ int t3_get_edc_fw(struct cphy *phy, int edc_idx, int size)
1064static int upgrade_fw(struct adapter *adap) 1078static int upgrade_fw(struct adapter *adap)
1065{ 1079{
1066 int ret; 1080 int ret;
1067 char buf[64];
1068 const struct firmware *fw; 1081 const struct firmware *fw;
1069 struct device *dev = &adap->pdev->dev; 1082 struct device *dev = &adap->pdev->dev;
1070 1083
1071 snprintf(buf, sizeof(buf), FW_FNAME, FW_VERSION_MAJOR, 1084 ret = request_firmware(&fw, FW_FNAME, dev);
1072 FW_VERSION_MINOR, FW_VERSION_MICRO);
1073 ret = request_firmware(&fw, buf, dev);
1074 if (ret < 0) { 1085 if (ret < 0) {
1075 dev_err(dev, "could not upgrade firmware: unable to load %s\n", 1086 dev_err(dev, "could not upgrade firmware: unable to load %s\n",
1076 buf); 1087 FW_FNAME);
1077 return ret; 1088 return ret;
1078 } 1089 }
1079 ret = t3_load_fw(adap, fw->data, fw->size); 1090 ret = t3_load_fw(adap, fw->data, fw->size);
@@ -1117,8 +1128,7 @@ static int update_tpsram(struct adapter *adap)
1117 if (!rev) 1128 if (!rev)
1118 return 0; 1129 return 0;
1119 1130
1120 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev, 1131 snprintf(buf, sizeof(buf), TPSRAM_NAME, rev);
1121 TP_VERSION_MAJOR, TP_VERSION_MINOR, TP_VERSION_MICRO);
1122 1132
1123 ret = request_firmware(&tpsram, buf, dev); 1133 ret = request_firmware(&tpsram, buf, dev);
1124 if (ret < 0) { 1134 if (ret < 0) {
@@ -2516,7 +2526,7 @@ static int cxgb_set_mac_addr(struct net_device *dev, void *p)
2516 return -EINVAL; 2526 return -EINVAL;
2517 2527
2518 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len); 2528 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
2519 t3_mac_set_address(&pi->mac, 0, dev->dev_addr); 2529 t3_mac_set_address(&pi->mac, LAN_MAC_IDX, dev->dev_addr);
2520 if (offload_running(adapter)) 2530 if (offload_running(adapter))
2521 write_smt_entry(adapter, pi->port_id); 2531 write_smt_entry(adapter, pi->port_id);
2522 return 0; 2532 return 0;
@@ -2654,7 +2664,7 @@ static void check_t3b2_mac(struct adapter *adapter)
2654 struct cmac *mac = &p->mac; 2664 struct cmac *mac = &p->mac;
2655 2665
2656 t3_mac_set_mtu(mac, dev->mtu); 2666 t3_mac_set_mtu(mac, dev->mtu);
2657 t3_mac_set_address(mac, 0, dev->dev_addr); 2667 t3_mac_set_address(mac, LAN_MAC_IDX, dev->dev_addr);
2658 cxgb_set_rxmode(dev); 2668 cxgb_set_rxmode(dev);
2659 t3_link_start(&p->phy, mac, &p->link_config); 2669 t3_link_start(&p->phy, mac, &p->link_config);
2660 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX); 2670 t3_mac_enable(mac, MAC_DIRECTION_RX | MAC_DIRECTION_TX);
@@ -3112,6 +3122,14 @@ static const struct net_device_ops cxgb_netdev_ops = {
3112#endif 3122#endif
3113}; 3123};
3114 3124
3125static void __devinit cxgb3_init_iscsi_mac(struct net_device *dev)
3126{
3127 struct port_info *pi = netdev_priv(dev);
3128
3129 memcpy(pi->iscsic.mac_addr, dev->dev_addr, ETH_ALEN);
3130 pi->iscsic.mac_addr[3] |= 0x80;
3131}
3132
3115static int __devinit init_one(struct pci_dev *pdev, 3133static int __devinit init_one(struct pci_dev *pdev,
3116 const struct pci_device_id *ent) 3134 const struct pci_device_id *ent)
3117{ 3135{
@@ -3270,6 +3288,9 @@ static int __devinit init_one(struct pci_dev *pdev,
3270 goto out_free_dev; 3288 goto out_free_dev;
3271 } 3289 }
3272 3290
3291 for_each_port(adapter, i)
3292 cxgb3_init_iscsi_mac(adapter->port[i]);
3293
3273 /* Driver's ready. Reflect it on LEDs */ 3294 /* Driver's ready. Reflect it on LEDs */
3274 t3_led_ready(adapter); 3295 t3_led_ready(adapter);
3275 3296
diff --git a/drivers/net/cxgb3/sge.c b/drivers/net/cxgb3/sge.c
index f86612857a73..cf2e1d3c0d8d 100644
--- a/drivers/net/cxgb3/sge.c
+++ b/drivers/net/cxgb3/sge.c
@@ -1260,7 +1260,7 @@ netdev_tx_t t3_eth_xmit(struct sk_buff *skb, struct net_device *dev)
1260 if (should_restart_tx(q) && 1260 if (should_restart_tx(q) &&
1261 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) { 1261 test_and_clear_bit(TXQ_ETH, &qs->txq_stopped)) {
1262 q->restarts++; 1262 q->restarts++;
1263 netif_tx_wake_queue(txq); 1263 netif_tx_start_queue(txq);
1264 } 1264 }
1265 } 1265 }
1266 1266
@@ -1946,10 +1946,9 @@ static void restart_tx(struct sge_qset *qs)
1946 * Check if the ARP request is probing the private IP address 1946 * Check if the ARP request is probing the private IP address
1947 * dedicated to iSCSI, generate an ARP reply if so. 1947 * dedicated to iSCSI, generate an ARP reply if so.
1948 */ 1948 */
1949static void cxgb3_arp_process(struct adapter *adapter, struct sk_buff *skb) 1949static void cxgb3_arp_process(struct port_info *pi, struct sk_buff *skb)
1950{ 1950{
1951 struct net_device *dev = skb->dev; 1951 struct net_device *dev = skb->dev;
1952 struct port_info *pi;
1953 struct arphdr *arp; 1952 struct arphdr *arp;
1954 unsigned char *arp_ptr; 1953 unsigned char *arp_ptr;
1955 unsigned char *sha; 1954 unsigned char *sha;
@@ -1972,12 +1971,11 @@ static void cxgb3_arp_process(struct adapter *adapter, struct sk_buff *skb)
1972 arp_ptr += dev->addr_len; 1971 arp_ptr += dev->addr_len;
1973 memcpy(&tip, arp_ptr, sizeof(tip)); 1972 memcpy(&tip, arp_ptr, sizeof(tip));
1974 1973
1975 pi = netdev_priv(dev);
1976 if (tip != pi->iscsi_ipv4addr) 1974 if (tip != pi->iscsi_ipv4addr)
1977 return; 1975 return;
1978 1976
1979 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha, 1977 arp_send(ARPOP_REPLY, ETH_P_ARP, sip, dev, tip, sha,
1980 dev->dev_addr, sha); 1978 pi->iscsic.mac_addr, sha);
1981 1979
1982} 1980}
1983 1981
@@ -1986,6 +1984,19 @@ static inline int is_arp(struct sk_buff *skb)
1986 return skb->protocol == htons(ETH_P_ARP); 1984 return skb->protocol == htons(ETH_P_ARP);
1987} 1985}
1988 1986
1987static void cxgb3_process_iscsi_prov_pack(struct port_info *pi,
1988 struct sk_buff *skb)
1989{
1990 if (is_arp(skb)) {
1991 cxgb3_arp_process(pi, skb);
1992 return;
1993 }
1994
1995 if (pi->iscsic.recv)
1996 pi->iscsic.recv(pi, skb);
1997
1998}
1999
1989/** 2000/**
1990 * rx_eth - process an ingress ethernet packet 2001 * rx_eth - process an ingress ethernet packet
1991 * @adap: the adapter 2002 * @adap: the adapter
@@ -2024,13 +2035,12 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2024 vlan_gro_receive(&qs->napi, grp, 2035 vlan_gro_receive(&qs->napi, grp,
2025 ntohs(p->vlan), skb); 2036 ntohs(p->vlan), skb);
2026 else { 2037 else {
2027 if (unlikely(pi->iscsi_ipv4addr && 2038 if (unlikely(pi->iscsic.flags)) {
2028 is_arp(skb))) {
2029 unsigned short vtag = ntohs(p->vlan) & 2039 unsigned short vtag = ntohs(p->vlan) &
2030 VLAN_VID_MASK; 2040 VLAN_VID_MASK;
2031 skb->dev = vlan_group_get_device(grp, 2041 skb->dev = vlan_group_get_device(grp,
2032 vtag); 2042 vtag);
2033 cxgb3_arp_process(adap, skb); 2043 cxgb3_process_iscsi_prov_pack(pi, skb);
2034 } 2044 }
2035 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan), 2045 __vlan_hwaccel_rx(skb, grp, ntohs(p->vlan),
2036 rq->polling); 2046 rq->polling);
@@ -2041,8 +2051,8 @@ static void rx_eth(struct adapter *adap, struct sge_rspq *rq,
2041 if (lro) 2051 if (lro)
2042 napi_gro_receive(&qs->napi, skb); 2052 napi_gro_receive(&qs->napi, skb);
2043 else { 2053 else {
2044 if (unlikely(pi->iscsi_ipv4addr && is_arp(skb))) 2054 if (unlikely(pi->iscsic.flags))
2045 cxgb3_arp_process(adap, skb); 2055 cxgb3_process_iscsi_prov_pack(pi, skb);
2046 netif_receive_skb(skb); 2056 netif_receive_skb(skb);
2047 } 2057 }
2048 } else 2058 } else
@@ -2125,6 +2135,7 @@ static void lro_add_page(struct adapter *adap, struct sge_qset *qs,
2125 if (!complete) 2135 if (!complete)
2126 return; 2136 return;
2127 2137
2138 skb_record_rx_queue(skb, qs - &adap->sge.qs[0]);
2128 skb->ip_summed = CHECKSUM_UNNECESSARY; 2139 skb->ip_summed = CHECKSUM_UNNECESSARY;
2129 cpl = qs->lro_va; 2140 cpl = qs->lro_va;
2130 2141
diff --git a/drivers/net/davinci_emac.c b/drivers/net/davinci_emac.c
index db6380379478..8edac8915ea8 100644
--- a/drivers/net/davinci_emac.c
+++ b/drivers/net/davinci_emac.c
@@ -164,16 +164,14 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
164# define EMAC_MBP_MCASTCHAN(ch) ((ch) & 0x7) 164# define EMAC_MBP_MCASTCHAN(ch) ((ch) & 0x7)
165 165
166/* EMAC mac_control register */ 166/* EMAC mac_control register */
167#define EMAC_MACCONTROL_TXPTYPE (0x200) 167#define EMAC_MACCONTROL_TXPTYPE BIT(9)
168#define EMAC_MACCONTROL_TXPACEEN (0x40) 168#define EMAC_MACCONTROL_TXPACEEN BIT(6)
169#define EMAC_MACCONTROL_MIIEN (0x20) 169#define EMAC_MACCONTROL_GMIIEN BIT(5)
170#define EMAC_MACCONTROL_GIGABITEN (0x80) 170#define EMAC_MACCONTROL_GIGABITEN BIT(7)
171#define EMAC_MACCONTROL_GIGABITEN_SHIFT (7) 171#define EMAC_MACCONTROL_FULLDUPLEXEN BIT(0)
172#define EMAC_MACCONTROL_FULLDUPLEXEN (0x1)
173#define EMAC_MACCONTROL_RMIISPEED_MASK BIT(15) 172#define EMAC_MACCONTROL_RMIISPEED_MASK BIT(15)
174 173
175/* GIGABIT MODE related bits */ 174/* GIGABIT MODE related bits */
176#define EMAC_DM646X_MACCONTORL_GMIIEN BIT(5)
177#define EMAC_DM646X_MACCONTORL_GIG BIT(7) 175#define EMAC_DM646X_MACCONTORL_GIG BIT(7)
178#define EMAC_DM646X_MACCONTORL_GIGFORCE BIT(17) 176#define EMAC_DM646X_MACCONTORL_GIGFORCE BIT(17)
179 177
@@ -192,10 +190,10 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
192#define EMAC_RX_BUFFER_OFFSET_MASK (0xFFFF) 190#define EMAC_RX_BUFFER_OFFSET_MASK (0xFFFF)
193 191
194/* MAC_IN_VECTOR (0x180) register bit fields */ 192/* MAC_IN_VECTOR (0x180) register bit fields */
195#define EMAC_DM644X_MAC_IN_VECTOR_HOST_INT (0x20000) 193#define EMAC_DM644X_MAC_IN_VECTOR_HOST_INT BIT(17)
196#define EMAC_DM644X_MAC_IN_VECTOR_STATPEND_INT (0x10000) 194#define EMAC_DM644X_MAC_IN_VECTOR_STATPEND_INT BIT(16)
197#define EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC (0x0100) 195#define EMAC_DM644X_MAC_IN_VECTOR_RX_INT_VEC BIT(8)
198#define EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC (0x01) 196#define EMAC_DM644X_MAC_IN_VECTOR_TX_INT_VEC BIT(0)
199 197
200/** NOTE:: For DM646x the IN_VECTOR has changed */ 198/** NOTE:: For DM646x the IN_VECTOR has changed */
201#define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC BIT(EMAC_DEF_RX_CH) 199#define EMAC_DM646X_MAC_IN_VECTOR_RX_INT_VEC BIT(EMAC_DEF_RX_CH)
@@ -203,7 +201,6 @@ static const char emac_version_string[] = "TI DaVinci EMAC Linux v6.1";
203#define EMAC_DM646X_MAC_IN_VECTOR_HOST_INT BIT(26) 201#define EMAC_DM646X_MAC_IN_VECTOR_HOST_INT BIT(26)
204#define EMAC_DM646X_MAC_IN_VECTOR_STATPEND_INT BIT(27) 202#define EMAC_DM646X_MAC_IN_VECTOR_STATPEND_INT BIT(27)
205 203
206
207/* CPPI bit positions */ 204/* CPPI bit positions */
208#define EMAC_CPPI_SOP_BIT BIT(31) 205#define EMAC_CPPI_SOP_BIT BIT(31)
209#define EMAC_CPPI_EOP_BIT BIT(30) 206#define EMAC_CPPI_EOP_BIT BIT(30)
@@ -750,8 +747,7 @@ static void emac_update_phystatus(struct emac_priv *priv)
750 747
751 if (priv->speed == SPEED_1000 && (priv->version == EMAC_VERSION_2)) { 748 if (priv->speed == SPEED_1000 && (priv->version == EMAC_VERSION_2)) {
752 mac_control = emac_read(EMAC_MACCONTROL); 749 mac_control = emac_read(EMAC_MACCONTROL);
753 mac_control |= (EMAC_DM646X_MACCONTORL_GMIIEN | 750 mac_control |= (EMAC_DM646X_MACCONTORL_GIG |
754 EMAC_DM646X_MACCONTORL_GIG |
755 EMAC_DM646X_MACCONTORL_GIGFORCE); 751 EMAC_DM646X_MACCONTORL_GIGFORCE);
756 } else { 752 } else {
757 /* Clear the GIG bit and GIGFORCE bit */ 753 /* Clear the GIG bit and GIGFORCE bit */
@@ -2108,7 +2104,7 @@ static int emac_hw_enable(struct emac_priv *priv)
2108 2104
2109 /* Enable MII */ 2105 /* Enable MII */
2110 val = emac_read(EMAC_MACCONTROL); 2106 val = emac_read(EMAC_MACCONTROL);
2111 val |= (EMAC_MACCONTROL_MIIEN); 2107 val |= (EMAC_MACCONTROL_GMIIEN);
2112 emac_write(EMAC_MACCONTROL, val); 2108 emac_write(EMAC_MACCONTROL, val);
2113 2109
2114 /* Enable NAPI and interrupts */ 2110 /* Enable NAPI and interrupts */
@@ -2807,11 +2803,33 @@ static int __devexit davinci_emac_remove(struct platform_device *pdev)
2807 return 0; 2803 return 0;
2808} 2804}
2809 2805
2806static
2807int davinci_emac_suspend(struct platform_device *pdev, pm_message_t state)
2808{
2809 struct net_device *dev = platform_get_drvdata(pdev);
2810
2811 if (netif_running(dev))
2812 emac_dev_stop(dev);
2813
2814 clk_disable(emac_clk);
2815
2816 return 0;
2817}
2818
2819static int davinci_emac_resume(struct platform_device *pdev)
2820{
2821 struct net_device *dev = platform_get_drvdata(pdev);
2822
2823 clk_enable(emac_clk);
2824
2825 if (netif_running(dev))
2826 emac_dev_open(dev);
2827
2828 return 0;
2829}
2830
2810/** 2831/**
2811 * davinci_emac_driver: EMAC platform driver structure 2832 * davinci_emac_driver: EMAC platform driver structure
2812 *
2813 * We implement only probe and remove functions - suspend/resume and
2814 * others not supported by this module
2815 */ 2833 */
2816static struct platform_driver davinci_emac_driver = { 2834static struct platform_driver davinci_emac_driver = {
2817 .driver = { 2835 .driver = {
@@ -2820,6 +2838,8 @@ static struct platform_driver davinci_emac_driver = {
2820 }, 2838 },
2821 .probe = davinci_emac_probe, 2839 .probe = davinci_emac_probe,
2822 .remove = __devexit_p(davinci_emac_remove), 2840 .remove = __devexit_p(davinci_emac_remove),
2841 .suspend = davinci_emac_suspend,
2842 .resume = davinci_emac_resume,
2823}; 2843};
2824 2844
2825/** 2845/**
diff --git a/drivers/net/dl2k.c b/drivers/net/dl2k.c
index 7fa7a907f134..ce8fef184f2c 100644
--- a/drivers/net/dl2k.c
+++ b/drivers/net/dl2k.c
@@ -505,7 +505,8 @@ rio_timer (unsigned long data)
505 entry = np->old_rx % RX_RING_SIZE; 505 entry = np->old_rx % RX_RING_SIZE;
506 /* Dropped packets don't need to re-allocate */ 506 /* Dropped packets don't need to re-allocate */
507 if (np->rx_skbuff[entry] == NULL) { 507 if (np->rx_skbuff[entry] == NULL) {
508 skb = netdev_alloc_skb (dev, np->rx_buf_sz); 508 skb = netdev_alloc_skb_ip_align(dev,
509 np->rx_buf_sz);
509 if (skb == NULL) { 510 if (skb == NULL) {
510 np->rx_ring[entry].fraginfo = 0; 511 np->rx_ring[entry].fraginfo = 0;
511 printk (KERN_INFO 512 printk (KERN_INFO
@@ -514,8 +515,6 @@ rio_timer (unsigned long data)
514 break; 515 break;
515 } 516 }
516 np->rx_skbuff[entry] = skb; 517 np->rx_skbuff[entry] = skb;
517 /* 16 byte align the IP header */
518 skb_reserve (skb, 2);
519 np->rx_ring[entry].fraginfo = 518 np->rx_ring[entry].fraginfo =
520 cpu_to_le64 (pci_map_single 519 cpu_to_le64 (pci_map_single
521 (np->pdev, skb->data, np->rx_buf_sz, 520 (np->pdev, skb->data, np->rx_buf_sz,
@@ -576,7 +575,9 @@ alloc_list (struct net_device *dev)
576 /* Allocate the rx buffers */ 575 /* Allocate the rx buffers */
577 for (i = 0; i < RX_RING_SIZE; i++) { 576 for (i = 0; i < RX_RING_SIZE; i++) {
578 /* Allocated fixed size of skbuff */ 577 /* Allocated fixed size of skbuff */
579 struct sk_buff *skb = netdev_alloc_skb (dev, np->rx_buf_sz); 578 struct sk_buff *skb;
579
580 skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
580 np->rx_skbuff[i] = skb; 581 np->rx_skbuff[i] = skb;
581 if (skb == NULL) { 582 if (skb == NULL) {
582 printk (KERN_ERR 583 printk (KERN_ERR
@@ -584,7 +585,6 @@ alloc_list (struct net_device *dev)
584 dev->name); 585 dev->name);
585 break; 586 break;
586 } 587 }
587 skb_reserve (skb, 2); /* 16 byte align the IP header. */
588 /* Rubicon now supports 40 bits of addressing space. */ 588 /* Rubicon now supports 40 bits of addressing space. */
589 np->rx_ring[i].fraginfo = 589 np->rx_ring[i].fraginfo =
590 cpu_to_le64 ( pci_map_single ( 590 cpu_to_le64 ( pci_map_single (
@@ -871,13 +871,11 @@ receive_packet (struct net_device *dev)
871 PCI_DMA_FROMDEVICE); 871 PCI_DMA_FROMDEVICE);
872 skb_put (skb = np->rx_skbuff[entry], pkt_len); 872 skb_put (skb = np->rx_skbuff[entry], pkt_len);
873 np->rx_skbuff[entry] = NULL; 873 np->rx_skbuff[entry] = NULL;
874 } else if ((skb = netdev_alloc_skb(dev, pkt_len + 2))) { 874 } else if ((skb = netdev_alloc_skb_ip_align(dev, pkt_len))) {
875 pci_dma_sync_single_for_cpu(np->pdev, 875 pci_dma_sync_single_for_cpu(np->pdev,
876 desc_to_dma(desc), 876 desc_to_dma(desc),
877 np->rx_buf_sz, 877 np->rx_buf_sz,
878 PCI_DMA_FROMDEVICE); 878 PCI_DMA_FROMDEVICE);
879 /* 16 byte align the IP header */
880 skb_reserve (skb, 2);
881 skb_copy_to_linear_data (skb, 879 skb_copy_to_linear_data (skb,
882 np->rx_skbuff[entry]->data, 880 np->rx_skbuff[entry]->data,
883 pkt_len); 881 pkt_len);
@@ -907,7 +905,7 @@ receive_packet (struct net_device *dev)
907 struct sk_buff *skb; 905 struct sk_buff *skb;
908 /* Dropped packets don't need to re-allocate */ 906 /* Dropped packets don't need to re-allocate */
909 if (np->rx_skbuff[entry] == NULL) { 907 if (np->rx_skbuff[entry] == NULL) {
910 skb = netdev_alloc_skb(dev, np->rx_buf_sz); 908 skb = netdev_alloc_skb_ip_align(dev, np->rx_buf_sz);
911 if (skb == NULL) { 909 if (skb == NULL) {
912 np->rx_ring[entry].fraginfo = 0; 910 np->rx_ring[entry].fraginfo = 0;
913 printk (KERN_INFO 911 printk (KERN_INFO
@@ -917,8 +915,6 @@ receive_packet (struct net_device *dev)
917 break; 915 break;
918 } 916 }
919 np->rx_skbuff[entry] = skb; 917 np->rx_skbuff[entry] = skb;
920 /* 16 byte align the IP header */
921 skb_reserve (skb, 2);
922 np->rx_ring[entry].fraginfo = 918 np->rx_ring[entry].fraginfo =
923 cpu_to_le64 (pci_map_single 919 cpu_to_le64 (pci_map_single
924 (np->pdev, skb->data, np->rx_buf_sz, 920 (np->pdev, skb->data, np->rx_buf_sz,
diff --git a/drivers/net/dm9000.c b/drivers/net/dm9000.c
index 31b8bef49d2e..3aab2e466008 100644
--- a/drivers/net/dm9000.c
+++ b/drivers/net/dm9000.c
@@ -100,6 +100,7 @@ typedef struct board_info {
100 100
101 unsigned int flags; 101 unsigned int flags;
102 unsigned int in_suspend :1; 102 unsigned int in_suspend :1;
103 unsigned int wake_supported :1;
103 int debug_level; 104 int debug_level;
104 105
105 enum dm9000_type type; 106 enum dm9000_type type;
@@ -116,6 +117,8 @@ typedef struct board_info {
116 struct resource *data_req; 117 struct resource *data_req;
117 struct resource *irq_res; 118 struct resource *irq_res;
118 119
120 int irq_wake;
121
119 struct mutex addr_lock; /* phy and eeprom access lock */ 122 struct mutex addr_lock; /* phy and eeprom access lock */
120 123
121 struct delayed_work phy_poll; 124 struct delayed_work phy_poll;
@@ -125,6 +128,7 @@ typedef struct board_info {
125 128
126 struct mii_if_info mii; 129 struct mii_if_info mii;
127 u32 msg_enable; 130 u32 msg_enable;
131 u32 wake_state;
128 132
129 int rx_csum; 133 int rx_csum;
130 int can_csum; 134 int can_csum;
@@ -568,6 +572,54 @@ static int dm9000_set_eeprom(struct net_device *dev,
568 return 0; 572 return 0;
569} 573}
570 574
575static void dm9000_get_wol(struct net_device *dev, struct ethtool_wolinfo *w)
576{
577 board_info_t *dm = to_dm9000_board(dev);
578
579 memset(w, 0, sizeof(struct ethtool_wolinfo));
580
581 /* note, we could probably support wake-phy too */
582 w->supported = dm->wake_supported ? WAKE_MAGIC : 0;
583 w->wolopts = dm->wake_state;
584}
585
586static int dm9000_set_wol(struct net_device *dev, struct ethtool_wolinfo *w)
587{
588 board_info_t *dm = to_dm9000_board(dev);
589 unsigned long flags;
590 u32 opts = w->wolopts;
591 u32 wcr = 0;
592
593 if (!dm->wake_supported)
594 return -EOPNOTSUPP;
595
596 if (opts & ~WAKE_MAGIC)
597 return -EINVAL;
598
599 if (opts & WAKE_MAGIC)
600 wcr |= WCR_MAGICEN;
601
602 mutex_lock(&dm->addr_lock);
603
604 spin_lock_irqsave(&dm->lock, flags);
605 iow(dm, DM9000_WCR, wcr);
606 spin_unlock_irqrestore(&dm->lock, flags);
607
608 mutex_unlock(&dm->addr_lock);
609
610 if (dm->wake_state != opts) {
611 /* change in wol state, update IRQ state */
612
613 if (!dm->wake_state)
614 set_irq_wake(dm->irq_wake, 1);
615 else if (dm->wake_state & !opts)
616 set_irq_wake(dm->irq_wake, 0);
617 }
618
619 dm->wake_state = opts;
620 return 0;
621}
622
571static const struct ethtool_ops dm9000_ethtool_ops = { 623static const struct ethtool_ops dm9000_ethtool_ops = {
572 .get_drvinfo = dm9000_get_drvinfo, 624 .get_drvinfo = dm9000_get_drvinfo,
573 .get_settings = dm9000_get_settings, 625 .get_settings = dm9000_get_settings,
@@ -576,6 +628,8 @@ static const struct ethtool_ops dm9000_ethtool_ops = {
576 .set_msglevel = dm9000_set_msglevel, 628 .set_msglevel = dm9000_set_msglevel,
577 .nway_reset = dm9000_nway_reset, 629 .nway_reset = dm9000_nway_reset,
578 .get_link = dm9000_get_link, 630 .get_link = dm9000_get_link,
631 .get_wol = dm9000_get_wol,
632 .set_wol = dm9000_set_wol,
579 .get_eeprom_len = dm9000_get_eeprom_len, 633 .get_eeprom_len = dm9000_get_eeprom_len,
580 .get_eeprom = dm9000_get_eeprom, 634 .get_eeprom = dm9000_get_eeprom,
581 .set_eeprom = dm9000_set_eeprom, 635 .set_eeprom = dm9000_set_eeprom,
@@ -722,6 +776,7 @@ dm9000_init_dm9000(struct net_device *dev)
722{ 776{
723 board_info_t *db = netdev_priv(dev); 777 board_info_t *db = netdev_priv(dev);
724 unsigned int imr; 778 unsigned int imr;
779 unsigned int ncr;
725 780
726 dm9000_dbg(db, 1, "entering %s\n", __func__); 781 dm9000_dbg(db, 1, "entering %s\n", __func__);
727 782
@@ -736,8 +791,15 @@ dm9000_init_dm9000(struct net_device *dev)
736 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */ 791 iow(db, DM9000_GPCR, GPCR_GEP_CNTL); /* Let GPIO0 output */
737 iow(db, DM9000_GPR, 0); /* Enable PHY */ 792 iow(db, DM9000_GPR, 0); /* Enable PHY */
738 793
739 if (db->flags & DM9000_PLATF_EXT_PHY) 794 ncr = (db->flags & DM9000_PLATF_EXT_PHY) ? NCR_EXT_PHY : 0;
740 iow(db, DM9000_NCR, NCR_EXT_PHY); 795
796 /* if wol is needed, then always set NCR_WAKEEN otherwise we end
797 * up dumping the wake events if we disable this. There is already
798 * a wake-mask in DM9000_WCR */
799 if (db->wake_supported)
800 ncr |= NCR_WAKEEN;
801
802 iow(db, DM9000_NCR, ncr);
741 803
742 /* Program operating register */ 804 /* Program operating register */
743 iow(db, DM9000_TCR, 0); /* TX Polling clear */ 805 iow(db, DM9000_TCR, 0); /* TX Polling clear */
@@ -1045,6 +1107,41 @@ static irqreturn_t dm9000_interrupt(int irq, void *dev_id)
1045 return IRQ_HANDLED; 1107 return IRQ_HANDLED;
1046} 1108}
1047 1109
1110static irqreturn_t dm9000_wol_interrupt(int irq, void *dev_id)
1111{
1112 struct net_device *dev = dev_id;
1113 board_info_t *db = netdev_priv(dev);
1114 unsigned long flags;
1115 unsigned nsr, wcr;
1116
1117 spin_lock_irqsave(&db->lock, flags);
1118
1119 nsr = ior(db, DM9000_NSR);
1120 wcr = ior(db, DM9000_WCR);
1121
1122 dev_dbg(db->dev, "%s: NSR=0x%02x, WCR=0x%02x\n", __func__, nsr, wcr);
1123
1124 if (nsr & NSR_WAKEST) {
1125 /* clear, so we can avoid */
1126 iow(db, DM9000_NSR, NSR_WAKEST);
1127
1128 if (wcr & WCR_LINKST)
1129 dev_info(db->dev, "wake by link status change\n");
1130 if (wcr & WCR_SAMPLEST)
1131 dev_info(db->dev, "wake by sample packet\n");
1132 if (wcr & WCR_MAGICST )
1133 dev_info(db->dev, "wake by magic packet\n");
1134 if (!(wcr & (WCR_LINKST | WCR_SAMPLEST | WCR_MAGICST)))
1135 dev_err(db->dev, "wake signalled with no reason? "
1136 "NSR=0x%02x, WSR=0x%02x\n", nsr, wcr);
1137
1138 }
1139
1140 spin_unlock_irqrestore(&db->lock, flags);
1141
1142 return (nsr & NSR_WAKEST) ? IRQ_HANDLED : IRQ_NONE;
1143}
1144
1048#ifdef CONFIG_NET_POLL_CONTROLLER 1145#ifdef CONFIG_NET_POLL_CONTROLLER
1049/* 1146/*
1050 *Used by netconsole 1147 *Used by netconsole
@@ -1299,6 +1396,29 @@ dm9000_probe(struct platform_device *pdev)
1299 goto out; 1396 goto out;
1300 } 1397 }
1301 1398
1399 db->irq_wake = platform_get_irq(pdev, 1);
1400 if (db->irq_wake >= 0) {
1401 dev_dbg(db->dev, "wakeup irq %d\n", db->irq_wake);
1402
1403 ret = request_irq(db->irq_wake, dm9000_wol_interrupt,
1404 IRQF_SHARED, dev_name(db->dev), ndev);
1405 if (ret) {
1406 dev_err(db->dev, "cannot get wakeup irq (%d)\n", ret);
1407 } else {
1408
1409 /* test to see if irq is really wakeup capable */
1410 ret = set_irq_wake(db->irq_wake, 1);
1411 if (ret) {
1412 dev_err(db->dev, "irq %d cannot set wakeup (%d)\n",
1413 db->irq_wake, ret);
1414 ret = 0;
1415 } else {
1416 set_irq_wake(db->irq_wake, 0);
1417 db->wake_supported = 1;
1418 }
1419 }
1420 }
1421
1302 iosize = resource_size(db->addr_res); 1422 iosize = resource_size(db->addr_res);
1303 db->addr_req = request_mem_region(db->addr_res->start, iosize, 1423 db->addr_req = request_mem_region(db->addr_res->start, iosize,
1304 pdev->name); 1424 pdev->name);
@@ -1490,10 +1610,14 @@ dm9000_drv_suspend(struct device *dev)
1490 db = netdev_priv(ndev); 1610 db = netdev_priv(ndev);
1491 db->in_suspend = 1; 1611 db->in_suspend = 1;
1492 1612
1493 if (netif_running(ndev)) { 1613 if (!netif_running(ndev))
1494 netif_device_detach(ndev); 1614 return 0;
1615
1616 netif_device_detach(ndev);
1617
1618 /* only shutdown if not using WoL */
1619 if (!db->wake_state)
1495 dm9000_shutdown(ndev); 1620 dm9000_shutdown(ndev);
1496 }
1497 } 1621 }
1498 return 0; 1622 return 0;
1499} 1623}
@@ -1506,10 +1630,13 @@ dm9000_drv_resume(struct device *dev)
1506 board_info_t *db = netdev_priv(ndev); 1630 board_info_t *db = netdev_priv(ndev);
1507 1631
1508 if (ndev) { 1632 if (ndev) {
1509
1510 if (netif_running(ndev)) { 1633 if (netif_running(ndev)) {
1511 dm9000_reset(db); 1634 /* reset if we were not in wake mode to ensure if
1512 dm9000_init_dm9000(ndev); 1635 * the device was powered off it is in a known state */
1636 if (!db->wake_state) {
1637 dm9000_reset(db);
1638 dm9000_init_dm9000(ndev);
1639 }
1513 1640
1514 netif_device_attach(ndev); 1641 netif_device_attach(ndev);
1515 } 1642 }
diff --git a/drivers/net/dm9000.h b/drivers/net/dm9000.h
index fb1c924d79b4..55688bd1a3ef 100644
--- a/drivers/net/dm9000.h
+++ b/drivers/net/dm9000.h
@@ -111,6 +111,13 @@
111#define RSR_CE (1<<1) 111#define RSR_CE (1<<1)
112#define RSR_FOE (1<<0) 112#define RSR_FOE (1<<0)
113 113
114#define WCR_LINKEN (1 << 5)
115#define WCR_SAMPLEEN (1 << 4)
116#define WCR_MAGICEN (1 << 3)
117#define WCR_LINKST (1 << 2)
118#define WCR_SAMPLEST (1 << 1)
119#define WCR_MAGICST (1 << 0)
120
114#define FCTR_HWOT(ot) (( ot & 0xf ) << 4 ) 121#define FCTR_HWOT(ot) (( ot & 0xf ) << 4 )
115#define FCTR_LWOT(ot) ( ot & 0xf ) 122#define FCTR_LWOT(ot) ( ot & 0xf )
116 123
diff --git a/drivers/net/e100.c b/drivers/net/e100.c
index d19b0845970a..7462fdfd7f92 100644
--- a/drivers/net/e100.c
+++ b/drivers/net/e100.c
@@ -621,6 +621,7 @@ struct nic {
621 u16 eeprom_wc; 621 u16 eeprom_wc;
622 __le16 eeprom[256]; 622 __le16 eeprom[256];
623 spinlock_t mdio_lock; 623 spinlock_t mdio_lock;
624 const struct firmware *fw;
624}; 625};
625 626
626static inline void e100_write_flush(struct nic *nic) 627static inline void e100_write_flush(struct nic *nic)
@@ -1222,9 +1223,9 @@ static void e100_configure(struct nic *nic, struct cb *cb, struct sk_buff *skb)
1222static const struct firmware *e100_request_firmware(struct nic *nic) 1223static const struct firmware *e100_request_firmware(struct nic *nic)
1223{ 1224{
1224 const char *fw_name; 1225 const char *fw_name;
1225 const struct firmware *fw; 1226 const struct firmware *fw = nic->fw;
1226 u8 timer, bundle, min_size; 1227 u8 timer, bundle, min_size;
1227 int err; 1228 int err = 0;
1228 1229
1229 /* do not load u-code for ICH devices */ 1230 /* do not load u-code for ICH devices */
1230 if (nic->flags & ich) 1231 if (nic->flags & ich)
@@ -1240,12 +1241,20 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
1240 else /* No ucode on other devices */ 1241 else /* No ucode on other devices */
1241 return NULL; 1242 return NULL;
1242 1243
1243 err = request_firmware(&fw, fw_name, &nic->pdev->dev); 1244 /* If the firmware has not previously been loaded, request a pointer
1245 * to it. If it was previously loaded, we are reinitializing the
1246 * adapter, possibly in a resume from hibernate, in which case
1247 * request_firmware() cannot be used.
1248 */
1249 if (!fw)
1250 err = request_firmware(&fw, fw_name, &nic->pdev->dev);
1251
1244 if (err) { 1252 if (err) {
1245 DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n", 1253 DPRINTK(PROBE, ERR, "Failed to load firmware \"%s\": %d\n",
1246 fw_name, err); 1254 fw_name, err);
1247 return ERR_PTR(err); 1255 return ERR_PTR(err);
1248 } 1256 }
1257
1249 /* Firmware should be precisely UCODE_SIZE (words) plus three bytes 1258 /* Firmware should be precisely UCODE_SIZE (words) plus three bytes
1250 indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */ 1259 indicating the offsets for BUNDLESMALL, BUNDLEMAX, INTDELAY */
1251 if (fw->size != UCODE_SIZE * 4 + 3) { 1260 if (fw->size != UCODE_SIZE * 4 + 3) {
@@ -1268,7 +1277,10 @@ static const struct firmware *e100_request_firmware(struct nic *nic)
1268 release_firmware(fw); 1277 release_firmware(fw);
1269 return ERR_PTR(-EINVAL); 1278 return ERR_PTR(-EINVAL);
1270 } 1279 }
1271 /* OK, firmware is validated and ready to use... */ 1280
1281 /* OK, firmware is validated and ready to use. Save a pointer
1282 * to it in the nic */
1283 nic->fw = fw;
1272 return fw; 1284 return fw;
1273} 1285}
1274 1286
@@ -1851,11 +1863,10 @@ static inline void e100_start_receiver(struct nic *nic, struct rx *rx)
1851#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN) 1863#define RFD_BUF_LEN (sizeof(struct rfd) + VLAN_ETH_FRAME_LEN)
1852static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx) 1864static int e100_rx_alloc_skb(struct nic *nic, struct rx *rx)
1853{ 1865{
1854 if (!(rx->skb = netdev_alloc_skb(nic->netdev, RFD_BUF_LEN + NET_IP_ALIGN))) 1866 if (!(rx->skb = netdev_alloc_skb_ip_align(nic->netdev, RFD_BUF_LEN)))
1855 return -ENOMEM; 1867 return -ENOMEM;
1856 1868
1857 /* Align, init, and map the RFD. */ 1869 /* Init, and map the RFD. */
1858 skb_reserve(rx->skb, NET_IP_ALIGN);
1859 skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd)); 1870 skb_copy_to_linear_data(rx->skb, &nic->blank_rfd, sizeof(struct rfd));
1860 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data, 1871 rx->dma_addr = pci_map_single(nic->pdev, rx->skb->data,
1861 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL); 1872 RFD_BUF_LEN, PCI_DMA_BIDIRECTIONAL);
diff --git a/drivers/net/e1000/e1000.h b/drivers/net/e1000/e1000.h
index 42e2b7e21c29..a5665287bd64 100644
--- a/drivers/net/e1000/e1000.h
+++ b/drivers/net/e1000/e1000.h
@@ -302,7 +302,6 @@ struct e1000_adapter {
302 /* OS defined structs */ 302 /* OS defined structs */
303 struct net_device *netdev; 303 struct net_device *netdev;
304 struct pci_dev *pdev; 304 struct pci_dev *pdev;
305 struct net_device_stats net_stats;
306 305
307 /* structs defined in e1000_hw.h */ 306 /* structs defined in e1000_hw.h */
308 struct e1000_hw hw; 307 struct e1000_hw hw;
diff --git a/drivers/net/e1000/e1000_ethtool.c b/drivers/net/e1000/e1000_ethtool.c
index 490b2b7cd3ab..ffbae0a0b4f1 100644
--- a/drivers/net/e1000/e1000_ethtool.c
+++ b/drivers/net/e1000/e1000_ethtool.c
@@ -31,14 +31,22 @@
31#include "e1000.h" 31#include "e1000.h"
32#include <asm/uaccess.h> 32#include <asm/uaccess.h>
33 33
34enum {NETDEV_STATS, E1000_STATS};
35
34struct e1000_stats { 36struct e1000_stats {
35 char stat_string[ETH_GSTRING_LEN]; 37 char stat_string[ETH_GSTRING_LEN];
38 int type;
36 int sizeof_stat; 39 int sizeof_stat;
37 int stat_offset; 40 int stat_offset;
38}; 41};
39 42
40#define E1000_STAT(m) FIELD_SIZEOF(struct e1000_adapter, m), \ 43#define E1000_STAT(m) E1000_STATS, \
41 offsetof(struct e1000_adapter, m) 44 sizeof(((struct e1000_adapter *)0)->m), \
45 offsetof(struct e1000_adapter, m)
46#define E1000_NETDEV_STAT(m) NETDEV_STATS, \
47 sizeof(((struct net_device *)0)->m), \
48 offsetof(struct net_device, m)
49
42static const struct e1000_stats e1000_gstrings_stats[] = { 50static const struct e1000_stats e1000_gstrings_stats[] = {
43 { "rx_packets", E1000_STAT(stats.gprc) }, 51 { "rx_packets", E1000_STAT(stats.gprc) },
44 { "tx_packets", E1000_STAT(stats.gptc) }, 52 { "tx_packets", E1000_STAT(stats.gptc) },
@@ -50,19 +58,19 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
50 { "tx_multicast", E1000_STAT(stats.mptc) }, 58 { "tx_multicast", E1000_STAT(stats.mptc) },
51 { "rx_errors", E1000_STAT(stats.rxerrc) }, 59 { "rx_errors", E1000_STAT(stats.rxerrc) },
52 { "tx_errors", E1000_STAT(stats.txerrc) }, 60 { "tx_errors", E1000_STAT(stats.txerrc) },
53 { "tx_dropped", E1000_STAT(net_stats.tx_dropped) }, 61 { "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) },
54 { "multicast", E1000_STAT(stats.mprc) }, 62 { "multicast", E1000_STAT(stats.mprc) },
55 { "collisions", E1000_STAT(stats.colc) }, 63 { "collisions", E1000_STAT(stats.colc) },
56 { "rx_length_errors", E1000_STAT(stats.rlerrc) }, 64 { "rx_length_errors", E1000_STAT(stats.rlerrc) },
57 { "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) }, 65 { "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) },
58 { "rx_crc_errors", E1000_STAT(stats.crcerrs) }, 66 { "rx_crc_errors", E1000_STAT(stats.crcerrs) },
59 { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) }, 67 { "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) },
60 { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, 68 { "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
61 { "rx_missed_errors", E1000_STAT(stats.mpc) }, 69 { "rx_missed_errors", E1000_STAT(stats.mpc) },
62 { "tx_aborted_errors", E1000_STAT(stats.ecol) }, 70 { "tx_aborted_errors", E1000_STAT(stats.ecol) },
63 { "tx_carrier_errors", E1000_STAT(stats.tncrs) }, 71 { "tx_carrier_errors", E1000_STAT(stats.tncrs) },
64 { "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) }, 72 { "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) },
65 { "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) }, 73 { "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) },
66 { "tx_window_errors", E1000_STAT(stats.latecol) }, 74 { "tx_window_errors", E1000_STAT(stats.latecol) },
67 { "tx_abort_late_coll", E1000_STAT(stats.latecol) }, 75 { "tx_abort_late_coll", E1000_STAT(stats.latecol) },
68 { "tx_deferred_ok", E1000_STAT(stats.dc) }, 76 { "tx_deferred_ok", E1000_STAT(stats.dc) },
@@ -1830,10 +1838,21 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
1830{ 1838{
1831 struct e1000_adapter *adapter = netdev_priv(netdev); 1839 struct e1000_adapter *adapter = netdev_priv(netdev);
1832 int i; 1840 int i;
1841 char *p = NULL;
1833 1842
1834 e1000_update_stats(adapter); 1843 e1000_update_stats(adapter);
1835 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { 1844 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
1836 char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset; 1845 switch (e1000_gstrings_stats[i].type) {
1846 case NETDEV_STATS:
1847 p = (char *) netdev +
1848 e1000_gstrings_stats[i].stat_offset;
1849 break;
1850 case E1000_STATS:
1851 p = (char *) adapter +
1852 e1000_gstrings_stats[i].stat_offset;
1853 break;
1854 }
1855
1837 data[i] = (e1000_gstrings_stats[i].sizeof_stat == 1856 data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
1838 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1857 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1839 } 1858 }
diff --git a/drivers/net/e1000/e1000_main.c b/drivers/net/e1000/e1000_main.c
index bcd192ca47b0..c938114a34ab 100644
--- a/drivers/net/e1000/e1000_main.c
+++ b/drivers/net/e1000/e1000_main.c
@@ -3101,10 +3101,8 @@ static void e1000_reset_task(struct work_struct *work)
3101 3101
3102static struct net_device_stats *e1000_get_stats(struct net_device *netdev) 3102static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
3103{ 3103{
3104 struct e1000_adapter *adapter = netdev_priv(netdev);
3105
3106 /* only return the current stats */ 3104 /* only return the current stats */
3107 return &adapter->net_stats; 3105 return &netdev->stats;
3108} 3106}
3109 3107
3110/** 3108/**
@@ -3196,6 +3194,7 @@ static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
3196 3194
3197void e1000_update_stats(struct e1000_adapter *adapter) 3195void e1000_update_stats(struct e1000_adapter *adapter)
3198{ 3196{
3197 struct net_device *netdev = adapter->netdev;
3199 struct e1000_hw *hw = &adapter->hw; 3198 struct e1000_hw *hw = &adapter->hw;
3200 struct pci_dev *pdev = adapter->pdev; 3199 struct pci_dev *pdev = adapter->pdev;
3201 unsigned long flags; 3200 unsigned long flags;
@@ -3288,32 +3287,32 @@ void e1000_update_stats(struct e1000_adapter *adapter)
3288 } 3287 }
3289 3288
3290 /* Fill out the OS statistics structure */ 3289 /* Fill out the OS statistics structure */
3291 adapter->net_stats.multicast = adapter->stats.mprc; 3290 netdev->stats.multicast = adapter->stats.mprc;
3292 adapter->net_stats.collisions = adapter->stats.colc; 3291 netdev->stats.collisions = adapter->stats.colc;
3293 3292
3294 /* Rx Errors */ 3293 /* Rx Errors */
3295 3294
3296 /* RLEC on some newer hardware can be incorrect so build 3295 /* RLEC on some newer hardware can be incorrect so build
3297 * our own version based on RUC and ROC */ 3296 * our own version based on RUC and ROC */
3298 adapter->net_stats.rx_errors = adapter->stats.rxerrc + 3297 netdev->stats.rx_errors = adapter->stats.rxerrc +
3299 adapter->stats.crcerrs + adapter->stats.algnerrc + 3298 adapter->stats.crcerrs + adapter->stats.algnerrc +
3300 adapter->stats.ruc + adapter->stats.roc + 3299 adapter->stats.ruc + adapter->stats.roc +
3301 adapter->stats.cexterr; 3300 adapter->stats.cexterr;
3302 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc; 3301 adapter->stats.rlerrc = adapter->stats.ruc + adapter->stats.roc;
3303 adapter->net_stats.rx_length_errors = adapter->stats.rlerrc; 3302 netdev->stats.rx_length_errors = adapter->stats.rlerrc;
3304 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 3303 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3305 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; 3304 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3306 adapter->net_stats.rx_missed_errors = adapter->stats.mpc; 3305 netdev->stats.rx_missed_errors = adapter->stats.mpc;
3307 3306
3308 /* Tx Errors */ 3307 /* Tx Errors */
3309 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol; 3308 adapter->stats.txerrc = adapter->stats.ecol + adapter->stats.latecol;
3310 adapter->net_stats.tx_errors = adapter->stats.txerrc; 3309 netdev->stats.tx_errors = adapter->stats.txerrc;
3311 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol; 3310 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3312 adapter->net_stats.tx_window_errors = adapter->stats.latecol; 3311 netdev->stats.tx_window_errors = adapter->stats.latecol;
3313 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs; 3312 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3314 if (hw->bad_tx_carr_stats_fd && 3313 if (hw->bad_tx_carr_stats_fd &&
3315 adapter->link_duplex == FULL_DUPLEX) { 3314 adapter->link_duplex == FULL_DUPLEX) {
3316 adapter->net_stats.tx_carrier_errors = 0; 3315 netdev->stats.tx_carrier_errors = 0;
3317 adapter->stats.tncrs = 0; 3316 adapter->stats.tncrs = 0;
3318 } 3317 }
3319 3318
@@ -3514,8 +3513,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
3514 } 3513 }
3515 adapter->total_tx_bytes += total_tx_bytes; 3514 adapter->total_tx_bytes += total_tx_bytes;
3516 adapter->total_tx_packets += total_tx_packets; 3515 adapter->total_tx_packets += total_tx_packets;
3517 adapter->net_stats.tx_bytes += total_tx_bytes; 3516 netdev->stats.tx_bytes += total_tx_bytes;
3518 adapter->net_stats.tx_packets += total_tx_packets; 3517 netdev->stats.tx_packets += total_tx_packets;
3519 return (count < tx_ring->count); 3518 return (count < tx_ring->count);
3520} 3519}
3521 3520
@@ -3767,8 +3766,8 @@ next_desc:
3767 3766
3768 adapter->total_rx_packets += total_rx_packets; 3767 adapter->total_rx_packets += total_rx_packets;
3769 adapter->total_rx_bytes += total_rx_bytes; 3768 adapter->total_rx_bytes += total_rx_bytes;
3770 adapter->net_stats.rx_bytes += total_rx_bytes; 3769 netdev->stats.rx_bytes += total_rx_bytes;
3771 adapter->net_stats.rx_packets += total_rx_packets; 3770 netdev->stats.rx_packets += total_rx_packets;
3772 return cleaned; 3771 return cleaned;
3773} 3772}
3774 3773
@@ -3867,9 +3866,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
3867 * of reassembly being done in the stack */ 3866 * of reassembly being done in the stack */
3868 if (length < copybreak) { 3867 if (length < copybreak) {
3869 struct sk_buff *new_skb = 3868 struct sk_buff *new_skb =
3870 netdev_alloc_skb(netdev, length + NET_IP_ALIGN); 3869 netdev_alloc_skb_ip_align(netdev, length);
3871 if (new_skb) { 3870 if (new_skb) {
3872 skb_reserve(new_skb, NET_IP_ALIGN);
3873 skb_copy_to_linear_data_offset(new_skb, 3871 skb_copy_to_linear_data_offset(new_skb,
3874 -NET_IP_ALIGN, 3872 -NET_IP_ALIGN,
3875 (skb->data - 3873 (skb->data -
@@ -3916,8 +3914,8 @@ next_desc:
3916 3914
3917 adapter->total_rx_packets += total_rx_packets; 3915 adapter->total_rx_packets += total_rx_packets;
3918 adapter->total_rx_bytes += total_rx_bytes; 3916 adapter->total_rx_bytes += total_rx_bytes;
3919 adapter->net_stats.rx_bytes += total_rx_bytes; 3917 netdev->stats.rx_bytes += total_rx_bytes;
3920 adapter->net_stats.rx_packets += total_rx_packets; 3918 netdev->stats.rx_packets += total_rx_packets;
3921 return cleaned; 3919 return cleaned;
3922} 3920}
3923 3921
@@ -3938,9 +3936,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
3938 struct e1000_buffer *buffer_info; 3936 struct e1000_buffer *buffer_info;
3939 struct sk_buff *skb; 3937 struct sk_buff *skb;
3940 unsigned int i; 3938 unsigned int i;
3941 unsigned int bufsz = 256 - 3939 unsigned int bufsz = 256 - 16 /*for skb_reserve */ ;
3942 16 /*for skb_reserve */ -
3943 NET_IP_ALIGN;
3944 3940
3945 i = rx_ring->next_to_use; 3941 i = rx_ring->next_to_use;
3946 buffer_info = &rx_ring->buffer_info[i]; 3942 buffer_info = &rx_ring->buffer_info[i];
@@ -3952,7 +3948,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
3952 goto check_page; 3948 goto check_page;
3953 } 3949 }
3954 3950
3955 skb = netdev_alloc_skb(netdev, bufsz); 3951 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
3956 if (unlikely(!skb)) { 3952 if (unlikely(!skb)) {
3957 /* Better luck next round */ 3953 /* Better luck next round */
3958 adapter->alloc_rx_buff_failed++; 3954 adapter->alloc_rx_buff_failed++;
@@ -3965,7 +3961,7 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
3965 DPRINTK(PROBE, ERR, "skb align check failed: %u bytes " 3961 DPRINTK(PROBE, ERR, "skb align check failed: %u bytes "
3966 "at %p\n", bufsz, skb->data); 3962 "at %p\n", bufsz, skb->data);
3967 /* Try again, without freeing the previous */ 3963 /* Try again, without freeing the previous */
3968 skb = netdev_alloc_skb(netdev, bufsz); 3964 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
3969 /* Failed allocation, critical failure */ 3965 /* Failed allocation, critical failure */
3970 if (!skb) { 3966 if (!skb) {
3971 dev_kfree_skb(oldskb); 3967 dev_kfree_skb(oldskb);
@@ -3983,12 +3979,6 @@ e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
3983 /* Use new allocation */ 3979 /* Use new allocation */
3984 dev_kfree_skb(oldskb); 3980 dev_kfree_skb(oldskb);
3985 } 3981 }
3986 /* Make buffer alignment 2 beyond a 16 byte boundary
3987 * this will result in a 16 byte aligned IP header after
3988 * the 14 byte MAC header is removed
3989 */
3990 skb_reserve(skb, NET_IP_ALIGN);
3991
3992 buffer_info->skb = skb; 3982 buffer_info->skb = skb;
3993 buffer_info->length = adapter->rx_buffer_len; 3983 buffer_info->length = adapter->rx_buffer_len;
3994check_page: 3984check_page:
@@ -4045,7 +4035,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4045 struct e1000_buffer *buffer_info; 4035 struct e1000_buffer *buffer_info;
4046 struct sk_buff *skb; 4036 struct sk_buff *skb;
4047 unsigned int i; 4037 unsigned int i;
4048 unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN; 4038 unsigned int bufsz = adapter->rx_buffer_len;
4049 4039
4050 i = rx_ring->next_to_use; 4040 i = rx_ring->next_to_use;
4051 buffer_info = &rx_ring->buffer_info[i]; 4041 buffer_info = &rx_ring->buffer_info[i];
@@ -4057,7 +4047,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4057 goto map_skb; 4047 goto map_skb;
4058 } 4048 }
4059 4049
4060 skb = netdev_alloc_skb(netdev, bufsz); 4050 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4061 if (unlikely(!skb)) { 4051 if (unlikely(!skb)) {
4062 /* Better luck next round */ 4052 /* Better luck next round */
4063 adapter->alloc_rx_buff_failed++; 4053 adapter->alloc_rx_buff_failed++;
@@ -4070,7 +4060,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4070 DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes " 4060 DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
4071 "at %p\n", bufsz, skb->data); 4061 "at %p\n", bufsz, skb->data);
4072 /* Try again, without freeing the previous */ 4062 /* Try again, without freeing the previous */
4073 skb = netdev_alloc_skb(netdev, bufsz); 4063 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4074 /* Failed allocation, critical failure */ 4064 /* Failed allocation, critical failure */
4075 if (!skb) { 4065 if (!skb) {
4076 dev_kfree_skb(oldskb); 4066 dev_kfree_skb(oldskb);
@@ -4089,12 +4079,6 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
4089 /* Use new allocation */ 4079 /* Use new allocation */
4090 dev_kfree_skb(oldskb); 4080 dev_kfree_skb(oldskb);
4091 } 4081 }
4092 /* Make buffer alignment 2 beyond a 16 byte boundary
4093 * this will result in a 16 byte aligned IP header after
4094 * the 14 byte MAC header is removed
4095 */
4096 skb_reserve(skb, NET_IP_ALIGN);
4097
4098 buffer_info->skb = skb; 4082 buffer_info->skb = skb;
4099 buffer_info->length = adapter->rx_buffer_len; 4083 buffer_info->length = adapter->rx_buffer_len;
4100map_skb: 4084map_skb:
diff --git a/drivers/net/e1000e/e1000.h b/drivers/net/e1000e/e1000.h
index 189dfa2d6c76..00989c5534c1 100644
--- a/drivers/net/e1000e/e1000.h
+++ b/drivers/net/e1000e/e1000.h
@@ -329,7 +329,6 @@ struct e1000_adapter {
329 /* OS defined structs */ 329 /* OS defined structs */
330 struct net_device *netdev; 330 struct net_device *netdev;
331 struct pci_dev *pdev; 331 struct pci_dev *pdev;
332 struct net_device_stats net_stats;
333 332
334 /* structs defined in e1000_hw.h */ 333 /* structs defined in e1000_hw.h */
335 struct e1000_hw hw; 334 struct e1000_hw hw;
diff --git a/drivers/net/e1000e/ethtool.c b/drivers/net/e1000e/ethtool.c
index 1bf4d2a5d34f..0364b91488af 100644
--- a/drivers/net/e1000e/ethtool.c
+++ b/drivers/net/e1000e/ethtool.c
@@ -35,14 +35,22 @@
35 35
36#include "e1000.h" 36#include "e1000.h"
37 37
38enum {NETDEV_STATS, E1000_STATS};
39
38struct e1000_stats { 40struct e1000_stats {
39 char stat_string[ETH_GSTRING_LEN]; 41 char stat_string[ETH_GSTRING_LEN];
42 int type;
40 int sizeof_stat; 43 int sizeof_stat;
41 int stat_offset; 44 int stat_offset;
42}; 45};
43 46
44#define E1000_STAT(m) sizeof(((struct e1000_adapter *)0)->m), \ 47#define E1000_STAT(m) E1000_STATS, \
45 offsetof(struct e1000_adapter, m) 48 sizeof(((struct e1000_adapter *)0)->m), \
49 offsetof(struct e1000_adapter, m)
50#define E1000_NETDEV_STAT(m) NETDEV_STATS, \
51 sizeof(((struct net_device *)0)->m), \
52 offsetof(struct net_device, m)
53
46static const struct e1000_stats e1000_gstrings_stats[] = { 54static const struct e1000_stats e1000_gstrings_stats[] = {
47 { "rx_packets", E1000_STAT(stats.gprc) }, 55 { "rx_packets", E1000_STAT(stats.gprc) },
48 { "tx_packets", E1000_STAT(stats.gptc) }, 56 { "tx_packets", E1000_STAT(stats.gptc) },
@@ -52,21 +60,21 @@ static const struct e1000_stats e1000_gstrings_stats[] = {
52 { "tx_broadcast", E1000_STAT(stats.bptc) }, 60 { "tx_broadcast", E1000_STAT(stats.bptc) },
53 { "rx_multicast", E1000_STAT(stats.mprc) }, 61 { "rx_multicast", E1000_STAT(stats.mprc) },
54 { "tx_multicast", E1000_STAT(stats.mptc) }, 62 { "tx_multicast", E1000_STAT(stats.mptc) },
55 { "rx_errors", E1000_STAT(net_stats.rx_errors) }, 63 { "rx_errors", E1000_NETDEV_STAT(stats.rx_errors) },
56 { "tx_errors", E1000_STAT(net_stats.tx_errors) }, 64 { "tx_errors", E1000_NETDEV_STAT(stats.tx_errors) },
57 { "tx_dropped", E1000_STAT(net_stats.tx_dropped) }, 65 { "tx_dropped", E1000_NETDEV_STAT(stats.tx_dropped) },
58 { "multicast", E1000_STAT(stats.mprc) }, 66 { "multicast", E1000_STAT(stats.mprc) },
59 { "collisions", E1000_STAT(stats.colc) }, 67 { "collisions", E1000_STAT(stats.colc) },
60 { "rx_length_errors", E1000_STAT(net_stats.rx_length_errors) }, 68 { "rx_length_errors", E1000_NETDEV_STAT(stats.rx_length_errors) },
61 { "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) }, 69 { "rx_over_errors", E1000_NETDEV_STAT(stats.rx_over_errors) },
62 { "rx_crc_errors", E1000_STAT(stats.crcerrs) }, 70 { "rx_crc_errors", E1000_STAT(stats.crcerrs) },
63 { "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) }, 71 { "rx_frame_errors", E1000_NETDEV_STAT(stats.rx_frame_errors) },
64 { "rx_no_buffer_count", E1000_STAT(stats.rnbc) }, 72 { "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
65 { "rx_missed_errors", E1000_STAT(stats.mpc) }, 73 { "rx_missed_errors", E1000_STAT(stats.mpc) },
66 { "tx_aborted_errors", E1000_STAT(stats.ecol) }, 74 { "tx_aborted_errors", E1000_STAT(stats.ecol) },
67 { "tx_carrier_errors", E1000_STAT(stats.tncrs) }, 75 { "tx_carrier_errors", E1000_STAT(stats.tncrs) },
68 { "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) }, 76 { "tx_fifo_errors", E1000_NETDEV_STAT(stats.tx_fifo_errors) },
69 { "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) }, 77 { "tx_heartbeat_errors", E1000_NETDEV_STAT(stats.tx_heartbeat_errors) },
70 { "tx_window_errors", E1000_STAT(stats.latecol) }, 78 { "tx_window_errors", E1000_STAT(stats.latecol) },
71 { "tx_abort_late_coll", E1000_STAT(stats.latecol) }, 79 { "tx_abort_late_coll", E1000_STAT(stats.latecol) },
72 { "tx_deferred_ok", E1000_STAT(stats.dc) }, 80 { "tx_deferred_ok", E1000_STAT(stats.dc) },
@@ -327,10 +335,18 @@ static int e1000_set_pauseparam(struct net_device *netdev,
327 335
328 hw->fc.current_mode = hw->fc.requested_mode; 336 hw->fc.current_mode = hw->fc.requested_mode;
329 337
330 retval = ((hw->phy.media_type == e1000_media_type_fiber) ? 338 if (hw->phy.media_type == e1000_media_type_fiber) {
331 hw->mac.ops.setup_link(hw) : e1000e_force_mac_fc(hw)); 339 retval = hw->mac.ops.setup_link(hw);
340 /* implicit goto out */
341 } else {
342 retval = e1000e_force_mac_fc(hw);
343 if (retval)
344 goto out;
345 e1000e_set_fc_watermarks(hw);
346 }
332 } 347 }
333 348
349out:
334 clear_bit(__E1000_RESETTING, &adapter->state); 350 clear_bit(__E1000_RESETTING, &adapter->state);
335 return retval; 351 return retval;
336} 352}
@@ -1904,10 +1920,21 @@ static void e1000_get_ethtool_stats(struct net_device *netdev,
1904{ 1920{
1905 struct e1000_adapter *adapter = netdev_priv(netdev); 1921 struct e1000_adapter *adapter = netdev_priv(netdev);
1906 int i; 1922 int i;
1923 char *p = NULL;
1907 1924
1908 e1000e_update_stats(adapter); 1925 e1000e_update_stats(adapter);
1909 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) { 1926 for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
1910 char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset; 1927 switch (e1000_gstrings_stats[i].type) {
1928 case NETDEV_STATS:
1929 p = (char *) netdev +
1930 e1000_gstrings_stats[i].stat_offset;
1931 break;
1932 case E1000_STATS:
1933 p = (char *) adapter +
1934 e1000_gstrings_stats[i].stat_offset;
1935 break;
1936 }
1937
1911 data[i] = (e1000_gstrings_stats[i].sizeof_stat == 1938 data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
1912 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1939 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1913 } 1940 }
diff --git a/drivers/net/e1000e/netdev.c b/drivers/net/e1000e/netdev.c
index 0687c6aa4e46..376924804f3f 100644
--- a/drivers/net/e1000e/netdev.c
+++ b/drivers/net/e1000e/netdev.c
@@ -167,7 +167,7 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
167 struct e1000_buffer *buffer_info; 167 struct e1000_buffer *buffer_info;
168 struct sk_buff *skb; 168 struct sk_buff *skb;
169 unsigned int i; 169 unsigned int i;
170 unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN; 170 unsigned int bufsz = adapter->rx_buffer_len;
171 171
172 i = rx_ring->next_to_use; 172 i = rx_ring->next_to_use;
173 buffer_info = &rx_ring->buffer_info[i]; 173 buffer_info = &rx_ring->buffer_info[i];
@@ -179,20 +179,13 @@ static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
179 goto map_skb; 179 goto map_skb;
180 } 180 }
181 181
182 skb = netdev_alloc_skb(netdev, bufsz); 182 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
183 if (!skb) { 183 if (!skb) {
184 /* Better luck next round */ 184 /* Better luck next round */
185 adapter->alloc_rx_buff_failed++; 185 adapter->alloc_rx_buff_failed++;
186 break; 186 break;
187 } 187 }
188 188
189 /*
190 * Make buffer alignment 2 beyond a 16 byte boundary
191 * this will result in a 16 byte aligned IP header after
192 * the 14 byte MAC header is removed
193 */
194 skb_reserve(skb, NET_IP_ALIGN);
195
196 buffer_info->skb = skb; 189 buffer_info->skb = skb;
197map_skb: 190map_skb:
198 buffer_info->dma = pci_map_single(pdev, skb->data, 191 buffer_info->dma = pci_map_single(pdev, skb->data,
@@ -284,21 +277,14 @@ static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
284 cpu_to_le64(ps_page->dma); 277 cpu_to_le64(ps_page->dma);
285 } 278 }
286 279
287 skb = netdev_alloc_skb(netdev, 280 skb = netdev_alloc_skb_ip_align(netdev,
288 adapter->rx_ps_bsize0 + NET_IP_ALIGN); 281 adapter->rx_ps_bsize0);
289 282
290 if (!skb) { 283 if (!skb) {
291 adapter->alloc_rx_buff_failed++; 284 adapter->alloc_rx_buff_failed++;
292 break; 285 break;
293 } 286 }
294 287
295 /*
296 * Make buffer alignment 2 beyond a 16 byte boundary
297 * this will result in a 16 byte aligned IP header after
298 * the 14 byte MAC header is removed
299 */
300 skb_reserve(skb, NET_IP_ALIGN);
301
302 buffer_info->skb = skb; 288 buffer_info->skb = skb;
303 buffer_info->dma = pci_map_single(pdev, skb->data, 289 buffer_info->dma = pci_map_single(pdev, skb->data,
304 adapter->rx_ps_bsize0, 290 adapter->rx_ps_bsize0,
@@ -359,9 +345,7 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
359 struct e1000_buffer *buffer_info; 345 struct e1000_buffer *buffer_info;
360 struct sk_buff *skb; 346 struct sk_buff *skb;
361 unsigned int i; 347 unsigned int i;
362 unsigned int bufsz = 256 - 348 unsigned int bufsz = 256 - 16 /* for skb_reserve */;
363 16 /* for skb_reserve */ -
364 NET_IP_ALIGN;
365 349
366 i = rx_ring->next_to_use; 350 i = rx_ring->next_to_use;
367 buffer_info = &rx_ring->buffer_info[i]; 351 buffer_info = &rx_ring->buffer_info[i];
@@ -373,19 +357,13 @@ static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
373 goto check_page; 357 goto check_page;
374 } 358 }
375 359
376 skb = netdev_alloc_skb(netdev, bufsz); 360 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
377 if (unlikely(!skb)) { 361 if (unlikely(!skb)) {
378 /* Better luck next round */ 362 /* Better luck next round */
379 adapter->alloc_rx_buff_failed++; 363 adapter->alloc_rx_buff_failed++;
380 break; 364 break;
381 } 365 }
382 366
383 /* Make buffer alignment 2 beyond a 16 byte boundary
384 * this will result in a 16 byte aligned IP header after
385 * the 14 byte MAC header is removed
386 */
387 skb_reserve(skb, NET_IP_ALIGN);
388
389 buffer_info->skb = skb; 367 buffer_info->skb = skb;
390check_page: 368check_page:
391 /* allocate a new page if necessary */ 369 /* allocate a new page if necessary */
@@ -513,9 +491,8 @@ static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
513 */ 491 */
514 if (length < copybreak) { 492 if (length < copybreak) {
515 struct sk_buff *new_skb = 493 struct sk_buff *new_skb =
516 netdev_alloc_skb(netdev, length + NET_IP_ALIGN); 494 netdev_alloc_skb_ip_align(netdev, length);
517 if (new_skb) { 495 if (new_skb) {
518 skb_reserve(new_skb, NET_IP_ALIGN);
519 skb_copy_to_linear_data_offset(new_skb, 496 skb_copy_to_linear_data_offset(new_skb,
520 -NET_IP_ALIGN, 497 -NET_IP_ALIGN,
521 (skb->data - 498 (skb->data -
@@ -560,8 +537,8 @@ next_desc:
560 537
561 adapter->total_rx_bytes += total_rx_bytes; 538 adapter->total_rx_bytes += total_rx_bytes;
562 adapter->total_rx_packets += total_rx_packets; 539 adapter->total_rx_packets += total_rx_packets;
563 adapter->net_stats.rx_bytes += total_rx_bytes; 540 netdev->stats.rx_bytes += total_rx_bytes;
564 adapter->net_stats.rx_packets += total_rx_packets; 541 netdev->stats.rx_packets += total_rx_packets;
565 return cleaned; 542 return cleaned;
566} 543}
567 544
@@ -690,8 +667,8 @@ static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
690 } 667 }
691 adapter->total_tx_bytes += total_tx_bytes; 668 adapter->total_tx_bytes += total_tx_bytes;
692 adapter->total_tx_packets += total_tx_packets; 669 adapter->total_tx_packets += total_tx_packets;
693 adapter->net_stats.tx_bytes += total_tx_bytes; 670 netdev->stats.tx_bytes += total_tx_bytes;
694 adapter->net_stats.tx_packets += total_tx_packets; 671 netdev->stats.tx_packets += total_tx_packets;
695 return (count < tx_ring->count); 672 return (count < tx_ring->count);
696} 673}
697 674
@@ -871,8 +848,8 @@ next_desc:
871 848
872 adapter->total_rx_bytes += total_rx_bytes; 849 adapter->total_rx_bytes += total_rx_bytes;
873 adapter->total_rx_packets += total_rx_packets; 850 adapter->total_rx_packets += total_rx_packets;
874 adapter->net_stats.rx_bytes += total_rx_bytes; 851 netdev->stats.rx_bytes += total_rx_bytes;
875 adapter->net_stats.rx_packets += total_rx_packets; 852 netdev->stats.rx_packets += total_rx_packets;
876 return cleaned; 853 return cleaned;
877} 854}
878 855
@@ -1051,8 +1028,8 @@ next_desc:
1051 1028
1052 adapter->total_rx_bytes += total_rx_bytes; 1029 adapter->total_rx_bytes += total_rx_bytes;
1053 adapter->total_rx_packets += total_rx_packets; 1030 adapter->total_rx_packets += total_rx_packets;
1054 adapter->net_stats.rx_bytes += total_rx_bytes; 1031 netdev->stats.rx_bytes += total_rx_bytes;
1055 adapter->net_stats.rx_packets += total_rx_packets; 1032 netdev->stats.rx_packets += total_rx_packets;
1056 return cleaned; 1033 return cleaned;
1057} 1034}
1058 1035
@@ -3287,6 +3264,7 @@ static void e1000_update_phy_info(unsigned long data)
3287 **/ 3264 **/
3288void e1000e_update_stats(struct e1000_adapter *adapter) 3265void e1000e_update_stats(struct e1000_adapter *adapter)
3289{ 3266{
3267 struct net_device *netdev = adapter->netdev;
3290 struct e1000_hw *hw = &adapter->hw; 3268 struct e1000_hw *hw = &adapter->hw;
3291 struct pci_dev *pdev = adapter->pdev; 3269 struct pci_dev *pdev = adapter->pdev;
3292 u16 phy_data; 3270 u16 phy_data;
@@ -3381,8 +3359,8 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
3381 adapter->stats.tsctfc += er32(TSCTFC); 3359 adapter->stats.tsctfc += er32(TSCTFC);
3382 3360
3383 /* Fill out the OS statistics structure */ 3361 /* Fill out the OS statistics structure */
3384 adapter->net_stats.multicast = adapter->stats.mprc; 3362 netdev->stats.multicast = adapter->stats.mprc;
3385 adapter->net_stats.collisions = adapter->stats.colc; 3363 netdev->stats.collisions = adapter->stats.colc;
3386 3364
3387 /* Rx Errors */ 3365 /* Rx Errors */
3388 3366
@@ -3390,22 +3368,22 @@ void e1000e_update_stats(struct e1000_adapter *adapter)
3390 * RLEC on some newer hardware can be incorrect so build 3368 * RLEC on some newer hardware can be incorrect so build
3391 * our own version based on RUC and ROC 3369 * our own version based on RUC and ROC
3392 */ 3370 */
3393 adapter->net_stats.rx_errors = adapter->stats.rxerrc + 3371 netdev->stats.rx_errors = adapter->stats.rxerrc +
3394 adapter->stats.crcerrs + adapter->stats.algnerrc + 3372 adapter->stats.crcerrs + adapter->stats.algnerrc +
3395 adapter->stats.ruc + adapter->stats.roc + 3373 adapter->stats.ruc + adapter->stats.roc +
3396 adapter->stats.cexterr; 3374 adapter->stats.cexterr;
3397 adapter->net_stats.rx_length_errors = adapter->stats.ruc + 3375 netdev->stats.rx_length_errors = adapter->stats.ruc +
3398 adapter->stats.roc; 3376 adapter->stats.roc;
3399 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 3377 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
3400 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; 3378 netdev->stats.rx_frame_errors = adapter->stats.algnerrc;
3401 adapter->net_stats.rx_missed_errors = adapter->stats.mpc; 3379 netdev->stats.rx_missed_errors = adapter->stats.mpc;
3402 3380
3403 /* Tx Errors */ 3381 /* Tx Errors */
3404 adapter->net_stats.tx_errors = adapter->stats.ecol + 3382 netdev->stats.tx_errors = adapter->stats.ecol +
3405 adapter->stats.latecol; 3383 adapter->stats.latecol;
3406 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol; 3384 netdev->stats.tx_aborted_errors = adapter->stats.ecol;
3407 adapter->net_stats.tx_window_errors = adapter->stats.latecol; 3385 netdev->stats.tx_window_errors = adapter->stats.latecol;
3408 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs; 3386 netdev->stats.tx_carrier_errors = adapter->stats.tncrs;
3409 3387
3410 /* Tx Dropped needs to be maintained elsewhere */ 3388 /* Tx Dropped needs to be maintained elsewhere */
3411 3389
@@ -4254,10 +4232,8 @@ static void e1000_reset_task(struct work_struct *work)
4254 **/ 4232 **/
4255static struct net_device_stats *e1000_get_stats(struct net_device *netdev) 4233static struct net_device_stats *e1000_get_stats(struct net_device *netdev)
4256{ 4234{
4257 struct e1000_adapter *adapter = netdev_priv(netdev);
4258
4259 /* only return the current stats */ 4235 /* only return the current stats */
4260 return &adapter->net_stats; 4236 return &netdev->stats;
4261} 4237}
4262 4238
4263/** 4239/**
diff --git a/drivers/net/ehea/ehea_main.c b/drivers/net/ehea/ehea_main.c
index 41bd7aeafd82..7f8fcc2fa748 100644
--- a/drivers/net/ehea/ehea_main.c
+++ b/drivers/net/ehea/ehea_main.c
@@ -447,7 +447,9 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
447 max_index_mask = q_skba->len - 1; 447 max_index_mask = q_skba->len - 1;
448 for (i = 0; i < fill_wqes; i++) { 448 for (i = 0; i < fill_wqes; i++) {
449 u64 tmp_addr; 449 u64 tmp_addr;
450 struct sk_buff *skb = netdev_alloc_skb(dev, packet_size); 450 struct sk_buff *skb;
451
452 skb = netdev_alloc_skb_ip_align(dev, packet_size);
451 if (!skb) { 453 if (!skb) {
452 q_skba->os_skbs = fill_wqes - i; 454 q_skba->os_skbs = fill_wqes - i;
453 if (q_skba->os_skbs == q_skba->len - 2) { 455 if (q_skba->os_skbs == q_skba->len - 2) {
@@ -457,7 +459,6 @@ static int ehea_refill_rq_def(struct ehea_port_res *pr,
457 } 459 }
458 break; 460 break;
459 } 461 }
460 skb_reserve(skb, NET_IP_ALIGN);
461 462
462 skb_arr[index] = skb; 463 skb_arr[index] = skb;
463 tmp_addr = ehea_map_vaddr(skb->data); 464 tmp_addr = ehea_map_vaddr(skb->data);
@@ -500,7 +501,7 @@ static int ehea_refill_rq2(struct ehea_port_res *pr, int nr_of_wqes)
500{ 501{
501 return ehea_refill_rq_def(pr, &pr->rq2_skba, 2, 502 return ehea_refill_rq_def(pr, &pr->rq2_skba, 2,
502 nr_of_wqes, EHEA_RWQE2_TYPE, 503 nr_of_wqes, EHEA_RWQE2_TYPE,
503 EHEA_RQ2_PKT_SIZE + NET_IP_ALIGN); 504 EHEA_RQ2_PKT_SIZE);
504} 505}
505 506
506 507
@@ -508,7 +509,7 @@ static int ehea_refill_rq3(struct ehea_port_res *pr, int nr_of_wqes)
508{ 509{
509 return ehea_refill_rq_def(pr, &pr->rq3_skba, 3, 510 return ehea_refill_rq_def(pr, &pr->rq3_skba, 3,
510 nr_of_wqes, EHEA_RWQE3_TYPE, 511 nr_of_wqes, EHEA_RWQE3_TYPE,
511 EHEA_MAX_PACKET_SIZE + NET_IP_ALIGN); 512 EHEA_MAX_PACKET_SIZE);
512} 513}
513 514
514static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num) 515static inline int ehea_check_cqe(struct ehea_cqe *cqe, int *rq_num)
diff --git a/drivers/net/enic/enic_main.c b/drivers/net/enic/enic_main.c
index d69d52ed7726..f875751af15e 100644
--- a/drivers/net/enic/enic_main.c
+++ b/drivers/net/enic/enic_main.c
@@ -870,19 +870,6 @@ static void enic_free_rq_buf(struct vnic_rq *rq, struct vnic_rq_buf *buf)
870 dev_kfree_skb_any(buf->os_buf); 870 dev_kfree_skb_any(buf->os_buf);
871} 871}
872 872
873static inline struct sk_buff *enic_rq_alloc_skb(struct net_device *netdev,
874 unsigned int size)
875{
876 struct sk_buff *skb;
877
878 skb = netdev_alloc_skb(netdev, size + NET_IP_ALIGN);
879
880 if (skb)
881 skb_reserve(skb, NET_IP_ALIGN);
882
883 return skb;
884}
885
886static int enic_rq_alloc_buf(struct vnic_rq *rq) 873static int enic_rq_alloc_buf(struct vnic_rq *rq)
887{ 874{
888 struct enic *enic = vnic_dev_priv(rq->vdev); 875 struct enic *enic = vnic_dev_priv(rq->vdev);
@@ -892,7 +879,7 @@ static int enic_rq_alloc_buf(struct vnic_rq *rq)
892 unsigned int os_buf_index = 0; 879 unsigned int os_buf_index = 0;
893 dma_addr_t dma_addr; 880 dma_addr_t dma_addr;
894 881
895 skb = enic_rq_alloc_skb(netdev, len); 882 skb = netdev_alloc_skb_ip_align(netdev, len);
896 if (!skb) 883 if (!skb)
897 return -ENOMEM; 884 return -ENOMEM;
898 885
diff --git a/drivers/net/ethoc.c b/drivers/net/ethoc.c
index 590473afb3dc..f1c565282d58 100644
--- a/drivers/net/ethoc.c
+++ b/drivers/net/ethoc.c
@@ -405,10 +405,10 @@ static int ethoc_rx(struct net_device *dev, int limit)
405 405
406 if (ethoc_update_rx_stats(priv, &bd) == 0) { 406 if (ethoc_update_rx_stats(priv, &bd) == 0) {
407 int size = bd.stat >> 16; 407 int size = bd.stat >> 16;
408 struct sk_buff *skb = netdev_alloc_skb(dev, size); 408 struct sk_buff *skb;
409 409
410 size -= 4; /* strip the CRC */ 410 size -= 4; /* strip the CRC */
411 skb_reserve(skb, 2); /* align TCP/IP header */ 411 skb = netdev_alloc_skb_ip_align(dev, size);
412 412
413 if (likely(skb)) { 413 if (likely(skb)) {
414 void *src = phys_to_virt(bd.addr); 414 void *src = phys_to_virt(bd.addr);
diff --git a/drivers/net/fsl_pq_mdio.c b/drivers/net/fsl_pq_mdio.c
index efbf67689eca..25fabb3eedc5 100644
--- a/drivers/net/fsl_pq_mdio.c
+++ b/drivers/net/fsl_pq_mdio.c
@@ -3,8 +3,9 @@
3 * Provides Bus interface for MIIM regs 3 * Provides Bus interface for MIIM regs
4 * 4 *
5 * Author: Andy Fleming <afleming@freescale.com> 5 * Author: Andy Fleming <afleming@freescale.com>
6 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
6 * 7 *
7 * Copyright (c) 2002-2004,2008 Freescale Semiconductor, Inc. 8 * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
8 * 9 *
9 * Based on gianfar_mii.c and ucc_geth_mii.c (Li Yang, Kim Phillips) 10 * Based on gianfar_mii.c and ucc_geth_mii.c (Li Yang, Kim Phillips)
10 * 11 *
@@ -102,13 +103,18 @@ int fsl_pq_local_mdio_read(struct fsl_pq_mdio __iomem *regs,
102 return value; 103 return value;
103} 104}
104 105
106static struct fsl_pq_mdio __iomem *fsl_pq_mdio_get_regs(struct mii_bus *bus)
107{
108 return (void __iomem __force *)bus->priv;
109}
110
105/* 111/*
106 * Write value to the PHY at mii_id at register regnum, 112 * Write value to the PHY at mii_id at register regnum,
107 * on the bus, waiting until the write is done before returning. 113 * on the bus, waiting until the write is done before returning.
108 */ 114 */
109int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value) 115int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
110{ 116{
111 struct fsl_pq_mdio __iomem *regs = (void __iomem *)bus->priv; 117 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
112 118
113 /* Write to the local MII regs */ 119 /* Write to the local MII regs */
114 return(fsl_pq_local_mdio_write(regs, mii_id, regnum, value)); 120 return(fsl_pq_local_mdio_write(regs, mii_id, regnum, value));
@@ -120,7 +126,7 @@ int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value)
120 */ 126 */
121int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum) 127int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
122{ 128{
123 struct fsl_pq_mdio __iomem *regs = (void __iomem *)bus->priv; 129 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
124 130
125 /* Read the local MII regs */ 131 /* Read the local MII regs */
126 return(fsl_pq_local_mdio_read(regs, mii_id, regnum)); 132 return(fsl_pq_local_mdio_read(regs, mii_id, regnum));
@@ -129,7 +135,7 @@ int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
129/* Reset the MIIM registers, and wait for the bus to free */ 135/* Reset the MIIM registers, and wait for the bus to free */
130static int fsl_pq_mdio_reset(struct mii_bus *bus) 136static int fsl_pq_mdio_reset(struct mii_bus *bus)
131{ 137{
132 struct fsl_pq_mdio __iomem *regs = (void __iomem *)bus->priv; 138 struct fsl_pq_mdio __iomem *regs = fsl_pq_mdio_get_regs(bus);
133 int timeout = PHY_INIT_TIMEOUT; 139 int timeout = PHY_INIT_TIMEOUT;
134 140
135 mutex_lock(&bus->mdio_lock); 141 mutex_lock(&bus->mdio_lock);
@@ -189,19 +195,29 @@ static int fsl_pq_mdio_find_free(struct mii_bus *new_bus)
189 195
190 196
191#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) 197#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
192static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs) 198static u32 __iomem *get_gfar_tbipa(struct fsl_pq_mdio __iomem *regs, struct device_node *np)
193{ 199{
194 struct gfar __iomem *enet_regs; 200 struct gfar __iomem *enet_regs;
201 u32 __iomem *ioremap_tbipa;
202 u64 addr, size;
195 203
196 /* 204 /*
197 * This is mildly evil, but so is our hardware for doing this. 205 * This is mildly evil, but so is our hardware for doing this.
198 * Also, we have to cast back to struct gfar because of 206 * Also, we have to cast back to struct gfar because of
199 * definition weirdness done in gianfar.h. 207 * definition weirdness done in gianfar.h.
200 */ 208 */
201 enet_regs = (struct gfar __iomem *) 209 if(of_device_is_compatible(np, "fsl,gianfar-mdio") ||
202 ((char __iomem *)regs - offsetof(struct gfar, gfar_mii_regs)); 210 of_device_is_compatible(np, "fsl,gianfar-tbi") ||
203 211 of_device_is_compatible(np, "gianfar")) {
204 return &enet_regs->tbipa; 212 enet_regs = (struct gfar __iomem *)regs;
213 return &enet_regs->tbipa;
214 } else if (of_device_is_compatible(np, "fsl,etsec2-mdio") ||
215 of_device_is_compatible(np, "fsl,etsec2-tbi")) {
216 addr = of_translate_address(np, of_get_address(np, 1, &size, NULL));
217 ioremap_tbipa = ioremap(addr, size);
218 return ioremap_tbipa;
219 } else
220 return NULL;
205} 221}
206#endif 222#endif
207 223
@@ -250,11 +266,12 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
250{ 266{
251 struct device_node *np = ofdev->node; 267 struct device_node *np = ofdev->node;
252 struct device_node *tbi; 268 struct device_node *tbi;
253 struct fsl_pq_mdio __iomem *regs; 269 struct fsl_pq_mdio __iomem *regs = NULL;
270 void __iomem *map;
254 u32 __iomem *tbipa; 271 u32 __iomem *tbipa;
255 struct mii_bus *new_bus; 272 struct mii_bus *new_bus;
256 int tbiaddr = -1; 273 int tbiaddr = -1;
257 u64 addr, size; 274 u64 addr = 0, size = 0;
258 int err = 0; 275 int err = 0;
259 276
260 new_bus = mdiobus_alloc(); 277 new_bus = mdiobus_alloc();
@@ -269,13 +286,19 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
269 286
270 /* Set the PHY base address */ 287 /* Set the PHY base address */
271 addr = of_translate_address(np, of_get_address(np, 0, &size, NULL)); 288 addr = of_translate_address(np, of_get_address(np, 0, &size, NULL));
272 regs = ioremap(addr, size); 289 map = ioremap(addr, size);
273 290 if (!map) {
274 if (NULL == regs) {
275 err = -ENOMEM; 291 err = -ENOMEM;
276 goto err_free_bus; 292 goto err_free_bus;
277 } 293 }
278 294
295 if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
296 of_device_is_compatible(np, "fsl,gianfar-tbi") ||
297 of_device_is_compatible(np, "fsl,ucc-mdio") ||
298 of_device_is_compatible(np, "ucc_geth_phy"))
299 map -= offsetof(struct fsl_pq_mdio, miimcfg);
300 regs = map;
301
279 new_bus->priv = (void __force *)regs; 302 new_bus->priv = (void __force *)regs;
280 303
281 new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL); 304 new_bus->irq = kcalloc(PHY_MAX_ADDR, sizeof(int), GFP_KERNEL);
@@ -290,9 +313,15 @@ static int fsl_pq_mdio_probe(struct of_device *ofdev,
290 313
291 if (of_device_is_compatible(np, "fsl,gianfar-mdio") || 314 if (of_device_is_compatible(np, "fsl,gianfar-mdio") ||
292 of_device_is_compatible(np, "fsl,gianfar-tbi") || 315 of_device_is_compatible(np, "fsl,gianfar-tbi") ||
316 of_device_is_compatible(np, "fsl,etsec2-mdio") ||
317 of_device_is_compatible(np, "fsl,etsec2-tbi") ||
293 of_device_is_compatible(np, "gianfar")) { 318 of_device_is_compatible(np, "gianfar")) {
294#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE) 319#if defined(CONFIG_GIANFAR) || defined(CONFIG_GIANFAR_MODULE)
295 tbipa = get_gfar_tbipa(regs); 320 tbipa = get_gfar_tbipa(regs, np);
321 if (!tbipa) {
322 err = -EINVAL;
323 goto err_free_irqs;
324 }
296#else 325#else
297 err = -ENODEV; 326 err = -ENODEV;
298 goto err_free_irqs; 327 goto err_free_irqs;
@@ -380,7 +409,7 @@ static int fsl_pq_mdio_remove(struct of_device *ofdev)
380 409
381 dev_set_drvdata(device, NULL); 410 dev_set_drvdata(device, NULL);
382 411
383 iounmap((void __iomem *)bus->priv); 412 iounmap(fsl_pq_mdio_get_regs(bus));
384 bus->priv = NULL; 413 bus->priv = NULL;
385 mdiobus_free(bus); 414 mdiobus_free(bus);
386 415
@@ -405,6 +434,12 @@ static struct of_device_id fsl_pq_mdio_match[] = {
405 { 434 {
406 .compatible = "fsl,gianfar-mdio", 435 .compatible = "fsl,gianfar-mdio",
407 }, 436 },
437 {
438 .compatible = "fsl,etsec2-tbi",
439 },
440 {
441 .compatible = "fsl,etsec2-mdio",
442 },
408 {}, 443 {},
409}; 444};
410MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match); 445MODULE_DEVICE_TABLE(of, fsl_pq_mdio_match);
diff --git a/drivers/net/fsl_pq_mdio.h b/drivers/net/fsl_pq_mdio.h
index 36dad527410b..1f7d865cedb6 100644
--- a/drivers/net/fsl_pq_mdio.h
+++ b/drivers/net/fsl_pq_mdio.h
@@ -3,8 +3,9 @@
3 * Driver for the MDIO bus controller on Freescale PowerQUICC processors 3 * Driver for the MDIO bus controller on Freescale PowerQUICC processors
4 * 4 *
5 * Author: Andy Fleming 5 * Author: Andy Fleming
6 * Modifier: Sandeep Gopalpet
6 * 7 *
7 * Copyright (c) 2002-2004,2008 Freescale Semiconductor, Inc. 8 * Copyright 2002-2004, 2008-2009 Freescale Semiconductor, Inc.
8 * 9 *
9 * This program is free software; you can redistribute it and/or modify it 10 * This program is free software; you can redistribute it and/or modify it
10 * under the terms of the GNU General Public License as published by the 11 * under the terms of the GNU General Public License as published by the
@@ -23,6 +24,12 @@
23#define MII_READ_COMMAND 0x00000001 24#define MII_READ_COMMAND 0x00000001
24 25
25struct fsl_pq_mdio { 26struct fsl_pq_mdio {
27 u8 res1[16];
28 u32 ieventm; /* MDIO Interrupt event register (for etsec2)*/
29 u32 imaskm; /* MDIO Interrupt mask register (for etsec2)*/
30 u8 res2[4];
31 u32 emapm; /* MDIO Event mapping register (for etsec2)*/
32 u8 res3[1280];
26 u32 miimcfg; /* MII management configuration reg */ 33 u32 miimcfg; /* MII management configuration reg */
27 u32 miimcom; /* MII management command reg */ 34 u32 miimcom; /* MII management command reg */
28 u32 miimadd; /* MII management address reg */ 35 u32 miimadd; /* MII management address reg */
@@ -31,9 +38,9 @@ struct fsl_pq_mdio {
31 u32 miimind; /* MII management indication reg */ 38 u32 miimind; /* MII management indication reg */
32 u8 reserved[28]; /* Space holder */ 39 u8 reserved[28]; /* Space holder */
33 u32 utbipar; /* TBI phy address reg (only on UCC) */ 40 u32 utbipar; /* TBI phy address reg (only on UCC) */
41 u8 res4[2728];
34} __attribute__ ((packed)); 42} __attribute__ ((packed));
35 43
36
37int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum); 44int fsl_pq_mdio_read(struct mii_bus *bus, int mii_id, int regnum);
38int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value); 45int fsl_pq_mdio_write(struct mii_bus *bus, int mii_id, int regnum, u16 value);
39int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id, 46int fsl_pq_local_mdio_write(struct fsl_pq_mdio __iomem *regs, int mii_id,
diff --git a/drivers/net/gianfar.c b/drivers/net/gianfar.c
index 5bf31f1509c9..16def131c390 100644
--- a/drivers/net/gianfar.c
+++ b/drivers/net/gianfar.c
@@ -8,9 +8,10 @@
8 * 8 *
9 * Author: Andy Fleming 9 * Author: Andy Fleming
10 * Maintainer: Kumar Gala 10 * Maintainer: Kumar Gala
11 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
11 * 12 *
12 * Copyright (c) 2002-2006 Freescale Semiconductor, Inc. 13 * Copyright 2002-2009 Freescale Semiconductor, Inc.
13 * Copyright (c) 2007 MontaVista Software, Inc. 14 * Copyright 2007 MontaVista Software, Inc.
14 * 15 *
15 * This program is free software; you can redistribute it and/or modify it 16 * This program is free software; you can redistribute it and/or modify it
16 * under the terms of the GNU General Public License as published by the 17 * under the terms of the GNU General Public License as published by the
@@ -109,7 +110,7 @@ static void gfar_reset_task(struct work_struct *work);
109static void gfar_timeout(struct net_device *dev); 110static void gfar_timeout(struct net_device *dev);
110static int gfar_close(struct net_device *dev); 111static int gfar_close(struct net_device *dev);
111struct sk_buff *gfar_new_skb(struct net_device *dev); 112struct sk_buff *gfar_new_skb(struct net_device *dev);
112static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp, 113static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
113 struct sk_buff *skb); 114 struct sk_buff *skb);
114static int gfar_set_mac_address(struct net_device *dev); 115static int gfar_set_mac_address(struct net_device *dev);
115static int gfar_change_mtu(struct net_device *dev, int new_mtu); 116static int gfar_change_mtu(struct net_device *dev, int new_mtu);
@@ -130,8 +131,8 @@ static int gfar_poll(struct napi_struct *napi, int budget);
130#ifdef CONFIG_NET_POLL_CONTROLLER 131#ifdef CONFIG_NET_POLL_CONTROLLER
131static void gfar_netpoll(struct net_device *dev); 132static void gfar_netpoll(struct net_device *dev);
132#endif 133#endif
133int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); 134int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
134static int gfar_clean_tx_ring(struct net_device *dev); 135static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue);
135static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb, 136static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
136 int amount_pull); 137 int amount_pull);
137static void gfar_vlan_rx_register(struct net_device *netdev, 138static void gfar_vlan_rx_register(struct net_device *netdev,
@@ -142,11 +143,277 @@ void gfar_start(struct net_device *dev);
142static void gfar_clear_exact_match(struct net_device *dev); 143static void gfar_clear_exact_match(struct net_device *dev);
143static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr); 144static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr);
144static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd); 145static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
146u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb);
145 147
146MODULE_AUTHOR("Freescale Semiconductor, Inc"); 148MODULE_AUTHOR("Freescale Semiconductor, Inc");
147MODULE_DESCRIPTION("Gianfar Ethernet Driver"); 149MODULE_DESCRIPTION("Gianfar Ethernet Driver");
148MODULE_LICENSE("GPL"); 150MODULE_LICENSE("GPL");
149 151
152static void gfar_init_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
153 dma_addr_t buf)
154{
155 u32 lstatus;
156
157 bdp->bufPtr = buf;
158
159 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
160 if (bdp == rx_queue->rx_bd_base + rx_queue->rx_ring_size - 1)
161 lstatus |= BD_LFLAG(RXBD_WRAP);
162
163 eieio();
164
165 bdp->lstatus = lstatus;
166}
167
168static int gfar_init_bds(struct net_device *ndev)
169{
170 struct gfar_private *priv = netdev_priv(ndev);
171 struct gfar_priv_tx_q *tx_queue = NULL;
172 struct gfar_priv_rx_q *rx_queue = NULL;
173 struct txbd8 *txbdp;
174 struct rxbd8 *rxbdp;
175 int i, j;
176
177 for (i = 0; i < priv->num_tx_queues; i++) {
178 tx_queue = priv->tx_queue[i];
179 /* Initialize some variables in our dev structure */
180 tx_queue->num_txbdfree = tx_queue->tx_ring_size;
181 tx_queue->dirty_tx = tx_queue->tx_bd_base;
182 tx_queue->cur_tx = tx_queue->tx_bd_base;
183 tx_queue->skb_curtx = 0;
184 tx_queue->skb_dirtytx = 0;
185
186 /* Initialize Transmit Descriptor Ring */
187 txbdp = tx_queue->tx_bd_base;
188 for (j = 0; j < tx_queue->tx_ring_size; j++) {
189 txbdp->lstatus = 0;
190 txbdp->bufPtr = 0;
191 txbdp++;
192 }
193
194 /* Set the last descriptor in the ring to indicate wrap */
195 txbdp--;
196 txbdp->status |= TXBD_WRAP;
197 }
198
199 for (i = 0; i < priv->num_rx_queues; i++) {
200 rx_queue = priv->rx_queue[i];
201 rx_queue->cur_rx = rx_queue->rx_bd_base;
202 rx_queue->skb_currx = 0;
203 rxbdp = rx_queue->rx_bd_base;
204
205 for (j = 0; j < rx_queue->rx_ring_size; j++) {
206 struct sk_buff *skb = rx_queue->rx_skbuff[j];
207
208 if (skb) {
209 gfar_init_rxbdp(rx_queue, rxbdp,
210 rxbdp->bufPtr);
211 } else {
212 skb = gfar_new_skb(ndev);
213 if (!skb) {
214 pr_err("%s: Can't allocate RX buffers\n",
215 ndev->name);
216 goto err_rxalloc_fail;
217 }
218 rx_queue->rx_skbuff[j] = skb;
219
220 gfar_new_rxbdp(rx_queue, rxbdp, skb);
221 }
222
223 rxbdp++;
224 }
225
226 }
227
228 return 0;
229
230err_rxalloc_fail:
231 free_skb_resources(priv);
232 return -ENOMEM;
233}
234
235static int gfar_alloc_skb_resources(struct net_device *ndev)
236{
237 void *vaddr;
238 dma_addr_t addr;
239 int i, j, k;
240 struct gfar_private *priv = netdev_priv(ndev);
241 struct device *dev = &priv->ofdev->dev;
242 struct gfar_priv_tx_q *tx_queue = NULL;
243 struct gfar_priv_rx_q *rx_queue = NULL;
244
245 priv->total_tx_ring_size = 0;
246 for (i = 0; i < priv->num_tx_queues; i++)
247 priv->total_tx_ring_size += priv->tx_queue[i]->tx_ring_size;
248
249 priv->total_rx_ring_size = 0;
250 for (i = 0; i < priv->num_rx_queues; i++)
251 priv->total_rx_ring_size += priv->rx_queue[i]->rx_ring_size;
252
253 /* Allocate memory for the buffer descriptors */
254 vaddr = dma_alloc_coherent(dev,
255 sizeof(struct txbd8) * priv->total_tx_ring_size +
256 sizeof(struct rxbd8) * priv->total_rx_ring_size,
257 &addr, GFP_KERNEL);
258 if (!vaddr) {
259 if (netif_msg_ifup(priv))
260 pr_err("%s: Could not allocate buffer descriptors!\n",
261 ndev->name);
262 return -ENOMEM;
263 }
264
265 for (i = 0; i < priv->num_tx_queues; i++) {
266 tx_queue = priv->tx_queue[i];
267 tx_queue->tx_bd_base = (struct txbd8 *) vaddr;
268 tx_queue->tx_bd_dma_base = addr;
269 tx_queue->dev = ndev;
270 /* enet DMA only understands physical addresses */
271 addr += sizeof(struct txbd8) *tx_queue->tx_ring_size;
272 vaddr += sizeof(struct txbd8) *tx_queue->tx_ring_size;
273 }
274
275 /* Start the rx descriptor ring where the tx ring leaves off */
276 for (i = 0; i < priv->num_rx_queues; i++) {
277 rx_queue = priv->rx_queue[i];
278 rx_queue->rx_bd_base = (struct rxbd8 *) vaddr;
279 rx_queue->rx_bd_dma_base = addr;
280 rx_queue->dev = ndev;
281 addr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
282 vaddr += sizeof (struct rxbd8) * rx_queue->rx_ring_size;
283 }
284
285 /* Setup the skbuff rings */
286 for (i = 0; i < priv->num_tx_queues; i++) {
287 tx_queue = priv->tx_queue[i];
288 tx_queue->tx_skbuff = kmalloc(sizeof(*tx_queue->tx_skbuff) *
289 tx_queue->tx_ring_size, GFP_KERNEL);
290 if (!tx_queue->tx_skbuff) {
291 if (netif_msg_ifup(priv))
292 pr_err("%s: Could not allocate tx_skbuff\n",
293 ndev->name);
294 goto cleanup;
295 }
296
297 for (k = 0; k < tx_queue->tx_ring_size; k++)
298 tx_queue->tx_skbuff[k] = NULL;
299 }
300
301 for (i = 0; i < priv->num_rx_queues; i++) {
302 rx_queue = priv->rx_queue[i];
303 rx_queue->rx_skbuff = kmalloc(sizeof(*rx_queue->rx_skbuff) *
304 rx_queue->rx_ring_size, GFP_KERNEL);
305
306 if (!rx_queue->rx_skbuff) {
307 if (netif_msg_ifup(priv))
308 pr_err("%s: Could not allocate rx_skbuff\n",
309 ndev->name);
310 goto cleanup;
311 }
312
313 for (j = 0; j < rx_queue->rx_ring_size; j++)
314 rx_queue->rx_skbuff[j] = NULL;
315 }
316
317 if (gfar_init_bds(ndev))
318 goto cleanup;
319
320 return 0;
321
322cleanup:
323 free_skb_resources(priv);
324 return -ENOMEM;
325}
326
327static void gfar_init_tx_rx_base(struct gfar_private *priv)
328{
329 struct gfar __iomem *regs = priv->gfargrp[0].regs;
330 u32 __iomem *baddr;
331 int i;
332
333 baddr = &regs->tbase0;
334 for(i = 0; i < priv->num_tx_queues; i++) {
335 gfar_write(baddr, priv->tx_queue[i]->tx_bd_dma_base);
336 baddr += 2;
337 }
338
339 baddr = &regs->rbase0;
340 for(i = 0; i < priv->num_rx_queues; i++) {
341 gfar_write(baddr, priv->rx_queue[i]->rx_bd_dma_base);
342 baddr += 2;
343 }
344}
345
346static void gfar_init_mac(struct net_device *ndev)
347{
348 struct gfar_private *priv = netdev_priv(ndev);
349 struct gfar __iomem *regs = priv->gfargrp[0].regs;
350 u32 rctrl = 0;
351 u32 tctrl = 0;
352 u32 attrs = 0;
353
354 /* write the tx/rx base registers */
355 gfar_init_tx_rx_base(priv);
356
357 /* Configure the coalescing support */
358 gfar_configure_coalescing(priv, 0xFF, 0xFF);
359
360 if (priv->rx_filer_enable)
361 rctrl |= RCTRL_FILREN;
362
363 if (priv->rx_csum_enable)
364 rctrl |= RCTRL_CHECKSUMMING;
365
366 if (priv->extended_hash) {
367 rctrl |= RCTRL_EXTHASH;
368
369 gfar_clear_exact_match(ndev);
370 rctrl |= RCTRL_EMEN;
371 }
372
373 if (priv->padding) {
374 rctrl &= ~RCTRL_PAL_MASK;
375 rctrl |= RCTRL_PADDING(priv->padding);
376 }
377
378 /* keep vlan related bits if it's enabled */
379 if (priv->vlgrp) {
380 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT;
381 tctrl |= TCTRL_VLINS;
382 }
383
384 /* Init rctrl based on our settings */
385 gfar_write(&regs->rctrl, rctrl);
386
387 if (ndev->features & NETIF_F_IP_CSUM)
388 tctrl |= TCTRL_INIT_CSUM;
389
390 tctrl |= TCTRL_TXSCHED_PRIO;
391
392 gfar_write(&regs->tctrl, tctrl);
393
394 /* Set the extraction length and index */
395 attrs = ATTRELI_EL(priv->rx_stash_size) |
396 ATTRELI_EI(priv->rx_stash_index);
397
398 gfar_write(&regs->attreli, attrs);
399
400 /* Start with defaults, and add stashing or locking
401 * depending on the approprate variables */
402 attrs = ATTR_INIT_SETTINGS;
403
404 if (priv->bd_stash_en)
405 attrs |= ATTR_BDSTASH;
406
407 if (priv->rx_stash_size != 0)
408 attrs |= ATTR_BUFSTASH;
409
410 gfar_write(&regs->attr, attrs);
411
412 gfar_write(&regs->fifo_tx_thr, priv->fifo_threshold);
413 gfar_write(&regs->fifo_tx_starve, priv->fifo_starve);
414 gfar_write(&regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
415}
416
150static const struct net_device_ops gfar_netdev_ops = { 417static const struct net_device_ops gfar_netdev_ops = {
151 .ndo_open = gfar_enet_open, 418 .ndo_open = gfar_enet_open,
152 .ndo_start_xmit = gfar_start_xmit, 419 .ndo_start_xmit = gfar_start_xmit,
@@ -155,6 +422,7 @@ static const struct net_device_ops gfar_netdev_ops = {
155 .ndo_set_multicast_list = gfar_set_multi, 422 .ndo_set_multicast_list = gfar_set_multi,
156 .ndo_tx_timeout = gfar_timeout, 423 .ndo_tx_timeout = gfar_timeout,
157 .ndo_do_ioctl = gfar_ioctl, 424 .ndo_do_ioctl = gfar_ioctl,
425 .ndo_select_queue = gfar_select_queue,
158 .ndo_vlan_rx_register = gfar_vlan_rx_register, 426 .ndo_vlan_rx_register = gfar_vlan_rx_register,
159 .ndo_set_mac_address = eth_mac_addr, 427 .ndo_set_mac_address = eth_mac_addr,
160 .ndo_validate_addr = eth_validate_addr, 428 .ndo_validate_addr = eth_validate_addr,
@@ -163,56 +431,252 @@ static const struct net_device_ops gfar_netdev_ops = {
163#endif 431#endif
164}; 432};
165 433
434unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
435unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
436
437void lock_rx_qs(struct gfar_private *priv)
438{
439 int i = 0x0;
440
441 for (i = 0; i < priv->num_rx_queues; i++)
442 spin_lock(&priv->rx_queue[i]->rxlock);
443}
444
445void lock_tx_qs(struct gfar_private *priv)
446{
447 int i = 0x0;
448
449 for (i = 0; i < priv->num_tx_queues; i++)
450 spin_lock(&priv->tx_queue[i]->txlock);
451}
452
453void unlock_rx_qs(struct gfar_private *priv)
454{
455 int i = 0x0;
456
457 for (i = 0; i < priv->num_rx_queues; i++)
458 spin_unlock(&priv->rx_queue[i]->rxlock);
459}
460
461void unlock_tx_qs(struct gfar_private *priv)
462{
463 int i = 0x0;
464
465 for (i = 0; i < priv->num_tx_queues; i++)
466 spin_unlock(&priv->tx_queue[i]->txlock);
467}
468
166/* Returns 1 if incoming frames use an FCB */ 469/* Returns 1 if incoming frames use an FCB */
167static inline int gfar_uses_fcb(struct gfar_private *priv) 470static inline int gfar_uses_fcb(struct gfar_private *priv)
168{ 471{
169 return priv->vlgrp || priv->rx_csum_enable; 472 return priv->vlgrp || priv->rx_csum_enable;
170} 473}
171 474
172static int gfar_of_init(struct net_device *dev) 475u16 gfar_select_queue(struct net_device *dev, struct sk_buff *skb)
476{
477 return skb_get_queue_mapping(skb);
478}
479static void free_tx_pointers(struct gfar_private *priv)
480{
481 int i = 0;
482
483 for (i = 0; i < priv->num_tx_queues; i++)
484 kfree(priv->tx_queue[i]);
485}
486
487static void free_rx_pointers(struct gfar_private *priv)
488{
489 int i = 0;
490
491 for (i = 0; i < priv->num_rx_queues; i++)
492 kfree(priv->rx_queue[i]);
493}
494
495static void unmap_group_regs(struct gfar_private *priv)
496{
497 int i = 0;
498
499 for (i = 0; i < MAXGROUPS; i++)
500 if (priv->gfargrp[i].regs)
501 iounmap(priv->gfargrp[i].regs);
502}
503
504static void disable_napi(struct gfar_private *priv)
505{
506 int i = 0;
507
508 for (i = 0; i < priv->num_grps; i++)
509 napi_disable(&priv->gfargrp[i].napi);
510}
511
512static void enable_napi(struct gfar_private *priv)
513{
514 int i = 0;
515
516 for (i = 0; i < priv->num_grps; i++)
517 napi_enable(&priv->gfargrp[i].napi);
518}
519
520static int gfar_parse_group(struct device_node *np,
521 struct gfar_private *priv, const char *model)
522{
523 u32 *queue_mask;
524 u64 addr, size;
525
526 addr = of_translate_address(np,
527 of_get_address(np, 0, &size, NULL));
528 priv->gfargrp[priv->num_grps].regs = ioremap(addr, size);
529
530 if (!priv->gfargrp[priv->num_grps].regs)
531 return -ENOMEM;
532
533 priv->gfargrp[priv->num_grps].interruptTransmit =
534 irq_of_parse_and_map(np, 0);
535
536 /* If we aren't the FEC we have multiple interrupts */
537 if (model && strcasecmp(model, "FEC")) {
538 priv->gfargrp[priv->num_grps].interruptReceive =
539 irq_of_parse_and_map(np, 1);
540 priv->gfargrp[priv->num_grps].interruptError =
541 irq_of_parse_and_map(np,2);
542 if (priv->gfargrp[priv->num_grps].interruptTransmit < 0 ||
543 priv->gfargrp[priv->num_grps].interruptReceive < 0 ||
544 priv->gfargrp[priv->num_grps].interruptError < 0) {
545 return -EINVAL;
546 }
547 }
548
549 priv->gfargrp[priv->num_grps].grp_id = priv->num_grps;
550 priv->gfargrp[priv->num_grps].priv = priv;
551 spin_lock_init(&priv->gfargrp[priv->num_grps].grplock);
552 if(priv->mode == MQ_MG_MODE) {
553 queue_mask = (u32 *)of_get_property(np,
554 "fsl,rx-bit-map", NULL);
555 priv->gfargrp[priv->num_grps].rx_bit_map =
556 queue_mask ? *queue_mask :(DEFAULT_MAPPING >> priv->num_grps);
557 queue_mask = (u32 *)of_get_property(np,
558 "fsl,tx-bit-map", NULL);
559 priv->gfargrp[priv->num_grps].tx_bit_map =
560 queue_mask ? *queue_mask : (DEFAULT_MAPPING >> priv->num_grps);
561 } else {
562 priv->gfargrp[priv->num_grps].rx_bit_map = 0xFF;
563 priv->gfargrp[priv->num_grps].tx_bit_map = 0xFF;
564 }
565 priv->num_grps++;
566
567 return 0;
568}
569
570static int gfar_of_init(struct of_device *ofdev, struct net_device **pdev)
173{ 571{
174 const char *model; 572 const char *model;
175 const char *ctype; 573 const char *ctype;
176 const void *mac_addr; 574 const void *mac_addr;
177 u64 addr, size; 575 int err = 0, i;
178 int err = 0; 576 struct net_device *dev = NULL;
179 struct gfar_private *priv = netdev_priv(dev); 577 struct gfar_private *priv = NULL;
180 struct device_node *np = priv->node; 578 struct device_node *np = ofdev->node;
579 struct device_node *child = NULL;
181 const u32 *stash; 580 const u32 *stash;
182 const u32 *stash_len; 581 const u32 *stash_len;
183 const u32 *stash_idx; 582 const u32 *stash_idx;
583 unsigned int num_tx_qs, num_rx_qs;
584 u32 *tx_queues, *rx_queues;
184 585
185 if (!np || !of_device_is_available(np)) 586 if (!np || !of_device_is_available(np))
186 return -ENODEV; 587 return -ENODEV;
187 588
188 /* get a pointer to the register memory */ 589 /* parse the num of tx and rx queues */
189 addr = of_translate_address(np, of_get_address(np, 0, &size, NULL)); 590 tx_queues = (u32 *)of_get_property(np, "fsl,num_tx_queues", NULL);
190 priv->regs = ioremap(addr, size); 591 num_tx_qs = tx_queues ? *tx_queues : 1;
592
593 if (num_tx_qs > MAX_TX_QS) {
594 printk(KERN_ERR "num_tx_qs(=%d) greater than MAX_TX_QS(=%d)\n",
595 num_tx_qs, MAX_TX_QS);
596 printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
597 return -EINVAL;
598 }
599
600 rx_queues = (u32 *)of_get_property(np, "fsl,num_rx_queues", NULL);
601 num_rx_qs = rx_queues ? *rx_queues : 1;
191 602
192 if (priv->regs == NULL) 603 if (num_rx_qs > MAX_RX_QS) {
604 printk(KERN_ERR "num_rx_qs(=%d) greater than MAX_RX_QS(=%d)\n",
605 num_tx_qs, MAX_TX_QS);
606 printk(KERN_ERR "Cannot do alloc_etherdev, aborting\n");
607 return -EINVAL;
608 }
609
610 *pdev = alloc_etherdev_mq(sizeof(*priv), num_tx_qs);
611 dev = *pdev;
612 if (NULL == dev)
193 return -ENOMEM; 613 return -ENOMEM;
194 614
195 priv->interruptTransmit = irq_of_parse_and_map(np, 0); 615 priv = netdev_priv(dev);
616 priv->node = ofdev->node;
617 priv->ndev = dev;
618
619 dev->num_tx_queues = num_tx_qs;
620 dev->real_num_tx_queues = num_tx_qs;
621 priv->num_tx_queues = num_tx_qs;
622 priv->num_rx_queues = num_rx_qs;
623 priv->num_grps = 0x0;
196 624
197 model = of_get_property(np, "model", NULL); 625 model = of_get_property(np, "model", NULL);
198 626
199 /* If we aren't the FEC we have multiple interrupts */ 627 for (i = 0; i < MAXGROUPS; i++)
200 if (model && strcasecmp(model, "FEC")) { 628 priv->gfargrp[i].regs = NULL;
201 priv->interruptReceive = irq_of_parse_and_map(np, 1); 629
630 /* Parse and initialize group specific information */
631 if (of_device_is_compatible(np, "fsl,etsec2")) {
632 priv->mode = MQ_MG_MODE;
633 for_each_child_of_node(np, child) {
634 err = gfar_parse_group(child, priv, model);
635 if (err)
636 goto err_grp_init;
637 }
638 } else {
639 priv->mode = SQ_SG_MODE;
640 err = gfar_parse_group(np, priv, model);
641 if(err)
642 goto err_grp_init;
643 }
202 644
203 priv->interruptError = irq_of_parse_and_map(np, 2); 645 for (i = 0; i < priv->num_tx_queues; i++)
646 priv->tx_queue[i] = NULL;
647 for (i = 0; i < priv->num_rx_queues; i++)
648 priv->rx_queue[i] = NULL;
649
650 for (i = 0; i < priv->num_tx_queues; i++) {
651 priv->tx_queue[i] = (struct gfar_priv_tx_q *)kmalloc(
652 sizeof (struct gfar_priv_tx_q), GFP_KERNEL);
653 if (!priv->tx_queue[i]) {
654 err = -ENOMEM;
655 goto tx_alloc_failed;
656 }
657 priv->tx_queue[i]->tx_skbuff = NULL;
658 priv->tx_queue[i]->qindex = i;
659 priv->tx_queue[i]->dev = dev;
660 spin_lock_init(&(priv->tx_queue[i]->txlock));
661 }
204 662
205 if (priv->interruptTransmit < 0 || 663 for (i = 0; i < priv->num_rx_queues; i++) {
206 priv->interruptReceive < 0 || 664 priv->rx_queue[i] = (struct gfar_priv_rx_q *)kmalloc(
207 priv->interruptError < 0) { 665 sizeof (struct gfar_priv_rx_q), GFP_KERNEL);
208 err = -EINVAL; 666 if (!priv->rx_queue[i]) {
209 goto err_out; 667 err = -ENOMEM;
668 goto rx_alloc_failed;
210 } 669 }
670 priv->rx_queue[i]->rx_skbuff = NULL;
671 priv->rx_queue[i]->qindex = i;
672 priv->rx_queue[i]->dev = dev;
673 spin_lock_init(&(priv->rx_queue[i]->rxlock));
211 } 674 }
212 675
676
213 stash = of_get_property(np, "bd-stash", NULL); 677 stash = of_get_property(np, "bd-stash", NULL);
214 678
215 if(stash) { 679 if (stash) {
216 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING; 680 priv->device_flags |= FSL_GIANFAR_DEV_HAS_BD_STASHING;
217 priv->bd_stash_en = 1; 681 priv->bd_stash_en = 1;
218 } 682 }
@@ -270,8 +734,13 @@ static int gfar_of_init(struct net_device *dev)
270 734
271 return 0; 735 return 0;
272 736
273err_out: 737rx_alloc_failed:
274 iounmap(priv->regs); 738 free_rx_pointers(priv);
739tx_alloc_failed:
740 free_tx_pointers(priv);
741err_grp_init:
742 unmap_group_regs(priv);
743 free_netdev(dev);
275 return err; 744 return err;
276} 745}
277 746
@@ -289,6 +758,85 @@ static int gfar_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
289 return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd); 758 return phy_mii_ioctl(priv->phydev, if_mii(rq), cmd);
290} 759}
291 760
761static unsigned int reverse_bitmap(unsigned int bit_map, unsigned int max_qs)
762{
763 unsigned int new_bit_map = 0x0;
764 int mask = 0x1 << (max_qs - 1), i;
765 for (i = 0; i < max_qs; i++) {
766 if (bit_map & mask)
767 new_bit_map = new_bit_map + (1 << i);
768 mask = mask >> 0x1;
769 }
770 return new_bit_map;
771}
772
773static u32 cluster_entry_per_class(struct gfar_private *priv, u32 rqfar,
774 u32 class)
775{
776 u32 rqfpr = FPR_FILER_MASK;
777 u32 rqfcr = 0x0;
778
779 rqfar--;
780 rqfcr = RQFCR_CLE | RQFCR_PID_MASK | RQFCR_CMP_EXACT;
781 ftp_rqfpr[rqfar] = rqfpr;
782 ftp_rqfcr[rqfar] = rqfcr;
783 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
784
785 rqfar--;
786 rqfcr = RQFCR_CMP_NOMATCH;
787 ftp_rqfpr[rqfar] = rqfpr;
788 ftp_rqfcr[rqfar] = rqfcr;
789 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
790
791 rqfar--;
792 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_PARSE | RQFCR_CLE | RQFCR_AND;
793 rqfpr = class;
794 ftp_rqfcr[rqfar] = rqfcr;
795 ftp_rqfpr[rqfar] = rqfpr;
796 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
797
798 rqfar--;
799 rqfcr = RQFCR_CMP_EXACT | RQFCR_PID_MASK | RQFCR_AND;
800 rqfpr = class;
801 ftp_rqfcr[rqfar] = rqfcr;
802 ftp_rqfpr[rqfar] = rqfpr;
803 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
804
805 return rqfar;
806}
807
808static void gfar_init_filer_table(struct gfar_private *priv)
809{
810 int i = 0x0;
811 u32 rqfar = MAX_FILER_IDX;
812 u32 rqfcr = 0x0;
813 u32 rqfpr = FPR_FILER_MASK;
814
815 /* Default rule */
816 rqfcr = RQFCR_CMP_MATCH;
817 ftp_rqfcr[rqfar] = rqfcr;
818 ftp_rqfpr[rqfar] = rqfpr;
819 gfar_write_filer(priv, rqfar, rqfcr, rqfpr);
820
821 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6);
822 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_UDP);
823 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV6 | RQFPR_TCP);
824 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4);
825 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_UDP);
826 rqfar = cluster_entry_per_class(priv, rqfar, RQFPR_IPV4 | RQFPR_TCP);
827
828 /* cur_filer_idx indicated the fisrt non-masked rule */
829 priv->cur_filer_idx = rqfar;
830
831 /* Rest are masked rules */
832 rqfcr = RQFCR_CMP_NOMATCH;
833 for (i = 0; i < rqfar; i++) {
834 ftp_rqfcr[i] = rqfcr;
835 ftp_rqfpr[i] = rqfpr;
836 gfar_write_filer(priv, i, rqfcr, rqfpr);
837 }
838}
839
292/* Set up the ethernet device structure, private data, 840/* Set up the ethernet device structure, private data,
293 * and anything else we need before we start */ 841 * and anything else we need before we start */
294static int gfar_probe(struct of_device *ofdev, 842static int gfar_probe(struct of_device *ofdev,
@@ -297,14 +845,17 @@ static int gfar_probe(struct of_device *ofdev,
297 u32 tempval; 845 u32 tempval;
298 struct net_device *dev = NULL; 846 struct net_device *dev = NULL;
299 struct gfar_private *priv = NULL; 847 struct gfar_private *priv = NULL;
300 int err = 0; 848 struct gfar __iomem *regs = NULL;
849 int err = 0, i, grp_idx = 0;
301 int len_devname; 850 int len_devname;
851 u32 rstat = 0, tstat = 0, rqueue = 0, tqueue = 0;
852 u32 isrg = 0;
853 u32 __iomem *baddr;
302 854
303 /* Create an ethernet device instance */ 855 err = gfar_of_init(ofdev, &dev);
304 dev = alloc_etherdev(sizeof (*priv));
305 856
306 if (NULL == dev) 857 if (err)
307 return -ENOMEM; 858 return err;
308 859
309 priv = netdev_priv(dev); 860 priv = netdev_priv(dev);
310 priv->ndev = dev; 861 priv->ndev = dev;
@@ -312,50 +863,46 @@ static int gfar_probe(struct of_device *ofdev,
312 priv->node = ofdev->node; 863 priv->node = ofdev->node;
313 SET_NETDEV_DEV(dev, &ofdev->dev); 864 SET_NETDEV_DEV(dev, &ofdev->dev);
314 865
315 err = gfar_of_init(dev);
316
317 if (err)
318 goto regs_fail;
319
320 spin_lock_init(&priv->txlock);
321 spin_lock_init(&priv->rxlock);
322 spin_lock_init(&priv->bflock); 866 spin_lock_init(&priv->bflock);
323 INIT_WORK(&priv->reset_task, gfar_reset_task); 867 INIT_WORK(&priv->reset_task, gfar_reset_task);
324 868
325 dev_set_drvdata(&ofdev->dev, priv); 869 dev_set_drvdata(&ofdev->dev, priv);
870 regs = priv->gfargrp[0].regs;
326 871
327 /* Stop the DMA engine now, in case it was running before */ 872 /* Stop the DMA engine now, in case it was running before */
328 /* (The firmware could have used it, and left it running). */ 873 /* (The firmware could have used it, and left it running). */
329 gfar_halt(dev); 874 gfar_halt(dev);
330 875
331 /* Reset MAC layer */ 876 /* Reset MAC layer */
332 gfar_write(&priv->regs->maccfg1, MACCFG1_SOFT_RESET); 877 gfar_write(&regs->maccfg1, MACCFG1_SOFT_RESET);
333 878
334 /* We need to delay at least 3 TX clocks */ 879 /* We need to delay at least 3 TX clocks */
335 udelay(2); 880 udelay(2);
336 881
337 tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW); 882 tempval = (MACCFG1_TX_FLOW | MACCFG1_RX_FLOW);
338 gfar_write(&priv->regs->maccfg1, tempval); 883 gfar_write(&regs->maccfg1, tempval);
339 884
340 /* Initialize MACCFG2. */ 885 /* Initialize MACCFG2. */
341 gfar_write(&priv->regs->maccfg2, MACCFG2_INIT_SETTINGS); 886 gfar_write(&regs->maccfg2, MACCFG2_INIT_SETTINGS);
342 887
343 /* Initialize ECNTRL */ 888 /* Initialize ECNTRL */
344 gfar_write(&priv->regs->ecntrl, ECNTRL_INIT_SETTINGS); 889 gfar_write(&regs->ecntrl, ECNTRL_INIT_SETTINGS);
345 890
346 /* Set the dev->base_addr to the gfar reg region */ 891 /* Set the dev->base_addr to the gfar reg region */
347 dev->base_addr = (unsigned long) (priv->regs); 892 dev->base_addr = (unsigned long) regs;
348 893
349 SET_NETDEV_DEV(dev, &ofdev->dev); 894 SET_NETDEV_DEV(dev, &ofdev->dev);
350 895
351 /* Fill in the dev structure */ 896 /* Fill in the dev structure */
352 dev->watchdog_timeo = TX_TIMEOUT; 897 dev->watchdog_timeo = TX_TIMEOUT;
353 netif_napi_add(dev, &priv->napi, gfar_poll, GFAR_DEV_WEIGHT);
354 dev->mtu = 1500; 898 dev->mtu = 1500;
355
356 dev->netdev_ops = &gfar_netdev_ops; 899 dev->netdev_ops = &gfar_netdev_ops;
357 dev->ethtool_ops = &gfar_ethtool_ops; 900 dev->ethtool_ops = &gfar_ethtool_ops;
358 901
902 /* Register for napi ...We are registering NAPI for each grp */
903 for (i = 0; i < priv->num_grps; i++)
904 netif_napi_add(dev, &priv->gfargrp[i].napi, gfar_poll, GFAR_DEV_WEIGHT);
905
359 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) { 906 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM) {
360 priv->rx_csum_enable = 1; 907 priv->rx_csum_enable = 1;
361 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA; 908 dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_HIGHDMA;
@@ -371,35 +918,35 @@ static int gfar_probe(struct of_device *ofdev,
371 priv->extended_hash = 1; 918 priv->extended_hash = 1;
372 priv->hash_width = 9; 919 priv->hash_width = 9;
373 920
374 priv->hash_regs[0] = &priv->regs->igaddr0; 921 priv->hash_regs[0] = &regs->igaddr0;
375 priv->hash_regs[1] = &priv->regs->igaddr1; 922 priv->hash_regs[1] = &regs->igaddr1;
376 priv->hash_regs[2] = &priv->regs->igaddr2; 923 priv->hash_regs[2] = &regs->igaddr2;
377 priv->hash_regs[3] = &priv->regs->igaddr3; 924 priv->hash_regs[3] = &regs->igaddr3;
378 priv->hash_regs[4] = &priv->regs->igaddr4; 925 priv->hash_regs[4] = &regs->igaddr4;
379 priv->hash_regs[5] = &priv->regs->igaddr5; 926 priv->hash_regs[5] = &regs->igaddr5;
380 priv->hash_regs[6] = &priv->regs->igaddr6; 927 priv->hash_regs[6] = &regs->igaddr6;
381 priv->hash_regs[7] = &priv->regs->igaddr7; 928 priv->hash_regs[7] = &regs->igaddr7;
382 priv->hash_regs[8] = &priv->regs->gaddr0; 929 priv->hash_regs[8] = &regs->gaddr0;
383 priv->hash_regs[9] = &priv->regs->gaddr1; 930 priv->hash_regs[9] = &regs->gaddr1;
384 priv->hash_regs[10] = &priv->regs->gaddr2; 931 priv->hash_regs[10] = &regs->gaddr2;
385 priv->hash_regs[11] = &priv->regs->gaddr3; 932 priv->hash_regs[11] = &regs->gaddr3;
386 priv->hash_regs[12] = &priv->regs->gaddr4; 933 priv->hash_regs[12] = &regs->gaddr4;
387 priv->hash_regs[13] = &priv->regs->gaddr5; 934 priv->hash_regs[13] = &regs->gaddr5;
388 priv->hash_regs[14] = &priv->regs->gaddr6; 935 priv->hash_regs[14] = &regs->gaddr6;
389 priv->hash_regs[15] = &priv->regs->gaddr7; 936 priv->hash_regs[15] = &regs->gaddr7;
390 937
391 } else { 938 } else {
392 priv->extended_hash = 0; 939 priv->extended_hash = 0;
393 priv->hash_width = 8; 940 priv->hash_width = 8;
394 941
395 priv->hash_regs[0] = &priv->regs->gaddr0; 942 priv->hash_regs[0] = &regs->gaddr0;
396 priv->hash_regs[1] = &priv->regs->gaddr1; 943 priv->hash_regs[1] = &regs->gaddr1;
397 priv->hash_regs[2] = &priv->regs->gaddr2; 944 priv->hash_regs[2] = &regs->gaddr2;
398 priv->hash_regs[3] = &priv->regs->gaddr3; 945 priv->hash_regs[3] = &regs->gaddr3;
399 priv->hash_regs[4] = &priv->regs->gaddr4; 946 priv->hash_regs[4] = &regs->gaddr4;
400 priv->hash_regs[5] = &priv->regs->gaddr5; 947 priv->hash_regs[5] = &regs->gaddr5;
401 priv->hash_regs[6] = &priv->regs->gaddr6; 948 priv->hash_regs[6] = &regs->gaddr6;
402 priv->hash_regs[7] = &priv->regs->gaddr7; 949 priv->hash_regs[7] = &regs->gaddr7;
403 } 950 }
404 951
405 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING) 952 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_PADDING)
@@ -410,15 +957,70 @@ static int gfar_probe(struct of_device *ofdev,
410 if (dev->features & NETIF_F_IP_CSUM) 957 if (dev->features & NETIF_F_IP_CSUM)
411 dev->hard_header_len += GMAC_FCB_LEN; 958 dev->hard_header_len += GMAC_FCB_LEN;
412 959
960 /* Program the isrg regs only if number of grps > 1 */
961 if (priv->num_grps > 1) {
962 baddr = &regs->isrg0;
963 for (i = 0; i < priv->num_grps; i++) {
964 isrg |= (priv->gfargrp[i].rx_bit_map << ISRG_SHIFT_RX);
965 isrg |= (priv->gfargrp[i].tx_bit_map << ISRG_SHIFT_TX);
966 gfar_write(baddr, isrg);
967 baddr++;
968 isrg = 0x0;
969 }
970 }
971
972 /* Need to reverse the bit maps as bit_map's MSB is q0
973 * but, for_each_bit parses from right to left, which
974 * basically reverses the queue numbers */
975 for (i = 0; i< priv->num_grps; i++) {
976 priv->gfargrp[i].tx_bit_map = reverse_bitmap(
977 priv->gfargrp[i].tx_bit_map, MAX_TX_QS);
978 priv->gfargrp[i].rx_bit_map = reverse_bitmap(
979 priv->gfargrp[i].rx_bit_map, MAX_RX_QS);
980 }
981
982 /* Calculate RSTAT, TSTAT, RQUEUE and TQUEUE values,
983 * also assign queues to groups */
984 for (grp_idx = 0; grp_idx < priv->num_grps; grp_idx++) {
985 priv->gfargrp[grp_idx].num_rx_queues = 0x0;
986 for_each_bit(i, &priv->gfargrp[grp_idx].rx_bit_map,
987 priv->num_rx_queues) {
988 priv->gfargrp[grp_idx].num_rx_queues++;
989 priv->rx_queue[i]->grp = &priv->gfargrp[grp_idx];
990 rstat = rstat | (RSTAT_CLEAR_RHALT >> i);
991 rqueue = rqueue | ((RQUEUE_EN0 | RQUEUE_EX0) >> i);
992 }
993 priv->gfargrp[grp_idx].num_tx_queues = 0x0;
994 for_each_bit (i, &priv->gfargrp[grp_idx].tx_bit_map,
995 priv->num_tx_queues) {
996 priv->gfargrp[grp_idx].num_tx_queues++;
997 priv->tx_queue[i]->grp = &priv->gfargrp[grp_idx];
998 tstat = tstat | (TSTAT_CLEAR_THALT >> i);
999 tqueue = tqueue | (TQUEUE_EN0 >> i);
1000 }
1001 priv->gfargrp[grp_idx].rstat = rstat;
1002 priv->gfargrp[grp_idx].tstat = tstat;
1003 rstat = tstat =0;
1004 }
1005
1006 gfar_write(&regs->rqueue, rqueue);
1007 gfar_write(&regs->tqueue, tqueue);
1008
413 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE; 1009 priv->rx_buffer_size = DEFAULT_RX_BUFFER_SIZE;
414 priv->tx_ring_size = DEFAULT_TX_RING_SIZE;
415 priv->rx_ring_size = DEFAULT_RX_RING_SIZE;
416 priv->num_txbdfree = DEFAULT_TX_RING_SIZE;
417 1010
418 priv->txcoalescing = DEFAULT_TX_COALESCE; 1011 /* Initializing some of the rx/tx queue level parameters */
419 priv->txic = DEFAULT_TXIC; 1012 for (i = 0; i < priv->num_tx_queues; i++) {
420 priv->rxcoalescing = DEFAULT_RX_COALESCE; 1013 priv->tx_queue[i]->tx_ring_size = DEFAULT_TX_RING_SIZE;
421 priv->rxic = DEFAULT_RXIC; 1014 priv->tx_queue[i]->num_txbdfree = DEFAULT_TX_RING_SIZE;
1015 priv->tx_queue[i]->txcoalescing = DEFAULT_TX_COALESCE;
1016 priv->tx_queue[i]->txic = DEFAULT_TXIC;
1017 }
1018
1019 for (i = 0; i < priv->num_rx_queues; i++) {
1020 priv->rx_queue[i]->rx_ring_size = DEFAULT_RX_RING_SIZE;
1021 priv->rx_queue[i]->rxcoalescing = DEFAULT_RX_COALESCE;
1022 priv->rx_queue[i]->rxic = DEFAULT_RXIC;
1023 }
422 1024
423 /* Enable most messages by default */ 1025 /* Enable most messages by default */
424 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1; 1026 priv->msg_enable = (NETIF_MSG_IFUP << 1 ) - 1;
@@ -439,20 +1041,43 @@ static int gfar_probe(struct of_device *ofdev,
439 1041
440 /* fill out IRQ number and name fields */ 1042 /* fill out IRQ number and name fields */
441 len_devname = strlen(dev->name); 1043 len_devname = strlen(dev->name);
442 strncpy(&priv->int_name_tx[0], dev->name, len_devname); 1044 for (i = 0; i < priv->num_grps; i++) {
443 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1045 strncpy(&priv->gfargrp[i].int_name_tx[0], dev->name,
444 strncpy(&priv->int_name_tx[len_devname], 1046 len_devname);
445 "_tx", sizeof("_tx") + 1); 1047 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
446 1048 strncpy(&priv->gfargrp[i].int_name_tx[len_devname],
447 strncpy(&priv->int_name_rx[0], dev->name, len_devname); 1049 "_g", sizeof("_g"));
448 strncpy(&priv->int_name_rx[len_devname], 1050 priv->gfargrp[i].int_name_tx[
449 "_rx", sizeof("_rx") + 1); 1051 strlen(priv->gfargrp[i].int_name_tx)] = i+48;
1052 strncpy(&priv->gfargrp[i].int_name_tx[strlen(
1053 priv->gfargrp[i].int_name_tx)],
1054 "_tx", sizeof("_tx") + 1);
1055
1056 strncpy(&priv->gfargrp[i].int_name_rx[0], dev->name,
1057 len_devname);
1058 strncpy(&priv->gfargrp[i].int_name_rx[len_devname],
1059 "_g", sizeof("_g"));
1060 priv->gfargrp[i].int_name_rx[
1061 strlen(priv->gfargrp[i].int_name_rx)] = i+48;
1062 strncpy(&priv->gfargrp[i].int_name_rx[strlen(
1063 priv->gfargrp[i].int_name_rx)],
1064 "_rx", sizeof("_rx") + 1);
1065
1066 strncpy(&priv->gfargrp[i].int_name_er[0], dev->name,
1067 len_devname);
1068 strncpy(&priv->gfargrp[i].int_name_er[len_devname],
1069 "_g", sizeof("_g"));
1070 priv->gfargrp[i].int_name_er[strlen(
1071 priv->gfargrp[i].int_name_er)] = i+48;
1072 strncpy(&priv->gfargrp[i].int_name_er[strlen(\
1073 priv->gfargrp[i].int_name_er)],
1074 "_er", sizeof("_er") + 1);
1075 } else
1076 priv->gfargrp[i].int_name_tx[len_devname] = '\0';
1077 }
450 1078
451 strncpy(&priv->int_name_er[0], dev->name, len_devname); 1079 /* Initialize the filer table */
452 strncpy(&priv->int_name_er[len_devname], 1080 gfar_init_filer_table(priv);
453 "_er", sizeof("_er") + 1);
454 } else
455 priv->int_name_tx[len_devname] = '\0';
456 1081
457 /* Create all the sysfs files */ 1082 /* Create all the sysfs files */
458 gfar_init_sysfs(dev); 1083 gfar_init_sysfs(dev);
@@ -463,14 +1088,19 @@ static int gfar_probe(struct of_device *ofdev,
463 /* Even more device info helps when determining which kernel */ 1088 /* Even more device info helps when determining which kernel */
464 /* provided which set of benchmarks. */ 1089 /* provided which set of benchmarks. */
465 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name); 1090 printk(KERN_INFO "%s: Running with NAPI enabled\n", dev->name);
466 printk(KERN_INFO "%s: %d/%d RX/TX BD ring size\n", 1091 for (i = 0; i < priv->num_rx_queues; i++)
467 dev->name, priv->rx_ring_size, priv->tx_ring_size); 1092 printk(KERN_INFO "%s: :RX BD ring size for Q[%d]: %d\n",
1093 dev->name, i, priv->rx_queue[i]->rx_ring_size);
1094 for(i = 0; i < priv->num_tx_queues; i++)
1095 printk(KERN_INFO "%s:TX BD ring size for Q[%d]: %d\n",
1096 dev->name, i, priv->tx_queue[i]->tx_ring_size);
468 1097
469 return 0; 1098 return 0;
470 1099
471register_fail: 1100register_fail:
472 iounmap(priv->regs); 1101 unmap_group_regs(priv);
473regs_fail: 1102 free_tx_pointers(priv);
1103 free_rx_pointers(priv);
474 if (priv->phy_node) 1104 if (priv->phy_node)
475 of_node_put(priv->phy_node); 1105 of_node_put(priv->phy_node);
476 if (priv->tbi_node) 1106 if (priv->tbi_node)
@@ -491,54 +1121,59 @@ static int gfar_remove(struct of_device *ofdev)
491 dev_set_drvdata(&ofdev->dev, NULL); 1121 dev_set_drvdata(&ofdev->dev, NULL);
492 1122
493 unregister_netdev(priv->ndev); 1123 unregister_netdev(priv->ndev);
494 iounmap(priv->regs); 1124 unmap_group_regs(priv);
495 free_netdev(priv->ndev); 1125 free_netdev(priv->ndev);
496 1126
497 return 0; 1127 return 0;
498} 1128}
499 1129
500#ifdef CONFIG_PM 1130#ifdef CONFIG_PM
501static int gfar_suspend(struct of_device *ofdev, pm_message_t state) 1131
1132static int gfar_suspend(struct device *dev)
502{ 1133{
503 struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); 1134 struct gfar_private *priv = dev_get_drvdata(dev);
504 struct net_device *dev = priv->ndev; 1135 struct net_device *ndev = priv->ndev;
1136 struct gfar __iomem *regs = priv->gfargrp[0].regs;
505 unsigned long flags; 1137 unsigned long flags;
506 u32 tempval; 1138 u32 tempval;
507 1139
508 int magic_packet = priv->wol_en && 1140 int magic_packet = priv->wol_en &&
509 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1141 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
510 1142
511 netif_device_detach(dev); 1143 netif_device_detach(ndev);
512 1144
513 if (netif_running(dev)) { 1145 if (netif_running(ndev)) {
514 spin_lock_irqsave(&priv->txlock, flags);
515 spin_lock(&priv->rxlock);
516 1146
517 gfar_halt_nodisable(dev); 1147 local_irq_save(flags);
1148 lock_tx_qs(priv);
1149 lock_rx_qs(priv);
1150
1151 gfar_halt_nodisable(ndev);
518 1152
519 /* Disable Tx, and Rx if wake-on-LAN is disabled. */ 1153 /* Disable Tx, and Rx if wake-on-LAN is disabled. */
520 tempval = gfar_read(&priv->regs->maccfg1); 1154 tempval = gfar_read(&regs->maccfg1);
521 1155
522 tempval &= ~MACCFG1_TX_EN; 1156 tempval &= ~MACCFG1_TX_EN;
523 1157
524 if (!magic_packet) 1158 if (!magic_packet)
525 tempval &= ~MACCFG1_RX_EN; 1159 tempval &= ~MACCFG1_RX_EN;
526 1160
527 gfar_write(&priv->regs->maccfg1, tempval); 1161 gfar_write(&regs->maccfg1, tempval);
528 1162
529 spin_unlock(&priv->rxlock); 1163 unlock_rx_qs(priv);
530 spin_unlock_irqrestore(&priv->txlock, flags); 1164 unlock_tx_qs(priv);
1165 local_irq_restore(flags);
531 1166
532 napi_disable(&priv->napi); 1167 disable_napi(priv);
533 1168
534 if (magic_packet) { 1169 if (magic_packet) {
535 /* Enable interrupt on Magic Packet */ 1170 /* Enable interrupt on Magic Packet */
536 gfar_write(&priv->regs->imask, IMASK_MAG); 1171 gfar_write(&regs->imask, IMASK_MAG);
537 1172
538 /* Enable Magic Packet mode */ 1173 /* Enable Magic Packet mode */
539 tempval = gfar_read(&priv->regs->maccfg2); 1174 tempval = gfar_read(&regs->maccfg2);
540 tempval |= MACCFG2_MPEN; 1175 tempval |= MACCFG2_MPEN;
541 gfar_write(&priv->regs->maccfg2, tempval); 1176 gfar_write(&regs->maccfg2, tempval);
542 } else { 1177 } else {
543 phy_stop(priv->phydev); 1178 phy_stop(priv->phydev);
544 } 1179 }
@@ -547,17 +1182,18 @@ static int gfar_suspend(struct of_device *ofdev, pm_message_t state)
547 return 0; 1182 return 0;
548} 1183}
549 1184
550static int gfar_resume(struct of_device *ofdev) 1185static int gfar_resume(struct device *dev)
551{ 1186{
552 struct gfar_private *priv = dev_get_drvdata(&ofdev->dev); 1187 struct gfar_private *priv = dev_get_drvdata(dev);
553 struct net_device *dev = priv->ndev; 1188 struct net_device *ndev = priv->ndev;
1189 struct gfar __iomem *regs = priv->gfargrp[0].regs;
554 unsigned long flags; 1190 unsigned long flags;
555 u32 tempval; 1191 u32 tempval;
556 int magic_packet = priv->wol_en && 1192 int magic_packet = priv->wol_en &&
557 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET); 1193 (priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET);
558 1194
559 if (!netif_running(dev)) { 1195 if (!netif_running(ndev)) {
560 netif_device_attach(dev); 1196 netif_device_attach(ndev);
561 return 0; 1197 return 0;
562 } 1198 }
563 1199
@@ -567,28 +1203,80 @@ static int gfar_resume(struct of_device *ofdev)
567 /* Disable Magic Packet mode, in case something 1203 /* Disable Magic Packet mode, in case something
568 * else woke us up. 1204 * else woke us up.
569 */ 1205 */
1206 local_irq_save(flags);
1207 lock_tx_qs(priv);
1208 lock_rx_qs(priv);
570 1209
571 spin_lock_irqsave(&priv->txlock, flags); 1210 tempval = gfar_read(&regs->maccfg2);
572 spin_lock(&priv->rxlock);
573
574 tempval = gfar_read(&priv->regs->maccfg2);
575 tempval &= ~MACCFG2_MPEN; 1211 tempval &= ~MACCFG2_MPEN;
576 gfar_write(&priv->regs->maccfg2, tempval); 1212 gfar_write(&regs->maccfg2, tempval);
577 1213
578 gfar_start(dev); 1214 gfar_start(ndev);
579 1215
580 spin_unlock(&priv->rxlock); 1216 unlock_rx_qs(priv);
581 spin_unlock_irqrestore(&priv->txlock, flags); 1217 unlock_tx_qs(priv);
1218 local_irq_restore(flags);
582 1219
583 netif_device_attach(dev); 1220 netif_device_attach(ndev);
584 1221
585 napi_enable(&priv->napi); 1222 enable_napi(priv);
586 1223
587 return 0; 1224 return 0;
588} 1225}
1226
1227static int gfar_restore(struct device *dev)
1228{
1229 struct gfar_private *priv = dev_get_drvdata(dev);
1230 struct net_device *ndev = priv->ndev;
1231
1232 if (!netif_running(ndev))
1233 return 0;
1234
1235 gfar_init_bds(ndev);
1236 init_registers(ndev);
1237 gfar_set_mac_address(ndev);
1238 gfar_init_mac(ndev);
1239 gfar_start(ndev);
1240
1241 priv->oldlink = 0;
1242 priv->oldspeed = 0;
1243 priv->oldduplex = -1;
1244
1245 if (priv->phydev)
1246 phy_start(priv->phydev);
1247
1248 netif_device_attach(ndev);
1249 enable_napi(priv);
1250
1251 return 0;
1252}
1253
1254static struct dev_pm_ops gfar_pm_ops = {
1255 .suspend = gfar_suspend,
1256 .resume = gfar_resume,
1257 .freeze = gfar_suspend,
1258 .thaw = gfar_resume,
1259 .restore = gfar_restore,
1260};
1261
1262#define GFAR_PM_OPS (&gfar_pm_ops)
1263
1264static int gfar_legacy_suspend(struct of_device *ofdev, pm_message_t state)
1265{
1266 return gfar_suspend(&ofdev->dev);
1267}
1268
1269static int gfar_legacy_resume(struct of_device *ofdev)
1270{
1271 return gfar_resume(&ofdev->dev);
1272}
1273
589#else 1274#else
590#define gfar_suspend NULL 1275
591#define gfar_resume NULL 1276#define GFAR_PM_OPS NULL
1277#define gfar_legacy_suspend NULL
1278#define gfar_legacy_resume NULL
1279
592#endif 1280#endif
593 1281
594/* Reads the controller's registers to determine what interface 1282/* Reads the controller's registers to determine what interface
@@ -597,7 +1285,10 @@ static int gfar_resume(struct of_device *ofdev)
597static phy_interface_t gfar_get_interface(struct net_device *dev) 1285static phy_interface_t gfar_get_interface(struct net_device *dev)
598{ 1286{
599 struct gfar_private *priv = netdev_priv(dev); 1287 struct gfar_private *priv = netdev_priv(dev);
600 u32 ecntrl = gfar_read(&priv->regs->ecntrl); 1288 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1289 u32 ecntrl;
1290
1291 ecntrl = gfar_read(&regs->ecntrl);
601 1292
602 if (ecntrl & ECNTRL_SGMII_MODE) 1293 if (ecntrl & ECNTRL_SGMII_MODE)
603 return PHY_INTERFACE_MODE_SGMII; 1294 return PHY_INTERFACE_MODE_SGMII;
@@ -719,46 +1410,52 @@ static void gfar_configure_serdes(struct net_device *dev)
719static void init_registers(struct net_device *dev) 1410static void init_registers(struct net_device *dev)
720{ 1411{
721 struct gfar_private *priv = netdev_priv(dev); 1412 struct gfar_private *priv = netdev_priv(dev);
1413 struct gfar __iomem *regs = NULL;
1414 int i = 0;
722 1415
723 /* Clear IEVENT */ 1416 for (i = 0; i < priv->num_grps; i++) {
724 gfar_write(&priv->regs->ievent, IEVENT_INIT_CLEAR); 1417 regs = priv->gfargrp[i].regs;
1418 /* Clear IEVENT */
1419 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
725 1420
726 /* Initialize IMASK */ 1421 /* Initialize IMASK */
727 gfar_write(&priv->regs->imask, IMASK_INIT_CLEAR); 1422 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1423 }
728 1424
1425 regs = priv->gfargrp[0].regs;
729 /* Init hash registers to zero */ 1426 /* Init hash registers to zero */
730 gfar_write(&priv->regs->igaddr0, 0); 1427 gfar_write(&regs->igaddr0, 0);
731 gfar_write(&priv->regs->igaddr1, 0); 1428 gfar_write(&regs->igaddr1, 0);
732 gfar_write(&priv->regs->igaddr2, 0); 1429 gfar_write(&regs->igaddr2, 0);
733 gfar_write(&priv->regs->igaddr3, 0); 1430 gfar_write(&regs->igaddr3, 0);
734 gfar_write(&priv->regs->igaddr4, 0); 1431 gfar_write(&regs->igaddr4, 0);
735 gfar_write(&priv->regs->igaddr5, 0); 1432 gfar_write(&regs->igaddr5, 0);
736 gfar_write(&priv->regs->igaddr6, 0); 1433 gfar_write(&regs->igaddr6, 0);
737 gfar_write(&priv->regs->igaddr7, 0); 1434 gfar_write(&regs->igaddr7, 0);
738 1435
739 gfar_write(&priv->regs->gaddr0, 0); 1436 gfar_write(&regs->gaddr0, 0);
740 gfar_write(&priv->regs->gaddr1, 0); 1437 gfar_write(&regs->gaddr1, 0);
741 gfar_write(&priv->regs->gaddr2, 0); 1438 gfar_write(&regs->gaddr2, 0);
742 gfar_write(&priv->regs->gaddr3, 0); 1439 gfar_write(&regs->gaddr3, 0);
743 gfar_write(&priv->regs->gaddr4, 0); 1440 gfar_write(&regs->gaddr4, 0);
744 gfar_write(&priv->regs->gaddr5, 0); 1441 gfar_write(&regs->gaddr5, 0);
745 gfar_write(&priv->regs->gaddr6, 0); 1442 gfar_write(&regs->gaddr6, 0);
746 gfar_write(&priv->regs->gaddr7, 0); 1443 gfar_write(&regs->gaddr7, 0);
747 1444
748 /* Zero out the rmon mib registers if it has them */ 1445 /* Zero out the rmon mib registers if it has them */
749 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { 1446 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
750 memset_io(&(priv->regs->rmon), 0, sizeof (struct rmon_mib)); 1447 memset_io(&(regs->rmon), 0, sizeof (struct rmon_mib));
751 1448
752 /* Mask off the CAM interrupts */ 1449 /* Mask off the CAM interrupts */
753 gfar_write(&priv->regs->rmon.cam1, 0xffffffff); 1450 gfar_write(&regs->rmon.cam1, 0xffffffff);
754 gfar_write(&priv->regs->rmon.cam2, 0xffffffff); 1451 gfar_write(&regs->rmon.cam2, 0xffffffff);
755 } 1452 }
756 1453
757 /* Initialize the max receive buffer length */ 1454 /* Initialize the max receive buffer length */
758 gfar_write(&priv->regs->mrblr, priv->rx_buffer_size); 1455 gfar_write(&regs->mrblr, priv->rx_buffer_size);
759 1456
760 /* Initialize the Minimum Frame Length Register */ 1457 /* Initialize the Minimum Frame Length Register */
761 gfar_write(&priv->regs->minflr, MINFLR_INIT_SETTINGS); 1458 gfar_write(&regs->minflr, MINFLR_INIT_SETTINGS);
762} 1459}
763 1460
764 1461
@@ -766,23 +1463,28 @@ static void init_registers(struct net_device *dev)
766static void gfar_halt_nodisable(struct net_device *dev) 1463static void gfar_halt_nodisable(struct net_device *dev)
767{ 1464{
768 struct gfar_private *priv = netdev_priv(dev); 1465 struct gfar_private *priv = netdev_priv(dev);
769 struct gfar __iomem *regs = priv->regs; 1466 struct gfar __iomem *regs = NULL;
770 u32 tempval; 1467 u32 tempval;
1468 int i = 0;
771 1469
772 /* Mask all interrupts */ 1470 for (i = 0; i < priv->num_grps; i++) {
773 gfar_write(&regs->imask, IMASK_INIT_CLEAR); 1471 regs = priv->gfargrp[i].regs;
1472 /* Mask all interrupts */
1473 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
774 1474
775 /* Clear all interrupts */ 1475 /* Clear all interrupts */
776 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR); 1476 gfar_write(&regs->ievent, IEVENT_INIT_CLEAR);
1477 }
777 1478
1479 regs = priv->gfargrp[0].regs;
778 /* Stop the DMA, and wait for it to stop */ 1480 /* Stop the DMA, and wait for it to stop */
779 tempval = gfar_read(&priv->regs->dmactrl); 1481 tempval = gfar_read(&regs->dmactrl);
780 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS)) 1482 if ((tempval & (DMACTRL_GRS | DMACTRL_GTS))
781 != (DMACTRL_GRS | DMACTRL_GTS)) { 1483 != (DMACTRL_GRS | DMACTRL_GTS)) {
782 tempval |= (DMACTRL_GRS | DMACTRL_GTS); 1484 tempval |= (DMACTRL_GRS | DMACTRL_GTS);
783 gfar_write(&priv->regs->dmactrl, tempval); 1485 gfar_write(&regs->dmactrl, tempval);
784 1486
785 while (!(gfar_read(&priv->regs->ievent) & 1487 while (!(gfar_read(&regs->ievent) &
786 (IEVENT_GRSC | IEVENT_GTSC))) 1488 (IEVENT_GRSC | IEVENT_GTSC)))
787 cpu_relax(); 1489 cpu_relax();
788 } 1490 }
@@ -792,7 +1494,7 @@ static void gfar_halt_nodisable(struct net_device *dev)
792void gfar_halt(struct net_device *dev) 1494void gfar_halt(struct net_device *dev)
793{ 1495{
794 struct gfar_private *priv = netdev_priv(dev); 1496 struct gfar_private *priv = netdev_priv(dev);
795 struct gfar __iomem *regs = priv->regs; 1497 struct gfar __iomem *regs = priv->gfargrp[0].regs;
796 u32 tempval; 1498 u32 tempval;
797 1499
798 gfar_halt_nodisable(dev); 1500 gfar_halt_nodisable(dev);
@@ -803,101 +1505,131 @@ void gfar_halt(struct net_device *dev)
803 gfar_write(&regs->maccfg1, tempval); 1505 gfar_write(&regs->maccfg1, tempval);
804} 1506}
805 1507
1508static void free_grp_irqs(struct gfar_priv_grp *grp)
1509{
1510 free_irq(grp->interruptError, grp);
1511 free_irq(grp->interruptTransmit, grp);
1512 free_irq(grp->interruptReceive, grp);
1513}
1514
806void stop_gfar(struct net_device *dev) 1515void stop_gfar(struct net_device *dev)
807{ 1516{
808 struct gfar_private *priv = netdev_priv(dev); 1517 struct gfar_private *priv = netdev_priv(dev);
809 struct gfar __iomem *regs = priv->regs;
810 unsigned long flags; 1518 unsigned long flags;
1519 int i;
811 1520
812 phy_stop(priv->phydev); 1521 phy_stop(priv->phydev);
813 1522
1523
814 /* Lock it down */ 1524 /* Lock it down */
815 spin_lock_irqsave(&priv->txlock, flags); 1525 local_irq_save(flags);
816 spin_lock(&priv->rxlock); 1526 lock_tx_qs(priv);
1527 lock_rx_qs(priv);
817 1528
818 gfar_halt(dev); 1529 gfar_halt(dev);
819 1530
820 spin_unlock(&priv->rxlock); 1531 unlock_rx_qs(priv);
821 spin_unlock_irqrestore(&priv->txlock, flags); 1532 unlock_tx_qs(priv);
1533 local_irq_restore(flags);
822 1534
823 /* Free the IRQs */ 1535 /* Free the IRQs */
824 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1536 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
825 free_irq(priv->interruptError, dev); 1537 for (i = 0; i < priv->num_grps; i++)
826 free_irq(priv->interruptTransmit, dev); 1538 free_grp_irqs(&priv->gfargrp[i]);
827 free_irq(priv->interruptReceive, dev);
828 } else { 1539 } else {
829 free_irq(priv->interruptTransmit, dev); 1540 for (i = 0; i < priv->num_grps; i++)
1541 free_irq(priv->gfargrp[i].interruptTransmit,
1542 &priv->gfargrp[i]);
830 } 1543 }
831 1544
832 free_skb_resources(priv); 1545 free_skb_resources(priv);
833
834 dma_free_coherent(&priv->ofdev->dev,
835 sizeof(struct txbd8)*priv->tx_ring_size
836 + sizeof(struct rxbd8)*priv->rx_ring_size,
837 priv->tx_bd_base,
838 gfar_read(&regs->tbase0));
839} 1546}
840 1547
841/* If there are any tx skbs or rx skbs still around, free them. 1548static void free_skb_tx_queue(struct gfar_priv_tx_q *tx_queue)
842 * Then free tx_skbuff and rx_skbuff */
843static void free_skb_resources(struct gfar_private *priv)
844{ 1549{
845 struct rxbd8 *rxbdp;
846 struct txbd8 *txbdp; 1550 struct txbd8 *txbdp;
1551 struct gfar_private *priv = netdev_priv(tx_queue->dev);
847 int i, j; 1552 int i, j;
848 1553
849 /* Go through all the buffer descriptors and free their data buffers */ 1554 txbdp = tx_queue->tx_bd_base;
850 txbdp = priv->tx_bd_base;
851 1555
852 for (i = 0; i < priv->tx_ring_size; i++) { 1556 for (i = 0; i < tx_queue->tx_ring_size; i++) {
853 if (!priv->tx_skbuff[i]) 1557 if (!tx_queue->tx_skbuff[i])
854 continue; 1558 continue;
855 1559
856 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr, 1560 dma_unmap_single(&priv->ofdev->dev, txbdp->bufPtr,
857 txbdp->length, DMA_TO_DEVICE); 1561 txbdp->length, DMA_TO_DEVICE);
858 txbdp->lstatus = 0; 1562 txbdp->lstatus = 0;
859 for (j = 0; j < skb_shinfo(priv->tx_skbuff[i])->nr_frags; j++) { 1563 for (j = 0; j < skb_shinfo(tx_queue->tx_skbuff[i])->nr_frags;
1564 j++) {
860 txbdp++; 1565 txbdp++;
861 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr, 1566 dma_unmap_page(&priv->ofdev->dev, txbdp->bufPtr,
862 txbdp->length, DMA_TO_DEVICE); 1567 txbdp->length, DMA_TO_DEVICE);
863 } 1568 }
864 txbdp++; 1569 txbdp++;
865 dev_kfree_skb_any(priv->tx_skbuff[i]); 1570 dev_kfree_skb_any(tx_queue->tx_skbuff[i]);
866 priv->tx_skbuff[i] = NULL; 1571 tx_queue->tx_skbuff[i] = NULL;
867 } 1572 }
1573 kfree(tx_queue->tx_skbuff);
1574}
868 1575
869 kfree(priv->tx_skbuff); 1576static void free_skb_rx_queue(struct gfar_priv_rx_q *rx_queue)
870 1577{
871 rxbdp = priv->rx_bd_base; 1578 struct rxbd8 *rxbdp;
1579 struct gfar_private *priv = netdev_priv(rx_queue->dev);
1580 int i;
872 1581
873 /* rx_skbuff is not guaranteed to be allocated, so only 1582 rxbdp = rx_queue->rx_bd_base;
874 * free it and its contents if it is allocated */
875 if(priv->rx_skbuff != NULL) {
876 for (i = 0; i < priv->rx_ring_size; i++) {
877 if (priv->rx_skbuff[i]) {
878 dma_unmap_single(&priv->ofdev->dev, rxbdp->bufPtr,
879 priv->rx_buffer_size,
880 DMA_FROM_DEVICE);
881 1583
882 dev_kfree_skb_any(priv->rx_skbuff[i]); 1584 for (i = 0; i < rx_queue->rx_ring_size; i++) {
883 priv->rx_skbuff[i] = NULL; 1585 if (rx_queue->rx_skbuff[i]) {
884 } 1586 dma_unmap_single(&priv->ofdev->dev,
1587 rxbdp->bufPtr, priv->rx_buffer_size,
1588 DMA_FROM_DEVICE);
1589 dev_kfree_skb_any(rx_queue->rx_skbuff[i]);
1590 rx_queue->rx_skbuff[i] = NULL;
1591 }
1592 rxbdp->lstatus = 0;
1593 rxbdp->bufPtr = 0;
1594 rxbdp++;
1595 }
1596 kfree(rx_queue->rx_skbuff);
1597}
885 1598
886 rxbdp->lstatus = 0; 1599/* If there are any tx skbs or rx skbs still around, free them.
887 rxbdp->bufPtr = 0; 1600 * Then free tx_skbuff and rx_skbuff */
1601static void free_skb_resources(struct gfar_private *priv)
1602{
1603 struct gfar_priv_tx_q *tx_queue = NULL;
1604 struct gfar_priv_rx_q *rx_queue = NULL;
1605 int i;
888 1606
889 rxbdp++; 1607 /* Go through all the buffer descriptors and free their data buffers */
890 } 1608 for (i = 0; i < priv->num_tx_queues; i++) {
1609 tx_queue = priv->tx_queue[i];
1610 if(!tx_queue->tx_skbuff)
1611 free_skb_tx_queue(tx_queue);
1612 }
891 1613
892 kfree(priv->rx_skbuff); 1614 for (i = 0; i < priv->num_rx_queues; i++) {
1615 rx_queue = priv->rx_queue[i];
1616 if(!rx_queue->rx_skbuff)
1617 free_skb_rx_queue(rx_queue);
893 } 1618 }
1619
1620 dma_free_coherent(&priv->ofdev->dev,
1621 sizeof(struct txbd8) * priv->total_tx_ring_size +
1622 sizeof(struct rxbd8) * priv->total_rx_ring_size,
1623 priv->tx_queue[0]->tx_bd_base,
1624 priv->tx_queue[0]->tx_bd_dma_base);
894} 1625}
895 1626
896void gfar_start(struct net_device *dev) 1627void gfar_start(struct net_device *dev)
897{ 1628{
898 struct gfar_private *priv = netdev_priv(dev); 1629 struct gfar_private *priv = netdev_priv(dev);
899 struct gfar __iomem *regs = priv->regs; 1630 struct gfar __iomem *regs = priv->gfargrp[0].regs;
900 u32 tempval; 1631 u32 tempval;
1632 int i = 0;
901 1633
902 /* Enable Rx and Tx in MACCFG1 */ 1634 /* Enable Rx and Tx in MACCFG1 */
903 tempval = gfar_read(&regs->maccfg1); 1635 tempval = gfar_read(&regs->maccfg1);
@@ -905,269 +1637,159 @@ void gfar_start(struct net_device *dev)
905 gfar_write(&regs->maccfg1, tempval); 1637 gfar_write(&regs->maccfg1, tempval);
906 1638
907 /* Initialize DMACTRL to have WWR and WOP */ 1639 /* Initialize DMACTRL to have WWR and WOP */
908 tempval = gfar_read(&priv->regs->dmactrl); 1640 tempval = gfar_read(&regs->dmactrl);
909 tempval |= DMACTRL_INIT_SETTINGS; 1641 tempval |= DMACTRL_INIT_SETTINGS;
910 gfar_write(&priv->regs->dmactrl, tempval); 1642 gfar_write(&regs->dmactrl, tempval);
911 1643
912 /* Make sure we aren't stopped */ 1644 /* Make sure we aren't stopped */
913 tempval = gfar_read(&priv->regs->dmactrl); 1645 tempval = gfar_read(&regs->dmactrl);
914 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS); 1646 tempval &= ~(DMACTRL_GRS | DMACTRL_GTS);
915 gfar_write(&priv->regs->dmactrl, tempval); 1647 gfar_write(&regs->dmactrl, tempval);
916 1648
917 /* Clear THLT/RHLT, so that the DMA starts polling now */ 1649 for (i = 0; i < priv->num_grps; i++) {
918 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT); 1650 regs = priv->gfargrp[i].regs;
919 gfar_write(&regs->rstat, RSTAT_CLEAR_RHALT); 1651 /* Clear THLT/RHLT, so that the DMA starts polling now */
920 1652 gfar_write(&regs->tstat, priv->gfargrp[i].tstat);
921 /* Unmask the interrupts we look for */ 1653 gfar_write(&regs->rstat, priv->gfargrp[i].rstat);
922 gfar_write(&regs->imask, IMASK_DEFAULT); 1654 /* Unmask the interrupts we look for */
1655 gfar_write(&regs->imask, IMASK_DEFAULT);
1656 }
923 1657
924 dev->trans_start = jiffies; 1658 dev->trans_start = jiffies;
925} 1659}
926 1660
927/* Bring the controller up and running */ 1661void gfar_configure_coalescing(struct gfar_private *priv,
928int startup_gfar(struct net_device *dev) 1662 unsigned long tx_mask, unsigned long rx_mask)
929{ 1663{
930 struct txbd8 *txbdp; 1664 struct gfar __iomem *regs = priv->gfargrp[0].regs;
931 struct rxbd8 *rxbdp; 1665 u32 __iomem *baddr;
932 dma_addr_t addr = 0; 1666 int i = 0;
933 unsigned long vaddr;
934 int i;
935 struct gfar_private *priv = netdev_priv(dev);
936 struct gfar __iomem *regs = priv->regs;
937 int err = 0;
938 u32 rctrl = 0;
939 u32 tctrl = 0;
940 u32 attrs = 0;
941
942 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
943 1667
944 /* Allocate memory for the buffer descriptors */ 1668 /* Backward compatible case ---- even if we enable
945 vaddr = (unsigned long) dma_alloc_coherent(&priv->ofdev->dev, 1669 * multiple queues, there's only single reg to program
946 sizeof (struct txbd8) * priv->tx_ring_size + 1670 */
947 sizeof (struct rxbd8) * priv->rx_ring_size, 1671 gfar_write(&regs->txic, 0);
948 &addr, GFP_KERNEL); 1672 if(likely(priv->tx_queue[0]->txcoalescing))
949 1673 gfar_write(&regs->txic, priv->tx_queue[0]->txic);
950 if (vaddr == 0) {
951 if (netif_msg_ifup(priv))
952 printk(KERN_ERR "%s: Could not allocate buffer descriptors!\n",
953 dev->name);
954 return -ENOMEM;
955 }
956
957 priv->tx_bd_base = (struct txbd8 *) vaddr;
958
959 /* enet DMA only understands physical addresses */
960 gfar_write(&regs->tbase0, addr);
961
962 /* Start the rx descriptor ring where the tx ring leaves off */
963 addr = addr + sizeof (struct txbd8) * priv->tx_ring_size;
964 vaddr = vaddr + sizeof (struct txbd8) * priv->tx_ring_size;
965 priv->rx_bd_base = (struct rxbd8 *) vaddr;
966 gfar_write(&regs->rbase0, addr);
967
968 /* Setup the skbuff rings */
969 priv->tx_skbuff =
970 (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
971 priv->tx_ring_size, GFP_KERNEL);
972
973 if (NULL == priv->tx_skbuff) {
974 if (netif_msg_ifup(priv))
975 printk(KERN_ERR "%s: Could not allocate tx_skbuff\n",
976 dev->name);
977 err = -ENOMEM;
978 goto tx_skb_fail;
979 }
980
981 for (i = 0; i < priv->tx_ring_size; i++)
982 priv->tx_skbuff[i] = NULL;
983
984 priv->rx_skbuff =
985 (struct sk_buff **) kmalloc(sizeof (struct sk_buff *) *
986 priv->rx_ring_size, GFP_KERNEL);
987
988 if (NULL == priv->rx_skbuff) {
989 if (netif_msg_ifup(priv))
990 printk(KERN_ERR "%s: Could not allocate rx_skbuff\n",
991 dev->name);
992 err = -ENOMEM;
993 goto rx_skb_fail;
994 }
995
996 for (i = 0; i < priv->rx_ring_size; i++)
997 priv->rx_skbuff[i] = NULL;
998
999 /* Initialize some variables in our dev structure */
1000 priv->num_txbdfree = priv->tx_ring_size;
1001 priv->dirty_tx = priv->cur_tx = priv->tx_bd_base;
1002 priv->cur_rx = priv->rx_bd_base;
1003 priv->skb_curtx = priv->skb_dirtytx = 0;
1004 priv->skb_currx = 0;
1005
1006 /* Initialize Transmit Descriptor Ring */
1007 txbdp = priv->tx_bd_base;
1008 for (i = 0; i < priv->tx_ring_size; i++) {
1009 txbdp->lstatus = 0;
1010 txbdp->bufPtr = 0;
1011 txbdp++;
1012 }
1013
1014 /* Set the last descriptor in the ring to indicate wrap */
1015 txbdp--;
1016 txbdp->status |= TXBD_WRAP;
1017
1018 rxbdp = priv->rx_bd_base;
1019 for (i = 0; i < priv->rx_ring_size; i++) {
1020 struct sk_buff *skb;
1021
1022 skb = gfar_new_skb(dev);
1023
1024 if (!skb) {
1025 printk(KERN_ERR "%s: Can't allocate RX buffers\n",
1026 dev->name);
1027 1674
1028 goto err_rxalloc_fail; 1675 gfar_write(&regs->rxic, 0);
1676 if(unlikely(priv->rx_queue[0]->rxcoalescing))
1677 gfar_write(&regs->rxic, priv->rx_queue[0]->rxic);
1678
1679 if (priv->mode == MQ_MG_MODE) {
1680 baddr = &regs->txic0;
1681 for_each_bit (i, &tx_mask, priv->num_tx_queues) {
1682 if (likely(priv->tx_queue[i]->txcoalescing)) {
1683 gfar_write(baddr + i, 0);
1684 gfar_write(baddr + i, priv->tx_queue[i]->txic);
1685 }
1029 } 1686 }
1030 1687
1031 priv->rx_skbuff[i] = skb; 1688 baddr = &regs->rxic0;
1032 1689 for_each_bit (i, &rx_mask, priv->num_rx_queues) {
1033 gfar_new_rxbdp(dev, rxbdp, skb); 1690 if (likely(priv->rx_queue[i]->rxcoalescing)) {
1034 1691 gfar_write(baddr + i, 0);
1035 rxbdp++; 1692 gfar_write(baddr + i, priv->rx_queue[i]->rxic);
1693 }
1694 }
1036 } 1695 }
1696}
1037 1697
1038 /* Set the last descriptor in the ring to wrap */ 1698static int register_grp_irqs(struct gfar_priv_grp *grp)
1039 rxbdp--; 1699{
1040 rxbdp->status |= RXBD_WRAP; 1700 struct gfar_private *priv = grp->priv;
1701 struct net_device *dev = priv->ndev;
1702 int err;
1041 1703
1042 /* If the device has multiple interrupts, register for 1704 /* If the device has multiple interrupts, register for
1043 * them. Otherwise, only register for the one */ 1705 * them. Otherwise, only register for the one */
1044 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 1706 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1045 /* Install our interrupt handlers for Error, 1707 /* Install our interrupt handlers for Error,
1046 * Transmit, and Receive */ 1708 * Transmit, and Receive */
1047 if (request_irq(priv->interruptError, gfar_error, 1709 if ((err = request_irq(grp->interruptError, gfar_error, 0,
1048 0, priv->int_name_er, dev) < 0) { 1710 grp->int_name_er,grp)) < 0) {
1049 if (netif_msg_intr(priv)) 1711 if (netif_msg_intr(priv))
1050 printk(KERN_ERR "%s: Can't get IRQ %d\n", 1712 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1051 dev->name, priv->interruptError); 1713 dev->name, grp->interruptError);
1052 1714
1053 err = -1; 1715 goto err_irq_fail;
1054 goto err_irq_fail;
1055 } 1716 }
1056 1717
1057 if (request_irq(priv->interruptTransmit, gfar_transmit, 1718 if ((err = request_irq(grp->interruptTransmit, gfar_transmit,
1058 0, priv->int_name_tx, dev) < 0) { 1719 0, grp->int_name_tx, grp)) < 0) {
1059 if (netif_msg_intr(priv)) 1720 if (netif_msg_intr(priv))
1060 printk(KERN_ERR "%s: Can't get IRQ %d\n", 1721 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1061 dev->name, priv->interruptTransmit); 1722 dev->name, grp->interruptTransmit);
1062
1063 err = -1;
1064
1065 goto tx_irq_fail; 1723 goto tx_irq_fail;
1066 } 1724 }
1067 1725
1068 if (request_irq(priv->interruptReceive, gfar_receive, 1726 if ((err = request_irq(grp->interruptReceive, gfar_receive, 0,
1069 0, priv->int_name_rx, dev) < 0) { 1727 grp->int_name_rx, grp)) < 0) {
1070 if (netif_msg_intr(priv)) 1728 if (netif_msg_intr(priv))
1071 printk(KERN_ERR "%s: Can't get IRQ %d (receive0)\n", 1729 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1072 dev->name, priv->interruptReceive); 1730 dev->name, grp->interruptReceive);
1073
1074 err = -1;
1075 goto rx_irq_fail; 1731 goto rx_irq_fail;
1076 } 1732 }
1077 } else { 1733 } else {
1078 if (request_irq(priv->interruptTransmit, gfar_interrupt, 1734 if ((err = request_irq(grp->interruptTransmit, gfar_interrupt, 0,
1079 0, priv->int_name_tx, dev) < 0) { 1735 grp->int_name_tx, grp)) < 0) {
1080 if (netif_msg_intr(priv)) 1736 if (netif_msg_intr(priv))
1081 printk(KERN_ERR "%s: Can't get IRQ %d\n", 1737 printk(KERN_ERR "%s: Can't get IRQ %d\n",
1082 dev->name, priv->interruptTransmit); 1738 dev->name, grp->interruptTransmit);
1083
1084 err = -1;
1085 goto err_irq_fail; 1739 goto err_irq_fail;
1086 } 1740 }
1087 } 1741 }
1088 1742
1089 phy_start(priv->phydev); 1743 return 0;
1090
1091 /* Configure the coalescing support */
1092 gfar_write(&regs->txic, 0);
1093 if (priv->txcoalescing)
1094 gfar_write(&regs->txic, priv->txic);
1095
1096 gfar_write(&regs->rxic, 0);
1097 if (priv->rxcoalescing)
1098 gfar_write(&regs->rxic, priv->rxic);
1099
1100 if (priv->rx_csum_enable)
1101 rctrl |= RCTRL_CHECKSUMMING;
1102 1744
1103 if (priv->extended_hash) { 1745rx_irq_fail:
1104 rctrl |= RCTRL_EXTHASH; 1746 free_irq(grp->interruptTransmit, grp);
1747tx_irq_fail:
1748 free_irq(grp->interruptError, grp);
1749err_irq_fail:
1750 return err;
1105 1751
1106 gfar_clear_exact_match(dev); 1752}
1107 rctrl |= RCTRL_EMEN;
1108 }
1109 1753
1110 if (priv->padding) { 1754/* Bring the controller up and running */
1111 rctrl &= ~RCTRL_PAL_MASK; 1755int startup_gfar(struct net_device *ndev)
1112 rctrl |= RCTRL_PADDING(priv->padding); 1756{
1113 } 1757 struct gfar_private *priv = netdev_priv(ndev);
1758 struct gfar __iomem *regs = NULL;
1759 int err, i, j;
1114 1760
1115 /* keep vlan related bits if it's enabled */ 1761 for (i = 0; i < priv->num_grps; i++) {
1116 if (priv->vlgrp) { 1762 regs= priv->gfargrp[i].regs;
1117 rctrl |= RCTRL_VLEX | RCTRL_PRSDEP_INIT; 1763 gfar_write(&regs->imask, IMASK_INIT_CLEAR);
1118 tctrl |= TCTRL_VLINS;
1119 } 1764 }
1120 1765
1121 /* Init rctrl based on our settings */ 1766 regs= priv->gfargrp[0].regs;
1122 gfar_write(&priv->regs->rctrl, rctrl); 1767 err = gfar_alloc_skb_resources(ndev);
1123 1768 if (err)
1124 if (dev->features & NETIF_F_IP_CSUM) 1769 return err;
1125 tctrl |= TCTRL_INIT_CSUM;
1126
1127 gfar_write(&priv->regs->tctrl, tctrl);
1128
1129 /* Set the extraction length and index */
1130 attrs = ATTRELI_EL(priv->rx_stash_size) |
1131 ATTRELI_EI(priv->rx_stash_index);
1132
1133 gfar_write(&priv->regs->attreli, attrs);
1134
1135 /* Start with defaults, and add stashing or locking
1136 * depending on the approprate variables */
1137 attrs = ATTR_INIT_SETTINGS;
1138 1770
1139 if (priv->bd_stash_en) 1771 gfar_init_mac(ndev);
1140 attrs |= ATTR_BDSTASH;
1141 1772
1142 if (priv->rx_stash_size != 0) 1773 for (i = 0; i < priv->num_grps; i++) {
1143 attrs |= ATTR_BUFSTASH; 1774 err = register_grp_irqs(&priv->gfargrp[i]);
1775 if (err) {
1776 for (j = 0; j < i; j++)
1777 free_grp_irqs(&priv->gfargrp[j]);
1778 goto irq_fail;
1779 }
1780 }
1144 1781
1145 gfar_write(&priv->regs->attr, attrs); 1782 /* Start the controller */
1783 gfar_start(ndev);
1146 1784
1147 gfar_write(&priv->regs->fifo_tx_thr, priv->fifo_threshold); 1785 phy_start(priv->phydev);
1148 gfar_write(&priv->regs->fifo_tx_starve, priv->fifo_starve);
1149 gfar_write(&priv->regs->fifo_tx_starve_shutoff, priv->fifo_starve_off);
1150 1786
1151 /* Start the controller */ 1787 gfar_configure_coalescing(priv, 0xFF, 0xFF);
1152 gfar_start(dev);
1153 1788
1154 return 0; 1789 return 0;
1155 1790
1156rx_irq_fail: 1791irq_fail:
1157 free_irq(priv->interruptTransmit, dev);
1158tx_irq_fail:
1159 free_irq(priv->interruptError, dev);
1160err_irq_fail:
1161err_rxalloc_fail:
1162rx_skb_fail:
1163 free_skb_resources(priv); 1792 free_skb_resources(priv);
1164tx_skb_fail:
1165 dma_free_coherent(&priv->ofdev->dev,
1166 sizeof(struct txbd8)*priv->tx_ring_size
1167 + sizeof(struct rxbd8)*priv->rx_ring_size,
1168 priv->tx_bd_base,
1169 gfar_read(&regs->tbase0));
1170
1171 return err; 1793 return err;
1172} 1794}
1173 1795
@@ -1178,7 +1800,7 @@ static int gfar_enet_open(struct net_device *dev)
1178 struct gfar_private *priv = netdev_priv(dev); 1800 struct gfar_private *priv = netdev_priv(dev);
1179 int err; 1801 int err;
1180 1802
1181 napi_enable(&priv->napi); 1803 enable_napi(priv);
1182 1804
1183 skb_queue_head_init(&priv->rx_recycle); 1805 skb_queue_head_init(&priv->rx_recycle);
1184 1806
@@ -1189,18 +1811,18 @@ static int gfar_enet_open(struct net_device *dev)
1189 1811
1190 err = init_phy(dev); 1812 err = init_phy(dev);
1191 1813
1192 if(err) { 1814 if (err) {
1193 napi_disable(&priv->napi); 1815 disable_napi(priv);
1194 return err; 1816 return err;
1195 } 1817 }
1196 1818
1197 err = startup_gfar(dev); 1819 err = startup_gfar(dev);
1198 if (err) { 1820 if (err) {
1199 napi_disable(&priv->napi); 1821 disable_napi(priv);
1200 return err; 1822 return err;
1201 } 1823 }
1202 1824
1203 netif_start_queue(dev); 1825 netif_tx_start_all_queues(dev);
1204 1826
1205 device_set_wakeup_enable(&dev->dev, priv->wol_en); 1827 device_set_wakeup_enable(&dev->dev, priv->wol_en);
1206 1828
@@ -1269,15 +1891,23 @@ static inline struct txbd8 *next_txbd(struct txbd8 *bdp, struct txbd8 *base,
1269static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev) 1891static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1270{ 1892{
1271 struct gfar_private *priv = netdev_priv(dev); 1893 struct gfar_private *priv = netdev_priv(dev);
1894 struct gfar_priv_tx_q *tx_queue = NULL;
1895 struct netdev_queue *txq;
1896 struct gfar __iomem *regs = NULL;
1272 struct txfcb *fcb = NULL; 1897 struct txfcb *fcb = NULL;
1273 struct txbd8 *txbdp, *txbdp_start, *base; 1898 struct txbd8 *txbdp, *txbdp_start, *base;
1274 u32 lstatus; 1899 u32 lstatus;
1275 int i; 1900 int i, rq = 0;
1276 u32 bufaddr; 1901 u32 bufaddr;
1277 unsigned long flags; 1902 unsigned long flags;
1278 unsigned int nr_frags, length; 1903 unsigned int nr_frags, length;
1279 1904
1280 base = priv->tx_bd_base; 1905
1906 rq = skb->queue_mapping;
1907 tx_queue = priv->tx_queue[rq];
1908 txq = netdev_get_tx_queue(dev, rq);
1909 base = tx_queue->tx_bd_base;
1910 regs = tx_queue->grp->regs;
1281 1911
1282 /* make space for additional header when fcb is needed */ 1912 /* make space for additional header when fcb is needed */
1283 if (((skb->ip_summed == CHECKSUM_PARTIAL) || 1913 if (((skb->ip_summed == CHECKSUM_PARTIAL) ||
@@ -1298,21 +1928,18 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1298 /* total number of fragments in the SKB */ 1928 /* total number of fragments in the SKB */
1299 nr_frags = skb_shinfo(skb)->nr_frags; 1929 nr_frags = skb_shinfo(skb)->nr_frags;
1300 1930
1301 spin_lock_irqsave(&priv->txlock, flags);
1302
1303 /* check if there is space to queue this packet */ 1931 /* check if there is space to queue this packet */
1304 if ((nr_frags+1) > priv->num_txbdfree) { 1932 if ((nr_frags+1) > tx_queue->num_txbdfree) {
1305 /* no space, stop the queue */ 1933 /* no space, stop the queue */
1306 netif_stop_queue(dev); 1934 netif_tx_stop_queue(txq);
1307 dev->stats.tx_fifo_errors++; 1935 dev->stats.tx_fifo_errors++;
1308 spin_unlock_irqrestore(&priv->txlock, flags);
1309 return NETDEV_TX_BUSY; 1936 return NETDEV_TX_BUSY;
1310 } 1937 }
1311 1938
1312 /* Update transmit stats */ 1939 /* Update transmit stats */
1313 dev->stats.tx_bytes += skb->len; 1940 dev->stats.tx_bytes += skb->len;
1314 1941
1315 txbdp = txbdp_start = priv->cur_tx; 1942 txbdp = txbdp_start = tx_queue->cur_tx;
1316 1943
1317 if (nr_frags == 0) { 1944 if (nr_frags == 0) {
1318 lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT); 1945 lstatus = txbdp->lstatus | BD_LFLAG(TXBD_LAST | TXBD_INTERRUPT);
@@ -1320,7 +1947,7 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1320 /* Place the fragment addresses and lengths into the TxBDs */ 1947 /* Place the fragment addresses and lengths into the TxBDs */
1321 for (i = 0; i < nr_frags; i++) { 1948 for (i = 0; i < nr_frags; i++) {
1322 /* Point at the next BD, wrapping as needed */ 1949 /* Point at the next BD, wrapping as needed */
1323 txbdp = next_txbd(txbdp, base, priv->tx_ring_size); 1950 txbdp = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1324 1951
1325 length = skb_shinfo(skb)->frags[i].size; 1952 length = skb_shinfo(skb)->frags[i].size;
1326 1953
@@ -1362,13 +1989,27 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1362 } 1989 }
1363 1990
1364 /* setup the TxBD length and buffer pointer for the first BD */ 1991 /* setup the TxBD length and buffer pointer for the first BD */
1365 priv->tx_skbuff[priv->skb_curtx] = skb; 1992 tx_queue->tx_skbuff[tx_queue->skb_curtx] = skb;
1366 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data, 1993 txbdp_start->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
1367 skb_headlen(skb), DMA_TO_DEVICE); 1994 skb_headlen(skb), DMA_TO_DEVICE);
1368 1995
1369 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb); 1996 lstatus |= BD_LFLAG(TXBD_CRC | TXBD_READY) | skb_headlen(skb);
1370 1997
1371 /* 1998 /*
1999 * We can work in parallel with gfar_clean_tx_ring(), except
2000 * when modifying num_txbdfree. Note that we didn't grab the lock
2001 * when we were reading the num_txbdfree and checking for available
2002 * space, that's because outside of this function it can only grow,
2003 * and once we've got needed space, it cannot suddenly disappear.
2004 *
2005 * The lock also protects us from gfar_error(), which can modify
2006 * regs->tstat and thus retrigger the transfers, which is why we
2007 * also must grab the lock before setting ready bit for the first
2008 * to be transmitted BD.
2009 */
2010 spin_lock_irqsave(&tx_queue->txlock, flags);
2011
2012 /*
1372 * The powerpc-specific eieio() is used, as wmb() has too strong 2013 * The powerpc-specific eieio() is used, as wmb() has too strong
1373 * semantics (it requires synchronization between cacheable and 2014 * semantics (it requires synchronization between cacheable and
1374 * uncacheable mappings, which eieio doesn't provide and which we 2015 * uncacheable mappings, which eieio doesn't provide and which we
@@ -1382,29 +2023,29 @@ static int gfar_start_xmit(struct sk_buff *skb, struct net_device *dev)
1382 2023
1383 /* Update the current skb pointer to the next entry we will use 2024 /* Update the current skb pointer to the next entry we will use
1384 * (wrapping if necessary) */ 2025 * (wrapping if necessary) */
1385 priv->skb_curtx = (priv->skb_curtx + 1) & 2026 tx_queue->skb_curtx = (tx_queue->skb_curtx + 1) &
1386 TX_RING_MOD_MASK(priv->tx_ring_size); 2027 TX_RING_MOD_MASK(tx_queue->tx_ring_size);
1387 2028
1388 priv->cur_tx = next_txbd(txbdp, base, priv->tx_ring_size); 2029 tx_queue->cur_tx = next_txbd(txbdp, base, tx_queue->tx_ring_size);
1389 2030
1390 /* reduce TxBD free count */ 2031 /* reduce TxBD free count */
1391 priv->num_txbdfree -= (nr_frags + 1); 2032 tx_queue->num_txbdfree -= (nr_frags + 1);
1392 2033
1393 dev->trans_start = jiffies; 2034 dev->trans_start = jiffies;
1394 2035
1395 /* If the next BD still needs to be cleaned up, then the bds 2036 /* If the next BD still needs to be cleaned up, then the bds
1396 are full. We need to tell the kernel to stop sending us stuff. */ 2037 are full. We need to tell the kernel to stop sending us stuff. */
1397 if (!priv->num_txbdfree) { 2038 if (!tx_queue->num_txbdfree) {
1398 netif_stop_queue(dev); 2039 netif_tx_stop_queue(txq);
1399 2040
1400 dev->stats.tx_fifo_errors++; 2041 dev->stats.tx_fifo_errors++;
1401 } 2042 }
1402 2043
1403 /* Tell the DMA to go go go */ 2044 /* Tell the DMA to go go go */
1404 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); 2045 gfar_write(&regs->tstat, TSTAT_CLEAR_THALT >> tx_queue->qindex);
1405 2046
1406 /* Unlock priv */ 2047 /* Unlock priv */
1407 spin_unlock_irqrestore(&priv->txlock, flags); 2048 spin_unlock_irqrestore(&tx_queue->txlock, flags);
1408 2049
1409 return NETDEV_TX_OK; 2050 return NETDEV_TX_OK;
1410} 2051}
@@ -1414,7 +2055,7 @@ static int gfar_close(struct net_device *dev)
1414{ 2055{
1415 struct gfar_private *priv = netdev_priv(dev); 2056 struct gfar_private *priv = netdev_priv(dev);
1416 2057
1417 napi_disable(&priv->napi); 2058 disable_napi(priv);
1418 2059
1419 skb_queue_purge(&priv->rx_recycle); 2060 skb_queue_purge(&priv->rx_recycle);
1420 cancel_work_sync(&priv->reset_task); 2061 cancel_work_sync(&priv->reset_task);
@@ -1424,7 +2065,7 @@ static int gfar_close(struct net_device *dev)
1424 phy_disconnect(priv->phydev); 2065 phy_disconnect(priv->phydev);
1425 priv->phydev = NULL; 2066 priv->phydev = NULL;
1426 2067
1427 netif_stop_queue(dev); 2068 netif_tx_stop_all_queues(dev);
1428 2069
1429 return 0; 2070 return 0;
1430} 2071}
@@ -1443,50 +2084,55 @@ static void gfar_vlan_rx_register(struct net_device *dev,
1443 struct vlan_group *grp) 2084 struct vlan_group *grp)
1444{ 2085{
1445 struct gfar_private *priv = netdev_priv(dev); 2086 struct gfar_private *priv = netdev_priv(dev);
2087 struct gfar __iomem *regs = NULL;
1446 unsigned long flags; 2088 unsigned long flags;
1447 u32 tempval; 2089 u32 tempval;
1448 2090
1449 spin_lock_irqsave(&priv->rxlock, flags); 2091 regs = priv->gfargrp[0].regs;
2092 local_irq_save(flags);
2093 lock_rx_qs(priv);
1450 2094
1451 priv->vlgrp = grp; 2095 priv->vlgrp = grp;
1452 2096
1453 if (grp) { 2097 if (grp) {
1454 /* Enable VLAN tag insertion */ 2098 /* Enable VLAN tag insertion */
1455 tempval = gfar_read(&priv->regs->tctrl); 2099 tempval = gfar_read(&regs->tctrl);
1456 tempval |= TCTRL_VLINS; 2100 tempval |= TCTRL_VLINS;
1457 2101
1458 gfar_write(&priv->regs->tctrl, tempval); 2102 gfar_write(&regs->tctrl, tempval);
1459 2103
1460 /* Enable VLAN tag extraction */ 2104 /* Enable VLAN tag extraction */
1461 tempval = gfar_read(&priv->regs->rctrl); 2105 tempval = gfar_read(&regs->rctrl);
1462 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT); 2106 tempval |= (RCTRL_VLEX | RCTRL_PRSDEP_INIT);
1463 gfar_write(&priv->regs->rctrl, tempval); 2107 gfar_write(&regs->rctrl, tempval);
1464 } else { 2108 } else {
1465 /* Disable VLAN tag insertion */ 2109 /* Disable VLAN tag insertion */
1466 tempval = gfar_read(&priv->regs->tctrl); 2110 tempval = gfar_read(&regs->tctrl);
1467 tempval &= ~TCTRL_VLINS; 2111 tempval &= ~TCTRL_VLINS;
1468 gfar_write(&priv->regs->tctrl, tempval); 2112 gfar_write(&regs->tctrl, tempval);
1469 2113
1470 /* Disable VLAN tag extraction */ 2114 /* Disable VLAN tag extraction */
1471 tempval = gfar_read(&priv->regs->rctrl); 2115 tempval = gfar_read(&regs->rctrl);
1472 tempval &= ~RCTRL_VLEX; 2116 tempval &= ~RCTRL_VLEX;
1473 /* If parse is no longer required, then disable parser */ 2117 /* If parse is no longer required, then disable parser */
1474 if (tempval & RCTRL_REQ_PARSER) 2118 if (tempval & RCTRL_REQ_PARSER)
1475 tempval |= RCTRL_PRSDEP_INIT; 2119 tempval |= RCTRL_PRSDEP_INIT;
1476 else 2120 else
1477 tempval &= ~RCTRL_PRSDEP_INIT; 2121 tempval &= ~RCTRL_PRSDEP_INIT;
1478 gfar_write(&priv->regs->rctrl, tempval); 2122 gfar_write(&regs->rctrl, tempval);
1479 } 2123 }
1480 2124
1481 gfar_change_mtu(dev, dev->mtu); 2125 gfar_change_mtu(dev, dev->mtu);
1482 2126
1483 spin_unlock_irqrestore(&priv->rxlock, flags); 2127 unlock_rx_qs(priv);
2128 local_irq_restore(flags);
1484} 2129}
1485 2130
1486static int gfar_change_mtu(struct net_device *dev, int new_mtu) 2131static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1487{ 2132{
1488 int tempsize, tempval; 2133 int tempsize, tempval;
1489 struct gfar_private *priv = netdev_priv(dev); 2134 struct gfar_private *priv = netdev_priv(dev);
2135 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1490 int oldsize = priv->rx_buffer_size; 2136 int oldsize = priv->rx_buffer_size;
1491 int frame_size = new_mtu + ETH_HLEN; 2137 int frame_size = new_mtu + ETH_HLEN;
1492 2138
@@ -1518,20 +2164,20 @@ static int gfar_change_mtu(struct net_device *dev, int new_mtu)
1518 2164
1519 dev->mtu = new_mtu; 2165 dev->mtu = new_mtu;
1520 2166
1521 gfar_write(&priv->regs->mrblr, priv->rx_buffer_size); 2167 gfar_write(&regs->mrblr, priv->rx_buffer_size);
1522 gfar_write(&priv->regs->maxfrm, priv->rx_buffer_size); 2168 gfar_write(&regs->maxfrm, priv->rx_buffer_size);
1523 2169
1524 /* If the mtu is larger than the max size for standard 2170 /* If the mtu is larger than the max size for standard
1525 * ethernet frames (ie, a jumbo frame), then set maccfg2 2171 * ethernet frames (ie, a jumbo frame), then set maccfg2
1526 * to allow huge frames, and to check the length */ 2172 * to allow huge frames, and to check the length */
1527 tempval = gfar_read(&priv->regs->maccfg2); 2173 tempval = gfar_read(&regs->maccfg2);
1528 2174
1529 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE) 2175 if (priv->rx_buffer_size > DEFAULT_RX_BUFFER_SIZE)
1530 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); 2176 tempval |= (MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1531 else 2177 else
1532 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK); 2178 tempval &= ~(MACCFG2_HUGEFRAME | MACCFG2_LENGTHCHECK);
1533 2179
1534 gfar_write(&priv->regs->maccfg2, tempval); 2180 gfar_write(&regs->maccfg2, tempval);
1535 2181
1536 if ((oldsize != tempsize) && (dev->flags & IFF_UP)) 2182 if ((oldsize != tempsize) && (dev->flags & IFF_UP))
1537 startup_gfar(dev); 2183 startup_gfar(dev);
@@ -1551,10 +2197,10 @@ static void gfar_reset_task(struct work_struct *work)
1551 struct net_device *dev = priv->ndev; 2197 struct net_device *dev = priv->ndev;
1552 2198
1553 if (dev->flags & IFF_UP) { 2199 if (dev->flags & IFF_UP) {
1554 netif_stop_queue(dev); 2200 netif_tx_stop_all_queues(dev);
1555 stop_gfar(dev); 2201 stop_gfar(dev);
1556 startup_gfar(dev); 2202 startup_gfar(dev);
1557 netif_start_queue(dev); 2203 netif_tx_start_all_queues(dev);
1558 } 2204 }
1559 2205
1560 netif_tx_schedule_all(dev); 2206 netif_tx_schedule_all(dev);
@@ -1569,24 +2215,29 @@ static void gfar_timeout(struct net_device *dev)
1569} 2215}
1570 2216
1571/* Interrupt Handler for Transmit complete */ 2217/* Interrupt Handler for Transmit complete */
1572static int gfar_clean_tx_ring(struct net_device *dev) 2218static int gfar_clean_tx_ring(struct gfar_priv_tx_q *tx_queue)
1573{ 2219{
2220 struct net_device *dev = tx_queue->dev;
1574 struct gfar_private *priv = netdev_priv(dev); 2221 struct gfar_private *priv = netdev_priv(dev);
2222 struct gfar_priv_rx_q *rx_queue = NULL;
1575 struct txbd8 *bdp; 2223 struct txbd8 *bdp;
1576 struct txbd8 *lbdp = NULL; 2224 struct txbd8 *lbdp = NULL;
1577 struct txbd8 *base = priv->tx_bd_base; 2225 struct txbd8 *base = tx_queue->tx_bd_base;
1578 struct sk_buff *skb; 2226 struct sk_buff *skb;
1579 int skb_dirtytx; 2227 int skb_dirtytx;
1580 int tx_ring_size = priv->tx_ring_size; 2228 int tx_ring_size = tx_queue->tx_ring_size;
1581 int frags = 0; 2229 int frags = 0;
1582 int i; 2230 int i;
1583 int howmany = 0; 2231 int howmany = 0;
1584 u32 lstatus; 2232 u32 lstatus;
1585 2233
1586 bdp = priv->dirty_tx; 2234 rx_queue = priv->rx_queue[tx_queue->qindex];
1587 skb_dirtytx = priv->skb_dirtytx; 2235 bdp = tx_queue->dirty_tx;
2236 skb_dirtytx = tx_queue->skb_dirtytx;
2237
2238 while ((skb = tx_queue->tx_skbuff[skb_dirtytx])) {
2239 unsigned long flags;
1588 2240
1589 while ((skb = priv->tx_skbuff[skb_dirtytx])) {
1590 frags = skb_shinfo(skb)->nr_frags; 2241 frags = skb_shinfo(skb)->nr_frags;
1591 lbdp = skip_txbd(bdp, frags, base, tx_ring_size); 2242 lbdp = skip_txbd(bdp, frags, base, tx_ring_size);
1592 2243
@@ -1618,82 +2269,73 @@ static int gfar_clean_tx_ring(struct net_device *dev)
1618 * If there's room in the queue (limit it to rx_buffer_size) 2269 * If there's room in the queue (limit it to rx_buffer_size)
1619 * we add this skb back into the pool, if it's the right size 2270 * we add this skb back into the pool, if it's the right size
1620 */ 2271 */
1621 if (skb_queue_len(&priv->rx_recycle) < priv->rx_ring_size && 2272 if (skb_queue_len(&priv->rx_recycle) < rx_queue->rx_ring_size &&
1622 skb_recycle_check(skb, priv->rx_buffer_size + 2273 skb_recycle_check(skb, priv->rx_buffer_size +
1623 RXBUF_ALIGNMENT)) 2274 RXBUF_ALIGNMENT))
1624 __skb_queue_head(&priv->rx_recycle, skb); 2275 __skb_queue_head(&priv->rx_recycle, skb);
1625 else 2276 else
1626 dev_kfree_skb_any(skb); 2277 dev_kfree_skb_any(skb);
1627 2278
1628 priv->tx_skbuff[skb_dirtytx] = NULL; 2279 tx_queue->tx_skbuff[skb_dirtytx] = NULL;
1629 2280
1630 skb_dirtytx = (skb_dirtytx + 1) & 2281 skb_dirtytx = (skb_dirtytx + 1) &
1631 TX_RING_MOD_MASK(tx_ring_size); 2282 TX_RING_MOD_MASK(tx_ring_size);
1632 2283
1633 howmany++; 2284 howmany++;
1634 priv->num_txbdfree += frags + 1; 2285 spin_lock_irqsave(&tx_queue->txlock, flags);
2286 tx_queue->num_txbdfree += frags + 1;
2287 spin_unlock_irqrestore(&tx_queue->txlock, flags);
1635 } 2288 }
1636 2289
1637 /* If we freed a buffer, we can restart transmission, if necessary */ 2290 /* If we freed a buffer, we can restart transmission, if necessary */
1638 if (netif_queue_stopped(dev) && priv->num_txbdfree) 2291 if (__netif_subqueue_stopped(dev, tx_queue->qindex) && tx_queue->num_txbdfree)
1639 netif_wake_queue(dev); 2292 netif_wake_subqueue(dev, tx_queue->qindex);
1640 2293
1641 /* Update dirty indicators */ 2294 /* Update dirty indicators */
1642 priv->skb_dirtytx = skb_dirtytx; 2295 tx_queue->skb_dirtytx = skb_dirtytx;
1643 priv->dirty_tx = bdp; 2296 tx_queue->dirty_tx = bdp;
1644 2297
1645 dev->stats.tx_packets += howmany; 2298 dev->stats.tx_packets += howmany;
1646 2299
1647 return howmany; 2300 return howmany;
1648} 2301}
1649 2302
1650static void gfar_schedule_cleanup(struct net_device *dev) 2303static void gfar_schedule_cleanup(struct gfar_priv_grp *gfargrp)
1651{ 2304{
1652 struct gfar_private *priv = netdev_priv(dev);
1653 unsigned long flags; 2305 unsigned long flags;
1654 2306
1655 spin_lock_irqsave(&priv->txlock, flags); 2307 spin_lock_irqsave(&gfargrp->grplock, flags);
1656 spin_lock(&priv->rxlock); 2308 if (napi_schedule_prep(&gfargrp->napi)) {
1657 2309 gfar_write(&gfargrp->regs->imask, IMASK_RTX_DISABLED);
1658 if (napi_schedule_prep(&priv->napi)) { 2310 __napi_schedule(&gfargrp->napi);
1659 gfar_write(&priv->regs->imask, IMASK_RTX_DISABLED);
1660 __napi_schedule(&priv->napi);
1661 } else { 2311 } else {
1662 /* 2312 /*
1663 * Clear IEVENT, so interrupts aren't called again 2313 * Clear IEVENT, so interrupts aren't called again
1664 * because of the packets that have already arrived. 2314 * because of the packets that have already arrived.
1665 */ 2315 */
1666 gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK); 2316 gfar_write(&gfargrp->regs->ievent, IEVENT_RTX_MASK);
1667 } 2317 }
2318 spin_unlock_irqrestore(&gfargrp->grplock, flags);
1668 2319
1669 spin_unlock(&priv->rxlock);
1670 spin_unlock_irqrestore(&priv->txlock, flags);
1671} 2320}
1672 2321
1673/* Interrupt Handler for Transmit complete */ 2322/* Interrupt Handler for Transmit complete */
1674static irqreturn_t gfar_transmit(int irq, void *dev_id) 2323static irqreturn_t gfar_transmit(int irq, void *grp_id)
1675{ 2324{
1676 gfar_schedule_cleanup((struct net_device *)dev_id); 2325 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
1677 return IRQ_HANDLED; 2326 return IRQ_HANDLED;
1678} 2327}
1679 2328
1680static void gfar_new_rxbdp(struct net_device *dev, struct rxbd8 *bdp, 2329static void gfar_new_rxbdp(struct gfar_priv_rx_q *rx_queue, struct rxbd8 *bdp,
1681 struct sk_buff *skb) 2330 struct sk_buff *skb)
1682{ 2331{
2332 struct net_device *dev = rx_queue->dev;
1683 struct gfar_private *priv = netdev_priv(dev); 2333 struct gfar_private *priv = netdev_priv(dev);
1684 u32 lstatus; 2334 dma_addr_t buf;
1685
1686 bdp->bufPtr = dma_map_single(&priv->ofdev->dev, skb->data,
1687 priv->rx_buffer_size, DMA_FROM_DEVICE);
1688
1689 lstatus = BD_LFLAG(RXBD_EMPTY | RXBD_INTERRUPT);
1690
1691 if (bdp == priv->rx_bd_base + priv->rx_ring_size - 1)
1692 lstatus |= BD_LFLAG(RXBD_WRAP);
1693
1694 eieio();
1695 2335
1696 bdp->lstatus = lstatus; 2336 buf = dma_map_single(&priv->ofdev->dev, skb->data,
2337 priv->rx_buffer_size, DMA_FROM_DEVICE);
2338 gfar_init_rxbdp(rx_queue, bdp, buf);
1697} 2339}
1698 2340
1699 2341
@@ -1760,9 +2402,9 @@ static inline void count_errors(unsigned short status, struct net_device *dev)
1760 } 2402 }
1761} 2403}
1762 2404
1763irqreturn_t gfar_receive(int irq, void *dev_id) 2405irqreturn_t gfar_receive(int irq, void *grp_id)
1764{ 2406{
1765 gfar_schedule_cleanup((struct net_device *)dev_id); 2407 gfar_schedule_cleanup((struct gfar_priv_grp *)grp_id);
1766 return IRQ_HANDLED; 2408 return IRQ_HANDLED;
1767} 2409}
1768 2410
@@ -1792,6 +2434,7 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1792 fcb = (struct rxfcb *)skb->data; 2434 fcb = (struct rxfcb *)skb->data;
1793 2435
1794 /* Remove the FCB from the skb */ 2436 /* Remove the FCB from the skb */
2437 skb_set_queue_mapping(skb, fcb->rq);
1795 /* Remove the padded bytes, if there are any */ 2438 /* Remove the padded bytes, if there are any */
1796 if (amount_pull) 2439 if (amount_pull)
1797 skb_pull(skb, amount_pull); 2440 skb_pull(skb, amount_pull);
@@ -1818,8 +2461,9 @@ static int gfar_process_frame(struct net_device *dev, struct sk_buff *skb,
1818 * until the budget/quota has been reached. Returns the number 2461 * until the budget/quota has been reached. Returns the number
1819 * of frames handled 2462 * of frames handled
1820 */ 2463 */
1821int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit) 2464int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit)
1822{ 2465{
2466 struct net_device *dev = rx_queue->dev;
1823 struct rxbd8 *bdp, *base; 2467 struct rxbd8 *bdp, *base;
1824 struct sk_buff *skb; 2468 struct sk_buff *skb;
1825 int pkt_len; 2469 int pkt_len;
@@ -1828,8 +2472,8 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1828 struct gfar_private *priv = netdev_priv(dev); 2472 struct gfar_private *priv = netdev_priv(dev);
1829 2473
1830 /* Get the first full descriptor */ 2474 /* Get the first full descriptor */
1831 bdp = priv->cur_rx; 2475 bdp = rx_queue->cur_rx;
1832 base = priv->rx_bd_base; 2476 base = rx_queue->rx_bd_base;
1833 2477
1834 amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) + 2478 amount_pull = (gfar_uses_fcb(priv) ? GMAC_FCB_LEN : 0) +
1835 priv->padding; 2479 priv->padding;
@@ -1841,7 +2485,7 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1841 /* Add another skb for the future */ 2485 /* Add another skb for the future */
1842 newskb = gfar_new_skb(dev); 2486 newskb = gfar_new_skb(dev);
1843 2487
1844 skb = priv->rx_skbuff[priv->skb_currx]; 2488 skb = rx_queue->rx_skbuff[rx_queue->skb_currx];
1845 2489
1846 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr, 2490 dma_unmap_single(&priv->ofdev->dev, bdp->bufPtr,
1847 priv->rx_buffer_size, DMA_FROM_DEVICE); 2491 priv->rx_buffer_size, DMA_FROM_DEVICE);
@@ -1875,8 +2519,6 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1875 skb_put(skb, pkt_len); 2519 skb_put(skb, pkt_len);
1876 dev->stats.rx_bytes += pkt_len; 2520 dev->stats.rx_bytes += pkt_len;
1877 2521
1878 if (in_irq() || irqs_disabled())
1879 printk("Interrupt problem!\n");
1880 gfar_process_frame(dev, skb, amount_pull); 2522 gfar_process_frame(dev, skb, amount_pull);
1881 2523
1882 } else { 2524 } else {
@@ -1889,46 +2531,70 @@ int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit)
1889 2531
1890 } 2532 }
1891 2533
1892 priv->rx_skbuff[priv->skb_currx] = newskb; 2534 rx_queue->rx_skbuff[rx_queue->skb_currx] = newskb;
1893 2535
1894 /* Setup the new bdp */ 2536 /* Setup the new bdp */
1895 gfar_new_rxbdp(dev, bdp, newskb); 2537 gfar_new_rxbdp(rx_queue, bdp, newskb);
1896 2538
1897 /* Update to the next pointer */ 2539 /* Update to the next pointer */
1898 bdp = next_bd(bdp, base, priv->rx_ring_size); 2540 bdp = next_bd(bdp, base, rx_queue->rx_ring_size);
1899 2541
1900 /* update to point at the next skb */ 2542 /* update to point at the next skb */
1901 priv->skb_currx = 2543 rx_queue->skb_currx =
1902 (priv->skb_currx + 1) & 2544 (rx_queue->skb_currx + 1) &
1903 RX_RING_MOD_MASK(priv->rx_ring_size); 2545 RX_RING_MOD_MASK(rx_queue->rx_ring_size);
1904 } 2546 }
1905 2547
1906 /* Update the current rxbd pointer to be the next one */ 2548 /* Update the current rxbd pointer to be the next one */
1907 priv->cur_rx = bdp; 2549 rx_queue->cur_rx = bdp;
1908 2550
1909 return howmany; 2551 return howmany;
1910} 2552}
1911 2553
1912static int gfar_poll(struct napi_struct *napi, int budget) 2554static int gfar_poll(struct napi_struct *napi, int budget)
1913{ 2555{
1914 struct gfar_private *priv = container_of(napi, struct gfar_private, napi); 2556 struct gfar_priv_grp *gfargrp = container_of(napi,
1915 struct net_device *dev = priv->ndev; 2557 struct gfar_priv_grp, napi);
1916 int tx_cleaned = 0; 2558 struct gfar_private *priv = gfargrp->priv;
1917 int rx_cleaned = 0; 2559 struct gfar __iomem *regs = gfargrp->regs;
1918 unsigned long flags; 2560 struct gfar_priv_tx_q *tx_queue = NULL;
2561 struct gfar_priv_rx_q *rx_queue = NULL;
2562 int rx_cleaned = 0, budget_per_queue = 0, rx_cleaned_per_queue = 0;
2563 int tx_cleaned = 0, i, left_over_budget = budget;
2564 unsigned long serviced_queues = 0;
2565 int num_queues = 0;
2566
2567 num_queues = gfargrp->num_rx_queues;
2568 budget_per_queue = budget/num_queues;
1919 2569
1920 /* Clear IEVENT, so interrupts aren't called again 2570 /* Clear IEVENT, so interrupts aren't called again
1921 * because of the packets that have already arrived */ 2571 * because of the packets that have already arrived */
1922 gfar_write(&priv->regs->ievent, IEVENT_RTX_MASK); 2572 gfar_write(&regs->ievent, IEVENT_RTX_MASK);
1923 2573
1924 /* If we fail to get the lock, don't bother with the TX BDs */ 2574 while (num_queues && left_over_budget) {
1925 if (spin_trylock_irqsave(&priv->txlock, flags)) { 2575
1926 tx_cleaned = gfar_clean_tx_ring(dev); 2576 budget_per_queue = left_over_budget/num_queues;
1927 spin_unlock_irqrestore(&priv->txlock, flags); 2577 left_over_budget = 0;
2578
2579 for_each_bit(i, &gfargrp->rx_bit_map, priv->num_rx_queues) {
2580 if (test_bit(i, &serviced_queues))
2581 continue;
2582 rx_queue = priv->rx_queue[i];
2583 tx_queue = priv->tx_queue[rx_queue->qindex];
2584
2585 tx_cleaned += gfar_clean_tx_ring(tx_queue);
2586 rx_cleaned_per_queue = gfar_clean_rx_ring(rx_queue,
2587 budget_per_queue);
2588 rx_cleaned += rx_cleaned_per_queue;
2589 if(rx_cleaned_per_queue < budget_per_queue) {
2590 left_over_budget = left_over_budget +
2591 (budget_per_queue - rx_cleaned_per_queue);
2592 set_bit(i, &serviced_queues);
2593 num_queues--;
2594 }
2595 }
1928 } 2596 }
1929 2597
1930 rx_cleaned = gfar_clean_rx_ring(dev, budget);
1931
1932 if (tx_cleaned) 2598 if (tx_cleaned)
1933 return budget; 2599 return budget;
1934 2600
@@ -1936,20 +2602,14 @@ static int gfar_poll(struct napi_struct *napi, int budget)
1936 napi_complete(napi); 2602 napi_complete(napi);
1937 2603
1938 /* Clear the halt bit in RSTAT */ 2604 /* Clear the halt bit in RSTAT */
1939 gfar_write(&priv->regs->rstat, RSTAT_CLEAR_RHALT); 2605 gfar_write(&regs->rstat, gfargrp->rstat);
1940 2606
1941 gfar_write(&priv->regs->imask, IMASK_DEFAULT); 2607 gfar_write(&regs->imask, IMASK_DEFAULT);
1942 2608
1943 /* If we are coalescing interrupts, update the timer */ 2609 /* If we are coalescing interrupts, update the timer */
1944 /* Otherwise, clear it */ 2610 /* Otherwise, clear it */
1945 if (likely(priv->rxcoalescing)) { 2611 gfar_configure_coalescing(priv,
1946 gfar_write(&priv->regs->rxic, 0); 2612 gfargrp->rx_bit_map, gfargrp->tx_bit_map);
1947 gfar_write(&priv->regs->rxic, priv->rxic);
1948 }
1949 if (likely(priv->txcoalescing)) {
1950 gfar_write(&priv->regs->txic, 0);
1951 gfar_write(&priv->regs->txic, priv->txic);
1952 }
1953 } 2613 }
1954 2614
1955 return rx_cleaned; 2615 return rx_cleaned;
@@ -1964,44 +2624,49 @@ static int gfar_poll(struct napi_struct *napi, int budget)
1964static void gfar_netpoll(struct net_device *dev) 2624static void gfar_netpoll(struct net_device *dev)
1965{ 2625{
1966 struct gfar_private *priv = netdev_priv(dev); 2626 struct gfar_private *priv = netdev_priv(dev);
2627 int i = 0;
1967 2628
1968 /* If the device has multiple interrupts, run tx/rx */ 2629 /* If the device has multiple interrupts, run tx/rx */
1969 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) { 2630 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_MULTI_INTR) {
1970 disable_irq(priv->interruptTransmit); 2631 for (i = 0; i < priv->num_grps; i++) {
1971 disable_irq(priv->interruptReceive); 2632 disable_irq(priv->gfargrp[i].interruptTransmit);
1972 disable_irq(priv->interruptError); 2633 disable_irq(priv->gfargrp[i].interruptReceive);
1973 gfar_interrupt(priv->interruptTransmit, dev); 2634 disable_irq(priv->gfargrp[i].interruptError);
1974 enable_irq(priv->interruptError); 2635 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
1975 enable_irq(priv->interruptReceive); 2636 &priv->gfargrp[i]);
1976 enable_irq(priv->interruptTransmit); 2637 enable_irq(priv->gfargrp[i].interruptError);
2638 enable_irq(priv->gfargrp[i].interruptReceive);
2639 enable_irq(priv->gfargrp[i].interruptTransmit);
2640 }
1977 } else { 2641 } else {
1978 disable_irq(priv->interruptTransmit); 2642 for (i = 0; i < priv->num_grps; i++) {
1979 gfar_interrupt(priv->interruptTransmit, dev); 2643 disable_irq(priv->gfargrp[i].interruptTransmit);
1980 enable_irq(priv->interruptTransmit); 2644 gfar_interrupt(priv->gfargrp[i].interruptTransmit,
2645 &priv->gfargrp[i]);
2646 enable_irq(priv->gfargrp[i].interruptTransmit);
1981 } 2647 }
1982} 2648}
1983#endif 2649#endif
1984 2650
1985/* The interrupt handler for devices with one interrupt */ 2651/* The interrupt handler for devices with one interrupt */
1986static irqreturn_t gfar_interrupt(int irq, void *dev_id) 2652static irqreturn_t gfar_interrupt(int irq, void *grp_id)
1987{ 2653{
1988 struct net_device *dev = dev_id; 2654 struct gfar_priv_grp *gfargrp = grp_id;
1989 struct gfar_private *priv = netdev_priv(dev);
1990 2655
1991 /* Save ievent for future reference */ 2656 /* Save ievent for future reference */
1992 u32 events = gfar_read(&priv->regs->ievent); 2657 u32 events = gfar_read(&gfargrp->regs->ievent);
1993 2658
1994 /* Check for reception */ 2659 /* Check for reception */
1995 if (events & IEVENT_RX_MASK) 2660 if (events & IEVENT_RX_MASK)
1996 gfar_receive(irq, dev_id); 2661 gfar_receive(irq, grp_id);
1997 2662
1998 /* Check for transmit completion */ 2663 /* Check for transmit completion */
1999 if (events & IEVENT_TX_MASK) 2664 if (events & IEVENT_TX_MASK)
2000 gfar_transmit(irq, dev_id); 2665 gfar_transmit(irq, grp_id);
2001 2666
2002 /* Check for errors */ 2667 /* Check for errors */
2003 if (events & IEVENT_ERR_MASK) 2668 if (events & IEVENT_ERR_MASK)
2004 gfar_error(irq, dev_id); 2669 gfar_error(irq, grp_id);
2005 2670
2006 return IRQ_HANDLED; 2671 return IRQ_HANDLED;
2007} 2672}
@@ -2015,12 +2680,14 @@ static irqreturn_t gfar_interrupt(int irq, void *dev_id)
2015static void adjust_link(struct net_device *dev) 2680static void adjust_link(struct net_device *dev)
2016{ 2681{
2017 struct gfar_private *priv = netdev_priv(dev); 2682 struct gfar_private *priv = netdev_priv(dev);
2018 struct gfar __iomem *regs = priv->regs; 2683 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2019 unsigned long flags; 2684 unsigned long flags;
2020 struct phy_device *phydev = priv->phydev; 2685 struct phy_device *phydev = priv->phydev;
2021 int new_state = 0; 2686 int new_state = 0;
2022 2687
2023 spin_lock_irqsave(&priv->txlock, flags); 2688 local_irq_save(flags);
2689 lock_tx_qs(priv);
2690
2024 if (phydev->link) { 2691 if (phydev->link) {
2025 u32 tempval = gfar_read(&regs->maccfg2); 2692 u32 tempval = gfar_read(&regs->maccfg2);
2026 u32 ecntrl = gfar_read(&regs->ecntrl); 2693 u32 ecntrl = gfar_read(&regs->ecntrl);
@@ -2085,8 +2752,8 @@ static void adjust_link(struct net_device *dev)
2085 2752
2086 if (new_state && netif_msg_link(priv)) 2753 if (new_state && netif_msg_link(priv))
2087 phy_print_status(phydev); 2754 phy_print_status(phydev);
2088 2755 unlock_tx_qs(priv);
2089 spin_unlock_irqrestore(&priv->txlock, flags); 2756 local_irq_restore(flags);
2090} 2757}
2091 2758
2092/* Update the hash table based on the current list of multicast 2759/* Update the hash table based on the current list of multicast
@@ -2097,10 +2764,10 @@ static void gfar_set_multi(struct net_device *dev)
2097{ 2764{
2098 struct dev_mc_list *mc_ptr; 2765 struct dev_mc_list *mc_ptr;
2099 struct gfar_private *priv = netdev_priv(dev); 2766 struct gfar_private *priv = netdev_priv(dev);
2100 struct gfar __iomem *regs = priv->regs; 2767 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2101 u32 tempval; 2768 u32 tempval;
2102 2769
2103 if(dev->flags & IFF_PROMISC) { 2770 if (dev->flags & IFF_PROMISC) {
2104 /* Set RCTRL to PROM */ 2771 /* Set RCTRL to PROM */
2105 tempval = gfar_read(&regs->rctrl); 2772 tempval = gfar_read(&regs->rctrl);
2106 tempval |= RCTRL_PROM; 2773 tempval |= RCTRL_PROM;
@@ -2112,7 +2779,7 @@ static void gfar_set_multi(struct net_device *dev)
2112 gfar_write(&regs->rctrl, tempval); 2779 gfar_write(&regs->rctrl, tempval);
2113 } 2780 }
2114 2781
2115 if(dev->flags & IFF_ALLMULTI) { 2782 if (dev->flags & IFF_ALLMULTI) {
2116 /* Set the hash to rx all multicast frames */ 2783 /* Set the hash to rx all multicast frames */
2117 gfar_write(&regs->igaddr0, 0xffffffff); 2784 gfar_write(&regs->igaddr0, 0xffffffff);
2118 gfar_write(&regs->igaddr1, 0xffffffff); 2785 gfar_write(&regs->igaddr1, 0xffffffff);
@@ -2164,7 +2831,7 @@ static void gfar_set_multi(struct net_device *dev)
2164 em_num = 0; 2831 em_num = 0;
2165 } 2832 }
2166 2833
2167 if(dev->mc_count == 0) 2834 if (dev->mc_count == 0)
2168 return; 2835 return;
2169 2836
2170 /* Parse the list, and set the appropriate bits */ 2837 /* Parse the list, and set the appropriate bits */
@@ -2230,10 +2897,11 @@ static void gfar_set_hash_for_addr(struct net_device *dev, u8 *addr)
2230static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr) 2897static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
2231{ 2898{
2232 struct gfar_private *priv = netdev_priv(dev); 2899 struct gfar_private *priv = netdev_priv(dev);
2900 struct gfar __iomem *regs = priv->gfargrp[0].regs;
2233 int idx; 2901 int idx;
2234 char tmpbuf[MAC_ADDR_LEN]; 2902 char tmpbuf[MAC_ADDR_LEN];
2235 u32 tempval; 2903 u32 tempval;
2236 u32 __iomem *macptr = &priv->regs->macstnaddr1; 2904 u32 __iomem *macptr = &regs->macstnaddr1;
2237 2905
2238 macptr += num*2; 2906 macptr += num*2;
2239 2907
@@ -2250,16 +2918,18 @@ static void gfar_set_mac_for_addr(struct net_device *dev, int num, u8 *addr)
2250} 2918}
2251 2919
2252/* GFAR error interrupt handler */ 2920/* GFAR error interrupt handler */
2253static irqreturn_t gfar_error(int irq, void *dev_id) 2921static irqreturn_t gfar_error(int irq, void *grp_id)
2254{ 2922{
2255 struct net_device *dev = dev_id; 2923 struct gfar_priv_grp *gfargrp = grp_id;
2256 struct gfar_private *priv = netdev_priv(dev); 2924 struct gfar __iomem *regs = gfargrp->regs;
2925 struct gfar_private *priv= gfargrp->priv;
2926 struct net_device *dev = priv->ndev;
2257 2927
2258 /* Save ievent for future reference */ 2928 /* Save ievent for future reference */
2259 u32 events = gfar_read(&priv->regs->ievent); 2929 u32 events = gfar_read(&regs->ievent);
2260 2930
2261 /* Clear IEVENT */ 2931 /* Clear IEVENT */
2262 gfar_write(&priv->regs->ievent, events & IEVENT_ERR_MASK); 2932 gfar_write(&regs->ievent, events & IEVENT_ERR_MASK);
2263 2933
2264 /* Magic Packet is not an error. */ 2934 /* Magic Packet is not an error. */
2265 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) && 2935 if ((priv->device_flags & FSL_GIANFAR_DEV_HAS_MAGIC_PACKET) &&
@@ -2269,7 +2939,7 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
2269 /* Hmm... */ 2939 /* Hmm... */
2270 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv)) 2940 if (netif_msg_rx_err(priv) || netif_msg_tx_err(priv))
2271 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n", 2941 printk(KERN_DEBUG "%s: error interrupt (ievent=0x%08x imask=0x%08x)\n",
2272 dev->name, events, gfar_read(&priv->regs->imask)); 2942 dev->name, events, gfar_read(&regs->imask));
2273 2943
2274 /* Update the error counters */ 2944 /* Update the error counters */
2275 if (events & IEVENT_TXE) { 2945 if (events & IEVENT_TXE) {
@@ -2280,14 +2950,22 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
2280 if (events & IEVENT_CRL) 2950 if (events & IEVENT_CRL)
2281 dev->stats.tx_aborted_errors++; 2951 dev->stats.tx_aborted_errors++;
2282 if (events & IEVENT_XFUN) { 2952 if (events & IEVENT_XFUN) {
2953 unsigned long flags;
2954
2283 if (netif_msg_tx_err(priv)) 2955 if (netif_msg_tx_err(priv))
2284 printk(KERN_DEBUG "%s: TX FIFO underrun, " 2956 printk(KERN_DEBUG "%s: TX FIFO underrun, "
2285 "packet dropped.\n", dev->name); 2957 "packet dropped.\n", dev->name);
2286 dev->stats.tx_dropped++; 2958 dev->stats.tx_dropped++;
2287 priv->extra_stats.tx_underrun++; 2959 priv->extra_stats.tx_underrun++;
2288 2960
2961 local_irq_save(flags);
2962 lock_tx_qs(priv);
2963
2289 /* Reactivate the Tx Queues */ 2964 /* Reactivate the Tx Queues */
2290 gfar_write(&priv->regs->tstat, TSTAT_CLEAR_THALT); 2965 gfar_write(&regs->tstat, gfargrp->tstat);
2966
2967 unlock_tx_qs(priv);
2968 local_irq_restore(flags);
2291 } 2969 }
2292 if (netif_msg_tx_err(priv)) 2970 if (netif_msg_tx_err(priv))
2293 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name); 2971 printk(KERN_DEBUG "%s: Transmit Error\n", dev->name);
@@ -2296,11 +2974,11 @@ static irqreturn_t gfar_error(int irq, void *dev_id)
2296 dev->stats.rx_errors++; 2974 dev->stats.rx_errors++;
2297 priv->extra_stats.rx_bsy++; 2975 priv->extra_stats.rx_bsy++;
2298 2976
2299 gfar_receive(irq, dev_id); 2977 gfar_receive(irq, grp_id);
2300 2978
2301 if (netif_msg_rx_err(priv)) 2979 if (netif_msg_rx_err(priv))
2302 printk(KERN_DEBUG "%s: busy error (rstat: %x)\n", 2980 printk(KERN_DEBUG "%s: busy error (rstat: %x)\n",
2303 dev->name, gfar_read(&priv->regs->rstat)); 2981 dev->name, gfar_read(&regs->rstat));
2304 } 2982 }
2305 if (events & IEVENT_BABR) { 2983 if (events & IEVENT_BABR) {
2306 dev->stats.rx_errors++; 2984 dev->stats.rx_errors++;
@@ -2331,6 +3009,9 @@ static struct of_device_id gfar_match[] =
2331 .type = "network", 3009 .type = "network",
2332 .compatible = "gianfar", 3010 .compatible = "gianfar",
2333 }, 3011 },
3012 {
3013 .compatible = "fsl,etsec2",
3014 },
2334 {}, 3015 {},
2335}; 3016};
2336MODULE_DEVICE_TABLE(of, gfar_match); 3017MODULE_DEVICE_TABLE(of, gfar_match);
@@ -2342,8 +3023,9 @@ static struct of_platform_driver gfar_driver = {
2342 3023
2343 .probe = gfar_probe, 3024 .probe = gfar_probe,
2344 .remove = gfar_remove, 3025 .remove = gfar_remove,
2345 .suspend = gfar_suspend, 3026 .suspend = gfar_legacy_suspend,
2346 .resume = gfar_resume, 3027 .resume = gfar_legacy_resume,
3028 .driver.pm = GFAR_PM_OPS,
2347}; 3029};
2348 3030
2349static int __init gfar_init(void) 3031static int __init gfar_init(void)
diff --git a/drivers/net/gianfar.h b/drivers/net/gianfar.h
index 2cd94338b5d3..cbb451011cb5 100644
--- a/drivers/net/gianfar.h
+++ b/drivers/net/gianfar.h
@@ -7,8 +7,9 @@
7 * 7 *
8 * Author: Andy Fleming 8 * Author: Andy Fleming
9 * Maintainer: Kumar Gala 9 * Maintainer: Kumar Gala
10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
10 * 11 *
11 * Copyright (c) 2002-2004 Freescale Semiconductor, Inc. 12 * Copyright 2002-2009 Freescale Semiconductor, Inc.
12 * 13 *
13 * This program is free software; you can redistribute it and/or modify it 14 * This program is free software; you can redistribute it and/or modify it
14 * under the terms of the GNU General Public License as published by the 15 * under the terms of the GNU General Public License as published by the
@@ -74,6 +75,13 @@
74extern const char gfar_driver_name[]; 75extern const char gfar_driver_name[];
75extern const char gfar_driver_version[]; 76extern const char gfar_driver_version[];
76 77
78/* MAXIMUM NUMBER OF QUEUES SUPPORTED */
79#define MAX_TX_QS 0x8
80#define MAX_RX_QS 0x8
81
82/* MAXIMUM NUMBER OF GROUPS SUPPORTED */
83#define MAXGROUPS 0x2
84
77/* These need to be powers of 2 for this driver */ 85/* These need to be powers of 2 for this driver */
78#define DEFAULT_TX_RING_SIZE 256 86#define DEFAULT_TX_RING_SIZE 256
79#define DEFAULT_RX_RING_SIZE 256 87#define DEFAULT_RX_RING_SIZE 256
@@ -171,12 +179,63 @@ extern const char gfar_driver_version[];
171 179
172#define MINFLR_INIT_SETTINGS 0x00000040 180#define MINFLR_INIT_SETTINGS 0x00000040
173 181
182/* Tqueue control */
183#define TQUEUE_EN0 0x00008000
184#define TQUEUE_EN1 0x00004000
185#define TQUEUE_EN2 0x00002000
186#define TQUEUE_EN3 0x00001000
187#define TQUEUE_EN4 0x00000800
188#define TQUEUE_EN5 0x00000400
189#define TQUEUE_EN6 0x00000200
190#define TQUEUE_EN7 0x00000100
191#define TQUEUE_EN_ALL 0x0000FF00
192
193#define TR03WT_WT0_MASK 0xFF000000
194#define TR03WT_WT1_MASK 0x00FF0000
195#define TR03WT_WT2_MASK 0x0000FF00
196#define TR03WT_WT3_MASK 0x000000FF
197
198#define TR47WT_WT4_MASK 0xFF000000
199#define TR47WT_WT5_MASK 0x00FF0000
200#define TR47WT_WT6_MASK 0x0000FF00
201#define TR47WT_WT7_MASK 0x000000FF
202
203/* Rqueue control */
204#define RQUEUE_EX0 0x00800000
205#define RQUEUE_EX1 0x00400000
206#define RQUEUE_EX2 0x00200000
207#define RQUEUE_EX3 0x00100000
208#define RQUEUE_EX4 0x00080000
209#define RQUEUE_EX5 0x00040000
210#define RQUEUE_EX6 0x00020000
211#define RQUEUE_EX7 0x00010000
212#define RQUEUE_EX_ALL 0x00FF0000
213
214#define RQUEUE_EN0 0x00000080
215#define RQUEUE_EN1 0x00000040
216#define RQUEUE_EN2 0x00000020
217#define RQUEUE_EN3 0x00000010
218#define RQUEUE_EN4 0x00000008
219#define RQUEUE_EN5 0x00000004
220#define RQUEUE_EN6 0x00000002
221#define RQUEUE_EN7 0x00000001
222#define RQUEUE_EN_ALL 0x000000FF
223
174/* Init to do tx snooping for buffers and descriptors */ 224/* Init to do tx snooping for buffers and descriptors */
175#define DMACTRL_INIT_SETTINGS 0x000000c3 225#define DMACTRL_INIT_SETTINGS 0x000000c3
176#define DMACTRL_GRS 0x00000010 226#define DMACTRL_GRS 0x00000010
177#define DMACTRL_GTS 0x00000008 227#define DMACTRL_GTS 0x00000008
178 228
179#define TSTAT_CLEAR_THALT 0x80000000 229#define TSTAT_CLEAR_THALT_ALL 0xFF000000
230#define TSTAT_CLEAR_THALT 0x80000000
231#define TSTAT_CLEAR_THALT0 0x80000000
232#define TSTAT_CLEAR_THALT1 0x40000000
233#define TSTAT_CLEAR_THALT2 0x20000000
234#define TSTAT_CLEAR_THALT3 0x10000000
235#define TSTAT_CLEAR_THALT4 0x08000000
236#define TSTAT_CLEAR_THALT5 0x04000000
237#define TSTAT_CLEAR_THALT6 0x02000000
238#define TSTAT_CLEAR_THALT7 0x01000000
180 239
181/* Interrupt coalescing macros */ 240/* Interrupt coalescing macros */
182#define IC_ICEN 0x80000000 241#define IC_ICEN 0x80000000
@@ -227,6 +286,13 @@ extern const char gfar_driver_version[];
227#define TCTRL_IPCSEN 0x00004000 286#define TCTRL_IPCSEN 0x00004000
228#define TCTRL_TUCSEN 0x00002000 287#define TCTRL_TUCSEN 0x00002000
229#define TCTRL_VLINS 0x00001000 288#define TCTRL_VLINS 0x00001000
289#define TCTRL_THDF 0x00000800
290#define TCTRL_RFCPAUSE 0x00000010
291#define TCTRL_TFCPAUSE 0x00000008
292#define TCTRL_TXSCHED_MASK 0x00000006
293#define TCTRL_TXSCHED_INIT 0x00000000
294#define TCTRL_TXSCHED_PRIO 0x00000002
295#define TCTRL_TXSCHED_WRRS 0x00000004
230#define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN) 296#define TCTRL_INIT_CSUM (TCTRL_TUCSEN | TCTRL_IPCSEN)
231 297
232#define IEVENT_INIT_CLEAR 0xffffffff 298#define IEVENT_INIT_CLEAR 0xffffffff
@@ -315,6 +381,84 @@ extern const char gfar_driver_version[];
315#define BD_LFLAG(flags) ((flags) << 16) 381#define BD_LFLAG(flags) ((flags) << 16)
316#define BD_LENGTH_MASK 0x0000ffff 382#define BD_LENGTH_MASK 0x0000ffff
317 383
384#define CLASS_CODE_UNRECOG 0x00
385#define CLASS_CODE_DUMMY1 0x01
386#define CLASS_CODE_ETHERTYPE1 0x02
387#define CLASS_CODE_ETHERTYPE2 0x03
388#define CLASS_CODE_USER_PROG1 0x04
389#define CLASS_CODE_USER_PROG2 0x05
390#define CLASS_CODE_USER_PROG3 0x06
391#define CLASS_CODE_USER_PROG4 0x07
392#define CLASS_CODE_TCP_IPV4 0x08
393#define CLASS_CODE_UDP_IPV4 0x09
394#define CLASS_CODE_AH_ESP_IPV4 0x0a
395#define CLASS_CODE_SCTP_IPV4 0x0b
396#define CLASS_CODE_TCP_IPV6 0x0c
397#define CLASS_CODE_UDP_IPV6 0x0d
398#define CLASS_CODE_AH_ESP_IPV6 0x0e
399#define CLASS_CODE_SCTP_IPV6 0x0f
400
401#define FPR_FILER_MASK 0xFFFFFFFF
402#define MAX_FILER_IDX 0xFF
403
404/* RQFCR register bits */
405#define RQFCR_GPI 0x80000000
406#define RQFCR_HASHTBL_Q 0x00000000
407#define RQFCR_HASHTBL_0 0x00020000
408#define RQFCR_HASHTBL_1 0x00040000
409#define RQFCR_HASHTBL_2 0x00060000
410#define RQFCR_HASHTBL_3 0x00080000
411#define RQFCR_HASH 0x00010000
412#define RQFCR_CLE 0x00000200
413#define RQFCR_RJE 0x00000100
414#define RQFCR_AND 0x00000080
415#define RQFCR_CMP_EXACT 0x00000000
416#define RQFCR_CMP_MATCH 0x00000020
417#define RQFCR_CMP_NOEXACT 0x00000040
418#define RQFCR_CMP_NOMATCH 0x00000060
419
420/* RQFCR PID values */
421#define RQFCR_PID_MASK 0x00000000
422#define RQFCR_PID_PARSE 0x00000001
423#define RQFCR_PID_ARB 0x00000002
424#define RQFCR_PID_DAH 0x00000003
425#define RQFCR_PID_DAL 0x00000004
426#define RQFCR_PID_SAH 0x00000005
427#define RQFCR_PID_SAL 0x00000006
428#define RQFCR_PID_ETY 0x00000007
429#define RQFCR_PID_VID 0x00000008
430#define RQFCR_PID_PRI 0x00000009
431#define RQFCR_PID_TOS 0x0000000A
432#define RQFCR_PID_L4P 0x0000000B
433#define RQFCR_PID_DIA 0x0000000C
434#define RQFCR_PID_SIA 0x0000000D
435#define RQFCR_PID_DPT 0x0000000E
436#define RQFCR_PID_SPT 0x0000000F
437
438/* RQFPR when PID is 0x0001 */
439#define RQFPR_HDR_GE_512 0x00200000
440#define RQFPR_LERR 0x00100000
441#define RQFPR_RAR 0x00080000
442#define RQFPR_RARQ 0x00040000
443#define RQFPR_AR 0x00020000
444#define RQFPR_ARQ 0x00010000
445#define RQFPR_EBC 0x00008000
446#define RQFPR_VLN 0x00004000
447#define RQFPR_CFI 0x00002000
448#define RQFPR_JUM 0x00001000
449#define RQFPR_IPF 0x00000800
450#define RQFPR_FIF 0x00000400
451#define RQFPR_IPV4 0x00000200
452#define RQFPR_IPV6 0x00000100
453#define RQFPR_ICC 0x00000080
454#define RQFPR_ICV 0x00000040
455#define RQFPR_TCP 0x00000020
456#define RQFPR_UDP 0x00000010
457#define RQFPR_TUC 0x00000008
458#define RQFPR_TUV 0x00000004
459#define RQFPR_PER 0x00000002
460#define RQFPR_EER 0x00000001
461
318/* TxBD status field bits */ 462/* TxBD status field bits */
319#define TXBD_READY 0x8000 463#define TXBD_READY 0x8000
320#define TXBD_PADCRC 0x4000 464#define TXBD_PADCRC 0x4000
@@ -503,25 +647,32 @@ struct gfar_stats {
503 647
504struct gfar { 648struct gfar {
505 u32 tsec_id; /* 0x.000 - Controller ID register */ 649 u32 tsec_id; /* 0x.000 - Controller ID register */
506 u8 res1[12]; 650 u32 tsec_id2; /* 0x.004 - Controller ID2 register */
651 u8 res1[8];
507 u32 ievent; /* 0x.010 - Interrupt Event Register */ 652 u32 ievent; /* 0x.010 - Interrupt Event Register */
508 u32 imask; /* 0x.014 - Interrupt Mask Register */ 653 u32 imask; /* 0x.014 - Interrupt Mask Register */
509 u32 edis; /* 0x.018 - Error Disabled Register */ 654 u32 edis; /* 0x.018 - Error Disabled Register */
510 u8 res2[4]; 655 u32 emapg; /* 0x.01c - Group Error mapping register */
511 u32 ecntrl; /* 0x.020 - Ethernet Control Register */ 656 u32 ecntrl; /* 0x.020 - Ethernet Control Register */
512 u32 minflr; /* 0x.024 - Minimum Frame Length Register */ 657 u32 minflr; /* 0x.024 - Minimum Frame Length Register */
513 u32 ptv; /* 0x.028 - Pause Time Value Register */ 658 u32 ptv; /* 0x.028 - Pause Time Value Register */
514 u32 dmactrl; /* 0x.02c - DMA Control Register */ 659 u32 dmactrl; /* 0x.02c - DMA Control Register */
515 u32 tbipa; /* 0x.030 - TBI PHY Address Register */ 660 u32 tbipa; /* 0x.030 - TBI PHY Address Register */
516 u8 res3[88]; 661 u8 res2[28];
662 u32 fifo_rx_pause; /* 0x.050 - FIFO receive pause start threshold
663 register */
664 u32 fifo_rx_pause_shutoff; /* x.054 - FIFO receive starve shutoff
665 register */
666 u32 fifo_rx_alarm; /* 0x.058 - FIFO receive alarm start threshold
667 register */
668 u32 fifo_rx_alarm_shutoff; /*0x.05c - FIFO receive alarm starve
669 shutoff register */
670 u8 res3[44];
517 u32 fifo_tx_thr; /* 0x.08c - FIFO transmit threshold register */ 671 u32 fifo_tx_thr; /* 0x.08c - FIFO transmit threshold register */
518 u8 res4[8]; 672 u8 res4[8];
519 u32 fifo_tx_starve; /* 0x.098 - FIFO transmit starve register */ 673 u32 fifo_tx_starve; /* 0x.098 - FIFO transmit starve register */
520 u32 fifo_tx_starve_shutoff; /* 0x.09c - FIFO transmit starve shutoff register */ 674 u32 fifo_tx_starve_shutoff; /* 0x.09c - FIFO transmit starve shutoff register */
521 u8 res5[4]; 675 u8 res5[96];
522 u32 fifo_rx_pause; /* 0x.0a4 - FIFO receive pause threshold register */
523 u32 fifo_rx_alarm; /* 0x.0a8 - FIFO receive alarm threshold register */
524 u8 res6[84];
525 u32 tctrl; /* 0x.100 - Transmit Control Register */ 676 u32 tctrl; /* 0x.100 - Transmit Control Register */
526 u32 tstat; /* 0x.104 - Transmit Status Register */ 677 u32 tstat; /* 0x.104 - Transmit Status Register */
527 u32 dfvlan; /* 0x.108 - Default VLAN Control word */ 678 u32 dfvlan; /* 0x.108 - Default VLAN Control word */
@@ -572,7 +723,11 @@ struct gfar {
572 u8 res12[8]; 723 u8 res12[8];
573 u32 rxic; /* 0x.310 - Receive Interrupt Coalescing Configuration Register */ 724 u32 rxic; /* 0x.310 - Receive Interrupt Coalescing Configuration Register */
574 u32 rqueue; /* 0x.314 - Receive queue control register */ 725 u32 rqueue; /* 0x.314 - Receive queue control register */
575 u8 res13[24]; 726 u32 rir0; /* 0x.318 - Ring mapping register 0 */
727 u32 rir1; /* 0x.31c - Ring mapping register 1 */
728 u32 rir2; /* 0x.320 - Ring mapping register 2 */
729 u32 rir3; /* 0x.324 - Ring mapping register 3 */
730 u8 res13[8];
576 u32 rbifx; /* 0x.330 - Receive bit field extract control register */ 731 u32 rbifx; /* 0x.330 - Receive bit field extract control register */
577 u32 rqfar; /* 0x.334 - Receive queue filing table address register */ 732 u32 rqfar; /* 0x.334 - Receive queue filing table address register */
578 u32 rqfcr; /* 0x.338 - Receive queue filing table control register */ 733 u32 rqfcr; /* 0x.338 - Receive queue filing table control register */
@@ -621,7 +776,7 @@ struct gfar {
621 u32 maxfrm; /* 0x.510 - Maximum Frame Length Register */ 776 u32 maxfrm; /* 0x.510 - Maximum Frame Length Register */
622 u8 res18[12]; 777 u8 res18[12];
623 u8 gfar_mii_regs[24]; /* See gianfar_phy.h */ 778 u8 gfar_mii_regs[24]; /* See gianfar_phy.h */
624 u8 res19[4]; 779 u32 ifctrl; /* 0x.538 - Interface control register */
625 u32 ifstat; /* 0x.53c - Interface Status Register */ 780 u32 ifstat; /* 0x.53c - Interface Status Register */
626 u32 macstnaddr1; /* 0x.540 - Station Address Part 1 Register */ 781 u32 macstnaddr1; /* 0x.540 - Station Address Part 1 Register */
627 u32 macstnaddr2; /* 0x.544 - Station Address Part 2 Register */ 782 u32 macstnaddr2; /* 0x.544 - Station Address Part 2 Register */
@@ -682,8 +837,30 @@ struct gfar {
682 u8 res23c[248]; 837 u8 res23c[248];
683 u32 attr; /* 0x.bf8 - Attributes Register */ 838 u32 attr; /* 0x.bf8 - Attributes Register */
684 u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */ 839 u32 attreli; /* 0x.bfc - Attributes Extract Length and Extract Index Register */
685 u8 res24[1024]; 840 u8 res24[688];
686 841 u32 isrg0; /* 0x.eb0 - Interrupt steering group 0 register */
842 u32 isrg1; /* 0x.eb4 - Interrupt steering group 1 register */
843 u32 isrg2; /* 0x.eb8 - Interrupt steering group 2 register */
844 u32 isrg3; /* 0x.ebc - Interrupt steering group 3 register */
845 u8 res25[16];
846 u32 rxic0; /* 0x.ed0 - Ring 0 Rx interrupt coalescing */
847 u32 rxic1; /* 0x.ed4 - Ring 1 Rx interrupt coalescing */
848 u32 rxic2; /* 0x.ed8 - Ring 2 Rx interrupt coalescing */
849 u32 rxic3; /* 0x.edc - Ring 3 Rx interrupt coalescing */
850 u32 rxic4; /* 0x.ee0 - Ring 4 Rx interrupt coalescing */
851 u32 rxic5; /* 0x.ee4 - Ring 5 Rx interrupt coalescing */
852 u32 rxic6; /* 0x.ee8 - Ring 6 Rx interrupt coalescing */
853 u32 rxic7; /* 0x.eec - Ring 7 Rx interrupt coalescing */
854 u8 res26[32];
855 u32 txic0; /* 0x.f10 - Ring 0 Tx interrupt coalescing */
856 u32 txic1; /* 0x.f14 - Ring 1 Tx interrupt coalescing */
857 u32 txic2; /* 0x.f18 - Ring 2 Tx interrupt coalescing */
858 u32 txic3; /* 0x.f1c - Ring 3 Tx interrupt coalescing */
859 u32 txic4; /* 0x.f20 - Ring 4 Tx interrupt coalescing */
860 u32 txic5; /* 0x.f24 - Ring 5 Tx interrupt coalescing */
861 u32 txic6; /* 0x.f28 - Ring 6 Tx interrupt coalescing */
862 u32 txic7; /* 0x.f2c - Ring 7 Tx interrupt coalescing */
863 u8 res27[208];
687}; 864};
688 865
689/* Flags related to gianfar device features */ 866/* Flags related to gianfar device features */
@@ -699,6 +876,133 @@ struct gfar {
699#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200 876#define FSL_GIANFAR_DEV_HAS_BD_STASHING 0x00000200
700#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400 877#define FSL_GIANFAR_DEV_HAS_BUF_STASHING 0x00000400
701 878
879#if (MAXGROUPS == 2)
880#define DEFAULT_MAPPING 0xAA
881#else
882#define DEFAULT_MAPPING 0xFF
883#endif
884
885#define ISRG_SHIFT_TX 0x10
886#define ISRG_SHIFT_RX 0x18
887
888/* The same driver can operate in two modes */
889/* SQ_SG_MODE: Single Queue Single Group Mode
890 * (Backward compatible mode)
891 * MQ_MG_MODE: Multi Queue Multi Group mode
892 */
893enum {
894 SQ_SG_MODE = 0,
895 MQ_MG_MODE
896};
897
898/**
899 * struct gfar_priv_tx_q - per tx queue structure
900 * @txlock: per queue tx spin lock
901 * @tx_skbuff:skb pointers
902 * @skb_curtx: to be used skb pointer
903 * @skb_dirtytx:the last used skb pointer
904 * @qindex: index of this queue
905 * @dev: back pointer to the dev structure
906 * @grp: back pointer to the group to which this queue belongs
907 * @tx_bd_base: First tx buffer descriptor
908 * @cur_tx: Next free ring entry
909 * @dirty_tx: First buffer in line to be transmitted
910 * @tx_ring_size: Tx ring size
911 * @num_txbdfree: number of free TxBds
912 * @txcoalescing: enable/disable tx coalescing
913 * @txic: transmit interrupt coalescing value
914 * @txcount: coalescing value if based on tx frame count
915 * @txtime: coalescing value if based on time
916 */
917struct gfar_priv_tx_q {
918 spinlock_t txlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
919 struct sk_buff ** tx_skbuff;
920 /* Buffer descriptor pointers */
921 dma_addr_t tx_bd_dma_base;
922 struct txbd8 *tx_bd_base;
923 struct txbd8 *cur_tx;
924 struct txbd8 *dirty_tx;
925 struct net_device *dev;
926 struct gfar_priv_grp *grp;
927 u16 skb_curtx;
928 u16 skb_dirtytx;
929 u16 qindex;
930 unsigned int tx_ring_size;
931 unsigned int num_txbdfree;
932 /* Configuration info for the coalescing features */
933 unsigned char txcoalescing;
934 unsigned long txic;
935 unsigned short txcount;
936 unsigned short txtime;
937};
938
939/**
940 * struct gfar_priv_rx_q - per rx queue structure
941 * @rxlock: per queue rx spin lock
942 * @rx_skbuff: skb pointers
943 * @skb_currx: currently use skb pointer
944 * @rx_bd_base: First rx buffer descriptor
945 * @cur_rx: Next free rx ring entry
946 * @qindex: index of this queue
947 * @dev: back pointer to the dev structure
948 * @rx_ring_size: Rx ring size
949 * @rxcoalescing: enable/disable rx-coalescing
950 * @rxic: receive interrupt coalescing vlaue
951 */
952
953struct gfar_priv_rx_q {
954 spinlock_t rxlock __attribute__ ((aligned (SMP_CACHE_BYTES)));
955 struct sk_buff ** rx_skbuff;
956 dma_addr_t rx_bd_dma_base;
957 struct rxbd8 *rx_bd_base;
958 struct rxbd8 *cur_rx;
959 struct net_device *dev;
960 struct gfar_priv_grp *grp;
961 u16 skb_currx;
962 u16 qindex;
963 unsigned int rx_ring_size;
964 /* RX Coalescing values */
965 unsigned char rxcoalescing;
966 unsigned long rxic;
967};
968
969/**
970 * struct gfar_priv_grp - per group structure
971 * @napi: the napi poll function
972 * @priv: back pointer to the priv structure
973 * @regs: the ioremapped register space for this group
974 * @grp_id: group id for this group
975 * @interruptTransmit: The TX interrupt number for this group
976 * @interruptReceive: The RX interrupt number for this group
977 * @interruptError: The ERROR interrupt number for this group
978 * @int_name_tx: tx interrupt name for this group
979 * @int_name_rx: rx interrupt name for this group
980 * @int_name_er: er interrupt name for this group
981 */
982
983struct gfar_priv_grp {
984 spinlock_t grplock __attribute__ ((aligned (SMP_CACHE_BYTES)));
985 struct napi_struct napi;
986 struct gfar_private *priv;
987 struct gfar __iomem *regs;
988 unsigned int grp_id;
989 unsigned long rx_bit_map;
990 unsigned long tx_bit_map;
991 unsigned long num_tx_queues;
992 unsigned long num_rx_queues;
993 unsigned int rstat;
994 unsigned int tstat;
995 unsigned int imask;
996 unsigned int ievent;
997 unsigned int interruptTransmit;
998 unsigned int interruptReceive;
999 unsigned int interruptError;
1000
1001 char int_name_tx[GFAR_INT_NAME_MAX];
1002 char int_name_rx[GFAR_INT_NAME_MAX];
1003 char int_name_er[GFAR_INT_NAME_MAX];
1004};
1005
702/* Struct stolen almost completely (and shamelessly) from the FCC enet source 1006/* Struct stolen almost completely (and shamelessly) from the FCC enet source
703 * (Ok, that's not so true anymore, but there is a family resemblence) 1007 * (Ok, that's not so true anymore, but there is a family resemblence)
704 * The GFAR buffer descriptors track the ring buffers. The rx_bd_base 1008 * The GFAR buffer descriptors track the ring buffers. The rx_bd_base
@@ -709,62 +1013,36 @@ struct gfar {
709 * the buffer descriptor determines the actual condition. 1013 * the buffer descriptor determines the actual condition.
710 */ 1014 */
711struct gfar_private { 1015struct gfar_private {
712 /* Fields controlled by TX lock */
713 spinlock_t txlock;
714 1016
715 /* Pointer to the array of skbuffs */ 1017 /* Indicates how many tx, rx queues are enabled */
716 struct sk_buff ** tx_skbuff; 1018 unsigned int num_tx_queues;
1019 unsigned int num_rx_queues;
1020 unsigned int num_grps;
1021 unsigned int mode;
717 1022
718 /* next free skb in the array */ 1023 /* The total tx and rx ring size for the enabled queues */
719 u16 skb_curtx; 1024 unsigned int total_tx_ring_size;
720 1025 unsigned int total_rx_ring_size;
721 /* First skb in line to be transmitted */
722 u16 skb_dirtytx;
723
724 /* Configuration info for the coalescing features */
725 unsigned char txcoalescing;
726 unsigned long txic;
727
728 /* Buffer descriptor pointers */
729 struct txbd8 *tx_bd_base; /* First tx buffer descriptor */
730 struct txbd8 *cur_tx; /* Next free ring entry */
731 struct txbd8 *dirty_tx; /* First buffer in line
732 to be transmitted */
733 unsigned int tx_ring_size;
734 unsigned int num_txbdfree; /* number of TxBDs free */
735
736 /* RX Locked fields */
737 spinlock_t rxlock;
738 1026
739 struct device_node *node; 1027 struct device_node *node;
740 struct net_device *ndev; 1028 struct net_device *ndev;
741 struct of_device *ofdev; 1029 struct of_device *ofdev;
742 struct napi_struct napi;
743
744 /* skb array and index */
745 struct sk_buff ** rx_skbuff;
746 u16 skb_currx;
747
748 /* RX Coalescing values */
749 unsigned char rxcoalescing;
750 unsigned long rxic;
751 1030
752 struct rxbd8 *rx_bd_base; /* First Rx buffers */ 1031 struct gfar_priv_grp gfargrp[MAXGROUPS];
753 struct rxbd8 *cur_rx; /* Next free rx ring entry */ 1032 struct gfar_priv_tx_q *tx_queue[MAX_TX_QS];
1033 struct gfar_priv_rx_q *rx_queue[MAX_RX_QS];
754 1034
755 /* RX parameters */ 1035 /* RX per device parameters */
756 unsigned int rx_ring_size;
757 unsigned int rx_buffer_size; 1036 unsigned int rx_buffer_size;
758 unsigned int rx_stash_size; 1037 unsigned int rx_stash_size;
759 unsigned int rx_stash_index; 1038 unsigned int rx_stash_index;
760 1039
1040 u32 cur_filer_idx;
1041
761 struct sk_buff_head rx_recycle; 1042 struct sk_buff_head rx_recycle;
762 1043
763 struct vlan_group *vlgrp; 1044 struct vlan_group *vlgrp;
764 1045
765 /* Unprotected fields */
766 /* Pointer to the GFAR memory mapped Registers */
767 struct gfar __iomem *regs;
768 1046
769 /* Hash registers and their width */ 1047 /* Hash registers and their width */
770 u32 __iomem *hash_regs[16]; 1048 u32 __iomem *hash_regs[16];
@@ -785,13 +1063,10 @@ struct gfar_private {
785 unsigned char rx_csum_enable:1, 1063 unsigned char rx_csum_enable:1,
786 extended_hash:1, 1064 extended_hash:1,
787 bd_stash_en:1, 1065 bd_stash_en:1,
1066 rx_filer_enable:1,
788 wol_en:1; /* Wake-on-LAN enabled */ 1067 wol_en:1; /* Wake-on-LAN enabled */
789 unsigned short padding; 1068 unsigned short padding;
790 1069
791 unsigned int interruptTransmit;
792 unsigned int interruptReceive;
793 unsigned int interruptError;
794
795 /* PHY stuff */ 1070 /* PHY stuff */
796 struct phy_device *phydev; 1071 struct phy_device *phydev;
797 struct mii_bus *mii_bus; 1072 struct mii_bus *mii_bus;
@@ -803,14 +1078,13 @@ struct gfar_private {
803 1078
804 struct work_struct reset_task; 1079 struct work_struct reset_task;
805 1080
806 char int_name_tx[GFAR_INT_NAME_MAX];
807 char int_name_rx[GFAR_INT_NAME_MAX];
808 char int_name_er[GFAR_INT_NAME_MAX];
809
810 /* Network Statistics */ 1081 /* Network Statistics */
811 struct gfar_extra_stats extra_stats; 1082 struct gfar_extra_stats extra_stats;
812}; 1083};
813 1084
1085extern unsigned int ftp_rqfpr[MAX_FILER_IDX + 1];
1086extern unsigned int ftp_rqfcr[MAX_FILER_IDX + 1];
1087
814static inline u32 gfar_read(volatile unsigned __iomem *addr) 1088static inline u32 gfar_read(volatile unsigned __iomem *addr)
815{ 1089{
816 u32 val; 1090 u32 val;
@@ -823,12 +1097,28 @@ static inline void gfar_write(volatile unsigned __iomem *addr, u32 val)
823 out_be32(addr, val); 1097 out_be32(addr, val);
824} 1098}
825 1099
1100static inline void gfar_write_filer(struct gfar_private *priv,
1101 unsigned int far, unsigned int fcr, unsigned int fpr)
1102{
1103 struct gfar __iomem *regs = priv->gfargrp[0].regs;
1104
1105 gfar_write(&regs->rqfar, far);
1106 gfar_write(&regs->rqfcr, fcr);
1107 gfar_write(&regs->rqfpr, fpr);
1108}
1109
1110extern void lock_rx_qs(struct gfar_private *priv);
1111extern void lock_tx_qs(struct gfar_private *priv);
1112extern void unlock_rx_qs(struct gfar_private *priv);
1113extern void unlock_tx_qs(struct gfar_private *priv);
826extern irqreturn_t gfar_receive(int irq, void *dev_id); 1114extern irqreturn_t gfar_receive(int irq, void *dev_id);
827extern int startup_gfar(struct net_device *dev); 1115extern int startup_gfar(struct net_device *dev);
828extern void stop_gfar(struct net_device *dev); 1116extern void stop_gfar(struct net_device *dev);
829extern void gfar_halt(struct net_device *dev); 1117extern void gfar_halt(struct net_device *dev);
830extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev, 1118extern void gfar_phy_test(struct mii_bus *bus, struct phy_device *phydev,
831 int enable, u32 regnum, u32 read); 1119 int enable, u32 regnum, u32 read);
1120extern void gfar_configure_coalescing(struct gfar_private *priv,
1121 unsigned long tx_mask, unsigned long rx_mask);
832void gfar_init_sysfs(struct net_device *dev); 1122void gfar_init_sysfs(struct net_device *dev);
833 1123
834extern const struct ethtool_ops gfar_ethtool_ops; 1124extern const struct ethtool_ops gfar_ethtool_ops;
diff --git a/drivers/net/gianfar_ethtool.c b/drivers/net/gianfar_ethtool.c
index 6c144b525b47..1010367695e4 100644
--- a/drivers/net/gianfar_ethtool.c
+++ b/drivers/net/gianfar_ethtool.c
@@ -7,8 +7,9 @@
7 * 7 *
8 * Author: Andy Fleming 8 * Author: Andy Fleming
9 * Maintainer: Kumar Gala 9 * Maintainer: Kumar Gala
10 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
10 * 11 *
11 * Copyright (c) 2003,2004 Freescale Semiconductor, Inc. 12 * Copyright 2003-2006, 2008-2009 Freescale Semiconductor, Inc.
12 * 13 *
13 * This software may be used and distributed according to 14 * This software may be used and distributed according to
14 * the terms of the GNU Public License, Version 2, incorporated herein 15 * the terms of the GNU Public License, Version 2, incorporated herein
@@ -41,7 +42,7 @@
41#include "gianfar.h" 42#include "gianfar.h"
42 43
43extern void gfar_start(struct net_device *dev); 44extern void gfar_start(struct net_device *dev);
44extern int gfar_clean_rx_ring(struct net_device *dev, int rx_work_limit); 45extern int gfar_clean_rx_ring(struct gfar_priv_rx_q *rx_queue, int rx_work_limit);
45 46
46#define GFAR_MAX_COAL_USECS 0xffff 47#define GFAR_MAX_COAL_USECS 0xffff
47#define GFAR_MAX_COAL_FRAMES 0xff 48#define GFAR_MAX_COAL_FRAMES 0xff
@@ -136,10 +137,11 @@ static void gfar_fill_stats(struct net_device *dev, struct ethtool_stats *dummy,
136{ 137{
137 int i; 138 int i;
138 struct gfar_private *priv = netdev_priv(dev); 139 struct gfar_private *priv = netdev_priv(dev);
140 struct gfar __iomem *regs = priv->gfargrp[0].regs;
139 u64 *extra = (u64 *) & priv->extra_stats; 141 u64 *extra = (u64 *) & priv->extra_stats;
140 142
141 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) { 143 if (priv->device_flags & FSL_GIANFAR_DEV_HAS_RMON) {
142 u32 __iomem *rmon = (u32 __iomem *) & priv->regs->rmon; 144 u32 __iomem *rmon = (u32 __iomem *) &regs->rmon;
143 struct gfar_stats *stats = (struct gfar_stats *) buf; 145 struct gfar_stats *stats = (struct gfar_stats *) buf;
144 146
145 for (i = 0; i < GFAR_RMON_LEN; i++) 147 for (i = 0; i < GFAR_RMON_LEN; i++)
@@ -197,12 +199,18 @@ static int gfar_gsettings(struct net_device *dev, struct ethtool_cmd *cmd)
197{ 199{
198 struct gfar_private *priv = netdev_priv(dev); 200 struct gfar_private *priv = netdev_priv(dev);
199 struct phy_device *phydev = priv->phydev; 201 struct phy_device *phydev = priv->phydev;
202 struct gfar_priv_rx_q *rx_queue = NULL;
203 struct gfar_priv_tx_q *tx_queue = NULL;
200 204
201 if (NULL == phydev) 205 if (NULL == phydev)
202 return -ENODEV; 206 return -ENODEV;
207 tx_queue = priv->tx_queue[0];
208 rx_queue = priv->rx_queue[0];
203 209
204 cmd->maxtxpkt = get_icft_value(priv->txic); 210 /* etsec-1.7 and older versions have only one txic
205 cmd->maxrxpkt = get_icft_value(priv->rxic); 211 * and rxic regs although they support multiple queues */
212 cmd->maxtxpkt = get_icft_value(tx_queue->txic);
213 cmd->maxrxpkt = get_icft_value(rx_queue->rxic);
206 214
207 return phy_ethtool_gset(phydev, cmd); 215 return phy_ethtool_gset(phydev, cmd);
208} 216}
@@ -218,7 +226,7 @@ static void gfar_get_regs(struct net_device *dev, struct ethtool_regs *regs, voi
218{ 226{
219 int i; 227 int i;
220 struct gfar_private *priv = netdev_priv(dev); 228 struct gfar_private *priv = netdev_priv(dev);
221 u32 __iomem *theregs = (u32 __iomem *) priv->regs; 229 u32 __iomem *theregs = (u32 __iomem *) priv->gfargrp[0].regs;
222 u32 *buf = (u32 *) regbuf; 230 u32 *buf = (u32 *) regbuf;
223 231
224 for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++) 232 for (i = 0; i < sizeof (struct gfar) / sizeof (u32); i++)
@@ -279,6 +287,8 @@ static unsigned int gfar_ticks2usecs(struct gfar_private *priv, unsigned int tic
279static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) 287static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
280{ 288{
281 struct gfar_private *priv = netdev_priv(dev); 289 struct gfar_private *priv = netdev_priv(dev);
290 struct gfar_priv_rx_q *rx_queue = NULL;
291 struct gfar_priv_tx_q *tx_queue = NULL;
282 unsigned long rxtime; 292 unsigned long rxtime;
283 unsigned long rxcount; 293 unsigned long rxcount;
284 unsigned long txtime; 294 unsigned long txtime;
@@ -290,10 +300,13 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
290 if (NULL == priv->phydev) 300 if (NULL == priv->phydev)
291 return -ENODEV; 301 return -ENODEV;
292 302
293 rxtime = get_ictt_value(priv->rxic); 303 rx_queue = priv->rx_queue[0];
294 rxcount = get_icft_value(priv->rxic); 304 tx_queue = priv->tx_queue[0];
295 txtime = get_ictt_value(priv->txic); 305
296 txcount = get_icft_value(priv->txic); 306 rxtime = get_ictt_value(rx_queue->rxic);
307 rxcount = get_icft_value(rx_queue->rxic);
308 txtime = get_ictt_value(tx_queue->txic);
309 txcount = get_icft_value(tx_queue->txic);
297 cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime); 310 cvals->rx_coalesce_usecs = gfar_ticks2usecs(priv, rxtime);
298 cvals->rx_max_coalesced_frames = rxcount; 311 cvals->rx_max_coalesced_frames = rxcount;
299 312
@@ -339,16 +352,23 @@ static int gfar_gcoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
339static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals) 352static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals)
340{ 353{
341 struct gfar_private *priv = netdev_priv(dev); 354 struct gfar_private *priv = netdev_priv(dev);
355 int i = 0;
342 356
343 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE)) 357 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_COALESCE))
344 return -EOPNOTSUPP; 358 return -EOPNOTSUPP;
345 359
346 /* Set up rx coalescing */ 360 /* Set up rx coalescing */
361 /* As of now, we will enable/disable coalescing for all
362 * queues together in case of eTSEC2, this will be modified
363 * along with the ethtool interface */
347 if ((cvals->rx_coalesce_usecs == 0) || 364 if ((cvals->rx_coalesce_usecs == 0) ||
348 (cvals->rx_max_coalesced_frames == 0)) 365 (cvals->rx_max_coalesced_frames == 0)) {
349 priv->rxcoalescing = 0; 366 for (i = 0; i < priv->num_rx_queues; i++)
350 else 367 priv->rx_queue[i]->rxcoalescing = 0;
351 priv->rxcoalescing = 1; 368 } else {
369 for (i = 0; i < priv->num_rx_queues; i++)
370 priv->rx_queue[i]->rxcoalescing = 1;
371 }
352 372
353 if (NULL == priv->phydev) 373 if (NULL == priv->phydev)
354 return -ENODEV; 374 return -ENODEV;
@@ -366,15 +386,21 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
366 return -EINVAL; 386 return -EINVAL;
367 } 387 }
368 388
369 priv->rxic = mk_ic_value(cvals->rx_max_coalesced_frames, 389 for (i = 0; i < priv->num_rx_queues; i++) {
370 gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs)); 390 priv->rx_queue[i]->rxic = mk_ic_value(
391 cvals->rx_max_coalesced_frames,
392 gfar_usecs2ticks(priv, cvals->rx_coalesce_usecs));
393 }
371 394
372 /* Set up tx coalescing */ 395 /* Set up tx coalescing */
373 if ((cvals->tx_coalesce_usecs == 0) || 396 if ((cvals->tx_coalesce_usecs == 0) ||
374 (cvals->tx_max_coalesced_frames == 0)) 397 (cvals->tx_max_coalesced_frames == 0)) {
375 priv->txcoalescing = 0; 398 for (i = 0; i < priv->num_tx_queues; i++)
376 else 399 priv->tx_queue[i]->txcoalescing = 0;
377 priv->txcoalescing = 1; 400 } else {
401 for (i = 0; i < priv->num_tx_queues; i++)
402 priv->tx_queue[i]->txcoalescing = 1;
403 }
378 404
379 /* Check the bounds of the values */ 405 /* Check the bounds of the values */
380 if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) { 406 if (cvals->tx_coalesce_usecs > GFAR_MAX_COAL_USECS) {
@@ -389,16 +415,13 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
389 return -EINVAL; 415 return -EINVAL;
390 } 416 }
391 417
392 priv->txic = mk_ic_value(cvals->tx_max_coalesced_frames, 418 for (i = 0; i < priv->num_tx_queues; i++) {
393 gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs)); 419 priv->tx_queue[i]->txic = mk_ic_value(
394 420 cvals->tx_max_coalesced_frames,
395 gfar_write(&priv->regs->rxic, 0); 421 gfar_usecs2ticks(priv, cvals->tx_coalesce_usecs));
396 if (priv->rxcoalescing) 422 }
397 gfar_write(&priv->regs->rxic, priv->rxic);
398 423
399 gfar_write(&priv->regs->txic, 0); 424 gfar_configure_coalescing(priv, 0xFF, 0xFF);
400 if (priv->txcoalescing)
401 gfar_write(&priv->regs->txic, priv->txic);
402 425
403 return 0; 426 return 0;
404} 427}
@@ -409,6 +432,11 @@ static int gfar_scoalesce(struct net_device *dev, struct ethtool_coalesce *cvals
409static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals) 432static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
410{ 433{
411 struct gfar_private *priv = netdev_priv(dev); 434 struct gfar_private *priv = netdev_priv(dev);
435 struct gfar_priv_tx_q *tx_queue = NULL;
436 struct gfar_priv_rx_q *rx_queue = NULL;
437
438 tx_queue = priv->tx_queue[0];
439 rx_queue = priv->rx_queue[0];
412 440
413 rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE; 441 rvals->rx_max_pending = GFAR_RX_MAX_RING_SIZE;
414 rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE; 442 rvals->rx_mini_max_pending = GFAR_RX_MAX_RING_SIZE;
@@ -418,10 +446,10 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv
418 /* Values changeable by the user. The valid values are 446 /* Values changeable by the user. The valid values are
419 * in the range 1 to the "*_max_pending" counterpart above. 447 * in the range 1 to the "*_max_pending" counterpart above.
420 */ 448 */
421 rvals->rx_pending = priv->rx_ring_size; 449 rvals->rx_pending = rx_queue->rx_ring_size;
422 rvals->rx_mini_pending = priv->rx_ring_size; 450 rvals->rx_mini_pending = rx_queue->rx_ring_size;
423 rvals->rx_jumbo_pending = priv->rx_ring_size; 451 rvals->rx_jumbo_pending = rx_queue->rx_ring_size;
424 rvals->tx_pending = priv->tx_ring_size; 452 rvals->tx_pending = tx_queue->tx_ring_size;
425} 453}
426 454
427/* Change the current ring parameters, stopping the controller if 455/* Change the current ring parameters, stopping the controller if
@@ -431,7 +459,7 @@ static void gfar_gringparam(struct net_device *dev, struct ethtool_ringparam *rv
431static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals) 459static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rvals)
432{ 460{
433 struct gfar_private *priv = netdev_priv(dev); 461 struct gfar_private *priv = netdev_priv(dev);
434 int err = 0; 462 int err = 0, i = 0;
435 463
436 if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE) 464 if (rvals->rx_pending > GFAR_RX_MAX_RING_SIZE)
437 return -EINVAL; 465 return -EINVAL;
@@ -451,34 +479,41 @@ static int gfar_sringparam(struct net_device *dev, struct ethtool_ringparam *rva
451 return -EINVAL; 479 return -EINVAL;
452 } 480 }
453 481
482
454 if (dev->flags & IFF_UP) { 483 if (dev->flags & IFF_UP) {
455 unsigned long flags; 484 unsigned long flags;
456 485
457 /* Halt TX and RX, and process the frames which 486 /* Halt TX and RX, and process the frames which
458 * have already been received */ 487 * have already been received */
459 spin_lock_irqsave(&priv->txlock, flags); 488 local_irq_save(flags);
460 spin_lock(&priv->rxlock); 489 lock_tx_qs(priv);
490 lock_rx_qs(priv);
461 491
462 gfar_halt(dev); 492 gfar_halt(dev);
463 493
464 spin_unlock(&priv->rxlock); 494 unlock_rx_qs(priv);
465 spin_unlock_irqrestore(&priv->txlock, flags); 495 unlock_tx_qs(priv);
496 local_irq_restore(flags);
466 497
467 gfar_clean_rx_ring(dev, priv->rx_ring_size); 498 for (i = 0; i < priv->num_rx_queues; i++)
499 gfar_clean_rx_ring(priv->rx_queue[i],
500 priv->rx_queue[i]->rx_ring_size);
468 501
469 /* Now we take down the rings to rebuild them */ 502 /* Now we take down the rings to rebuild them */
470 stop_gfar(dev); 503 stop_gfar(dev);
471 } 504 }
472 505
473 /* Change the size */ 506 /* Change the size */
474 priv->rx_ring_size = rvals->rx_pending; 507 for (i = 0; i < priv->num_rx_queues; i++) {
475 priv->tx_ring_size = rvals->tx_pending; 508 priv->rx_queue[i]->rx_ring_size = rvals->rx_pending;
476 priv->num_txbdfree = priv->tx_ring_size; 509 priv->tx_queue[i]->tx_ring_size = rvals->tx_pending;
510 priv->tx_queue[i]->num_txbdfree = priv->tx_queue[i]->tx_ring_size;
511 }
477 512
478 /* Rebuild the rings with the new size */ 513 /* Rebuild the rings with the new size */
479 if (dev->flags & IFF_UP) { 514 if (dev->flags & IFF_UP) {
480 err = startup_gfar(dev); 515 err = startup_gfar(dev);
481 netif_wake_queue(dev); 516 netif_tx_wake_all_queues(dev);
482 } 517 }
483 return err; 518 return err;
484} 519}
@@ -487,23 +522,28 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
487{ 522{
488 struct gfar_private *priv = netdev_priv(dev); 523 struct gfar_private *priv = netdev_priv(dev);
489 unsigned long flags; 524 unsigned long flags;
490 int err = 0; 525 int err = 0, i = 0;
491 526
492 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM)) 527 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_CSUM))
493 return -EOPNOTSUPP; 528 return -EOPNOTSUPP;
494 529
530
495 if (dev->flags & IFF_UP) { 531 if (dev->flags & IFF_UP) {
496 /* Halt TX and RX, and process the frames which 532 /* Halt TX and RX, and process the frames which
497 * have already been received */ 533 * have already been received */
498 spin_lock_irqsave(&priv->txlock, flags); 534 local_irq_save(flags);
499 spin_lock(&priv->rxlock); 535 lock_tx_qs(priv);
536 lock_rx_qs(priv);
500 537
501 gfar_halt(dev); 538 gfar_halt(dev);
502 539
503 spin_unlock(&priv->rxlock); 540 unlock_tx_qs(priv);
504 spin_unlock_irqrestore(&priv->txlock, flags); 541 unlock_rx_qs(priv);
542 local_irq_save(flags);
505 543
506 gfar_clean_rx_ring(dev, priv->rx_ring_size); 544 for (i = 0; i < priv->num_rx_queues; i++)
545 gfar_clean_rx_ring(priv->rx_queue[i],
546 priv->rx_queue[i]->rx_ring_size);
507 547
508 /* Now we take down the rings to rebuild them */ 548 /* Now we take down the rings to rebuild them */
509 stop_gfar(dev); 549 stop_gfar(dev);
@@ -515,7 +555,7 @@ static int gfar_set_rx_csum(struct net_device *dev, uint32_t data)
515 555
516 if (dev->flags & IFF_UP) { 556 if (dev->flags & IFF_UP) {
517 err = startup_gfar(dev); 557 err = startup_gfar(dev);
518 netif_wake_queue(dev); 558 netif_tx_wake_all_queues(dev);
519 } 559 }
520 return err; 560 return err;
521} 561}
@@ -605,6 +645,241 @@ static int gfar_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
605} 645}
606#endif 646#endif
607 647
648static int gfar_ethflow_to_class(int flow_type, u64 *class)
649{
650 switch (flow_type) {
651 case TCP_V4_FLOW:
652 *class = CLASS_CODE_TCP_IPV4;
653 break;
654 case UDP_V4_FLOW:
655 *class = CLASS_CODE_UDP_IPV4;
656 break;
657 case AH_V4_FLOW:
658 case ESP_V4_FLOW:
659 *class = CLASS_CODE_AH_ESP_IPV4;
660 break;
661 case SCTP_V4_FLOW:
662 *class = CLASS_CODE_SCTP_IPV4;
663 break;
664 case TCP_V6_FLOW:
665 *class = CLASS_CODE_TCP_IPV6;
666 break;
667 case UDP_V6_FLOW:
668 *class = CLASS_CODE_UDP_IPV6;
669 break;
670 case AH_V6_FLOW:
671 case ESP_V6_FLOW:
672 *class = CLASS_CODE_AH_ESP_IPV6;
673 break;
674 case SCTP_V6_FLOW:
675 *class = CLASS_CODE_SCTP_IPV6;
676 break;
677 default:
678 return 0;
679 }
680
681 return 1;
682}
683
684static void ethflow_to_filer_rules (struct gfar_private *priv, u64 ethflow)
685{
686 u32 fcr = 0x0, fpr = FPR_FILER_MASK;
687
688 if (ethflow & RXH_L2DA) {
689 fcr = RQFCR_PID_DAH |RQFCR_CMP_NOMATCH |
690 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
691 ftp_rqfpr[priv->cur_filer_idx] = fpr;
692 ftp_rqfcr[priv->cur_filer_idx] = fcr;
693 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
694 priv->cur_filer_idx = priv->cur_filer_idx - 1;
695
696 fcr = RQFCR_PID_DAL | RQFCR_AND | RQFCR_CMP_NOMATCH |
697 RQFCR_HASH | RQFCR_AND | RQFCR_HASHTBL_0;
698 ftp_rqfpr[priv->cur_filer_idx] = fpr;
699 ftp_rqfcr[priv->cur_filer_idx] = fcr;
700 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
701 priv->cur_filer_idx = priv->cur_filer_idx - 1;
702 }
703
704 if (ethflow & RXH_VLAN) {
705 fcr = RQFCR_PID_VID | RQFCR_CMP_NOMATCH | RQFCR_HASH |
706 RQFCR_AND | RQFCR_HASHTBL_0;
707 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
708 ftp_rqfpr[priv->cur_filer_idx] = fpr;
709 ftp_rqfcr[priv->cur_filer_idx] = fcr;
710 priv->cur_filer_idx = priv->cur_filer_idx - 1;
711 }
712
713 if (ethflow & RXH_IP_SRC) {
714 fcr = RQFCR_PID_SIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
715 RQFCR_AND | RQFCR_HASHTBL_0;
716 ftp_rqfpr[priv->cur_filer_idx] = fpr;
717 ftp_rqfcr[priv->cur_filer_idx] = fcr;
718 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
719 priv->cur_filer_idx = priv->cur_filer_idx - 1;
720 }
721
722 if (ethflow & (RXH_IP_DST)) {
723 fcr = RQFCR_PID_DIA | RQFCR_CMP_NOMATCH | RQFCR_HASH |
724 RQFCR_AND | RQFCR_HASHTBL_0;
725 ftp_rqfpr[priv->cur_filer_idx] = fpr;
726 ftp_rqfcr[priv->cur_filer_idx] = fcr;
727 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
728 priv->cur_filer_idx = priv->cur_filer_idx - 1;
729 }
730
731 if (ethflow & RXH_L3_PROTO) {
732 fcr = RQFCR_PID_L4P | RQFCR_CMP_NOMATCH | RQFCR_HASH |
733 RQFCR_AND | RQFCR_HASHTBL_0;
734 ftp_rqfpr[priv->cur_filer_idx] = fpr;
735 ftp_rqfcr[priv->cur_filer_idx] = fcr;
736 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
737 priv->cur_filer_idx = priv->cur_filer_idx - 1;
738 }
739
740 if (ethflow & RXH_L4_B_0_1) {
741 fcr = RQFCR_PID_SPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
742 RQFCR_AND | RQFCR_HASHTBL_0;
743 ftp_rqfpr[priv->cur_filer_idx] = fpr;
744 ftp_rqfcr[priv->cur_filer_idx] = fcr;
745 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
746 priv->cur_filer_idx = priv->cur_filer_idx - 1;
747 }
748
749 if (ethflow & RXH_L4_B_2_3) {
750 fcr = RQFCR_PID_DPT | RQFCR_CMP_NOMATCH | RQFCR_HASH |
751 RQFCR_AND | RQFCR_HASHTBL_0;
752 ftp_rqfpr[priv->cur_filer_idx] = fpr;
753 ftp_rqfcr[priv->cur_filer_idx] = fcr;
754 gfar_write_filer(priv, priv->cur_filer_idx, fcr, fpr);
755 priv->cur_filer_idx = priv->cur_filer_idx - 1;
756 }
757}
758
759static int gfar_ethflow_to_filer_table(struct gfar_private *priv, u64 ethflow, u64 class)
760{
761 unsigned int last_rule_idx = priv->cur_filer_idx;
762 unsigned int cmp_rqfpr;
763 unsigned int local_rqfpr[MAX_FILER_IDX + 1];
764 unsigned int local_rqfcr[MAX_FILER_IDX + 1];
765 int i = 0x0, k = 0x0;
766 int j = MAX_FILER_IDX, l = 0x0;
767
768 switch (class) {
769 case TCP_V4_FLOW:
770 cmp_rqfpr = RQFPR_IPV4 |RQFPR_TCP;
771 break;
772 case UDP_V4_FLOW:
773 cmp_rqfpr = RQFPR_IPV4 |RQFPR_UDP;
774 break;
775 case TCP_V6_FLOW:
776 cmp_rqfpr = RQFPR_IPV6 |RQFPR_TCP;
777 break;
778 case UDP_V6_FLOW:
779 cmp_rqfpr = RQFPR_IPV6 |RQFPR_UDP;
780 break;
781 case IPV4_FLOW:
782 cmp_rqfpr = RQFPR_IPV4;
783 case IPV6_FLOW:
784 cmp_rqfpr = RQFPR_IPV6;
785 break;
786 default:
787 printk(KERN_ERR "Right now this class is not supported\n");
788 return 0;
789 }
790
791 for (i = 0; i < MAX_FILER_IDX + 1; i++) {
792 local_rqfpr[j] = ftp_rqfpr[i];
793 local_rqfcr[j] = ftp_rqfcr[i];
794 j--;
795 if ((ftp_rqfcr[i] == (RQFCR_PID_PARSE |
796 RQFCR_CLE |RQFCR_AND)) &&
797 (ftp_rqfpr[i] == cmp_rqfpr))
798 break;
799 }
800
801 if (i == MAX_FILER_IDX + 1) {
802 printk(KERN_ERR "No parse rule found, ");
803 printk(KERN_ERR "can't create hash rules\n");
804 return 0;
805 }
806
807 /* If a match was found, then it begins the starting of a cluster rule
808 * if it was already programmed, we need to overwrite these rules
809 */
810 for (l = i+1; l < MAX_FILER_IDX; l++) {
811 if ((ftp_rqfcr[l] & RQFCR_CLE) &&
812 !(ftp_rqfcr[l] & RQFCR_AND)) {
813 ftp_rqfcr[l] = RQFCR_CLE | RQFCR_CMP_EXACT |
814 RQFCR_HASHTBL_0 | RQFCR_PID_MASK;
815 ftp_rqfpr[l] = FPR_FILER_MASK;
816 gfar_write_filer(priv, l, ftp_rqfcr[l], ftp_rqfpr[l]);
817 break;
818 }
819
820 if (!(ftp_rqfcr[l] & RQFCR_CLE) && (ftp_rqfcr[l] & RQFCR_AND))
821 continue;
822 else {
823 local_rqfpr[j] = ftp_rqfpr[l];
824 local_rqfcr[j] = ftp_rqfcr[l];
825 j--;
826 }
827 }
828
829 priv->cur_filer_idx = l - 1;
830 last_rule_idx = l;
831
832 /* hash rules */
833 ethflow_to_filer_rules(priv, ethflow);
834
835 /* Write back the popped out rules again */
836 for (k = j+1; k < MAX_FILER_IDX; k++) {
837 ftp_rqfpr[priv->cur_filer_idx] = local_rqfpr[k];
838 ftp_rqfcr[priv->cur_filer_idx] = local_rqfcr[k];
839 gfar_write_filer(priv, priv->cur_filer_idx,
840 local_rqfcr[k], local_rqfpr[k]);
841 if (!priv->cur_filer_idx)
842 break;
843 priv->cur_filer_idx = priv->cur_filer_idx - 1;
844 }
845
846 return 1;
847}
848
849static int gfar_set_hash_opts(struct gfar_private *priv, struct ethtool_rxnfc *cmd)
850{
851 u64 class;
852
853 if (!gfar_ethflow_to_class(cmd->flow_type, &class))
854 return -EINVAL;
855
856 if (class < CLASS_CODE_USER_PROG1 ||
857 class > CLASS_CODE_SCTP_IPV6)
858 return -EINVAL;
859
860 /* write the filer rules here */
861 if (!gfar_ethflow_to_filer_table(priv, cmd->data, cmd->flow_type))
862 return -1;
863
864 return 0;
865}
866
867static int gfar_set_nfc(struct net_device *dev, struct ethtool_rxnfc *cmd)
868{
869 struct gfar_private *priv = netdev_priv(dev);
870 int ret = 0;
871
872 switch(cmd->cmd) {
873 case ETHTOOL_SRXFH:
874 ret = gfar_set_hash_opts(priv, cmd);
875 break;
876 default:
877 ret = -EINVAL;
878 }
879
880 return ret;
881}
882
608const struct ethtool_ops gfar_ethtool_ops = { 883const struct ethtool_ops gfar_ethtool_ops = {
609 .get_settings = gfar_gsettings, 884 .get_settings = gfar_gsettings,
610 .set_settings = gfar_ssettings, 885 .set_settings = gfar_ssettings,
@@ -630,4 +905,5 @@ const struct ethtool_ops gfar_ethtool_ops = {
630 .get_wol = gfar_get_wol, 905 .get_wol = gfar_get_wol,
631 .set_wol = gfar_set_wol, 906 .set_wol = gfar_set_wol,
632#endif 907#endif
908 .set_rxnfc = gfar_set_nfc,
633}; 909};
diff --git a/drivers/net/gianfar_sysfs.c b/drivers/net/gianfar_sysfs.c
index dd26da74f27a..b31c9c8876e6 100644
--- a/drivers/net/gianfar_sysfs.c
+++ b/drivers/net/gianfar_sysfs.c
@@ -8,8 +8,9 @@
8 * 8 *
9 * Author: Andy Fleming 9 * Author: Andy Fleming
10 * Maintainer: Kumar Gala (galak@kernel.crashing.org) 10 * Maintainer: Kumar Gala (galak@kernel.crashing.org)
11 * Modifier: Sandeep Gopalpet <sandeep.kumar@freescale.com>
11 * 12 *
12 * Copyright (c) 2002-2005 Freescale Semiconductor, Inc. 13 * Copyright 2002-2009 Freescale Semiconductor, Inc.
13 * 14 *
14 * This program is free software; you can redistribute it and/or modify it 15 * This program is free software; you can redistribute it and/or modify it
15 * under the terms of the GNU General Public License as published by the 16 * under the terms of the GNU General Public License as published by the
@@ -49,6 +50,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
49 const char *buf, size_t count) 50 const char *buf, size_t count)
50{ 51{
51 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 52 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
53 struct gfar __iomem *regs = priv->gfargrp[0].regs;
52 int new_setting = 0; 54 int new_setting = 0;
53 u32 temp; 55 u32 temp;
54 unsigned long flags; 56 unsigned long flags;
@@ -56,6 +58,7 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
56 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING)) 58 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BD_STASHING))
57 return count; 59 return count;
58 60
61
59 /* Find out the new setting */ 62 /* Find out the new setting */
60 if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1)) 63 if (!strncmp("on", buf, count - 1) || !strncmp("1", buf, count - 1))
61 new_setting = 1; 64 new_setting = 1;
@@ -65,21 +68,24 @@ static ssize_t gfar_set_bd_stash(struct device *dev,
65 else 68 else
66 return count; 69 return count;
67 70
68 spin_lock_irqsave(&priv->rxlock, flags); 71
72 local_irq_save(flags);
73 lock_rx_qs(priv);
69 74
70 /* Set the new stashing value */ 75 /* Set the new stashing value */
71 priv->bd_stash_en = new_setting; 76 priv->bd_stash_en = new_setting;
72 77
73 temp = gfar_read(&priv->regs->attr); 78 temp = gfar_read(&regs->attr);
74 79
75 if (new_setting) 80 if (new_setting)
76 temp |= ATTR_BDSTASH; 81 temp |= ATTR_BDSTASH;
77 else 82 else
78 temp &= ~(ATTR_BDSTASH); 83 temp &= ~(ATTR_BDSTASH);
79 84
80 gfar_write(&priv->regs->attr, temp); 85 gfar_write(&regs->attr, temp);
81 86
82 spin_unlock_irqrestore(&priv->rxlock, flags); 87 unlock_rx_qs(priv);
88 local_irq_restore(flags);
83 89
84 return count; 90 return count;
85} 91}
@@ -99,6 +105,7 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
99 const char *buf, size_t count) 105 const char *buf, size_t count)
100{ 106{
101 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 107 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
108 struct gfar __iomem *regs = priv->gfargrp[0].regs;
102 unsigned int length = simple_strtoul(buf, NULL, 0); 109 unsigned int length = simple_strtoul(buf, NULL, 0);
103 u32 temp; 110 u32 temp;
104 unsigned long flags; 111 unsigned long flags;
@@ -106,7 +113,9 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
106 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING)) 113 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
107 return count; 114 return count;
108 115
109 spin_lock_irqsave(&priv->rxlock, flags); 116 local_irq_save(flags);
117 lock_rx_qs(priv);
118
110 if (length > priv->rx_buffer_size) 119 if (length > priv->rx_buffer_size)
111 goto out; 120 goto out;
112 121
@@ -115,23 +124,24 @@ static ssize_t gfar_set_rx_stash_size(struct device *dev,
115 124
116 priv->rx_stash_size = length; 125 priv->rx_stash_size = length;
117 126
118 temp = gfar_read(&priv->regs->attreli); 127 temp = gfar_read(&regs->attreli);
119 temp &= ~ATTRELI_EL_MASK; 128 temp &= ~ATTRELI_EL_MASK;
120 temp |= ATTRELI_EL(length); 129 temp |= ATTRELI_EL(length);
121 gfar_write(&priv->regs->attreli, temp); 130 gfar_write(&regs->attreli, temp);
122 131
123 /* Turn stashing on/off as appropriate */ 132 /* Turn stashing on/off as appropriate */
124 temp = gfar_read(&priv->regs->attr); 133 temp = gfar_read(&regs->attr);
125 134
126 if (length) 135 if (length)
127 temp |= ATTR_BUFSTASH; 136 temp |= ATTR_BUFSTASH;
128 else 137 else
129 temp &= ~(ATTR_BUFSTASH); 138 temp &= ~(ATTR_BUFSTASH);
130 139
131 gfar_write(&priv->regs->attr, temp); 140 gfar_write(&regs->attr, temp);
132 141
133out: 142out:
134 spin_unlock_irqrestore(&priv->rxlock, flags); 143 unlock_rx_qs(priv);
144 local_irq_restore(flags);
135 145
136 return count; 146 return count;
137} 147}
@@ -154,6 +164,7 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
154 const char *buf, size_t count) 164 const char *buf, size_t count)
155{ 165{
156 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 166 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
167 struct gfar __iomem *regs = priv->gfargrp[0].regs;
157 unsigned short index = simple_strtoul(buf, NULL, 0); 168 unsigned short index = simple_strtoul(buf, NULL, 0);
158 u32 temp; 169 u32 temp;
159 unsigned long flags; 170 unsigned long flags;
@@ -161,7 +172,9 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
161 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING)) 172 if (!(priv->device_flags & FSL_GIANFAR_DEV_HAS_BUF_STASHING))
162 return count; 173 return count;
163 174
164 spin_lock_irqsave(&priv->rxlock, flags); 175 local_irq_save(flags);
176 lock_rx_qs(priv);
177
165 if (index > priv->rx_stash_size) 178 if (index > priv->rx_stash_size)
166 goto out; 179 goto out;
167 180
@@ -170,13 +183,14 @@ static ssize_t gfar_set_rx_stash_index(struct device *dev,
170 183
171 priv->rx_stash_index = index; 184 priv->rx_stash_index = index;
172 185
173 temp = gfar_read(&priv->regs->attreli); 186 temp = gfar_read(&regs->attreli);
174 temp &= ~ATTRELI_EI_MASK; 187 temp &= ~ATTRELI_EI_MASK;
175 temp |= ATTRELI_EI(index); 188 temp |= ATTRELI_EI(index);
176 gfar_write(&priv->regs->attreli, flags); 189 gfar_write(&regs->attreli, temp);
177 190
178out: 191out:
179 spin_unlock_irqrestore(&priv->rxlock, flags); 192 unlock_rx_qs(priv);
193 local_irq_restore(flags);
180 194
181 return count; 195 return count;
182} 196}
@@ -198,6 +212,7 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
198 const char *buf, size_t count) 212 const char *buf, size_t count)
199{ 213{
200 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 214 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
215 struct gfar __iomem *regs = priv->gfargrp[0].regs;
201 unsigned int length = simple_strtoul(buf, NULL, 0); 216 unsigned int length = simple_strtoul(buf, NULL, 0);
202 u32 temp; 217 u32 temp;
203 unsigned long flags; 218 unsigned long flags;
@@ -205,16 +220,18 @@ static ssize_t gfar_set_fifo_threshold(struct device *dev,
205 if (length > GFAR_MAX_FIFO_THRESHOLD) 220 if (length > GFAR_MAX_FIFO_THRESHOLD)
206 return count; 221 return count;
207 222
208 spin_lock_irqsave(&priv->txlock, flags); 223 local_irq_save(flags);
224 lock_tx_qs(priv);
209 225
210 priv->fifo_threshold = length; 226 priv->fifo_threshold = length;
211 227
212 temp = gfar_read(&priv->regs->fifo_tx_thr); 228 temp = gfar_read(&regs->fifo_tx_thr);
213 temp &= ~FIFO_TX_THR_MASK; 229 temp &= ~FIFO_TX_THR_MASK;
214 temp |= length; 230 temp |= length;
215 gfar_write(&priv->regs->fifo_tx_thr, temp); 231 gfar_write(&regs->fifo_tx_thr, temp);
216 232
217 spin_unlock_irqrestore(&priv->txlock, flags); 233 unlock_tx_qs(priv);
234 local_irq_restore(flags);
218 235
219 return count; 236 return count;
220} 237}
@@ -235,6 +252,7 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
235 const char *buf, size_t count) 252 const char *buf, size_t count)
236{ 253{
237 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 254 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
255 struct gfar __iomem *regs = priv->gfargrp[0].regs;
238 unsigned int num = simple_strtoul(buf, NULL, 0); 256 unsigned int num = simple_strtoul(buf, NULL, 0);
239 u32 temp; 257 u32 temp;
240 unsigned long flags; 258 unsigned long flags;
@@ -242,16 +260,18 @@ static ssize_t gfar_set_fifo_starve(struct device *dev,
242 if (num > GFAR_MAX_FIFO_STARVE) 260 if (num > GFAR_MAX_FIFO_STARVE)
243 return count; 261 return count;
244 262
245 spin_lock_irqsave(&priv->txlock, flags); 263 local_irq_save(flags);
264 lock_tx_qs(priv);
246 265
247 priv->fifo_starve = num; 266 priv->fifo_starve = num;
248 267
249 temp = gfar_read(&priv->regs->fifo_tx_starve); 268 temp = gfar_read(&regs->fifo_tx_starve);
250 temp &= ~FIFO_TX_STARVE_MASK; 269 temp &= ~FIFO_TX_STARVE_MASK;
251 temp |= num; 270 temp |= num;
252 gfar_write(&priv->regs->fifo_tx_starve, temp); 271 gfar_write(&regs->fifo_tx_starve, temp);
253 272
254 spin_unlock_irqrestore(&priv->txlock, flags); 273 unlock_tx_qs(priv);
274 local_irq_restore(flags);
255 275
256 return count; 276 return count;
257} 277}
@@ -273,6 +293,7 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
273 const char *buf, size_t count) 293 const char *buf, size_t count)
274{ 294{
275 struct gfar_private *priv = netdev_priv(to_net_dev(dev)); 295 struct gfar_private *priv = netdev_priv(to_net_dev(dev));
296 struct gfar __iomem *regs = priv->gfargrp[0].regs;
276 unsigned int num = simple_strtoul(buf, NULL, 0); 297 unsigned int num = simple_strtoul(buf, NULL, 0);
277 u32 temp; 298 u32 temp;
278 unsigned long flags; 299 unsigned long flags;
@@ -280,16 +301,18 @@ static ssize_t gfar_set_fifo_starve_off(struct device *dev,
280 if (num > GFAR_MAX_FIFO_STARVE_OFF) 301 if (num > GFAR_MAX_FIFO_STARVE_OFF)
281 return count; 302 return count;
282 303
283 spin_lock_irqsave(&priv->txlock, flags); 304 local_irq_save(flags);
305 lock_tx_qs(priv);
284 306
285 priv->fifo_starve_off = num; 307 priv->fifo_starve_off = num;
286 308
287 temp = gfar_read(&priv->regs->fifo_tx_starve_shutoff); 309 temp = gfar_read(&regs->fifo_tx_starve_shutoff);
288 temp &= ~FIFO_TX_STARVE_OFF_MASK; 310 temp &= ~FIFO_TX_STARVE_OFF_MASK;
289 temp |= num; 311 temp |= num;
290 gfar_write(&priv->regs->fifo_tx_starve_shutoff, temp); 312 gfar_write(&regs->fifo_tx_starve_shutoff, temp);
291 313
292 spin_unlock_irqrestore(&priv->txlock, flags); 314 unlock_tx_qs(priv);
315 local_irq_restore(flags);
293 316
294 return count; 317 return count;
295} 318}
diff --git a/drivers/net/hamachi.c b/drivers/net/hamachi.c
index 1d5064a09aca..18bd9fe20d77 100644
--- a/drivers/net/hamachi.c
+++ b/drivers/net/hamachi.c
@@ -406,10 +406,9 @@ that case.
406/* A few values that may be tweaked. */ 406/* A few values that may be tweaked. */
407/* Size of each temporary Rx buffer, calculated as: 407/* Size of each temporary Rx buffer, calculated as:
408 * 1518 bytes (ethernet packet) + 2 bytes (to get 8 byte alignment for 408 * 1518 bytes (ethernet packet) + 2 bytes (to get 8 byte alignment for
409 * the card) + 8 bytes of status info + 8 bytes for the Rx Checksum + 409 * the card) + 8 bytes of status info + 8 bytes for the Rx Checksum
410 * 2 more because we use skb_reserve.
411 */ 410 */
412#define PKT_BUF_SZ 1538 411#define PKT_BUF_SZ 1536
413 412
414/* For now, this is going to be set to the maximum size of an ethernet 413/* For now, this is going to be set to the maximum size of an ethernet
415 * packet. Eventually, we may want to make it a variable that is 414 * packet. Eventually, we may want to make it a variable that is
@@ -1151,12 +1150,13 @@ static void hamachi_tx_timeout(struct net_device *dev)
1151 } 1150 }
1152 /* Fill in the Rx buffers. Handle allocation failure gracefully. */ 1151 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1153 for (i = 0; i < RX_RING_SIZE; i++) { 1152 for (i = 0; i < RX_RING_SIZE; i++) {
1154 struct sk_buff *skb = netdev_alloc_skb(dev, hmp->rx_buf_sz); 1153 struct sk_buff *skb;
1154
1155 skb = netdev_alloc_skb_ip_align(dev, hmp->rx_buf_sz);
1155 hmp->rx_skbuff[i] = skb; 1156 hmp->rx_skbuff[i] = skb;
1156 if (skb == NULL) 1157 if (skb == NULL)
1157 break; 1158 break;
1158 1159
1159 skb_reserve(skb, 2); /* 16 byte align the IP header. */
1160 hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev, 1160 hmp->rx_ring[i].addr = cpu_to_leXX(pci_map_single(hmp->pci_dev,
1161 skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE)); 1161 skb->data, hmp->rx_buf_sz, PCI_DMA_FROMDEVICE));
1162 hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn | 1162 hmp->rx_ring[i].status_n_length = cpu_to_le32(DescOwn |
@@ -1195,7 +1195,7 @@ static void hamachi_init_ring(struct net_device *dev)
1195 * card. -KDU 1195 * card. -KDU
1196 */ 1196 */
1197 hmp->rx_buf_sz = (dev->mtu <= 1492 ? PKT_BUF_SZ : 1197 hmp->rx_buf_sz = (dev->mtu <= 1492 ? PKT_BUF_SZ :
1198 (((dev->mtu+26+7) & ~7) + 2 + 16)); 1198 (((dev->mtu+26+7) & ~7) + 16));
1199 1199
1200 /* Initialize all Rx descriptors. */ 1200 /* Initialize all Rx descriptors. */
1201 for (i = 0; i < RX_RING_SIZE; i++) { 1201 for (i = 0; i < RX_RING_SIZE; i++) {
diff --git a/drivers/net/hamradio/6pack.c b/drivers/net/hamradio/6pack.c
index fb588301a05d..689b9bd377a5 100644
--- a/drivers/net/hamradio/6pack.c
+++ b/drivers/net/hamradio/6pack.c
@@ -34,6 +34,7 @@
34#include <linux/ip.h> 34#include <linux/ip.h>
35#include <linux/tcp.h> 35#include <linux/tcp.h>
36#include <linux/semaphore.h> 36#include <linux/semaphore.h>
37#include <linux/compat.h>
37#include <asm/atomic.h> 38#include <asm/atomic.h>
38 39
39#define SIXPACK_VERSION "Revision: 0.3.0" 40#define SIXPACK_VERSION "Revision: 0.3.0"
@@ -777,6 +778,23 @@ static int sixpack_ioctl(struct tty_struct *tty, struct file *file,
777 return err; 778 return err;
778} 779}
779 780
781#ifdef CONFIG_COMPAT
782static long sixpack_compat_ioctl(struct tty_struct * tty, struct file * file,
783 unsigned int cmd, unsigned long arg)
784{
785 switch (cmd) {
786 case SIOCGIFNAME:
787 case SIOCGIFENCAP:
788 case SIOCSIFENCAP:
789 case SIOCSIFHWADDR:
790 return sixpack_ioctl(tty, file, cmd,
791 (unsigned long)compat_ptr(arg));
792 }
793
794 return -ENOIOCTLCMD;
795}
796#endif
797
780static struct tty_ldisc_ops sp_ldisc = { 798static struct tty_ldisc_ops sp_ldisc = {
781 .owner = THIS_MODULE, 799 .owner = THIS_MODULE,
782 .magic = TTY_LDISC_MAGIC, 800 .magic = TTY_LDISC_MAGIC,
@@ -784,6 +802,9 @@ static struct tty_ldisc_ops sp_ldisc = {
784 .open = sixpack_open, 802 .open = sixpack_open,
785 .close = sixpack_close, 803 .close = sixpack_close,
786 .ioctl = sixpack_ioctl, 804 .ioctl = sixpack_ioctl,
805#ifdef CONFIG_COMPAT
806 .compat_ioctl = sixpack_compat_ioctl,
807#endif
787 .receive_buf = sixpack_receive_buf, 808 .receive_buf = sixpack_receive_buf,
788 .write_wakeup = sixpack_write_wakeup, 809 .write_wakeup = sixpack_write_wakeup,
789}; 810};
diff --git a/drivers/net/hamradio/baycom_epp.c b/drivers/net/hamradio/baycom_epp.c
index 7bcaf7c66243..b3cf95d76040 100644
--- a/drivers/net/hamradio/baycom_epp.c
+++ b/drivers/net/hamradio/baycom_epp.c
@@ -595,16 +595,16 @@ static int receive(struct net_device *dev, int cnt)
595 if (!(notbitstream & (0x1fc << j))) 595 if (!(notbitstream & (0x1fc << j)))
596 state = 0; 596 state = 0;
597 597
598 /* not flag received */ 598 /* flag received */
599 else if (!(bitstream & (0x1fe << j)) != (0x0fc << j)) { 599 else if ((bitstream & (0x1fe << j)) == (0x0fc << j)) {
600 if (state) 600 if (state)
601 do_rxpacket(dev); 601 do_rxpacket(dev);
602 bc->hdlcrx.bufcnt = 0; 602 bc->hdlcrx.bufcnt = 0;
603 bc->hdlcrx.bufptr = bc->hdlcrx.buf; 603 bc->hdlcrx.bufptr = bc->hdlcrx.buf;
604 state = 1; 604 state = 1;
605 numbits = 7-j; 605 numbits = 7-j;
606 }
607 } 606 }
607 }
608 608
609 /* stuffed bit */ 609 /* stuffed bit */
610 else if (unlikely((bitstream & (0x1f8 << j)) == (0xf8 << j))) { 610 else if (unlikely((bitstream & (0x1f8 << j)) == (0xf8 << j))) {
diff --git a/drivers/net/hamradio/mkiss.c b/drivers/net/hamradio/mkiss.c
index db4b7f1603f6..7db0a1c3216c 100644
--- a/drivers/net/hamradio/mkiss.c
+++ b/drivers/net/hamradio/mkiss.c
@@ -36,6 +36,7 @@
36#include <linux/skbuff.h> 36#include <linux/skbuff.h>
37#include <linux/if_arp.h> 37#include <linux/if_arp.h>
38#include <linux/jiffies.h> 38#include <linux/jiffies.h>
39#include <linux/compat.h>
39 40
40#include <net/ax25.h> 41#include <net/ax25.h>
41 42
@@ -898,6 +899,23 @@ static int mkiss_ioctl(struct tty_struct *tty, struct file *file,
898 return err; 899 return err;
899} 900}
900 901
902#ifdef CONFIG_COMPAT
903static long mkiss_compat_ioctl(struct tty_struct *tty, struct file *file,
904 unsigned int cmd, unsigned long arg)
905{
906 switch (cmd) {
907 case SIOCGIFNAME:
908 case SIOCGIFENCAP:
909 case SIOCSIFENCAP:
910 case SIOCSIFHWADDR:
911 return mkiss_ioctl(tty, file, cmd,
912 (unsigned long)compat_ptr(arg));
913 }
914
915 return -ENOIOCTLCMD;
916}
917#endif
918
901/* 919/*
902 * Handle the 'receiver data ready' interrupt. 920 * Handle the 'receiver data ready' interrupt.
903 * This function is called by the 'tty_io' module in the kernel when 921 * This function is called by the 'tty_io' module in the kernel when
@@ -972,6 +990,9 @@ static struct tty_ldisc_ops ax_ldisc = {
972 .open = mkiss_open, 990 .open = mkiss_open,
973 .close = mkiss_close, 991 .close = mkiss_close,
974 .ioctl = mkiss_ioctl, 992 .ioctl = mkiss_ioctl,
993#ifdef CONFIG_COMPAT
994 .compat_ioctl = mkiss_compat_ioctl,
995#endif
975 .receive_buf = mkiss_receive_buf, 996 .receive_buf = mkiss_receive_buf,
976 .write_wakeup = mkiss_write_wakeup 997 .write_wakeup = mkiss_write_wakeup
977}; 998};
diff --git a/drivers/net/ibm_newemac/core.c b/drivers/net/ibm_newemac/core.c
index 3fae87559791..af117c626e73 100644
--- a/drivers/net/ibm_newemac/core.c
+++ b/drivers/net/ibm_newemac/core.c
@@ -2149,9 +2149,12 @@ static int emac_ethtool_nway_reset(struct net_device *ndev)
2149 return res; 2149 return res;
2150} 2150}
2151 2151
2152static int emac_ethtool_get_stats_count(struct net_device *ndev) 2152static int emac_ethtool_get_sset_count(struct net_device *ndev, int stringset)
2153{ 2153{
2154 return EMAC_ETHTOOL_STATS_COUNT; 2154 if (stringset == ETH_SS_STATS)
2155 return EMAC_ETHTOOL_STATS_COUNT;
2156 else
2157 return -EINVAL;
2155} 2158}
2156 2159
2157static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset, 2160static void emac_ethtool_get_strings(struct net_device *ndev, u32 stringset,
@@ -2182,7 +2185,6 @@ static void emac_ethtool_get_drvinfo(struct net_device *ndev,
2182 info->fw_version[0] = '\0'; 2185 info->fw_version[0] = '\0';
2183 sprintf(info->bus_info, "PPC 4xx EMAC-%d %s", 2186 sprintf(info->bus_info, "PPC 4xx EMAC-%d %s",
2184 dev->cell_index, dev->ofdev->node->full_name); 2187 dev->cell_index, dev->ofdev->node->full_name);
2185 info->n_stats = emac_ethtool_get_stats_count(ndev);
2186 info->regdump_len = emac_ethtool_get_regs_len(ndev); 2188 info->regdump_len = emac_ethtool_get_regs_len(ndev);
2187} 2189}
2188 2190
@@ -2202,7 +2204,7 @@ static const struct ethtool_ops emac_ethtool_ops = {
2202 .get_rx_csum = emac_ethtool_get_rx_csum, 2204 .get_rx_csum = emac_ethtool_get_rx_csum,
2203 2205
2204 .get_strings = emac_ethtool_get_strings, 2206 .get_strings = emac_ethtool_get_strings,
2205 .get_stats_count = emac_ethtool_get_stats_count, 2207 .get_sset_count = emac_ethtool_get_sset_count,
2206 .get_ethtool_stats = emac_ethtool_get_ethtool_stats, 2208 .get_ethtool_stats = emac_ethtool_get_ethtool_stats,
2207 2209
2208 .get_link = ethtool_op_get_link, 2210 .get_link = ethtool_op_get_link,
diff --git a/drivers/net/ifb.c b/drivers/net/ifb.c
index 030913f8bd26..69c25668dd63 100644
--- a/drivers/net/ifb.c
+++ b/drivers/net/ifb.c
@@ -98,13 +98,15 @@ static void ri_tasklet(unsigned long dev)
98 stats->tx_packets++; 98 stats->tx_packets++;
99 stats->tx_bytes +=skb->len; 99 stats->tx_bytes +=skb->len;
100 100
101 skb->dev = dev_get_by_index(&init_net, skb->iif); 101 rcu_read_lock();
102 skb->dev = dev_get_by_index_rcu(&init_net, skb->iif);
102 if (!skb->dev) { 103 if (!skb->dev) {
104 rcu_read_unlock();
103 dev_kfree_skb(skb); 105 dev_kfree_skb(skb);
104 stats->tx_dropped++; 106 stats->tx_dropped++;
105 break; 107 break;
106 } 108 }
107 dev_put(skb->dev); 109 rcu_read_unlock();
108 skb->iif = _dev->ifindex; 110 skb->iif = _dev->ifindex;
109 111
110 if (from & AT_EGRESS) { 112 if (from & AT_EGRESS) {
diff --git a/drivers/net/igb/e1000_82575.c b/drivers/net/igb/e1000_82575.c
index f8f5772557ce..5d345e3036a4 100644
--- a/drivers/net/igb/e1000_82575.c
+++ b/drivers/net/igb/e1000_82575.c
@@ -81,6 +81,7 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
81 break; 81 break;
82 case E1000_DEV_ID_82576: 82 case E1000_DEV_ID_82576:
83 case E1000_DEV_ID_82576_NS: 83 case E1000_DEV_ID_82576_NS:
84 case E1000_DEV_ID_82576_NS_SERDES:
84 case E1000_DEV_ID_82576_FIBER: 85 case E1000_DEV_ID_82576_FIBER:
85 case E1000_DEV_ID_82576_SERDES: 86 case E1000_DEV_ID_82576_SERDES:
86 case E1000_DEV_ID_82576_QUAD_COPPER: 87 case E1000_DEV_ID_82576_QUAD_COPPER:
@@ -240,9 +241,10 @@ static s32 igb_get_invariants_82575(struct e1000_hw *hw)
240 **/ 241 **/
241static s32 igb_acquire_phy_82575(struct e1000_hw *hw) 242static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
242{ 243{
243 u16 mask; 244 u16 mask = E1000_SWFW_PHY0_SM;
244 245
245 mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM; 246 if (hw->bus.func == E1000_FUNC_1)
247 mask = E1000_SWFW_PHY1_SM;
246 248
247 return igb_acquire_swfw_sync_82575(hw, mask); 249 return igb_acquire_swfw_sync_82575(hw, mask);
248} 250}
@@ -256,9 +258,11 @@ static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
256 **/ 258 **/
257static void igb_release_phy_82575(struct e1000_hw *hw) 259static void igb_release_phy_82575(struct e1000_hw *hw)
258{ 260{
259 u16 mask; 261 u16 mask = E1000_SWFW_PHY0_SM;
262
263 if (hw->bus.func == E1000_FUNC_1)
264 mask = E1000_SWFW_PHY1_SM;
260 265
261 mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
262 igb_release_swfw_sync_82575(hw, mask); 266 igb_release_swfw_sync_82575(hw, mask);
263} 267}
264 268
@@ -274,45 +278,23 @@ static void igb_release_phy_82575(struct e1000_hw *hw)
274static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, 278static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
275 u16 *data) 279 u16 *data)
276{ 280{
277 struct e1000_phy_info *phy = &hw->phy; 281 s32 ret_val = -E1000_ERR_PARAM;
278 u32 i, i2ccmd = 0;
279 282
280 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { 283 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
281 hw_dbg("PHY Address %u is out of range\n", offset); 284 hw_dbg("PHY Address %u is out of range\n", offset);
282 return -E1000_ERR_PARAM; 285 goto out;
283 } 286 }
284 287
285 /* 288 ret_val = hw->phy.ops.acquire(hw);
286 * Set up Op-code, Phy Address, and register address in the I2CCMD 289 if (ret_val)
287 * register. The MAC will take care of interfacing with the 290 goto out;
288 * PHY to retrieve the desired data.
289 */
290 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
291 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
292 (E1000_I2CCMD_OPCODE_READ));
293
294 wr32(E1000_I2CCMD, i2ccmd);
295 291
296 /* Poll the ready bit to see if the I2C read completed */ 292 ret_val = igb_read_phy_reg_i2c(hw, offset, data);
297 for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
298 udelay(50);
299 i2ccmd = rd32(E1000_I2CCMD);
300 if (i2ccmd & E1000_I2CCMD_READY)
301 break;
302 }
303 if (!(i2ccmd & E1000_I2CCMD_READY)) {
304 hw_dbg("I2CCMD Read did not complete\n");
305 return -E1000_ERR_PHY;
306 }
307 if (i2ccmd & E1000_I2CCMD_ERROR) {
308 hw_dbg("I2CCMD Error bit set\n");
309 return -E1000_ERR_PHY;
310 }
311 293
312 /* Need to byte-swap the 16-bit value. */ 294 hw->phy.ops.release(hw);
313 *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
314 295
315 return 0; 296out:
297 return ret_val;
316} 298}
317 299
318/** 300/**
@@ -327,47 +309,24 @@ static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
327static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset, 309static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
328 u16 data) 310 u16 data)
329{ 311{
330 struct e1000_phy_info *phy = &hw->phy; 312 s32 ret_val = -E1000_ERR_PARAM;
331 u32 i, i2ccmd = 0; 313
332 u16 phy_data_swapped;
333 314
334 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) { 315 if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
335 hw_dbg("PHY Address %d is out of range\n", offset); 316 hw_dbg("PHY Address %d is out of range\n", offset);
336 return -E1000_ERR_PARAM; 317 goto out;
337 } 318 }
338 319
339 /* Swap the data bytes for the I2C interface */ 320 ret_val = hw->phy.ops.acquire(hw);
340 phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00); 321 if (ret_val)
322 goto out;
341 323
342 /* 324 ret_val = igb_write_phy_reg_i2c(hw, offset, data);
343 * Set up Op-code, Phy Address, and register address in the I2CCMD
344 * register. The MAC will take care of interfacing with the
345 * PHY to retrieve the desired data.
346 */
347 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
348 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
349 E1000_I2CCMD_OPCODE_WRITE |
350 phy_data_swapped);
351
352 wr32(E1000_I2CCMD, i2ccmd);
353
354 /* Poll the ready bit to see if the I2C read completed */
355 for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
356 udelay(50);
357 i2ccmd = rd32(E1000_I2CCMD);
358 if (i2ccmd & E1000_I2CCMD_READY)
359 break;
360 }
361 if (!(i2ccmd & E1000_I2CCMD_READY)) {
362 hw_dbg("I2CCMD Write did not complete\n");
363 return -E1000_ERR_PHY;
364 }
365 if (i2ccmd & E1000_I2CCMD_ERROR) {
366 hw_dbg("I2CCMD Error bit set\n");
367 return -E1000_ERR_PHY;
368 }
369 325
370 return 0; 326 hw->phy.ops.release(hw);
327
328out:
329 return ret_val;
371} 330}
372 331
373/** 332/**
@@ -706,9 +665,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
706 s32 ret_val; 665 s32 ret_val;
707 u16 speed, duplex; 666 u16 speed, duplex;
708 667
709 /* SGMII link check is done through the PCS register. */ 668 if (hw->phy.media_type != e1000_media_type_copper) {
710 if ((hw->phy.media_type != e1000_media_type_copper) ||
711 (igb_sgmii_active_82575(hw))) {
712 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed, 669 ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
713 &duplex); 670 &duplex);
714 /* 671 /*
@@ -723,6 +680,7 @@ static s32 igb_check_for_link_82575(struct e1000_hw *hw)
723 680
724 return ret_val; 681 return ret_val;
725} 682}
683
726/** 684/**
727 * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex 685 * igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
728 * @hw: pointer to the HW structure 686 * @hw: pointer to the HW structure
@@ -788,13 +746,23 @@ static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
788void igb_shutdown_serdes_link_82575(struct e1000_hw *hw) 746void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
789{ 747{
790 u32 reg; 748 u32 reg;
749 u16 eeprom_data = 0;
791 750
792 if (hw->phy.media_type != e1000_media_type_internal_serdes || 751 if (hw->phy.media_type != e1000_media_type_internal_serdes ||
793 igb_sgmii_active_82575(hw)) 752 igb_sgmii_active_82575(hw))
794 return; 753 return;
795 754
796 /* if the management interface is not enabled, then power down */ 755 if (hw->bus.func == E1000_FUNC_0)
797 if (!igb_enable_mng_pass_thru(hw)) { 756 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
757 else if (hw->bus.func == E1000_FUNC_1)
758 hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
759
760 /*
761 * If APM is not enabled in the EEPROM and management interface is
762 * not enabled, then power down.
763 */
764 if (!(eeprom_data & E1000_NVM_APME_82575) &&
765 !igb_enable_mng_pass_thru(hw)) {
798 /* Disable PCS to turn off link */ 766 /* Disable PCS to turn off link */
799 reg = rd32(E1000_PCS_CFG0); 767 reg = rd32(E1000_PCS_CFG0);
800 reg &= ~E1000_PCS_CFG_PCS_EN; 768 reg &= ~E1000_PCS_CFG_PCS_EN;
@@ -908,6 +876,11 @@ static s32 igb_init_hw_82575(struct e1000_hw *hw)
908 for (i = 0; i < mac->mta_reg_count; i++) 876 for (i = 0; i < mac->mta_reg_count; i++)
909 array_wr32(E1000_MTA, i, 0); 877 array_wr32(E1000_MTA, i, 0);
910 878
879 /* Zero out the Unicast HASH table */
880 hw_dbg("Zeroing the UTA\n");
881 for (i = 0; i < mac->uta_reg_count; i++)
882 array_wr32(E1000_UTA, i, 0);
883
911 /* Setup link and flow control */ 884 /* Setup link and flow control */
912 ret_val = igb_setup_link(hw); 885 ret_val = igb_setup_link(hw);
913 886
@@ -934,7 +907,6 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
934{ 907{
935 u32 ctrl; 908 u32 ctrl;
936 s32 ret_val; 909 s32 ret_val;
937 bool link;
938 910
939 ctrl = rd32(E1000_CTRL); 911 ctrl = rd32(E1000_CTRL);
940 ctrl |= E1000_CTRL_SLU; 912 ctrl |= E1000_CTRL_SLU;
@@ -967,53 +939,19 @@ static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
967 if (ret_val) 939 if (ret_val)
968 goto out; 940 goto out;
969 941
970 if (hw->mac.autoneg) { 942 ret_val = igb_setup_copper_link(hw);
971 /*
972 * Setup autoneg and flow control advertisement
973 * and perform autonegotiation.
974 */
975 ret_val = igb_copper_link_autoneg(hw);
976 if (ret_val)
977 goto out;
978 } else {
979 /*
980 * PHY will be set to 10H, 10F, 100H or 100F
981 * depending on user settings.
982 */
983 hw_dbg("Forcing Speed and Duplex\n");
984 ret_val = hw->phy.ops.force_speed_duplex(hw);
985 if (ret_val) {
986 hw_dbg("Error Forcing Speed and Duplex\n");
987 goto out;
988 }
989 }
990
991 /*
992 * Check link status. Wait up to 100 microseconds for link to become
993 * valid.
994 */
995 ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link);
996 if (ret_val)
997 goto out;
998
999 if (link) {
1000 hw_dbg("Valid link established!!!\n");
1001 /* Config the MAC and PHY after link is up */
1002 igb_config_collision_dist(hw);
1003 ret_val = igb_config_fc_after_link_up(hw);
1004 } else {
1005 hw_dbg("Unable to establish link!!!\n");
1006 }
1007
1008out: 943out:
1009 return ret_val; 944 return ret_val;
1010} 945}
1011 946
1012/** 947/**
1013 * igb_setup_serdes_link_82575 - Setup link for fiber/serdes 948 * igb_setup_serdes_link_82575 - Setup link for serdes
1014 * @hw: pointer to the HW structure 949 * @hw: pointer to the HW structure
1015 * 950 *
1016 * Configures speed and duplex for fiber and serdes links. 951 * Configure the physical coding sub-layer (PCS) link. The PCS link is
952 * used on copper connections where the serialized gigabit media independent
953 * interface (sgmii), or serdes fiber is being used. Configures the link
954 * for auto-negotiation or forces speed/duplex.
1017 **/ 955 **/
1018static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw) 956static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1019{ 957{
@@ -1086,18 +1024,27 @@ static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
1086 */ 1024 */
1087 if (hw->mac.autoneg || igb_sgmii_active_82575(hw)) { 1025 if (hw->mac.autoneg || igb_sgmii_active_82575(hw)) {
1088 /* Set PCS register for autoneg */ 1026 /* Set PCS register for autoneg */
1089 reg |= E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */ 1027 reg |= E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */
1090 E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ 1028 E1000_PCS_LCTL_FDV_FULL | /* SerDes Full dplx */
1091 E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */ 1029 E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
1092 E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */ 1030 E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
1093 hw_dbg("Configuring Autoneg; PCS_LCTL = 0x%08X\n", reg); 1031 hw_dbg("Configuring Autoneg; PCS_LCTL = 0x%08X\n", reg);
1094 } else { 1032 } else {
1095 /* Set PCS register for forced speed */ 1033 /* Check for duplex first */
1096 reg |= E1000_PCS_LCTL_FLV_LINK_UP | /* Force link up */ 1034 if (hw->mac.forced_speed_duplex & E1000_ALL_FULL_DUPLEX)
1097 E1000_PCS_LCTL_FSV_1000 | /* Force 1000 */ 1035 reg |= E1000_PCS_LCTL_FDV_FULL;
1098 E1000_PCS_LCTL_FDV_FULL | /* SerDes Full duplex */ 1036
1099 E1000_PCS_LCTL_FSD | /* Force Speed */ 1037 /* No need to check for 1000/full since the spec states that
1100 E1000_PCS_LCTL_FORCE_LINK; /* Force Link */ 1038 * it requires autoneg to be enabled */
1039 /* Now set speed */
1040 if (hw->mac.forced_speed_duplex & E1000_ALL_100_SPEED)
1041 reg |= E1000_PCS_LCTL_FSV_100;
1042
1043 /* Force speed and force link */
1044 reg |= E1000_PCS_LCTL_FSD |
1045 E1000_PCS_LCTL_FORCE_LINK |
1046 E1000_PCS_LCTL_FLV_LINK_UP;
1047
1101 hw_dbg("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg); 1048 hw_dbg("Configuring Forced Link; PCS_LCTL = 0x%08X\n", reg);
1102 } 1049 }
1103 1050
@@ -1167,9 +1114,18 @@ static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
1167{ 1114{
1168 s32 ret_val = 0; 1115 s32 ret_val = 0;
1169 1116
1170 if (igb_check_alt_mac_addr(hw)) 1117 /*
1171 ret_val = igb_read_mac_addr(hw); 1118 * If there's an alternate MAC address place it in RAR0
1119 * so that it will override the Si installed default perm
1120 * address.
1121 */
1122 ret_val = igb_check_alt_mac_addr(hw);
1123 if (ret_val)
1124 goto out;
1125
1126 ret_val = igb_read_mac_addr(hw);
1172 1127
1128out:
1173 return ret_val; 1129 return ret_val;
1174} 1130}
1175 1131
@@ -1181,61 +1137,59 @@ static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
1181 **/ 1137 **/
1182static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw) 1138static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
1183{ 1139{
1184 u32 temp;
1185
1186 igb_clear_hw_cntrs_base(hw); 1140 igb_clear_hw_cntrs_base(hw);
1187 1141
1188 temp = rd32(E1000_PRC64); 1142 rd32(E1000_PRC64);
1189 temp = rd32(E1000_PRC127); 1143 rd32(E1000_PRC127);
1190 temp = rd32(E1000_PRC255); 1144 rd32(E1000_PRC255);
1191 temp = rd32(E1000_PRC511); 1145 rd32(E1000_PRC511);
1192 temp = rd32(E1000_PRC1023); 1146 rd32(E1000_PRC1023);
1193 temp = rd32(E1000_PRC1522); 1147 rd32(E1000_PRC1522);
1194 temp = rd32(E1000_PTC64); 1148 rd32(E1000_PTC64);
1195 temp = rd32(E1000_PTC127); 1149 rd32(E1000_PTC127);
1196 temp = rd32(E1000_PTC255); 1150 rd32(E1000_PTC255);
1197 temp = rd32(E1000_PTC511); 1151 rd32(E1000_PTC511);
1198 temp = rd32(E1000_PTC1023); 1152 rd32(E1000_PTC1023);
1199 temp = rd32(E1000_PTC1522); 1153 rd32(E1000_PTC1522);
1200 1154
1201 temp = rd32(E1000_ALGNERRC); 1155 rd32(E1000_ALGNERRC);
1202 temp = rd32(E1000_RXERRC); 1156 rd32(E1000_RXERRC);
1203 temp = rd32(E1000_TNCRS); 1157 rd32(E1000_TNCRS);
1204 temp = rd32(E1000_CEXTERR); 1158 rd32(E1000_CEXTERR);
1205 temp = rd32(E1000_TSCTC); 1159 rd32(E1000_TSCTC);
1206 temp = rd32(E1000_TSCTFC); 1160 rd32(E1000_TSCTFC);
1207 1161
1208 temp = rd32(E1000_MGTPRC); 1162 rd32(E1000_MGTPRC);
1209 temp = rd32(E1000_MGTPDC); 1163 rd32(E1000_MGTPDC);
1210 temp = rd32(E1000_MGTPTC); 1164 rd32(E1000_MGTPTC);
1211 1165
1212 temp = rd32(E1000_IAC); 1166 rd32(E1000_IAC);
1213 temp = rd32(E1000_ICRXOC); 1167 rd32(E1000_ICRXOC);
1214 1168
1215 temp = rd32(E1000_ICRXPTC); 1169 rd32(E1000_ICRXPTC);
1216 temp = rd32(E1000_ICRXATC); 1170 rd32(E1000_ICRXATC);
1217 temp = rd32(E1000_ICTXPTC); 1171 rd32(E1000_ICTXPTC);
1218 temp = rd32(E1000_ICTXATC); 1172 rd32(E1000_ICTXATC);
1219 temp = rd32(E1000_ICTXQEC); 1173 rd32(E1000_ICTXQEC);
1220 temp = rd32(E1000_ICTXQMTC); 1174 rd32(E1000_ICTXQMTC);
1221 temp = rd32(E1000_ICRXDMTC); 1175 rd32(E1000_ICRXDMTC);
1222 1176
1223 temp = rd32(E1000_CBTMPC); 1177 rd32(E1000_CBTMPC);
1224 temp = rd32(E1000_HTDPMC); 1178 rd32(E1000_HTDPMC);
1225 temp = rd32(E1000_CBRMPC); 1179 rd32(E1000_CBRMPC);
1226 temp = rd32(E1000_RPTHC); 1180 rd32(E1000_RPTHC);
1227 temp = rd32(E1000_HGPTC); 1181 rd32(E1000_HGPTC);
1228 temp = rd32(E1000_HTCBDPC); 1182 rd32(E1000_HTCBDPC);
1229 temp = rd32(E1000_HGORCL); 1183 rd32(E1000_HGORCL);
1230 temp = rd32(E1000_HGORCH); 1184 rd32(E1000_HGORCH);
1231 temp = rd32(E1000_HGOTCL); 1185 rd32(E1000_HGOTCL);
1232 temp = rd32(E1000_HGOTCH); 1186 rd32(E1000_HGOTCH);
1233 temp = rd32(E1000_LENERRS); 1187 rd32(E1000_LENERRS);
1234 1188
1235 /* This register should not be read in copper configurations */ 1189 /* This register should not be read in copper configurations */
1236 if (hw->phy.media_type == e1000_media_type_internal_serdes || 1190 if (hw->phy.media_type == e1000_media_type_internal_serdes ||
1237 igb_sgmii_active_82575(hw)) 1191 igb_sgmii_active_82575(hw))
1238 temp = rd32(E1000_SCVPC); 1192 rd32(E1000_SCVPC);
1239} 1193}
1240 1194
1241/** 1195/**
diff --git a/drivers/net/igb/e1000_82575.h b/drivers/net/igb/e1000_82575.h
index ebd146fd4e15..b3808ca49ef5 100644
--- a/drivers/net/igb/e1000_82575.h
+++ b/drivers/net/igb/e1000_82575.h
@@ -66,6 +66,8 @@ extern void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
66 E1000_EICR_RX_QUEUE3) 66 E1000_EICR_RX_QUEUE3)
67 67
68/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */ 68/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
69#define E1000_IMIREXT_SIZE_BP 0x00001000 /* Packet size bypass */
70#define E1000_IMIREXT_CTRL_BP 0x00080000 /* Bypass check of ctrl bits */
69 71
70/* Receive Descriptor - Advanced */ 72/* Receive Descriptor - Advanced */
71union e1000_adv_rx_desc { 73union e1000_adv_rx_desc {
@@ -98,6 +100,7 @@ union e1000_adv_rx_desc {
98 100
99#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0 101#define E1000_RXDADV_HDRBUFLEN_MASK 0x7FE0
100#define E1000_RXDADV_HDRBUFLEN_SHIFT 5 102#define E1000_RXDADV_HDRBUFLEN_SHIFT 5
103#define E1000_RXDADV_STAT_TS 0x10000 /* Pkt was time stamped */
101 104
102/* Transmit Descriptor - Advanced */ 105/* Transmit Descriptor - Advanced */
103union e1000_adv_tx_desc { 106union e1000_adv_tx_desc {
@@ -167,6 +170,18 @@ struct e1000_adv_tx_context_desc {
167#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */ 170#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */
168#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */ 171#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
169 172
173/* ETQF register bit definitions */
174#define E1000_ETQF_FILTER_ENABLE (1 << 26)
175#define E1000_ETQF_1588 (1 << 30)
176
177/* FTQF register bit definitions */
178#define E1000_FTQF_VF_BP 0x00008000
179#define E1000_FTQF_1588_TIME_STAMP 0x08000000
180#define E1000_FTQF_MASK 0xF0000000
181#define E1000_FTQF_MASK_PROTO_BP 0x10000000
182#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
183
184#define E1000_NVM_APME_82575 0x0400
170#define MAX_NUM_VFS 8 185#define MAX_NUM_VFS 8
171 186
172#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */ 187#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31) /* global VF LB enable */
@@ -202,8 +217,19 @@ struct e1000_adv_tx_context_desc {
202#define E1000_IOVCTL 0x05BBC 217#define E1000_IOVCTL 0x05BBC
203#define E1000_IOVCTL_REUSE_VFQ 0x00000001 218#define E1000_IOVCTL_REUSE_VFQ 0x00000001
204 219
220#define E1000_RPLOLR_STRVLAN 0x40000000
221#define E1000_RPLOLR_STRCRC 0x80000000
222
223#define E1000_DTXCTL_8023LL 0x0004
224#define E1000_DTXCTL_VLAN_ADDED 0x0008
225#define E1000_DTXCTL_OOS_ENABLE 0x0010
226#define E1000_DTXCTL_MDP_EN 0x0020
227#define E1000_DTXCTL_SPOOF_INT 0x0040
228
205#define ALL_QUEUES 0xFFFF 229#define ALL_QUEUES 0xFFFF
206 230
231/* RX packet buffer size defines */
232#define E1000_RXPBS_SIZE_MASK_82576 0x0000007F
207void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool); 233void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
208void igb_vmdq_set_replication_pf(struct e1000_hw *, bool); 234void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
209 235
diff --git a/drivers/net/igb/e1000_defines.h b/drivers/net/igb/e1000_defines.h
index cb916833f303..48fcab03b752 100644
--- a/drivers/net/igb/e1000_defines.h
+++ b/drivers/net/igb/e1000_defines.h
@@ -435,6 +435,39 @@
435/* Flow Control */ 435/* Flow Control */
436#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */ 436#define E1000_FCRTL_XONE 0x80000000 /* Enable XON frame transmission */
437 437
438#define E1000_TSYNCTXCTL_VALID 0x00000001 /* tx timestamp valid */
439#define E1000_TSYNCTXCTL_ENABLED 0x00000010 /* enable tx timestampping */
440
441#define E1000_TSYNCRXCTL_VALID 0x00000001 /* rx timestamp valid */
442#define E1000_TSYNCRXCTL_TYPE_MASK 0x0000000E /* rx type mask */
443#define E1000_TSYNCRXCTL_TYPE_L2_V2 0x00
444#define E1000_TSYNCRXCTL_TYPE_L4_V1 0x02
445#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2 0x04
446#define E1000_TSYNCRXCTL_TYPE_ALL 0x08
447#define E1000_TSYNCRXCTL_TYPE_EVENT_V2 0x0A
448#define E1000_TSYNCRXCTL_ENABLED 0x00000010 /* enable rx timestampping */
449
450#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK 0x000000FF
451#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE 0x00
452#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE 0x01
453#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE 0x02
454#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
455#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
456
457#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK 0x00000F00
458#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE 0x0000
459#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE 0x0100
460#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE 0x0200
461#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE 0x0300
462#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE 0x0800
463#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE 0x0900
464#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE 0x0A00
465#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE 0x0B00
466#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE 0x0C00
467#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE 0x0D00
468
469#define E1000_TIMINCA_16NS_SHIFT 24
470
438/* PCI Express Control */ 471/* PCI Express Control */
439#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000 472#define E1000_GCR_CMPL_TMOUT_MASK 0x0000F000
440#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000 473#define E1000_GCR_CMPL_TMOUT_10ms 0x00001000
diff --git a/drivers/net/igb/e1000_hw.h b/drivers/net/igb/e1000_hw.h
index 119869b1124d..2dc929419df0 100644
--- a/drivers/net/igb/e1000_hw.h
+++ b/drivers/net/igb/e1000_hw.h
@@ -42,6 +42,7 @@ struct e1000_hw;
42#define E1000_DEV_ID_82576_SERDES 0x10E7 42#define E1000_DEV_ID_82576_SERDES 0x10E7
43#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8 43#define E1000_DEV_ID_82576_QUAD_COPPER 0x10E8
44#define E1000_DEV_ID_82576_NS 0x150A 44#define E1000_DEV_ID_82576_NS 0x150A
45#define E1000_DEV_ID_82576_NS_SERDES 0x1518
45#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D 46#define E1000_DEV_ID_82576_SERDES_QUAD 0x150D
46#define E1000_DEV_ID_82575EB_COPPER 0x10A7 47#define E1000_DEV_ID_82575EB_COPPER 0x10A7
47#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9 48#define E1000_DEV_ID_82575EB_FIBER_SERDES 0x10A9
@@ -50,8 +51,11 @@ struct e1000_hw;
50#define E1000_REVISION_2 2 51#define E1000_REVISION_2 2
51#define E1000_REVISION_4 4 52#define E1000_REVISION_4 4
52 53
54#define E1000_FUNC_0 0
53#define E1000_FUNC_1 1 55#define E1000_FUNC_1 1
54 56
57#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1 3
58
55enum e1000_mac_type { 59enum e1000_mac_type {
56 e1000_undefined = 0, 60 e1000_undefined = 0,
57 e1000_82575, 61 e1000_82575,
@@ -70,7 +74,6 @@ enum e1000_nvm_type {
70 e1000_nvm_unknown = 0, 74 e1000_nvm_unknown = 0,
71 e1000_nvm_none, 75 e1000_nvm_none,
72 e1000_nvm_eeprom_spi, 76 e1000_nvm_eeprom_spi,
73 e1000_nvm_eeprom_microwire,
74 e1000_nvm_flash_hw, 77 e1000_nvm_flash_hw,
75 e1000_nvm_flash_sw 78 e1000_nvm_flash_sw
76}; 79};
@@ -79,8 +82,6 @@ enum e1000_nvm_override {
79 e1000_nvm_override_none = 0, 82 e1000_nvm_override_none = 0,
80 e1000_nvm_override_spi_small, 83 e1000_nvm_override_spi_small,
81 e1000_nvm_override_spi_large, 84 e1000_nvm_override_spi_large,
82 e1000_nvm_override_microwire_small,
83 e1000_nvm_override_microwire_large
84}; 85};
85 86
86enum e1000_phy_type { 87enum e1000_phy_type {
@@ -339,6 +340,7 @@ struct e1000_mac_info {
339 u16 ifs_ratio; 340 u16 ifs_ratio;
340 u16 ifs_step_size; 341 u16 ifs_step_size;
341 u16 mta_reg_count; 342 u16 mta_reg_count;
343 u16 uta_reg_count;
342 344
343 /* Maximum size of the MTA register table in all supported adapters */ 345 /* Maximum size of the MTA register table in all supported adapters */
344 #define MAX_MTA_REG 128 346 #define MAX_MTA_REG 128
diff --git a/drivers/net/igb/e1000_mac.c b/drivers/net/igb/e1000_mac.c
index 7d76bb085e10..2ad358a240bf 100644
--- a/drivers/net/igb/e1000_mac.c
+++ b/drivers/net/igb/e1000_mac.c
@@ -185,13 +185,12 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
185 } 185 }
186 186
187 if (nvm_alt_mac_addr_offset == 0xFFFF) { 187 if (nvm_alt_mac_addr_offset == 0xFFFF) {
188 ret_val = -(E1000_NOT_IMPLEMENTED); 188 /* There is no Alternate MAC Address */
189 goto out; 189 goto out;
190 } 190 }
191 191
192 if (hw->bus.func == E1000_FUNC_1) 192 if (hw->bus.func == E1000_FUNC_1)
193 nvm_alt_mac_addr_offset += ETH_ALEN/sizeof(u16); 193 nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
194
195 for (i = 0; i < ETH_ALEN; i += 2) { 194 for (i = 0; i < ETH_ALEN; i += 2) {
196 offset = nvm_alt_mac_addr_offset + (i >> 1); 195 offset = nvm_alt_mac_addr_offset + (i >> 1);
197 ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data); 196 ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
@@ -206,14 +205,16 @@ s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
206 205
207 /* if multicast bit is set, the alternate address will not be used */ 206 /* if multicast bit is set, the alternate address will not be used */
208 if (alt_mac_addr[0] & 0x01) { 207 if (alt_mac_addr[0] & 0x01) {
209 ret_val = -(E1000_NOT_IMPLEMENTED); 208 hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
210 goto out; 209 goto out;
211 } 210 }
212 211
213 for (i = 0; i < ETH_ALEN; i++) 212 /*
214 hw->mac.addr[i] = hw->mac.perm_addr[i] = alt_mac_addr[i]; 213 * We have a valid alternate MAC address, and we want to treat it the
215 214 * same as the normal permanent MAC address stored by the HW into the
216 hw->mac.ops.rar_set(hw, hw->mac.perm_addr, 0); 215 * RAR. Do this by mapping this address into RAR0.
216 */
217 hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
217 218
218out: 219out:
219 return ret_val; 220 return ret_val;
@@ -246,8 +247,15 @@ void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
246 if (rar_low || rar_high) 247 if (rar_low || rar_high)
247 rar_high |= E1000_RAH_AV; 248 rar_high |= E1000_RAH_AV;
248 249
250 /*
251 * Some bridges will combine consecutive 32-bit writes into
252 * a single burst write, which will malfunction on some parts.
253 * The flushes avoid this.
254 */
249 wr32(E1000_RAL(index), rar_low); 255 wr32(E1000_RAL(index), rar_low);
256 wrfl();
250 wr32(E1000_RAH(index), rar_high); 257 wr32(E1000_RAH(index), rar_high);
258 wrfl();
251} 259}
252 260
253/** 261/**
@@ -399,45 +407,43 @@ void igb_update_mc_addr_list(struct e1000_hw *hw,
399 **/ 407 **/
400void igb_clear_hw_cntrs_base(struct e1000_hw *hw) 408void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
401{ 409{
402 u32 temp; 410 rd32(E1000_CRCERRS);
403 411 rd32(E1000_SYMERRS);
404 temp = rd32(E1000_CRCERRS); 412 rd32(E1000_MPC);
405 temp = rd32(E1000_SYMERRS); 413 rd32(E1000_SCC);
406 temp = rd32(E1000_MPC); 414 rd32(E1000_ECOL);
407 temp = rd32(E1000_SCC); 415 rd32(E1000_MCC);
408 temp = rd32(E1000_ECOL); 416 rd32(E1000_LATECOL);
409 temp = rd32(E1000_MCC); 417 rd32(E1000_COLC);
410 temp = rd32(E1000_LATECOL); 418 rd32(E1000_DC);
411 temp = rd32(E1000_COLC); 419 rd32(E1000_SEC);
412 temp = rd32(E1000_DC); 420 rd32(E1000_RLEC);
413 temp = rd32(E1000_SEC); 421 rd32(E1000_XONRXC);
414 temp = rd32(E1000_RLEC); 422 rd32(E1000_XONTXC);
415 temp = rd32(E1000_XONRXC); 423 rd32(E1000_XOFFRXC);
416 temp = rd32(E1000_XONTXC); 424 rd32(E1000_XOFFTXC);
417 temp = rd32(E1000_XOFFRXC); 425 rd32(E1000_FCRUC);
418 temp = rd32(E1000_XOFFTXC); 426 rd32(E1000_GPRC);
419 temp = rd32(E1000_FCRUC); 427 rd32(E1000_BPRC);
420 temp = rd32(E1000_GPRC); 428 rd32(E1000_MPRC);
421 temp = rd32(E1000_BPRC); 429 rd32(E1000_GPTC);
422 temp = rd32(E1000_MPRC); 430 rd32(E1000_GORCL);
423 temp = rd32(E1000_GPTC); 431 rd32(E1000_GORCH);
424 temp = rd32(E1000_GORCL); 432 rd32(E1000_GOTCL);
425 temp = rd32(E1000_GORCH); 433 rd32(E1000_GOTCH);
426 temp = rd32(E1000_GOTCL); 434 rd32(E1000_RNBC);
427 temp = rd32(E1000_GOTCH); 435 rd32(E1000_RUC);
428 temp = rd32(E1000_RNBC); 436 rd32(E1000_RFC);
429 temp = rd32(E1000_RUC); 437 rd32(E1000_ROC);
430 temp = rd32(E1000_RFC); 438 rd32(E1000_RJC);
431 temp = rd32(E1000_ROC); 439 rd32(E1000_TORL);
432 temp = rd32(E1000_RJC); 440 rd32(E1000_TORH);
433 temp = rd32(E1000_TORL); 441 rd32(E1000_TOTL);
434 temp = rd32(E1000_TORH); 442 rd32(E1000_TOTH);
435 temp = rd32(E1000_TOTL); 443 rd32(E1000_TPR);
436 temp = rd32(E1000_TOTH); 444 rd32(E1000_TPT);
437 temp = rd32(E1000_TPR); 445 rd32(E1000_MPTC);
438 temp = rd32(E1000_TPT); 446 rd32(E1000_BPTC);
439 temp = rd32(E1000_MPTC);
440 temp = rd32(E1000_BPTC);
441} 447}
442 448
443/** 449/**
diff --git a/drivers/net/igb/e1000_mbx.c b/drivers/net/igb/e1000_mbx.c
index ed9058eca45c..c474cdb70047 100644
--- a/drivers/net/igb/e1000_mbx.c
+++ b/drivers/net/igb/e1000_mbx.c
@@ -143,12 +143,16 @@ static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
143 if (!countdown || !mbx->ops.check_for_msg) 143 if (!countdown || !mbx->ops.check_for_msg)
144 goto out; 144 goto out;
145 145
146 while (mbx->ops.check_for_msg(hw, mbx_id)) { 146 while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
147 countdown--; 147 countdown--;
148 if (!countdown) 148 if (!countdown)
149 break; 149 break;
150 udelay(mbx->usec_delay); 150 udelay(mbx->usec_delay);
151 } 151 }
152
153 /* if we failed, all future posted messages fail until reset */
154 if (!countdown)
155 mbx->timeout = 0;
152out: 156out:
153 return countdown ? 0 : -E1000_ERR_MBX; 157 return countdown ? 0 : -E1000_ERR_MBX;
154} 158}
@@ -168,12 +172,16 @@ static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
168 if (!countdown || !mbx->ops.check_for_ack) 172 if (!countdown || !mbx->ops.check_for_ack)
169 goto out; 173 goto out;
170 174
171 while (mbx->ops.check_for_ack(hw, mbx_id)) { 175 while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
172 countdown--; 176 countdown--;
173 if (!countdown) 177 if (!countdown)
174 break; 178 break;
175 udelay(mbx->usec_delay); 179 udelay(mbx->usec_delay);
176 } 180 }
181
182 /* if we failed, all future posted messages fail until reset */
183 if (!countdown)
184 mbx->timeout = 0;
177out: 185out:
178 return countdown ? 0 : -E1000_ERR_MBX; 186 return countdown ? 0 : -E1000_ERR_MBX;
179} 187}
@@ -217,12 +225,13 @@ out:
217static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id) 225static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
218{ 226{
219 struct e1000_mbx_info *mbx = &hw->mbx; 227 struct e1000_mbx_info *mbx = &hw->mbx;
220 s32 ret_val = 0; 228 s32 ret_val = -E1000_ERR_MBX;
221 229
222 if (!mbx->ops.write) 230 /* exit if either we can't write or there isn't a defined timeout */
231 if (!mbx->ops.write || !mbx->timeout)
223 goto out; 232 goto out;
224 233
225 /* send msg*/ 234 /* send msg */
226 ret_val = mbx->ops.write(hw, msg, size, mbx_id); 235 ret_val = mbx->ops.write(hw, msg, size, mbx_id);
227 236
228 /* if msg sent wait until we receive an ack */ 237 /* if msg sent wait until we receive an ack */
@@ -305,6 +314,30 @@ static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
305} 314}
306 315
307/** 316/**
317 * igb_obtain_mbx_lock_pf - obtain mailbox lock
318 * @hw: pointer to the HW structure
319 * @vf_number: the VF index
320 *
321 * return SUCCESS if we obtained the mailbox lock
322 **/
323static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
324{
325 s32 ret_val = -E1000_ERR_MBX;
326 u32 p2v_mailbox;
327
328
329 /* Take ownership of the buffer */
330 wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
331
332 /* reserve mailbox for vf use */
333 p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
334 if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
335 ret_val = 0;
336
337 return ret_val;
338}
339
340/**
308 * igb_write_mbx_pf - Places a message in the mailbox 341 * igb_write_mbx_pf - Places a message in the mailbox
309 * @hw: pointer to the HW structure 342 * @hw: pointer to the HW structure
310 * @msg: The message buffer 343 * @msg: The message buffer
@@ -316,27 +349,17 @@ static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
316static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, 349static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
317 u16 vf_number) 350 u16 vf_number)
318{ 351{
319 u32 p2v_mailbox; 352 s32 ret_val;
320 s32 ret_val = 0;
321 u16 i; 353 u16 i;
322 354
323 /* Take ownership of the buffer */ 355 /* lock the mailbox to prevent pf/vf race condition */
324 wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); 356 ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
325 357 if (ret_val)
326 /* Make sure we have ownership now... */
327 p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
328 if (!(p2v_mailbox & E1000_P2VMAILBOX_PFU)) {
329 /* failed to grab ownership */
330 ret_val = -E1000_ERR_MBX;
331 goto out_no_write; 358 goto out_no_write;
332 }
333 359
334 /* 360 /* flush msg and acks as we are overwriting the message buffer */
335 * flush any ack or msg which may already be in the queue
336 * as they are likely the result of an error
337 */
338 igb_check_for_ack_pf(hw, vf_number);
339 igb_check_for_msg_pf(hw, vf_number); 361 igb_check_for_msg_pf(hw, vf_number);
362 igb_check_for_ack_pf(hw, vf_number);
340 363
341 /* copy the caller specified message to the mailbox memory buffer */ 364 /* copy the caller specified message to the mailbox memory buffer */
342 for (i = 0; i < size; i++) 365 for (i = 0; i < size; i++)
@@ -367,20 +390,13 @@ out_no_write:
367static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size, 390static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
368 u16 vf_number) 391 u16 vf_number)
369{ 392{
370 u32 p2v_mailbox; 393 s32 ret_val;
371 s32 ret_val = 0;
372 u16 i; 394 u16 i;
373 395
374 /* Take ownership of the buffer */ 396 /* lock the mailbox to prevent pf/vf race condition */
375 wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU); 397 ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
376 398 if (ret_val)
377 /* Make sure we have ownership now... */
378 p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
379 if (!(p2v_mailbox & E1000_P2VMAILBOX_PFU)) {
380 /* failed to grab ownership */
381 ret_val = -E1000_ERR_MBX;
382 goto out_no_read; 399 goto out_no_read;
383 }
384 400
385 /* copy the message to the mailbox memory buffer */ 401 /* copy the message to the mailbox memory buffer */
386 for (i = 0; i < size; i++) 402 for (i = 0; i < size; i++)
@@ -392,8 +408,6 @@ static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
392 /* update stats */ 408 /* update stats */
393 hw->mbx.stats.msgs_rx++; 409 hw->mbx.stats.msgs_rx++;
394 410
395 ret_val = 0;
396
397out_no_read: 411out_no_read:
398 return ret_val; 412 return ret_val;
399} 413}
diff --git a/drivers/net/igb/e1000_mbx.h b/drivers/net/igb/e1000_mbx.h
index ebc02ea3f198..bb112fb6c3a1 100644
--- a/drivers/net/igb/e1000_mbx.h
+++ b/drivers/net/igb/e1000_mbx.h
@@ -58,10 +58,12 @@
58#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT) 58#define E1000_VT_MSGINFO_MASK (0xFF << E1000_VT_MSGINFO_SHIFT)
59 59
60#define E1000_VF_RESET 0x01 /* VF requests reset */ 60#define E1000_VF_RESET 0x01 /* VF requests reset */
61#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests PF to set MAC addr */ 61#define E1000_VF_SET_MAC_ADDR 0x02 /* VF requests to set MAC addr */
62#define E1000_VF_SET_MULTICAST 0x03 /* VF requests PF to set MC addr */ 62#define E1000_VF_SET_MULTICAST 0x03 /* VF requests to set MC addr */
63#define E1000_VF_SET_VLAN 0x04 /* VF requests PF to set VLAN */ 63#define E1000_VF_SET_VLAN 0x04 /* VF requests to set VLAN */
64#define E1000_VF_SET_LPE 0x05 /* VF requests PF to set VMOLR.LPE */ 64#define E1000_VF_SET_LPE 0x05 /* VF requests to set VMOLR.LPE */
65#define E1000_VF_SET_PROMISC 0x06 /*VF requests to clear VMOLR.ROPE/MPME*/
66#define E1000_VF_SET_PROMISC_MULTICAST (0x02 << E1000_VT_MSGINFO_SHIFT)
65 67
66#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */ 68#define E1000_PF_CONTROL_MSG 0x0100 /* PF control message */
67 69
diff --git a/drivers/net/igb/e1000_nvm.c b/drivers/net/igb/e1000_nvm.c
index a88bfe2f1e8f..d83b77fa4038 100644
--- a/drivers/net/igb/e1000_nvm.c
+++ b/drivers/net/igb/e1000_nvm.c
@@ -78,9 +78,7 @@ static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
78 u32 mask; 78 u32 mask;
79 79
80 mask = 0x01 << (count - 1); 80 mask = 0x01 << (count - 1);
81 if (nvm->type == e1000_nvm_eeprom_microwire) 81 if (nvm->type == e1000_nvm_eeprom_spi)
82 eecd &= ~E1000_EECD_DO;
83 else if (nvm->type == e1000_nvm_eeprom_spi)
84 eecd |= E1000_EECD_DO; 82 eecd |= E1000_EECD_DO;
85 83
86 do { 84 do {
@@ -220,22 +218,7 @@ static void igb_standby_nvm(struct e1000_hw *hw)
220 struct e1000_nvm_info *nvm = &hw->nvm; 218 struct e1000_nvm_info *nvm = &hw->nvm;
221 u32 eecd = rd32(E1000_EECD); 219 u32 eecd = rd32(E1000_EECD);
222 220
223 if (nvm->type == e1000_nvm_eeprom_microwire) { 221 if (nvm->type == e1000_nvm_eeprom_spi) {
224 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
225 wr32(E1000_EECD, eecd);
226 wrfl();
227 udelay(nvm->delay_usec);
228
229 igb_raise_eec_clk(hw, &eecd);
230
231 /* Select EEPROM */
232 eecd |= E1000_EECD_CS;
233 wr32(E1000_EECD, eecd);
234 wrfl();
235 udelay(nvm->delay_usec);
236
237 igb_lower_eec_clk(hw, &eecd);
238 } else if (nvm->type == e1000_nvm_eeprom_spi) {
239 /* Toggle CS to flush commands */ 222 /* Toggle CS to flush commands */
240 eecd |= E1000_EECD_CS; 223 eecd |= E1000_EECD_CS;
241 wr32(E1000_EECD, eecd); 224 wr32(E1000_EECD, eecd);
@@ -263,12 +246,6 @@ static void e1000_stop_nvm(struct e1000_hw *hw)
263 /* Pull CS high */ 246 /* Pull CS high */
264 eecd |= E1000_EECD_CS; 247 eecd |= E1000_EECD_CS;
265 igb_lower_eec_clk(hw, &eecd); 248 igb_lower_eec_clk(hw, &eecd);
266 } else if (hw->nvm.type == e1000_nvm_eeprom_microwire) {
267 /* CS on Microcwire is active-high */
268 eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
269 wr32(E1000_EECD, eecd);
270 igb_raise_eec_clk(hw, &eecd);
271 igb_lower_eec_clk(hw, &eecd);
272 } 249 }
273} 250}
274 251
@@ -304,14 +281,7 @@ static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
304 u8 spi_stat_reg; 281 u8 spi_stat_reg;
305 282
306 283
307 if (nvm->type == e1000_nvm_eeprom_microwire) { 284 if (nvm->type == e1000_nvm_eeprom_spi) {
308 /* Clear SK and DI */
309 eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
310 wr32(E1000_EECD, eecd);
311 /* Set CS */
312 eecd |= E1000_EECD_CS;
313 wr32(E1000_EECD, eecd);
314 } else if (nvm->type == e1000_nvm_eeprom_spi) {
315 /* Clear SK and CS */ 285 /* Clear SK and CS */
316 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK); 286 eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
317 wr32(E1000_EECD, eecd); 287 wr32(E1000_EECD, eecd);
diff --git a/drivers/net/igb/e1000_phy.c b/drivers/net/igb/e1000_phy.c
index ee460600e74b..83b706c460b3 100644
--- a/drivers/net/igb/e1000_phy.c
+++ b/drivers/net/igb/e1000_phy.c
@@ -39,6 +39,9 @@ static s32 igb_wait_autoneg(struct e1000_hw *hw);
39/* Cable length tables */ 39/* Cable length tables */
40static const u16 e1000_m88_cable_length_table[] = 40static const u16 e1000_m88_cable_length_table[] =
41 { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED }; 41 { 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
42#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
43 (sizeof(e1000_m88_cable_length_table) / \
44 sizeof(e1000_m88_cable_length_table[0]))
42 45
43static const u16 e1000_igp_2_cable_length_table[] = 46static const u16 e1000_igp_2_cable_length_table[] =
44 { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 47 { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
@@ -109,7 +112,10 @@ out:
109 **/ 112 **/
110static s32 igb_phy_reset_dsp(struct e1000_hw *hw) 113static s32 igb_phy_reset_dsp(struct e1000_hw *hw)
111{ 114{
112 s32 ret_val; 115 s32 ret_val = 0;
116
117 if (!(hw->phy.ops.write_reg))
118 goto out;
113 119
114 ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1); 120 ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
115 if (ret_val) 121 if (ret_val)
@@ -239,6 +245,103 @@ out:
239} 245}
240 246
241/** 247/**
248 * igb_read_phy_reg_i2c - Read PHY register using i2c
249 * @hw: pointer to the HW structure
250 * @offset: register offset to be read
251 * @data: pointer to the read data
252 *
253 * Reads the PHY register at offset using the i2c interface and stores the
254 * retrieved information in data.
255 **/
256s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
257{
258 struct e1000_phy_info *phy = &hw->phy;
259 u32 i, i2ccmd = 0;
260
261
262 /*
263 * Set up Op-code, Phy Address, and register address in the I2CCMD
264 * register. The MAC will take care of interfacing with the
265 * PHY to retrieve the desired data.
266 */
267 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
268 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
269 (E1000_I2CCMD_OPCODE_READ));
270
271 wr32(E1000_I2CCMD, i2ccmd);
272
273 /* Poll the ready bit to see if the I2C read completed */
274 for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
275 udelay(50);
276 i2ccmd = rd32(E1000_I2CCMD);
277 if (i2ccmd & E1000_I2CCMD_READY)
278 break;
279 }
280 if (!(i2ccmd & E1000_I2CCMD_READY)) {
281 hw_dbg("I2CCMD Read did not complete\n");
282 return -E1000_ERR_PHY;
283 }
284 if (i2ccmd & E1000_I2CCMD_ERROR) {
285 hw_dbg("I2CCMD Error bit set\n");
286 return -E1000_ERR_PHY;
287 }
288
289 /* Need to byte-swap the 16-bit value. */
290 *data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
291
292 return 0;
293}
294
295/**
296 * igb_write_phy_reg_i2c - Write PHY register using i2c
297 * @hw: pointer to the HW structure
298 * @offset: register offset to write to
299 * @data: data to write at register offset
300 *
301 * Writes the data to PHY register at the offset using the i2c interface.
302 **/
303s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
304{
305 struct e1000_phy_info *phy = &hw->phy;
306 u32 i, i2ccmd = 0;
307 u16 phy_data_swapped;
308
309
310 /* Swap the data bytes for the I2C interface */
311 phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
312
313 /*
314 * Set up Op-code, Phy Address, and register address in the I2CCMD
315 * register. The MAC will take care of interfacing with the
316 * PHY to retrieve the desired data.
317 */
318 i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
319 (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
320 E1000_I2CCMD_OPCODE_WRITE |
321 phy_data_swapped);
322
323 wr32(E1000_I2CCMD, i2ccmd);
324
325 /* Poll the ready bit to see if the I2C read completed */
326 for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
327 udelay(50);
328 i2ccmd = rd32(E1000_I2CCMD);
329 if (i2ccmd & E1000_I2CCMD_READY)
330 break;
331 }
332 if (!(i2ccmd & E1000_I2CCMD_READY)) {
333 hw_dbg("I2CCMD Write did not complete\n");
334 return -E1000_ERR_PHY;
335 }
336 if (i2ccmd & E1000_I2CCMD_ERROR) {
337 hw_dbg("I2CCMD Error bit set\n");
338 return -E1000_ERR_PHY;
339 }
340
341 return 0;
342}
343
344/**
242 * igb_read_phy_reg_igp - Read igp PHY register 345 * igb_read_phy_reg_igp - Read igp PHY register
243 * @hw: pointer to the HW structure 346 * @hw: pointer to the HW structure
244 * @offset: register offset to be read 347 * @offset: register offset to be read
@@ -572,7 +675,7 @@ out:
572 * and restart the negotiation process between the link partner. If 675 * and restart the negotiation process between the link partner. If
573 * autoneg_wait_to_complete, then wait for autoneg to complete before exiting. 676 * autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
574 **/ 677 **/
575s32 igb_copper_link_autoneg(struct e1000_hw *hw) 678static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
576{ 679{
577 struct e1000_phy_info *phy = &hw->phy; 680 struct e1000_phy_info *phy = &hw->phy;
578 s32 ret_val; 681 s32 ret_val;
@@ -796,6 +899,65 @@ out:
796} 899}
797 900
798/** 901/**
902 * igb_setup_copper_link - Configure copper link settings
903 * @hw: pointer to the HW structure
904 *
905 * Calls the appropriate function to configure the link for auto-neg or forced
906 * speed and duplex. Then we check for link, once link is established calls
907 * to configure collision distance and flow control are called. If link is
908 * not established, we return -E1000_ERR_PHY (-2).
909 **/
910s32 igb_setup_copper_link(struct e1000_hw *hw)
911{
912 s32 ret_val;
913 bool link;
914
915
916 if (hw->mac.autoneg) {
917 /*
918 * Setup autoneg and flow control advertisement and perform
919 * autonegotiation.
920 */
921 ret_val = igb_copper_link_autoneg(hw);
922 if (ret_val)
923 goto out;
924 } else {
925 /*
926 * PHY will be set to 10H, 10F, 100H or 100F
927 * depending on user settings.
928 */
929 hw_dbg("Forcing Speed and Duplex\n");
930 ret_val = hw->phy.ops.force_speed_duplex(hw);
931 if (ret_val) {
932 hw_dbg("Error Forcing Speed and Duplex\n");
933 goto out;
934 }
935 }
936
937 /*
938 * Check link status. Wait up to 100 microseconds for link to become
939 * valid.
940 */
941 ret_val = igb_phy_has_link(hw,
942 COPPER_LINK_UP_LIMIT,
943 10,
944 &link);
945 if (ret_val)
946 goto out;
947
948 if (link) {
949 hw_dbg("Valid link established!!!\n");
950 igb_config_collision_dist(hw);
951 ret_val = igb_config_fc_after_link_up(hw);
952 } else {
953 hw_dbg("Unable to establish link!!!\n");
954 }
955
956out:
957 return ret_val;
958}
959
960/**
799 * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY 961 * igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
800 * @hw: pointer to the HW structure 962 * @hw: pointer to the HW structure
801 * 963 *
@@ -903,22 +1065,19 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
903 1065
904 igb_phy_force_speed_duplex_setup(hw, &phy_data); 1066 igb_phy_force_speed_duplex_setup(hw, &phy_data);
905 1067
906 /* Reset the phy to commit changes. */
907 phy_data |= MII_CR_RESET;
908
909 ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data); 1068 ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
910 if (ret_val) 1069 if (ret_val)
911 goto out; 1070 goto out;
912 1071
913 udelay(1); 1072 /* Reset the phy to commit changes. */
1073 ret_val = igb_phy_sw_reset(hw);
1074 if (ret_val)
1075 goto out;
914 1076
915 if (phy->autoneg_wait_to_complete) { 1077 if (phy->autoneg_wait_to_complete) {
916 hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n"); 1078 hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
917 1079
918 ret_val = igb_phy_has_link(hw, 1080 ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
919 PHY_FORCE_LIMIT,
920 100000,
921 &link);
922 if (ret_val) 1081 if (ret_val)
923 goto out; 1082 goto out;
924 1083
@@ -928,8 +1087,8 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
928 * Reset the DSP and cross our fingers. 1087 * Reset the DSP and cross our fingers.
929 */ 1088 */
930 ret_val = phy->ops.write_reg(hw, 1089 ret_val = phy->ops.write_reg(hw,
931 M88E1000_PHY_PAGE_SELECT, 1090 M88E1000_PHY_PAGE_SELECT,
932 0x001d); 1091 0x001d);
933 if (ret_val) 1092 if (ret_val)
934 goto out; 1093 goto out;
935 ret_val = igb_phy_reset_dsp(hw); 1094 ret_val = igb_phy_reset_dsp(hw);
@@ -939,7 +1098,7 @@ s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
939 1098
940 /* Try once more */ 1099 /* Try once more */
941 ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 1100 ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT,
942 100000, &link); 1101 100000, &link);
943 if (ret_val) 1102 if (ret_val)
944 goto out; 1103 goto out;
945 } 1104 }
@@ -1051,9 +1210,12 @@ static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
1051s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active) 1210s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
1052{ 1211{
1053 struct e1000_phy_info *phy = &hw->phy; 1212 struct e1000_phy_info *phy = &hw->phy;
1054 s32 ret_val; 1213 s32 ret_val = 0;
1055 u16 data; 1214 u16 data;
1056 1215
1216 if (!(hw->phy.ops.read_reg))
1217 goto out;
1218
1057 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data); 1219 ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
1058 if (ret_val) 1220 if (ret_val)
1059 goto out; 1221 goto out;
@@ -1288,8 +1450,14 @@ s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
1288 * it across the board. 1450 * it across the board.
1289 */ 1451 */
1290 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); 1452 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
1291 if (ret_val) 1453 if (ret_val) {
1292 break; 1454 /*
1455 * If the first read fails, another entity may have
1456 * ownership of the resources, wait and try again to
1457 * see if they have relinquished the resources yet.
1458 */
1459 udelay(usec_interval);
1460 }
1293 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status); 1461 ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
1294 if (ret_val) 1462 if (ret_val)
1295 break; 1463 break;
@@ -1333,8 +1501,13 @@ s32 igb_get_cable_length_m88(struct e1000_hw *hw)
1333 1501
1334 index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >> 1502 index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
1335 M88E1000_PSSR_CABLE_LENGTH_SHIFT; 1503 M88E1000_PSSR_CABLE_LENGTH_SHIFT;
1504 if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
1505 ret_val = -E1000_ERR_PHY;
1506 goto out;
1507 }
1508
1336 phy->min_cable_length = e1000_m88_cable_length_table[index]; 1509 phy->min_cable_length = e1000_m88_cable_length_table[index];
1337 phy->max_cable_length = e1000_m88_cable_length_table[index+1]; 1510 phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
1338 1511
1339 phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2; 1512 phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
1340 1513
diff --git a/drivers/net/igb/e1000_phy.h b/drivers/net/igb/e1000_phy.h
index ebe4b616db8a..adb9436b7336 100644
--- a/drivers/net/igb/e1000_phy.h
+++ b/drivers/net/igb/e1000_phy.h
@@ -43,7 +43,6 @@ enum e1000_smart_speed {
43 43
44s32 igb_check_downshift(struct e1000_hw *hw); 44s32 igb_check_downshift(struct e1000_hw *hw);
45s32 igb_check_reset_block(struct e1000_hw *hw); 45s32 igb_check_reset_block(struct e1000_hw *hw);
46s32 igb_copper_link_autoneg(struct e1000_hw *hw);
47s32 igb_copper_link_setup_igp(struct e1000_hw *hw); 46s32 igb_copper_link_setup_igp(struct e1000_hw *hw);
48s32 igb_copper_link_setup_m88(struct e1000_hw *hw); 47s32 igb_copper_link_setup_m88(struct e1000_hw *hw);
49s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw); 48s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw);
@@ -57,10 +56,13 @@ s32 igb_phy_sw_reset(struct e1000_hw *hw);
57s32 igb_phy_hw_reset(struct e1000_hw *hw); 56s32 igb_phy_hw_reset(struct e1000_hw *hw);
58s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data); 57s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
59s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active); 58s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active);
59s32 igb_setup_copper_link(struct e1000_hw *hw);
60s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data); 60s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
61s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations, 61s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
62 u32 usec_interval, bool *success); 62 u32 usec_interval, bool *success);
63s32 igb_phy_init_script_igp3(struct e1000_hw *hw); 63s32 igb_phy_init_script_igp3(struct e1000_hw *hw);
64s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
65s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
64 66
65/* IGP01E1000 Specific Registers */ 67/* IGP01E1000 Specific Registers */
66#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */ 68#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* Port Config */
diff --git a/drivers/net/igb/e1000_regs.h b/drivers/net/igb/e1000_regs.h
index 345d1442d6d6..934e03b053ac 100644
--- a/drivers/net/igb/e1000_regs.h
+++ b/drivers/net/igb/e1000_regs.h
@@ -76,59 +76,18 @@
76#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */ 76#define E1000_FCRTV 0x02460 /* Flow Control Refresh Timer Value - RW */
77 77
78/* IEEE 1588 TIMESYNCH */ 78/* IEEE 1588 TIMESYNCH */
79#define E1000_TSYNCTXCTL 0x0B614 79#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
80#define E1000_TSYNCTXCTL_VALID (1<<0) 80#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
81#define E1000_TSYNCTXCTL_ENABLED (1<<4) 81#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
82#define E1000_TSYNCRXCTL 0x0B620 82#define E1000_RXSTMPL 0x0B624 /* Rx timestamp Low - RO */
83#define E1000_TSYNCRXCTL_VALID (1<<0) 83#define E1000_RXSTMPH 0x0B628 /* Rx timestamp High - RO */
84#define E1000_TSYNCRXCTL_ENABLED (1<<4) 84#define E1000_RXSATRL 0x0B62C /* Rx timestamp attribute low - RO */
85enum { 85#define E1000_RXSATRH 0x0B630 /* Rx timestamp attribute high - RO */
86 E1000_TSYNCRXCTL_TYPE_L2_V2 = 0, 86#define E1000_TXSTMPL 0x0B618 /* Tx timestamp value Low - RO */
87 E1000_TSYNCRXCTL_TYPE_L4_V1 = (1<<1), 87#define E1000_TXSTMPH 0x0B61C /* Tx timestamp value High - RO */
88 E1000_TSYNCRXCTL_TYPE_L2_L4_V2 = (1<<2), 88#define E1000_SYSTIML 0x0B600 /* System time register Low - RO */
89 E1000_TSYNCRXCTL_TYPE_ALL = (1<<3), 89#define E1000_SYSTIMH 0x0B604 /* System time register High - RO */
90 E1000_TSYNCRXCTL_TYPE_EVENT_V2 = (1<<3) | (1<<1), 90#define E1000_TIMINCA 0x0B608 /* Increment attributes register - RW */
91};
92#define E1000_TSYNCRXCFG 0x05F50
93enum {
94 E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE = 0<<0,
95 E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE = 1<<0,
96 E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE = 2<<0,
97 E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE = 3<<0,
98 E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE = 4<<0,
99
100 E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE = 0<<8,
101 E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE = 1<<8,
102 E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE = 2<<8,
103 E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE = 3<<8,
104 E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE = 8<<8,
105 E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE = 9<<8,
106 E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE = 0xA<<8,
107 E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE = 0xB<<8,
108 E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE = 0xC<<8,
109 E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE = 0xD<<8,
110};
111#define E1000_SYSTIML 0x0B600
112#define E1000_SYSTIMH 0x0B604
113#define E1000_TIMINCA 0x0B608
114
115#define E1000_RXMTRL 0x0B634
116#define E1000_RXSTMPL 0x0B624
117#define E1000_RXSTMPH 0x0B628
118#define E1000_RXSATRL 0x0B62C
119#define E1000_RXSATRH 0x0B630
120
121#define E1000_TXSTMPL 0x0B618
122#define E1000_TXSTMPH 0x0B61C
123
124#define E1000_ETQF0 0x05CB0
125#define E1000_ETQF1 0x05CB4
126#define E1000_ETQF2 0x05CB8
127#define E1000_ETQF3 0x05CBC
128#define E1000_ETQF4 0x05CC0
129#define E1000_ETQF5 0x05CC4
130#define E1000_ETQF6 0x05CC8
131#define E1000_ETQF7 0x05CCC
132 91
133/* Filtering Registers */ 92/* Filtering Registers */
134#define E1000_SAQF(_n) (0x5980 + 4 * (_n)) 93#define E1000_SAQF(_n) (0x5980 + 4 * (_n))
@@ -143,7 +102,9 @@ enum {
143#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */ 102#define E1000_ETQF(_n) (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
144 103
145#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40)) 104#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
105
146/* Split and Replication RX Control - RW */ 106/* Split and Replication RX Control - RW */
107#define E1000_RXPBS 0x02404 /* Rx Packet Buffer Size - RW */
147/* 108/*
148 * Convenience macros 109 * Convenience macros
149 * 110 *
@@ -288,10 +249,17 @@ enum {
288#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */ 249#define E1000_MTA 0x05200 /* Multicast Table Array - RW Array */
289#define E1000_RA 0x05400 /* Receive Address - RW Array */ 250#define E1000_RA 0x05400 /* Receive Address - RW Array */
290#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */ 251#define E1000_RA2 0x054E0 /* 2nd half of receive address array - RW Array */
252#define E1000_PSRTYPE(_i) (0x05480 + ((_i) * 4))
291#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \ 253#define E1000_RAL(_i) (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
292 (0x054E0 + ((_i - 16) * 8))) 254 (0x054E0 + ((_i - 16) * 8)))
293#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \ 255#define E1000_RAH(_i) (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
294 (0x054E4 + ((_i - 16) * 8))) 256 (0x054E4 + ((_i - 16) * 8)))
257#define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
258#define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
259#define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
260#define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
261#define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
262#define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
295#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */ 263#define E1000_VFTA 0x05600 /* VLAN Filter Table Array - RW Array */
296#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */ 264#define E1000_VT_CTL 0x0581C /* VMDq Control - RW */
297#define E1000_WUC 0x05800 /* Wakeup Control - RW */ 265#define E1000_WUC 0x05800 /* Wakeup Control - RW */
@@ -331,6 +299,7 @@ enum {
331#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */ 299#define E1000_QDE 0x02408 /* Queue Drop Enable - RW */
332#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */ 300#define E1000_DTXSWC 0x03500 /* DMA Tx Switch Control - RW */
333#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */ 301#define E1000_RPLOLR 0x05AF0 /* Replication Offload - RW */
302#define E1000_UTA 0x0A000 /* Unicast Table Array - RW */
334#define E1000_IOVTCL 0x05BBC /* IOV Control Register */ 303#define E1000_IOVTCL 0x05BBC /* IOV Control Register */
335/* These act per VF so an array friendly macro is used */ 304/* These act per VF so an array friendly macro is used */
336#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n))) 305#define E1000_P2VMAILBOX(_n) (0x00C00 + (4 * (_n)))
diff --git a/drivers/net/igb/igb.h b/drivers/net/igb/igb.h
index 7126fea26fec..63abd1c0d75e 100644
--- a/drivers/net/igb/igb.h
+++ b/drivers/net/igb/igb.h
@@ -55,12 +55,14 @@ struct igb_adapter;
55#define IGB_DEFAULT_ITR 3 /* dynamic */ 55#define IGB_DEFAULT_ITR 3 /* dynamic */
56#define IGB_MAX_ITR_USECS 10000 56#define IGB_MAX_ITR_USECS 10000
57#define IGB_MIN_ITR_USECS 10 57#define IGB_MIN_ITR_USECS 10
58#define NON_Q_VECTORS 1
59#define MAX_Q_VECTORS 8
58 60
59/* Transmit and receive queues */ 61/* Transmit and receive queues */
60#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? \ 62#define IGB_MAX_RX_QUEUES (adapter->vfs_allocated_count ? 2 : \
61 (adapter->vfs_allocated_count > 6 ? 1 : 2) : 4) 63 (hw->mac.type > e1000_82575 ? 8 : 4))
62#define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES 64#define IGB_ABS_MAX_TX_QUEUES 8
63#define IGB_ABS_MAX_TX_QUEUES 4 65#define IGB_MAX_TX_QUEUES IGB_MAX_RX_QUEUES
64 66
65#define IGB_MAX_VF_MC_ENTRIES 30 67#define IGB_MAX_VF_MC_ENTRIES 30
66#define IGB_MAX_VF_FUNCTIONS 8 68#define IGB_MAX_VF_FUNCTIONS 8
@@ -71,9 +73,14 @@ struct vf_data_storage {
71 u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES]; 73 u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
72 u16 num_vf_mc_hashes; 74 u16 num_vf_mc_hashes;
73 u16 vlans_enabled; 75 u16 vlans_enabled;
74 bool clear_to_send; 76 u32 flags;
77 unsigned long last_nack;
75}; 78};
76 79
80#define IGB_VF_FLAG_CTS 0x00000001 /* VF is clear to send data */
81#define IGB_VF_FLAG_UNI_PROMISC 0x00000002 /* VF has unicast promisc */
82#define IGB_VF_FLAG_MULTI_PROMISC 0x00000004 /* VF has multicast promisc */
83
77/* RX descriptor control thresholds. 84/* RX descriptor control thresholds.
78 * PTHRESH - MAC will consider prefetch if it has fewer than this number of 85 * PTHRESH - MAC will consider prefetch if it has fewer than this number of
79 * descriptors available in its onboard memory. 86 * descriptors available in its onboard memory.
@@ -85,17 +92,19 @@ struct vf_data_storage {
85 * descriptors until either it has this many to write back, or the 92 * descriptors until either it has this many to write back, or the
86 * ITR timer expires. 93 * ITR timer expires.
87 */ 94 */
88#define IGB_RX_PTHRESH 16 95#define IGB_RX_PTHRESH (hw->mac.type <= e1000_82576 ? 16 : 8)
89#define IGB_RX_HTHRESH 8 96#define IGB_RX_HTHRESH 8
90#define IGB_RX_WTHRESH 1 97#define IGB_RX_WTHRESH 1
98#define IGB_TX_PTHRESH 8
99#define IGB_TX_HTHRESH 1
100#define IGB_TX_WTHRESH ((hw->mac.type == e1000_82576 && \
101 adapter->msix_entries) ? 0 : 16)
91 102
92/* this is the size past which hardware will drop packets when setting LPE=0 */ 103/* this is the size past which hardware will drop packets when setting LPE=0 */
93#define MAXIMUM_ETHERNET_VLAN_SIZE 1522 104#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
94 105
95/* Supported Rx Buffer Sizes */ 106/* Supported Rx Buffer Sizes */
96#define IGB_RXBUFFER_128 128 /* Used for packet split */ 107#define IGB_RXBUFFER_128 128 /* Used for packet split */
97#define IGB_RXBUFFER_256 256 /* Used for packet split */
98#define IGB_RXBUFFER_512 512
99#define IGB_RXBUFFER_1024 1024 108#define IGB_RXBUFFER_1024 1024
100#define IGB_RXBUFFER_2048 2048 109#define IGB_RXBUFFER_2048 2048
101#define IGB_RXBUFFER_16384 16384 110#define IGB_RXBUFFER_16384 16384
@@ -141,36 +150,55 @@ struct igb_buffer {
141struct igb_tx_queue_stats { 150struct igb_tx_queue_stats {
142 u64 packets; 151 u64 packets;
143 u64 bytes; 152 u64 bytes;
153 u64 restart_queue;
144}; 154};
145 155
146struct igb_rx_queue_stats { 156struct igb_rx_queue_stats {
147 u64 packets; 157 u64 packets;
148 u64 bytes; 158 u64 bytes;
149 u64 drops; 159 u64 drops;
160 u64 csum_err;
161 u64 alloc_failed;
150}; 162};
151 163
152struct igb_ring { 164struct igb_q_vector {
153 struct igb_adapter *adapter; /* backlink */ 165 struct igb_adapter *adapter; /* backlink */
154 void *desc; /* descriptor ring memory */ 166 struct igb_ring *rx_ring;
155 dma_addr_t dma; /* phys address of the ring */ 167 struct igb_ring *tx_ring;
156 unsigned int size; /* length of desc. ring in bytes */ 168 struct napi_struct napi;
157 unsigned int count; /* number of desc. in the ring */
158 u16 next_to_use;
159 u16 next_to_clean;
160 u16 head;
161 u16 tail;
162 struct igb_buffer *buffer_info; /* array of buffer info structs */
163 169
164 u32 eims_value; 170 u32 eims_value;
165 u32 itr_val;
166 u16 itr_register;
167 u16 cpu; 171 u16 cpu;
168 172
169 u16 queue_index; 173 u16 itr_val;
170 u16 reg_idx; 174 u8 set_itr;
175 u8 itr_shift;
176 void __iomem *itr_register;
177
178 char name[IFNAMSIZ + 9];
179};
180
181struct igb_ring {
182 struct igb_q_vector *q_vector; /* backlink to q_vector */
183 struct net_device *netdev; /* back pointer to net_device */
184 struct pci_dev *pdev; /* pci device for dma mapping */
185 dma_addr_t dma; /* phys address of the ring */
186 void *desc; /* descriptor ring memory */
187 unsigned int size; /* length of desc. ring in bytes */
188 u16 count; /* number of desc. in the ring */
189 u16 next_to_use;
190 u16 next_to_clean;
191 u8 queue_index;
192 u8 reg_idx;
193 void __iomem *head;
194 void __iomem *tail;
195 struct igb_buffer *buffer_info; /* array of buffer info structs */
196
171 unsigned int total_bytes; 197 unsigned int total_bytes;
172 unsigned int total_packets; 198 unsigned int total_packets;
173 199
200 u32 flags;
201
174 union { 202 union {
175 /* TX */ 203 /* TX */
176 struct { 204 struct {
@@ -180,16 +208,18 @@ struct igb_ring {
180 /* RX */ 208 /* RX */
181 struct { 209 struct {
182 struct igb_rx_queue_stats rx_stats; 210 struct igb_rx_queue_stats rx_stats;
183 u64 rx_queue_drops; 211 u32 rx_buffer_len;
184 struct napi_struct napi;
185 int set_itr;
186 struct igb_ring *buddy;
187 }; 212 };
188 }; 213 };
189
190 char name[IFNAMSIZ + 5];
191}; 214};
192 215
216#define IGB_RING_FLAG_RX_CSUM 0x00000001 /* RX CSUM enabled */
217#define IGB_RING_FLAG_RX_SCTP_CSUM 0x00000002 /* SCTP CSUM offload enabled */
218
219#define IGB_RING_FLAG_TX_CTX_IDX 0x00000001 /* HW requires context index */
220
221#define IGB_ADVTXD_DCMD (E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS)
222
193#define E1000_RX_DESC_ADV(R, i) \ 223#define E1000_RX_DESC_ADV(R, i) \
194 (&(((union e1000_adv_rx_desc *)((R).desc))[i])) 224 (&(((union e1000_adv_rx_desc *)((R).desc))[i]))
195#define E1000_TX_DESC_ADV(R, i) \ 225#define E1000_TX_DESC_ADV(R, i) \
@@ -197,6 +227,15 @@ struct igb_ring {
197#define E1000_TX_CTXTDESC_ADV(R, i) \ 227#define E1000_TX_CTXTDESC_ADV(R, i) \
198 (&(((struct e1000_adv_tx_context_desc *)((R).desc))[i])) 228 (&(((struct e1000_adv_tx_context_desc *)((R).desc))[i]))
199 229
230/* igb_desc_unused - calculate if we have unused descriptors */
231static inline int igb_desc_unused(struct igb_ring *ring)
232{
233 if (ring->next_to_clean > ring->next_to_use)
234 return ring->next_to_clean - ring->next_to_use - 1;
235
236 return ring->count + ring->next_to_clean - ring->next_to_use - 1;
237}
238
200/* board specific private data structure */ 239/* board specific private data structure */
201 240
202struct igb_adapter { 241struct igb_adapter {
@@ -205,18 +244,14 @@ struct igb_adapter {
205 struct vlan_group *vlgrp; 244 struct vlan_group *vlgrp;
206 u16 mng_vlan_id; 245 u16 mng_vlan_id;
207 u32 bd_number; 246 u32 bd_number;
208 u32 rx_buffer_len;
209 u32 wol; 247 u32 wol;
210 u32 en_mng_pt; 248 u32 en_mng_pt;
211 u16 link_speed; 249 u16 link_speed;
212 u16 link_duplex; 250 u16 link_duplex;
213 unsigned int total_tx_bytes; 251
214 unsigned int total_tx_packets;
215 unsigned int total_rx_bytes;
216 unsigned int total_rx_packets;
217 /* Interrupt Throttle Rate */ 252 /* Interrupt Throttle Rate */
218 u32 itr; 253 u32 rx_itr_setting;
219 u32 itr_setting; 254 u32 tx_itr_setting;
220 u16 tx_itr; 255 u16 tx_itr;
221 u16 rx_itr; 256 u16 rx_itr;
222 257
@@ -229,13 +264,7 @@ struct igb_adapter {
229 264
230 /* TX */ 265 /* TX */
231 struct igb_ring *tx_ring; /* One per active queue */ 266 struct igb_ring *tx_ring; /* One per active queue */
232 unsigned int restart_queue;
233 unsigned long tx_queue_len; 267 unsigned long tx_queue_len;
234 u32 txd_cmd;
235 u32 gotc;
236 u64 gotc_old;
237 u64 tpt_old;
238 u64 colc_old;
239 u32 tx_timeout_count; 268 u32 tx_timeout_count;
240 269
241 /* RX */ 270 /* RX */
@@ -243,20 +272,12 @@ struct igb_adapter {
243 int num_tx_queues; 272 int num_tx_queues;
244 int num_rx_queues; 273 int num_rx_queues;
245 274
246 u64 hw_csum_err;
247 u64 hw_csum_good;
248 u32 alloc_rx_buff_failed;
249 u32 gorc;
250 u64 gorc_old;
251 u16 rx_ps_hdr_size;
252 u32 max_frame_size; 275 u32 max_frame_size;
253 u32 min_frame_size; 276 u32 min_frame_size;
254 277
255 /* OS defined structs */ 278 /* OS defined structs */
256 struct net_device *netdev; 279 struct net_device *netdev;
257 struct napi_struct napi;
258 struct pci_dev *pdev; 280 struct pci_dev *pdev;
259 struct net_device_stats net_stats;
260 struct cyclecounter cycles; 281 struct cyclecounter cycles;
261 struct timecounter clock; 282 struct timecounter clock;
262 struct timecompare compare; 283 struct timecompare compare;
@@ -273,6 +294,9 @@ struct igb_adapter {
273 struct igb_ring test_rx_ring; 294 struct igb_ring test_rx_ring;
274 295
275 int msg_enable; 296 int msg_enable;
297
298 unsigned int num_q_vectors;
299 struct igb_q_vector *q_vector[MAX_Q_VECTORS];
276 struct msix_entry *msix_entries; 300 struct msix_entry *msix_entries;
277 u32 eims_enable_mask; 301 u32 eims_enable_mask;
278 u32 eims_other; 302 u32 eims_other;
@@ -283,18 +307,19 @@ struct igb_adapter {
283 u32 eeprom_wol; 307 u32 eeprom_wol;
284 308
285 struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES]; 309 struct igb_ring *multi_tx_table[IGB_ABS_MAX_TX_QUEUES];
286 unsigned int tx_ring_count; 310 u16 tx_ring_count;
287 unsigned int rx_ring_count; 311 u16 rx_ring_count;
288 unsigned int vfs_allocated_count; 312 unsigned int vfs_allocated_count;
289 struct vf_data_storage *vf_data; 313 struct vf_data_storage *vf_data;
314 u32 rss_queues;
290}; 315};
291 316
292#define IGB_FLAG_HAS_MSI (1 << 0) 317#define IGB_FLAG_HAS_MSI (1 << 0)
293#define IGB_FLAG_DCA_ENABLED (1 << 1) 318#define IGB_FLAG_DCA_ENABLED (1 << 1)
294#define IGB_FLAG_QUAD_PORT_A (1 << 2) 319#define IGB_FLAG_QUAD_PORT_A (1 << 2)
295#define IGB_FLAG_NEED_CTX_IDX (1 << 3) 320#define IGB_FLAG_QUEUE_PAIRS (1 << 3)
296#define IGB_FLAG_RX_CSUM_DISABLED (1 << 4)
297 321
322#define IGB_82576_TSYNC_SHIFT 19
298enum e1000_state_t { 323enum e1000_state_t {
299 __IGB_TESTING, 324 __IGB_TESTING,
300 __IGB_RESETTING, 325 __IGB_RESETTING,
@@ -314,10 +339,18 @@ extern void igb_down(struct igb_adapter *);
314extern void igb_reinit_locked(struct igb_adapter *); 339extern void igb_reinit_locked(struct igb_adapter *);
315extern void igb_reset(struct igb_adapter *); 340extern void igb_reset(struct igb_adapter *);
316extern int igb_set_spd_dplx(struct igb_adapter *, u16); 341extern int igb_set_spd_dplx(struct igb_adapter *, u16);
317extern int igb_setup_tx_resources(struct igb_adapter *, struct igb_ring *); 342extern int igb_setup_tx_resources(struct igb_ring *);
318extern int igb_setup_rx_resources(struct igb_adapter *, struct igb_ring *); 343extern int igb_setup_rx_resources(struct igb_ring *);
319extern void igb_free_tx_resources(struct igb_ring *); 344extern void igb_free_tx_resources(struct igb_ring *);
320extern void igb_free_rx_resources(struct igb_ring *); 345extern void igb_free_rx_resources(struct igb_ring *);
346extern void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
347extern void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
348extern void igb_setup_tctl(struct igb_adapter *);
349extern void igb_setup_rctl(struct igb_adapter *);
350extern netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, struct igb_ring *);
351extern void igb_unmap_and_free_tx_resource(struct igb_ring *,
352 struct igb_buffer *);
353extern void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
321extern void igb_update_stats(struct igb_adapter *); 354extern void igb_update_stats(struct igb_adapter *);
322extern void igb_set_ethtool_ops(struct net_device *); 355extern void igb_set_ethtool_ops(struct net_device *);
323 356
diff --git a/drivers/net/igb/igb_ethtool.c b/drivers/net/igb/igb_ethtool.c
index aab3d971af51..c1cde5b44906 100644
--- a/drivers/net/igb/igb_ethtool.c
+++ b/drivers/net/igb/igb_ethtool.c
@@ -43,78 +43,94 @@ struct igb_stats {
43 int stat_offset; 43 int stat_offset;
44}; 44};
45 45
46#define IGB_STAT(m) FIELD_SIZEOF(struct igb_adapter, m), \ 46#define IGB_STAT(_name, _stat) { \
47 offsetof(struct igb_adapter, m) 47 .stat_string = _name, \
48 .sizeof_stat = FIELD_SIZEOF(struct igb_adapter, _stat), \
49 .stat_offset = offsetof(struct igb_adapter, _stat) \
50}
48static const struct igb_stats igb_gstrings_stats[] = { 51static const struct igb_stats igb_gstrings_stats[] = {
49 { "rx_packets", IGB_STAT(stats.gprc) }, 52 IGB_STAT("rx_packets", stats.gprc),
50 { "tx_packets", IGB_STAT(stats.gptc) }, 53 IGB_STAT("tx_packets", stats.gptc),
51 { "rx_bytes", IGB_STAT(stats.gorc) }, 54 IGB_STAT("rx_bytes", stats.gorc),
52 { "tx_bytes", IGB_STAT(stats.gotc) }, 55 IGB_STAT("tx_bytes", stats.gotc),
53 { "rx_broadcast", IGB_STAT(stats.bprc) }, 56 IGB_STAT("rx_broadcast", stats.bprc),
54 { "tx_broadcast", IGB_STAT(stats.bptc) }, 57 IGB_STAT("tx_broadcast", stats.bptc),
55 { "rx_multicast", IGB_STAT(stats.mprc) }, 58 IGB_STAT("rx_multicast", stats.mprc),
56 { "tx_multicast", IGB_STAT(stats.mptc) }, 59 IGB_STAT("tx_multicast", stats.mptc),
57 { "rx_errors", IGB_STAT(net_stats.rx_errors) }, 60 IGB_STAT("multicast", stats.mprc),
58 { "tx_errors", IGB_STAT(net_stats.tx_errors) }, 61 IGB_STAT("collisions", stats.colc),
59 { "tx_dropped", IGB_STAT(net_stats.tx_dropped) }, 62 IGB_STAT("rx_crc_errors", stats.crcerrs),
60 { "multicast", IGB_STAT(stats.mprc) }, 63 IGB_STAT("rx_no_buffer_count", stats.rnbc),
61 { "collisions", IGB_STAT(stats.colc) }, 64 IGB_STAT("rx_missed_errors", stats.mpc),
62 { "rx_length_errors", IGB_STAT(net_stats.rx_length_errors) }, 65 IGB_STAT("tx_aborted_errors", stats.ecol),
63 { "rx_over_errors", IGB_STAT(net_stats.rx_over_errors) }, 66 IGB_STAT("tx_carrier_errors", stats.tncrs),
64 { "rx_crc_errors", IGB_STAT(stats.crcerrs) }, 67 IGB_STAT("tx_window_errors", stats.latecol),
65 { "rx_frame_errors", IGB_STAT(net_stats.rx_frame_errors) }, 68 IGB_STAT("tx_abort_late_coll", stats.latecol),
66 { "rx_no_buffer_count", IGB_STAT(stats.rnbc) }, 69 IGB_STAT("tx_deferred_ok", stats.dc),
67 { "rx_queue_drop_packet_count", IGB_STAT(net_stats.rx_fifo_errors) }, 70 IGB_STAT("tx_single_coll_ok", stats.scc),
68 { "rx_missed_errors", IGB_STAT(stats.mpc) }, 71 IGB_STAT("tx_multi_coll_ok", stats.mcc),
69 { "tx_aborted_errors", IGB_STAT(stats.ecol) }, 72 IGB_STAT("tx_timeout_count", tx_timeout_count),
70 { "tx_carrier_errors", IGB_STAT(stats.tncrs) }, 73 IGB_STAT("rx_long_length_errors", stats.roc),
71 { "tx_fifo_errors", IGB_STAT(net_stats.tx_fifo_errors) }, 74 IGB_STAT("rx_short_length_errors", stats.ruc),
72 { "tx_heartbeat_errors", IGB_STAT(net_stats.tx_heartbeat_errors) }, 75 IGB_STAT("rx_align_errors", stats.algnerrc),
73 { "tx_window_errors", IGB_STAT(stats.latecol) }, 76 IGB_STAT("tx_tcp_seg_good", stats.tsctc),
74 { "tx_abort_late_coll", IGB_STAT(stats.latecol) }, 77 IGB_STAT("tx_tcp_seg_failed", stats.tsctfc),
75 { "tx_deferred_ok", IGB_STAT(stats.dc) }, 78 IGB_STAT("rx_flow_control_xon", stats.xonrxc),
76 { "tx_single_coll_ok", IGB_STAT(stats.scc) }, 79 IGB_STAT("rx_flow_control_xoff", stats.xoffrxc),
77 { "tx_multi_coll_ok", IGB_STAT(stats.mcc) }, 80 IGB_STAT("tx_flow_control_xon", stats.xontxc),
78 { "tx_timeout_count", IGB_STAT(tx_timeout_count) }, 81 IGB_STAT("tx_flow_control_xoff", stats.xofftxc),
79 { "tx_restart_queue", IGB_STAT(restart_queue) }, 82 IGB_STAT("rx_long_byte_count", stats.gorc),
80 { "rx_long_length_errors", IGB_STAT(stats.roc) }, 83 IGB_STAT("tx_dma_out_of_sync", stats.doosync),
81 { "rx_short_length_errors", IGB_STAT(stats.ruc) }, 84 IGB_STAT("tx_smbus", stats.mgptc),
82 { "rx_align_errors", IGB_STAT(stats.algnerrc) }, 85 IGB_STAT("rx_smbus", stats.mgprc),
83 { "tx_tcp_seg_good", IGB_STAT(stats.tsctc) }, 86 IGB_STAT("dropped_smbus", stats.mgpdc),
84 { "tx_tcp_seg_failed", IGB_STAT(stats.tsctfc) }, 87};
85 { "rx_flow_control_xon", IGB_STAT(stats.xonrxc) }, 88
86 { "rx_flow_control_xoff", IGB_STAT(stats.xoffrxc) }, 89#define IGB_NETDEV_STAT(_net_stat) { \
87 { "tx_flow_control_xon", IGB_STAT(stats.xontxc) }, 90 .stat_string = __stringify(_net_stat), \
88 { "tx_flow_control_xoff", IGB_STAT(stats.xofftxc) }, 91 .sizeof_stat = FIELD_SIZEOF(struct net_device_stats, _net_stat), \
89 { "rx_long_byte_count", IGB_STAT(stats.gorc) }, 92 .stat_offset = offsetof(struct net_device_stats, _net_stat) \
90 { "rx_csum_offload_good", IGB_STAT(hw_csum_good) }, 93}
91 { "rx_csum_offload_errors", IGB_STAT(hw_csum_err) }, 94static const struct igb_stats igb_gstrings_net_stats[] = {
92 { "tx_dma_out_of_sync", IGB_STAT(stats.doosync) }, 95 IGB_NETDEV_STAT(rx_errors),
93 { "alloc_rx_buff_failed", IGB_STAT(alloc_rx_buff_failed) }, 96 IGB_NETDEV_STAT(tx_errors),
94 { "tx_smbus", IGB_STAT(stats.mgptc) }, 97 IGB_NETDEV_STAT(tx_dropped),
95 { "rx_smbus", IGB_STAT(stats.mgprc) }, 98 IGB_NETDEV_STAT(rx_length_errors),
96 { "dropped_smbus", IGB_STAT(stats.mgpdc) }, 99 IGB_NETDEV_STAT(rx_over_errors),
100 IGB_NETDEV_STAT(rx_frame_errors),
101 IGB_NETDEV_STAT(rx_fifo_errors),
102 IGB_NETDEV_STAT(tx_fifo_errors),
103 IGB_NETDEV_STAT(tx_heartbeat_errors)
97}; 104};
98 105
99#define IGB_QUEUE_STATS_LEN \
100 (((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues)* \
101 (sizeof(struct igb_rx_queue_stats) / sizeof(u64))) + \
102 ((((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues) * \
103 (sizeof(struct igb_tx_queue_stats) / sizeof(u64))))
104#define IGB_GLOBAL_STATS_LEN \ 106#define IGB_GLOBAL_STATS_LEN \
105 sizeof(igb_gstrings_stats) / sizeof(struct igb_stats) 107 (sizeof(igb_gstrings_stats) / sizeof(struct igb_stats))
106#define IGB_STATS_LEN (IGB_GLOBAL_STATS_LEN + IGB_QUEUE_STATS_LEN) 108#define IGB_NETDEV_STATS_LEN \
109 (sizeof(igb_gstrings_net_stats) / sizeof(struct igb_stats))
110#define IGB_RX_QUEUE_STATS_LEN \
111 (sizeof(struct igb_rx_queue_stats) / sizeof(u64))
112#define IGB_TX_QUEUE_STATS_LEN \
113 (sizeof(struct igb_tx_queue_stats) / sizeof(u64))
114#define IGB_QUEUE_STATS_LEN \
115 ((((struct igb_adapter *)netdev_priv(netdev))->num_rx_queues * \
116 IGB_RX_QUEUE_STATS_LEN) + \
117 (((struct igb_adapter *)netdev_priv(netdev))->num_tx_queues * \
118 IGB_TX_QUEUE_STATS_LEN))
119#define IGB_STATS_LEN \
120 (IGB_GLOBAL_STATS_LEN + IGB_NETDEV_STATS_LEN + IGB_QUEUE_STATS_LEN)
121
107static const char igb_gstrings_test[][ETH_GSTRING_LEN] = { 122static const char igb_gstrings_test[][ETH_GSTRING_LEN] = {
108 "Register test (offline)", "Eeprom test (offline)", 123 "Register test (offline)", "Eeprom test (offline)",
109 "Interrupt test (offline)", "Loopback test (offline)", 124 "Interrupt test (offline)", "Loopback test (offline)",
110 "Link test (on/offline)" 125 "Link test (on/offline)"
111}; 126};
112#define IGB_TEST_LEN sizeof(igb_gstrings_test) / ETH_GSTRING_LEN 127#define IGB_TEST_LEN (sizeof(igb_gstrings_test) / ETH_GSTRING_LEN)
113 128
114static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd) 129static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
115{ 130{
116 struct igb_adapter *adapter = netdev_priv(netdev); 131 struct igb_adapter *adapter = netdev_priv(netdev);
117 struct e1000_hw *hw = &adapter->hw; 132 struct e1000_hw *hw = &adapter->hw;
133 u32 status;
118 134
119 if (hw->phy.media_type == e1000_media_type_copper) { 135 if (hw->phy.media_type == e1000_media_type_copper) {
120 136
@@ -149,17 +165,20 @@ static int igb_get_settings(struct net_device *netdev, struct ethtool_cmd *ecmd)
149 165
150 ecmd->transceiver = XCVR_INTERNAL; 166 ecmd->transceiver = XCVR_INTERNAL;
151 167
152 if (rd32(E1000_STATUS) & E1000_STATUS_LU) { 168 status = rd32(E1000_STATUS);
153 169
154 adapter->hw.mac.ops.get_speed_and_duplex(hw, 170 if (status & E1000_STATUS_LU) {
155 &adapter->link_speed,
156 &adapter->link_duplex);
157 ecmd->speed = adapter->link_speed;
158 171
159 /* unfortunately FULL_DUPLEX != DUPLEX_FULL 172 if ((status & E1000_STATUS_SPEED_1000) ||
160 * and HALF_DUPLEX != DUPLEX_HALF */ 173 hw->phy.media_type != e1000_media_type_copper)
174 ecmd->speed = SPEED_1000;
175 else if (status & E1000_STATUS_SPEED_100)
176 ecmd->speed = SPEED_100;
177 else
178 ecmd->speed = SPEED_10;
161 179
162 if (adapter->link_duplex == FULL_DUPLEX) 180 if ((status & E1000_STATUS_FD) ||
181 hw->phy.media_type != e1000_media_type_copper)
163 ecmd->duplex = DUPLEX_FULL; 182 ecmd->duplex = DUPLEX_FULL;
164 else 183 else
165 ecmd->duplex = DUPLEX_HALF; 184 ecmd->duplex = DUPLEX_HALF;
@@ -250,8 +269,9 @@ static int igb_set_pauseparam(struct net_device *netdev,
250 if (netif_running(adapter->netdev)) { 269 if (netif_running(adapter->netdev)) {
251 igb_down(adapter); 270 igb_down(adapter);
252 igb_up(adapter); 271 igb_up(adapter);
253 } else 272 } else {
254 igb_reset(adapter); 273 igb_reset(adapter);
274 }
255 } else { 275 } else {
256 if (pause->rx_pause && pause->tx_pause) 276 if (pause->rx_pause && pause->tx_pause)
257 hw->fc.requested_mode = e1000_fc_full; 277 hw->fc.requested_mode = e1000_fc_full;
@@ -275,17 +295,20 @@ static int igb_set_pauseparam(struct net_device *netdev,
275static u32 igb_get_rx_csum(struct net_device *netdev) 295static u32 igb_get_rx_csum(struct net_device *netdev)
276{ 296{
277 struct igb_adapter *adapter = netdev_priv(netdev); 297 struct igb_adapter *adapter = netdev_priv(netdev);
278 return !(adapter->flags & IGB_FLAG_RX_CSUM_DISABLED); 298 return !!(adapter->rx_ring[0].flags & IGB_RING_FLAG_RX_CSUM);
279} 299}
280 300
281static int igb_set_rx_csum(struct net_device *netdev, u32 data) 301static int igb_set_rx_csum(struct net_device *netdev, u32 data)
282{ 302{
283 struct igb_adapter *adapter = netdev_priv(netdev); 303 struct igb_adapter *adapter = netdev_priv(netdev);
304 int i;
284 305
285 if (data) 306 for (i = 0; i < adapter->num_rx_queues; i++) {
286 adapter->flags &= ~IGB_FLAG_RX_CSUM_DISABLED; 307 if (data)
287 else 308 adapter->rx_ring[i].flags |= IGB_RING_FLAG_RX_CSUM;
288 adapter->flags |= IGB_FLAG_RX_CSUM_DISABLED; 309 else
310 adapter->rx_ring[i].flags &= ~IGB_RING_FLAG_RX_CSUM;
311 }
289 312
290 return 0; 313 return 0;
291} 314}
@@ -301,7 +324,7 @@ static int igb_set_tx_csum(struct net_device *netdev, u32 data)
301 324
302 if (data) { 325 if (data) {
303 netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM); 326 netdev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM);
304 if (adapter->hw.mac.type == e1000_82576) 327 if (adapter->hw.mac.type >= e1000_82576)
305 netdev->features |= NETIF_F_SCTP_CSUM; 328 netdev->features |= NETIF_F_SCTP_CSUM;
306 } else { 329 } else {
307 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | 330 netdev->features &= ~(NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
@@ -495,19 +518,10 @@ static void igb_get_regs(struct net_device *netdev,
495 regs_buff[119] = adapter->stats.scvpc; 518 regs_buff[119] = adapter->stats.scvpc;
496 regs_buff[120] = adapter->stats.hrmpc; 519 regs_buff[120] = adapter->stats.hrmpc;
497 520
498 /* These should probably be added to e1000_regs.h instead */
499 #define E1000_PSRTYPE_REG(_i) (0x05480 + ((_i) * 4))
500 #define E1000_IP4AT_REG(_i) (0x05840 + ((_i) * 8))
501 #define E1000_IP6AT_REG(_i) (0x05880 + ((_i) * 4))
502 #define E1000_WUPM_REG(_i) (0x05A00 + ((_i) * 4))
503 #define E1000_FFMT_REG(_i) (0x09000 + ((_i) * 8))
504 #define E1000_FFVT_REG(_i) (0x09800 + ((_i) * 8))
505 #define E1000_FFLT_REG(_i) (0x05F00 + ((_i) * 8))
506
507 for (i = 0; i < 4; i++) 521 for (i = 0; i < 4; i++)
508 regs_buff[121 + i] = rd32(E1000_SRRCTL(i)); 522 regs_buff[121 + i] = rd32(E1000_SRRCTL(i));
509 for (i = 0; i < 4; i++) 523 for (i = 0; i < 4; i++)
510 regs_buff[125 + i] = rd32(E1000_PSRTYPE_REG(i)); 524 regs_buff[125 + i] = rd32(E1000_PSRTYPE(i));
511 for (i = 0; i < 4; i++) 525 for (i = 0; i < 4; i++)
512 regs_buff[129 + i] = rd32(E1000_RDBAL(i)); 526 regs_buff[129 + i] = rd32(E1000_RDBAL(i));
513 for (i = 0; i < 4; i++) 527 for (i = 0; i < 4; i++)
@@ -732,17 +746,17 @@ static int igb_set_ringparam(struct net_device *netdev,
732 struct igb_adapter *adapter = netdev_priv(netdev); 746 struct igb_adapter *adapter = netdev_priv(netdev);
733 struct igb_ring *temp_ring; 747 struct igb_ring *temp_ring;
734 int i, err = 0; 748 int i, err = 0;
735 u32 new_rx_count, new_tx_count; 749 u16 new_rx_count, new_tx_count;
736 750
737 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending)) 751 if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
738 return -EINVAL; 752 return -EINVAL;
739 753
740 new_rx_count = max(ring->rx_pending, (u32)IGB_MIN_RXD); 754 new_rx_count = min_t(u32, ring->rx_pending, IGB_MAX_RXD);
741 new_rx_count = min(new_rx_count, (u32)IGB_MAX_RXD); 755 new_rx_count = max_t(u16, new_rx_count, IGB_MIN_RXD);
742 new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE); 756 new_rx_count = ALIGN(new_rx_count, REQ_RX_DESCRIPTOR_MULTIPLE);
743 757
744 new_tx_count = max(ring->tx_pending, (u32)IGB_MIN_TXD); 758 new_tx_count = min_t(u32, ring->tx_pending, IGB_MAX_TXD);
745 new_tx_count = min(new_tx_count, (u32)IGB_MAX_TXD); 759 new_tx_count = max_t(u16, new_tx_count, IGB_MIN_TXD);
746 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE); 760 new_tx_count = ALIGN(new_tx_count, REQ_TX_DESCRIPTOR_MULTIPLE);
747 761
748 if ((new_tx_count == adapter->tx_ring_count) && 762 if ((new_tx_count == adapter->tx_ring_count) &&
@@ -787,7 +801,7 @@ static int igb_set_ringparam(struct net_device *netdev,
787 801
788 for (i = 0; i < adapter->num_tx_queues; i++) { 802 for (i = 0; i < adapter->num_tx_queues; i++) {
789 temp_ring[i].count = new_tx_count; 803 temp_ring[i].count = new_tx_count;
790 err = igb_setup_tx_resources(adapter, &temp_ring[i]); 804 err = igb_setup_tx_resources(&temp_ring[i]);
791 if (err) { 805 if (err) {
792 while (i) { 806 while (i) {
793 i--; 807 i--;
@@ -812,7 +826,7 @@ static int igb_set_ringparam(struct net_device *netdev,
812 826
813 for (i = 0; i < adapter->num_rx_queues; i++) { 827 for (i = 0; i < adapter->num_rx_queues; i++) {
814 temp_ring[i].count = new_rx_count; 828 temp_ring[i].count = new_rx_count;
815 err = igb_setup_rx_resources(adapter, &temp_ring[i]); 829 err = igb_setup_rx_resources(&temp_ring[i]);
816 if (err) { 830 if (err) {
817 while (i) { 831 while (i) {
818 i--; 832 i--;
@@ -943,7 +957,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
943{ 957{
944 struct e1000_hw *hw = &adapter->hw; 958 struct e1000_hw *hw = &adapter->hw;
945 u32 pat, val; 959 u32 pat, val;
946 u32 _test[] = 960 static const u32 _test[] =
947 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF}; 961 {0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
948 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) { 962 for (pat = 0; pat < ARRAY_SIZE(_test); pat++) {
949 wr32(reg, (_test[pat] & write)); 963 wr32(reg, (_test[pat] & write));
@@ -956,6 +970,7 @@ static bool reg_pattern_test(struct igb_adapter *adapter, u64 *data,
956 return 1; 970 return 1;
957 } 971 }
958 } 972 }
973
959 return 0; 974 return 0;
960} 975}
961 976
@@ -973,6 +988,7 @@ static bool reg_set_and_check(struct igb_adapter *adapter, u64 *data,
973 *data = reg; 988 *data = reg;
974 return 1; 989 return 1;
975 } 990 }
991
976 return 0; 992 return 0;
977} 993}
978 994
@@ -995,14 +1011,14 @@ static int igb_reg_test(struct igb_adapter *adapter, u64 *data)
995 u32 value, before, after; 1011 u32 value, before, after;
996 u32 i, toggle; 1012 u32 i, toggle;
997 1013
998 toggle = 0x7FFFF3FF;
999
1000 switch (adapter->hw.mac.type) { 1014 switch (adapter->hw.mac.type) {
1001 case e1000_82576: 1015 case e1000_82576:
1002 test = reg_test_82576; 1016 test = reg_test_82576;
1017 toggle = 0x7FFFF3FF;
1003 break; 1018 break;
1004 default: 1019 default:
1005 test = reg_test_82575; 1020 test = reg_test_82575;
1021 toggle = 0x7FFFF3FF;
1006 break; 1022 break;
1007 } 1023 }
1008 1024
@@ -1080,8 +1096,7 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
1080 *data = 0; 1096 *data = 0;
1081 /* Read and add up the contents of the EEPROM */ 1097 /* Read and add up the contents of the EEPROM */
1082 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) { 1098 for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
1083 if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) 1099 if ((adapter->hw.nvm.ops.read(&adapter->hw, i, 1, &temp)) < 0) {
1084 < 0) {
1085 *data = 1; 1100 *data = 1;
1086 break; 1101 break;
1087 } 1102 }
@@ -1097,8 +1112,7 @@ static int igb_eeprom_test(struct igb_adapter *adapter, u64 *data)
1097 1112
1098static irqreturn_t igb_test_intr(int irq, void *data) 1113static irqreturn_t igb_test_intr(int irq, void *data)
1099{ 1114{
1100 struct net_device *netdev = (struct net_device *) data; 1115 struct igb_adapter *adapter = (struct igb_adapter *) data;
1101 struct igb_adapter *adapter = netdev_priv(netdev);
1102 struct e1000_hw *hw = &adapter->hw; 1116 struct e1000_hw *hw = &adapter->hw;
1103 1117
1104 adapter->test_icr |= rd32(E1000_ICR); 1118 adapter->test_icr |= rd32(E1000_ICR);
@@ -1116,32 +1130,36 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1116 *data = 0; 1130 *data = 0;
1117 1131
1118 /* Hook up test interrupt handler just for this test */ 1132 /* Hook up test interrupt handler just for this test */
1119 if (adapter->msix_entries) 1133 if (adapter->msix_entries) {
1120 /* NOTE: we don't test MSI-X interrupts here, yet */ 1134 if (request_irq(adapter->msix_entries[0].vector,
1121 return 0; 1135 &igb_test_intr, 0, netdev->name, adapter)) {
1122 1136 *data = 1;
1123 if (adapter->flags & IGB_FLAG_HAS_MSI) { 1137 return -1;
1138 }
1139 } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
1124 shared_int = false; 1140 shared_int = false;
1125 if (request_irq(irq, &igb_test_intr, 0, netdev->name, netdev)) { 1141 if (request_irq(irq,
1142 &igb_test_intr, 0, netdev->name, adapter)) {
1126 *data = 1; 1143 *data = 1;
1127 return -1; 1144 return -1;
1128 } 1145 }
1129 } else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED, 1146 } else if (!request_irq(irq, &igb_test_intr, IRQF_PROBE_SHARED,
1130 netdev->name, netdev)) { 1147 netdev->name, adapter)) {
1131 shared_int = false; 1148 shared_int = false;
1132 } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED, 1149 } else if (request_irq(irq, &igb_test_intr, IRQF_SHARED,
1133 netdev->name, netdev)) { 1150 netdev->name, adapter)) {
1134 *data = 1; 1151 *data = 1;
1135 return -1; 1152 return -1;
1136 } 1153 }
1137 dev_info(&adapter->pdev->dev, "testing %s interrupt\n", 1154 dev_info(&adapter->pdev->dev, "testing %s interrupt\n",
1138 (shared_int ? "shared" : "unshared")); 1155 (shared_int ? "shared" : "unshared"));
1156
1139 /* Disable all the interrupts */ 1157 /* Disable all the interrupts */
1140 wr32(E1000_IMC, 0xFFFFFFFF); 1158 wr32(E1000_IMC, ~0);
1141 msleep(10); 1159 msleep(10);
1142 1160
1143 /* Define all writable bits for ICS */ 1161 /* Define all writable bits for ICS */
1144 switch(hw->mac.type) { 1162 switch (hw->mac.type) {
1145 case e1000_82575: 1163 case e1000_82575:
1146 ics_mask = 0x37F47EDD; 1164 ics_mask = 0x37F47EDD;
1147 break; 1165 break;
@@ -1231,190 +1249,61 @@ static int igb_intr_test(struct igb_adapter *adapter, u64 *data)
1231 msleep(10); 1249 msleep(10);
1232 1250
1233 /* Unhook test interrupt handler */ 1251 /* Unhook test interrupt handler */
1234 free_irq(irq, netdev); 1252 if (adapter->msix_entries)
1253 free_irq(adapter->msix_entries[0].vector, adapter);
1254 else
1255 free_irq(irq, adapter);
1235 1256
1236 return *data; 1257 return *data;
1237} 1258}
1238 1259
1239static void igb_free_desc_rings(struct igb_adapter *adapter) 1260static void igb_free_desc_rings(struct igb_adapter *adapter)
1240{ 1261{
1241 struct igb_ring *tx_ring = &adapter->test_tx_ring; 1262 igb_free_tx_resources(&adapter->test_tx_ring);
1242 struct igb_ring *rx_ring = &adapter->test_rx_ring; 1263 igb_free_rx_resources(&adapter->test_rx_ring);
1243 struct pci_dev *pdev = adapter->pdev;
1244 int i;
1245
1246 if (tx_ring->desc && tx_ring->buffer_info) {
1247 for (i = 0; i < tx_ring->count; i++) {
1248 struct igb_buffer *buf = &(tx_ring->buffer_info[i]);
1249 if (buf->dma)
1250 pci_unmap_single(pdev, buf->dma, buf->length,
1251 PCI_DMA_TODEVICE);
1252 if (buf->skb)
1253 dev_kfree_skb(buf->skb);
1254 }
1255 }
1256
1257 if (rx_ring->desc && rx_ring->buffer_info) {
1258 for (i = 0; i < rx_ring->count; i++) {
1259 struct igb_buffer *buf = &(rx_ring->buffer_info[i]);
1260 if (buf->dma)
1261 pci_unmap_single(pdev, buf->dma,
1262 IGB_RXBUFFER_2048,
1263 PCI_DMA_FROMDEVICE);
1264 if (buf->skb)
1265 dev_kfree_skb(buf->skb);
1266 }
1267 }
1268
1269 if (tx_ring->desc) {
1270 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc,
1271 tx_ring->dma);
1272 tx_ring->desc = NULL;
1273 }
1274 if (rx_ring->desc) {
1275 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc,
1276 rx_ring->dma);
1277 rx_ring->desc = NULL;
1278 }
1279
1280 kfree(tx_ring->buffer_info);
1281 tx_ring->buffer_info = NULL;
1282 kfree(rx_ring->buffer_info);
1283 rx_ring->buffer_info = NULL;
1284
1285 return;
1286} 1264}
1287 1265
1288static int igb_setup_desc_rings(struct igb_adapter *adapter) 1266static int igb_setup_desc_rings(struct igb_adapter *adapter)
1289{ 1267{
1290 struct e1000_hw *hw = &adapter->hw;
1291 struct igb_ring *tx_ring = &adapter->test_tx_ring; 1268 struct igb_ring *tx_ring = &adapter->test_tx_ring;
1292 struct igb_ring *rx_ring = &adapter->test_rx_ring; 1269 struct igb_ring *rx_ring = &adapter->test_rx_ring;
1293 struct pci_dev *pdev = adapter->pdev; 1270 struct e1000_hw *hw = &adapter->hw;
1294 struct igb_buffer *buffer_info; 1271 int ret_val;
1295 u32 rctl;
1296 int i, ret_val;
1297 1272
1298 /* Setup Tx descriptor ring and Tx buffers */ 1273 /* Setup Tx descriptor ring and Tx buffers */
1274 tx_ring->count = IGB_DEFAULT_TXD;
1275 tx_ring->pdev = adapter->pdev;
1276 tx_ring->netdev = adapter->netdev;
1277 tx_ring->reg_idx = adapter->vfs_allocated_count;
1299 1278
1300 if (!tx_ring->count) 1279 if (igb_setup_tx_resources(tx_ring)) {
1301 tx_ring->count = IGB_DEFAULT_TXD;
1302
1303 tx_ring->buffer_info = kcalloc(tx_ring->count,
1304 sizeof(struct igb_buffer),
1305 GFP_KERNEL);
1306 if (!tx_ring->buffer_info) {
1307 ret_val = 1; 1280 ret_val = 1;
1308 goto err_nomem; 1281 goto err_nomem;
1309 } 1282 }
1310 1283
1311 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 1284 igb_setup_tctl(adapter);
1312 tx_ring->size = ALIGN(tx_ring->size, 4096); 1285 igb_configure_tx_ring(adapter, tx_ring);
1313 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size,
1314 &tx_ring->dma);
1315 if (!tx_ring->desc) {
1316 ret_val = 2;
1317 goto err_nomem;
1318 }
1319 tx_ring->next_to_use = tx_ring->next_to_clean = 0;
1320
1321 wr32(E1000_TDBAL(0),
1322 ((u64) tx_ring->dma & 0x00000000FFFFFFFF));
1323 wr32(E1000_TDBAH(0), ((u64) tx_ring->dma >> 32));
1324 wr32(E1000_TDLEN(0),
1325 tx_ring->count * sizeof(union e1000_adv_tx_desc));
1326 wr32(E1000_TDH(0), 0);
1327 wr32(E1000_TDT(0), 0);
1328 wr32(E1000_TCTL,
1329 E1000_TCTL_PSP | E1000_TCTL_EN |
1330 E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
1331 E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
1332
1333 for (i = 0; i < tx_ring->count; i++) {
1334 union e1000_adv_tx_desc *tx_desc;
1335 struct sk_buff *skb;
1336 unsigned int size = 1024;
1337
1338 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
1339 skb = alloc_skb(size, GFP_KERNEL);
1340 if (!skb) {
1341 ret_val = 3;
1342 goto err_nomem;
1343 }
1344 skb_put(skb, size);
1345 buffer_info = &tx_ring->buffer_info[i];
1346 buffer_info->skb = skb;
1347 buffer_info->length = skb->len;
1348 buffer_info->dma = pci_map_single(pdev, skb->data, skb->len,
1349 PCI_DMA_TODEVICE);
1350 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
1351 tx_desc->read.olinfo_status = cpu_to_le32(skb->len) <<
1352 E1000_ADVTXD_PAYLEN_SHIFT;
1353 tx_desc->read.cmd_type_len = cpu_to_le32(skb->len);
1354 tx_desc->read.cmd_type_len |= cpu_to_le32(E1000_TXD_CMD_EOP |
1355 E1000_TXD_CMD_IFCS |
1356 E1000_TXD_CMD_RS |
1357 E1000_ADVTXD_DTYP_DATA |
1358 E1000_ADVTXD_DCMD_DEXT);
1359 }
1360 1286
1361 /* Setup Rx descriptor ring and Rx buffers */ 1287 /* Setup Rx descriptor ring and Rx buffers */
1362 1288 rx_ring->count = IGB_DEFAULT_RXD;
1363 if (!rx_ring->count) 1289 rx_ring->pdev = adapter->pdev;
1364 rx_ring->count = IGB_DEFAULT_RXD; 1290 rx_ring->netdev = adapter->netdev;
1365 1291 rx_ring->rx_buffer_len = IGB_RXBUFFER_2048;
1366 rx_ring->buffer_info = kcalloc(rx_ring->count, 1292 rx_ring->reg_idx = adapter->vfs_allocated_count;
1367 sizeof(struct igb_buffer), 1293
1368 GFP_KERNEL); 1294 if (igb_setup_rx_resources(rx_ring)) {
1369 if (!rx_ring->buffer_info) { 1295 ret_val = 3;
1370 ret_val = 4;
1371 goto err_nomem; 1296 goto err_nomem;
1372 } 1297 }
1373 1298
1374 rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc); 1299 /* set the default queue to queue 0 of PF */
1375 rx_ring->desc = pci_alloc_consistent(pdev, rx_ring->size, 1300 wr32(E1000_MRQC, adapter->vfs_allocated_count << 3);
1376 &rx_ring->dma);
1377 if (!rx_ring->desc) {
1378 ret_val = 5;
1379 goto err_nomem;
1380 }
1381 rx_ring->next_to_use = rx_ring->next_to_clean = 0;
1382 1301
1383 rctl = rd32(E1000_RCTL); 1302 /* enable receive ring */
1384 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN); 1303 igb_setup_rctl(adapter);
1385 wr32(E1000_RDBAL(0), 1304 igb_configure_rx_ring(adapter, rx_ring);
1386 ((u64) rx_ring->dma & 0xFFFFFFFF)); 1305
1387 wr32(E1000_RDBAH(0), 1306 igb_alloc_rx_buffers_adv(rx_ring, igb_desc_unused(rx_ring));
1388 ((u64) rx_ring->dma >> 32));
1389 wr32(E1000_RDLEN(0), rx_ring->size);
1390 wr32(E1000_RDH(0), 0);
1391 wr32(E1000_RDT(0), 0);
1392 rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
1393 rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
1394 (adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
1395 wr32(E1000_RCTL, rctl);
1396 wr32(E1000_SRRCTL(0), E1000_SRRCTL_DESCTYPE_ADV_ONEBUF);
1397
1398 for (i = 0; i < rx_ring->count; i++) {
1399 union e1000_adv_rx_desc *rx_desc;
1400 struct sk_buff *skb;
1401
1402 buffer_info = &rx_ring->buffer_info[i];
1403 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
1404 skb = alloc_skb(IGB_RXBUFFER_2048 + NET_IP_ALIGN,
1405 GFP_KERNEL);
1406 if (!skb) {
1407 ret_val = 6;
1408 goto err_nomem;
1409 }
1410 skb_reserve(skb, NET_IP_ALIGN);
1411 buffer_info->skb = skb;
1412 buffer_info->dma = pci_map_single(pdev, skb->data,
1413 IGB_RXBUFFER_2048,
1414 PCI_DMA_FROMDEVICE);
1415 rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
1416 memset(skb->data, 0x00, skb->len);
1417 }
1418 1307
1419 return 0; 1308 return 0;
1420 1309
@@ -1490,7 +1379,10 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
1490 struct e1000_hw *hw = &adapter->hw; 1379 struct e1000_hw *hw = &adapter->hw;
1491 u32 reg; 1380 u32 reg;
1492 1381
1493 if (hw->phy.media_type == e1000_media_type_internal_serdes) { 1382 reg = rd32(E1000_CTRL_EXT);
1383
1384 /* use CTRL_EXT to identify link type as SGMII can appear as copper */
1385 if (reg & E1000_CTRL_EXT_LINK_MODE_MASK) {
1494 reg = rd32(E1000_RCTL); 1386 reg = rd32(E1000_RCTL);
1495 reg |= E1000_RCTL_LBM_TCVR; 1387 reg |= E1000_RCTL_LBM_TCVR;
1496 wr32(E1000_RCTL, reg); 1388 wr32(E1000_RCTL, reg);
@@ -1521,11 +1413,9 @@ static int igb_setup_loopback_test(struct igb_adapter *adapter)
1521 wr32(E1000_PCS_LCTL, reg); 1413 wr32(E1000_PCS_LCTL, reg);
1522 1414
1523 return 0; 1415 return 0;
1524 } else if (hw->phy.media_type == e1000_media_type_copper) {
1525 return igb_set_phy_loopback(adapter);
1526 } 1416 }
1527 1417
1528 return 7; 1418 return igb_set_phy_loopback(adapter);
1529} 1419}
1530 1420
1531static void igb_loopback_cleanup(struct igb_adapter *adapter) 1421static void igb_loopback_cleanup(struct igb_adapter *adapter)
@@ -1551,35 +1441,99 @@ static void igb_create_lbtest_frame(struct sk_buff *skb,
1551 unsigned int frame_size) 1441 unsigned int frame_size)
1552{ 1442{
1553 memset(skb->data, 0xFF, frame_size); 1443 memset(skb->data, 0xFF, frame_size);
1554 frame_size &= ~1; 1444 frame_size /= 2;
1555 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1); 1445 memset(&skb->data[frame_size], 0xAA, frame_size - 1);
1556 memset(&skb->data[frame_size / 2 + 10], 0xBE, 1); 1446 memset(&skb->data[frame_size + 10], 0xBE, 1);
1557 memset(&skb->data[frame_size / 2 + 12], 0xAF, 1); 1447 memset(&skb->data[frame_size + 12], 0xAF, 1);
1558} 1448}
1559 1449
1560static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size) 1450static int igb_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
1561{ 1451{
1562 frame_size &= ~1; 1452 frame_size /= 2;
1563 if (*(skb->data + 3) == 0xFF) 1453 if (*(skb->data + 3) == 0xFF) {
1564 if ((*(skb->data + frame_size / 2 + 10) == 0xBE) && 1454 if ((*(skb->data + frame_size + 10) == 0xBE) &&
1565 (*(skb->data + frame_size / 2 + 12) == 0xAF)) 1455 (*(skb->data + frame_size + 12) == 0xAF)) {
1566 return 0; 1456 return 0;
1457 }
1458 }
1567 return 13; 1459 return 13;
1568} 1460}
1569 1461
1462static int igb_clean_test_rings(struct igb_ring *rx_ring,
1463 struct igb_ring *tx_ring,
1464 unsigned int size)
1465{
1466 union e1000_adv_rx_desc *rx_desc;
1467 struct igb_buffer *buffer_info;
1468 int rx_ntc, tx_ntc, count = 0;
1469 u32 staterr;
1470
1471 /* initialize next to clean and descriptor values */
1472 rx_ntc = rx_ring->next_to_clean;
1473 tx_ntc = tx_ring->next_to_clean;
1474 rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
1475 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1476
1477 while (staterr & E1000_RXD_STAT_DD) {
1478 /* check rx buffer */
1479 buffer_info = &rx_ring->buffer_info[rx_ntc];
1480
1481 /* unmap rx buffer, will be remapped by alloc_rx_buffers */
1482 pci_unmap_single(rx_ring->pdev,
1483 buffer_info->dma,
1484 rx_ring->rx_buffer_len,
1485 PCI_DMA_FROMDEVICE);
1486 buffer_info->dma = 0;
1487
1488 /* verify contents of skb */
1489 if (!igb_check_lbtest_frame(buffer_info->skb, size))
1490 count++;
1491
1492 /* unmap buffer on tx side */
1493 buffer_info = &tx_ring->buffer_info[tx_ntc];
1494 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
1495
1496 /* increment rx/tx next to clean counters */
1497 rx_ntc++;
1498 if (rx_ntc == rx_ring->count)
1499 rx_ntc = 0;
1500 tx_ntc++;
1501 if (tx_ntc == tx_ring->count)
1502 tx_ntc = 0;
1503
1504 /* fetch next descriptor */
1505 rx_desc = E1000_RX_DESC_ADV(*rx_ring, rx_ntc);
1506 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
1507 }
1508
1509 /* re-map buffers to ring, store next to clean values */
1510 igb_alloc_rx_buffers_adv(rx_ring, count);
1511 rx_ring->next_to_clean = rx_ntc;
1512 tx_ring->next_to_clean = tx_ntc;
1513
1514 return count;
1515}
1516
1570static int igb_run_loopback_test(struct igb_adapter *adapter) 1517static int igb_run_loopback_test(struct igb_adapter *adapter)
1571{ 1518{
1572 struct e1000_hw *hw = &adapter->hw;
1573 struct igb_ring *tx_ring = &adapter->test_tx_ring; 1519 struct igb_ring *tx_ring = &adapter->test_tx_ring;
1574 struct igb_ring *rx_ring = &adapter->test_rx_ring; 1520 struct igb_ring *rx_ring = &adapter->test_rx_ring;
1575 struct pci_dev *pdev = adapter->pdev; 1521 int i, j, lc, good_cnt, ret_val = 0;
1576 int i, j, k, l, lc, good_cnt; 1522 unsigned int size = 1024;
1577 int ret_val = 0; 1523 netdev_tx_t tx_ret_val;
1578 unsigned long time; 1524 struct sk_buff *skb;
1579 1525
1580 wr32(E1000_RDT(0), rx_ring->count - 1); 1526 /* allocate test skb */
1527 skb = alloc_skb(size, GFP_KERNEL);
1528 if (!skb)
1529 return 11;
1581 1530
1582 /* Calculate the loop count based on the largest descriptor ring 1531 /* place data into test skb */
1532 igb_create_lbtest_frame(skb, size);
1533 skb_put(skb, size);
1534
1535 /*
1536 * Calculate the loop count based on the largest descriptor ring
1583 * The idea is to wrap the largest ring a number of times using 64 1537 * The idea is to wrap the largest ring a number of times using 64
1584 * send/receive pairs during each loop 1538 * send/receive pairs during each loop
1585 */ 1539 */
@@ -1589,50 +1543,36 @@ static int igb_run_loopback_test(struct igb_adapter *adapter)
1589 else 1543 else
1590 lc = ((rx_ring->count / 64) * 2) + 1; 1544 lc = ((rx_ring->count / 64) * 2) + 1;
1591 1545
1592 k = l = 0;
1593 for (j = 0; j <= lc; j++) { /* loop count loop */ 1546 for (j = 0; j <= lc; j++) { /* loop count loop */
1594 for (i = 0; i < 64; i++) { /* send the packets */ 1547 /* reset count of good packets */
1595 igb_create_lbtest_frame(tx_ring->buffer_info[k].skb,
1596 1024);
1597 pci_dma_sync_single_for_device(pdev,
1598 tx_ring->buffer_info[k].dma,
1599 tx_ring->buffer_info[k].length,
1600 PCI_DMA_TODEVICE);
1601 k++;
1602 if (k == tx_ring->count)
1603 k = 0;
1604 }
1605 wr32(E1000_TDT(0), k);
1606 msleep(200);
1607 time = jiffies; /* set the start time for the receive */
1608 good_cnt = 0; 1548 good_cnt = 0;
1609 do { /* receive the sent packets */ 1549
1610 pci_dma_sync_single_for_cpu(pdev, 1550 /* place 64 packets on the transmit queue*/
1611 rx_ring->buffer_info[l].dma, 1551 for (i = 0; i < 64; i++) {
1612 IGB_RXBUFFER_2048, 1552 skb_get(skb);
1613 PCI_DMA_FROMDEVICE); 1553 tx_ret_val = igb_xmit_frame_ring_adv(skb, tx_ring);
1614 1554 if (tx_ret_val == NETDEV_TX_OK)
1615 ret_val = igb_check_lbtest_frame(
1616 rx_ring->buffer_info[l].skb, 1024);
1617 if (!ret_val)
1618 good_cnt++; 1555 good_cnt++;
1619 l++; 1556 }
1620 if (l == rx_ring->count) 1557
1621 l = 0;
1622 /* time + 20 msecs (200 msecs on 2.4) is more than
1623 * enough time to complete the receives, if it's
1624 * exceeded, break and error off
1625 */
1626 } while (good_cnt < 64 && jiffies < (time + 20));
1627 if (good_cnt != 64) { 1558 if (good_cnt != 64) {
1628 ret_val = 13; /* ret_val is the same as mis-compare */ 1559 ret_val = 12;
1629 break; 1560 break;
1630 } 1561 }
1631 if (jiffies >= (time + 20)) { 1562
1632 ret_val = 14; /* error code for time out error */ 1563 /* allow 200 milliseconds for packets to go from tx to rx */
1564 msleep(200);
1565
1566 good_cnt = igb_clean_test_rings(rx_ring, tx_ring, size);
1567 if (good_cnt != 64) {
1568 ret_val = 13;
1633 break; 1569 break;
1634 } 1570 }
1635 } /* end loop count loop */ 1571 } /* end loop count loop */
1572
1573 /* free the original skb */
1574 kfree_skb(skb);
1575
1636 return ret_val; 1576 return ret_val;
1637} 1577}
1638 1578
@@ -1685,8 +1625,7 @@ static int igb_link_test(struct igb_adapter *adapter, u64 *data)
1685 if (hw->mac.autoneg) 1625 if (hw->mac.autoneg)
1686 msleep(4000); 1626 msleep(4000);
1687 1627
1688 if (!(rd32(E1000_STATUS) & 1628 if (!(rd32(E1000_STATUS) & E1000_STATUS_LU))
1689 E1000_STATUS_LU))
1690 *data = 1; 1629 *data = 1;
1691 } 1630 }
1692 return *data; 1631 return *data;
@@ -1868,7 +1807,6 @@ static int igb_set_wol(struct net_device *netdev, struct ethtool_wolinfo *wol)
1868 adapter->wol |= E1000_WUFC_BC; 1807 adapter->wol |= E1000_WUFC_BC;
1869 if (wol->wolopts & WAKE_MAGIC) 1808 if (wol->wolopts & WAKE_MAGIC)
1870 adapter->wol |= E1000_WUFC_MAG; 1809 adapter->wol |= E1000_WUFC_MAG;
1871
1872 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol); 1810 device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
1873 1811
1874 return 0; 1812 return 0;
@@ -1881,12 +1819,19 @@ static int igb_phys_id(struct net_device *netdev, u32 data)
1881{ 1819{
1882 struct igb_adapter *adapter = netdev_priv(netdev); 1820 struct igb_adapter *adapter = netdev_priv(netdev);
1883 struct e1000_hw *hw = &adapter->hw; 1821 struct e1000_hw *hw = &adapter->hw;
1822 unsigned long timeout;
1884 1823
1885 if (!data || data > (u32)(MAX_SCHEDULE_TIMEOUT / HZ)) 1824 timeout = data * 1000;
1886 data = (u32)(MAX_SCHEDULE_TIMEOUT / HZ); 1825
1826 /*
1827 * msleep_interruptable only accepts unsigned int so we are limited
1828 * in how long a duration we can wait
1829 */
1830 if (!timeout || timeout > UINT_MAX)
1831 timeout = UINT_MAX;
1887 1832
1888 igb_blink_led(hw); 1833 igb_blink_led(hw);
1889 msleep_interruptible(data * 1000); 1834 msleep_interruptible(timeout);
1890 1835
1891 igb_led_off(hw); 1836 igb_led_off(hw);
1892 clear_bit(IGB_LED_ON, &adapter->led_status); 1837 clear_bit(IGB_LED_ON, &adapter->led_status);
@@ -1899,7 +1844,6 @@ static int igb_set_coalesce(struct net_device *netdev,
1899 struct ethtool_coalesce *ec) 1844 struct ethtool_coalesce *ec)
1900{ 1845{
1901 struct igb_adapter *adapter = netdev_priv(netdev); 1846 struct igb_adapter *adapter = netdev_priv(netdev);
1902 struct e1000_hw *hw = &adapter->hw;
1903 int i; 1847 int i;
1904 1848
1905 if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) || 1849 if ((ec->rx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
@@ -1908,17 +1852,39 @@ static int igb_set_coalesce(struct net_device *netdev,
1908 (ec->rx_coalesce_usecs == 2)) 1852 (ec->rx_coalesce_usecs == 2))
1909 return -EINVAL; 1853 return -EINVAL;
1910 1854
1855 if ((ec->tx_coalesce_usecs > IGB_MAX_ITR_USECS) ||
1856 ((ec->tx_coalesce_usecs > 3) &&
1857 (ec->tx_coalesce_usecs < IGB_MIN_ITR_USECS)) ||
1858 (ec->tx_coalesce_usecs == 2))
1859 return -EINVAL;
1860
1861 if ((adapter->flags & IGB_FLAG_QUEUE_PAIRS) && ec->tx_coalesce_usecs)
1862 return -EINVAL;
1863
1911 /* convert to rate of irq's per second */ 1864 /* convert to rate of irq's per second */
1912 if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3) { 1865 if (ec->rx_coalesce_usecs && ec->rx_coalesce_usecs <= 3)
1913 adapter->itr_setting = ec->rx_coalesce_usecs; 1866 adapter->rx_itr_setting = ec->rx_coalesce_usecs;
1914 adapter->itr = IGB_START_ITR; 1867 else
1915 } else { 1868 adapter->rx_itr_setting = ec->rx_coalesce_usecs << 2;
1916 adapter->itr_setting = ec->rx_coalesce_usecs << 2;
1917 adapter->itr = adapter->itr_setting;
1918 }
1919 1869
1920 for (i = 0; i < adapter->num_rx_queues; i++) 1870 /* convert to rate of irq's per second */
1921 wr32(adapter->rx_ring[i].itr_register, adapter->itr); 1871 if (adapter->flags & IGB_FLAG_QUEUE_PAIRS)
1872 adapter->tx_itr_setting = adapter->rx_itr_setting;
1873 else if (ec->tx_coalesce_usecs && ec->tx_coalesce_usecs <= 3)
1874 adapter->tx_itr_setting = ec->tx_coalesce_usecs;
1875 else
1876 adapter->tx_itr_setting = ec->tx_coalesce_usecs << 2;
1877
1878 for (i = 0; i < adapter->num_q_vectors; i++) {
1879 struct igb_q_vector *q_vector = adapter->q_vector[i];
1880 if (q_vector->rx_ring)
1881 q_vector->itr_val = adapter->rx_itr_setting;
1882 else
1883 q_vector->itr_val = adapter->tx_itr_setting;
1884 if (q_vector->itr_val && q_vector->itr_val <= 3)
1885 q_vector->itr_val = IGB_START_ITR;
1886 q_vector->set_itr = 1;
1887 }
1922 1888
1923 return 0; 1889 return 0;
1924} 1890}
@@ -1928,15 +1894,21 @@ static int igb_get_coalesce(struct net_device *netdev,
1928{ 1894{
1929 struct igb_adapter *adapter = netdev_priv(netdev); 1895 struct igb_adapter *adapter = netdev_priv(netdev);
1930 1896
1931 if (adapter->itr_setting <= 3) 1897 if (adapter->rx_itr_setting <= 3)
1932 ec->rx_coalesce_usecs = adapter->itr_setting; 1898 ec->rx_coalesce_usecs = adapter->rx_itr_setting;
1933 else 1899 else
1934 ec->rx_coalesce_usecs = adapter->itr_setting >> 2; 1900 ec->rx_coalesce_usecs = adapter->rx_itr_setting >> 2;
1901
1902 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS)) {
1903 if (adapter->tx_itr_setting <= 3)
1904 ec->tx_coalesce_usecs = adapter->tx_itr_setting;
1905 else
1906 ec->tx_coalesce_usecs = adapter->tx_itr_setting >> 2;
1907 }
1935 1908
1936 return 0; 1909 return 0;
1937} 1910}
1938 1911
1939
1940static int igb_nway_reset(struct net_device *netdev) 1912static int igb_nway_reset(struct net_device *netdev)
1941{ 1913{
1942 struct igb_adapter *adapter = netdev_priv(netdev); 1914 struct igb_adapter *adapter = netdev_priv(netdev);
@@ -1961,31 +1933,32 @@ static void igb_get_ethtool_stats(struct net_device *netdev,
1961 struct ethtool_stats *stats, u64 *data) 1933 struct ethtool_stats *stats, u64 *data)
1962{ 1934{
1963 struct igb_adapter *adapter = netdev_priv(netdev); 1935 struct igb_adapter *adapter = netdev_priv(netdev);
1936 struct net_device_stats *net_stats = &netdev->stats;
1964 u64 *queue_stat; 1937 u64 *queue_stat;
1965 int stat_count_tx = sizeof(struct igb_tx_queue_stats) / sizeof(u64); 1938 int i, j, k;
1966 int stat_count_rx = sizeof(struct igb_rx_queue_stats) / sizeof(u64); 1939 char *p;
1967 int j;
1968 int i;
1969 1940
1970 igb_update_stats(adapter); 1941 igb_update_stats(adapter);
1942
1971 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) { 1943 for (i = 0; i < IGB_GLOBAL_STATS_LEN; i++) {
1972 char *p = (char *)adapter+igb_gstrings_stats[i].stat_offset; 1944 p = (char *)adapter + igb_gstrings_stats[i].stat_offset;
1973 data[i] = (igb_gstrings_stats[i].sizeof_stat == 1945 data[i] = (igb_gstrings_stats[i].sizeof_stat ==
1974 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 1946 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1975 } 1947 }
1948 for (j = 0; j < IGB_NETDEV_STATS_LEN; j++, i++) {
1949 p = (char *)net_stats + igb_gstrings_net_stats[j].stat_offset;
1950 data[i] = (igb_gstrings_net_stats[j].sizeof_stat ==
1951 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
1952 }
1976 for (j = 0; j < adapter->num_tx_queues; j++) { 1953 for (j = 0; j < adapter->num_tx_queues; j++) {
1977 int k;
1978 queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats; 1954 queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats;
1979 for (k = 0; k < stat_count_tx; k++) 1955 for (k = 0; k < IGB_TX_QUEUE_STATS_LEN; k++, i++)
1980 data[i + k] = queue_stat[k]; 1956 data[i] = queue_stat[k];
1981 i += k;
1982 } 1957 }
1983 for (j = 0; j < adapter->num_rx_queues; j++) { 1958 for (j = 0; j < adapter->num_rx_queues; j++) {
1984 int k;
1985 queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats; 1959 queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
1986 for (k = 0; k < stat_count_rx; k++) 1960 for (k = 0; k < IGB_RX_QUEUE_STATS_LEN; k++, i++)
1987 data[i + k] = queue_stat[k]; 1961 data[i] = queue_stat[k];
1988 i += k;
1989 } 1962 }
1990} 1963}
1991 1964
@@ -2006,11 +1979,18 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2006 ETH_GSTRING_LEN); 1979 ETH_GSTRING_LEN);
2007 p += ETH_GSTRING_LEN; 1980 p += ETH_GSTRING_LEN;
2008 } 1981 }
1982 for (i = 0; i < IGB_NETDEV_STATS_LEN; i++) {
1983 memcpy(p, igb_gstrings_net_stats[i].stat_string,
1984 ETH_GSTRING_LEN);
1985 p += ETH_GSTRING_LEN;
1986 }
2009 for (i = 0; i < adapter->num_tx_queues; i++) { 1987 for (i = 0; i < adapter->num_tx_queues; i++) {
2010 sprintf(p, "tx_queue_%u_packets", i); 1988 sprintf(p, "tx_queue_%u_packets", i);
2011 p += ETH_GSTRING_LEN; 1989 p += ETH_GSTRING_LEN;
2012 sprintf(p, "tx_queue_%u_bytes", i); 1990 sprintf(p, "tx_queue_%u_bytes", i);
2013 p += ETH_GSTRING_LEN; 1991 p += ETH_GSTRING_LEN;
1992 sprintf(p, "tx_queue_%u_restart", i);
1993 p += ETH_GSTRING_LEN;
2014 } 1994 }
2015 for (i = 0; i < adapter->num_rx_queues; i++) { 1995 for (i = 0; i < adapter->num_rx_queues; i++) {
2016 sprintf(p, "rx_queue_%u_packets", i); 1996 sprintf(p, "rx_queue_%u_packets", i);
@@ -2019,6 +1999,10 @@ static void igb_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2019 p += ETH_GSTRING_LEN; 1999 p += ETH_GSTRING_LEN;
2020 sprintf(p, "rx_queue_%u_drops", i); 2000 sprintf(p, "rx_queue_%u_drops", i);
2021 p += ETH_GSTRING_LEN; 2001 p += ETH_GSTRING_LEN;
2002 sprintf(p, "rx_queue_%u_csum_err", i);
2003 p += ETH_GSTRING_LEN;
2004 sprintf(p, "rx_queue_%u_alloc_failed", i);
2005 p += ETH_GSTRING_LEN;
2022 } 2006 }
2023/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */ 2007/* BUG_ON(p - data != IGB_STATS_LEN * ETH_GSTRING_LEN); */
2024 break; 2008 break;
diff --git a/drivers/net/igb/igb_main.c b/drivers/net/igb/igb_main.c
index 714c3a4a44ef..0cab5e2b0894 100644
--- a/drivers/net/igb/igb_main.c
+++ b/drivers/net/igb/igb_main.c
@@ -63,6 +63,7 @@ static const struct e1000_info *igb_info_tbl[] = {
63static struct pci_device_id igb_pci_tbl[] = { 63static struct pci_device_id igb_pci_tbl[] = {
64 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 }, 64 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 }, 65 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
66 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 }, 67 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
67 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 }, 68 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
68 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 }, 69 { PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
@@ -81,6 +82,7 @@ static int igb_setup_all_tx_resources(struct igb_adapter *);
81static int igb_setup_all_rx_resources(struct igb_adapter *); 82static int igb_setup_all_rx_resources(struct igb_adapter *);
82static void igb_free_all_tx_resources(struct igb_adapter *); 83static void igb_free_all_tx_resources(struct igb_adapter *);
83static void igb_free_all_rx_resources(struct igb_adapter *); 84static void igb_free_all_rx_resources(struct igb_adapter *);
85static void igb_setup_mrqc(struct igb_adapter *);
84void igb_update_stats(struct igb_adapter *); 86void igb_update_stats(struct igb_adapter *);
85static int igb_probe(struct pci_dev *, const struct pci_device_id *); 87static int igb_probe(struct pci_dev *, const struct pci_device_id *);
86static void __devexit igb_remove(struct pci_dev *pdev); 88static void __devexit igb_remove(struct pci_dev *pdev);
@@ -89,7 +91,6 @@ static int igb_open(struct net_device *);
89static int igb_close(struct net_device *); 91static int igb_close(struct net_device *);
90static void igb_configure_tx(struct igb_adapter *); 92static void igb_configure_tx(struct igb_adapter *);
91static void igb_configure_rx(struct igb_adapter *); 93static void igb_configure_rx(struct igb_adapter *);
92static void igb_setup_rctl(struct igb_adapter *);
93static void igb_clean_all_tx_rings(struct igb_adapter *); 94static void igb_clean_all_tx_rings(struct igb_adapter *);
94static void igb_clean_all_rx_rings(struct igb_adapter *); 95static void igb_clean_all_rx_rings(struct igb_adapter *);
95static void igb_clean_tx_ring(struct igb_ring *); 96static void igb_clean_tx_ring(struct igb_ring *);
@@ -98,28 +99,22 @@ static void igb_set_rx_mode(struct net_device *);
98static void igb_update_phy_info(unsigned long); 99static void igb_update_phy_info(unsigned long);
99static void igb_watchdog(unsigned long); 100static void igb_watchdog(unsigned long);
100static void igb_watchdog_task(struct work_struct *); 101static void igb_watchdog_task(struct work_struct *);
101static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *, 102static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb, struct net_device *);
102 struct net_device *,
103 struct igb_ring *);
104static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
105 struct net_device *);
106static struct net_device_stats *igb_get_stats(struct net_device *); 103static struct net_device_stats *igb_get_stats(struct net_device *);
107static int igb_change_mtu(struct net_device *, int); 104static int igb_change_mtu(struct net_device *, int);
108static int igb_set_mac(struct net_device *, void *); 105static int igb_set_mac(struct net_device *, void *);
106static void igb_set_uta(struct igb_adapter *adapter);
109static irqreturn_t igb_intr(int irq, void *); 107static irqreturn_t igb_intr(int irq, void *);
110static irqreturn_t igb_intr_msi(int irq, void *); 108static irqreturn_t igb_intr_msi(int irq, void *);
111static irqreturn_t igb_msix_other(int irq, void *); 109static irqreturn_t igb_msix_other(int irq, void *);
112static irqreturn_t igb_msix_rx(int irq, void *); 110static irqreturn_t igb_msix_ring(int irq, void *);
113static irqreturn_t igb_msix_tx(int irq, void *);
114#ifdef CONFIG_IGB_DCA 111#ifdef CONFIG_IGB_DCA
115static void igb_update_rx_dca(struct igb_ring *); 112static void igb_update_dca(struct igb_q_vector *);
116static void igb_update_tx_dca(struct igb_ring *);
117static void igb_setup_dca(struct igb_adapter *); 113static void igb_setup_dca(struct igb_adapter *);
118#endif /* CONFIG_IGB_DCA */ 114#endif /* CONFIG_IGB_DCA */
119static bool igb_clean_tx_irq(struct igb_ring *); 115static bool igb_clean_tx_irq(struct igb_q_vector *);
120static int igb_poll(struct napi_struct *, int); 116static int igb_poll(struct napi_struct *, int);
121static bool igb_clean_rx_irq_adv(struct igb_ring *, int *, int); 117static bool igb_clean_rx_irq_adv(struct igb_q_vector *, int *, int);
122static void igb_alloc_rx_buffers_adv(struct igb_ring *, int);
123static int igb_ioctl(struct net_device *, struct ifreq *, int cmd); 118static int igb_ioctl(struct net_device *, struct ifreq *, int cmd);
124static void igb_tx_timeout(struct net_device *); 119static void igb_tx_timeout(struct net_device *);
125static void igb_reset_task(struct work_struct *); 120static void igb_reset_task(struct work_struct *);
@@ -127,57 +122,13 @@ static void igb_vlan_rx_register(struct net_device *, struct vlan_group *);
127static void igb_vlan_rx_add_vid(struct net_device *, u16); 122static void igb_vlan_rx_add_vid(struct net_device *, u16);
128static void igb_vlan_rx_kill_vid(struct net_device *, u16); 123static void igb_vlan_rx_kill_vid(struct net_device *, u16);
129static void igb_restore_vlan(struct igb_adapter *); 124static void igb_restore_vlan(struct igb_adapter *);
125static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32 , u8);
130static void igb_ping_all_vfs(struct igb_adapter *); 126static void igb_ping_all_vfs(struct igb_adapter *);
131static void igb_msg_task(struct igb_adapter *); 127static void igb_msg_task(struct igb_adapter *);
132static int igb_rcv_msg_from_vf(struct igb_adapter *, u32);
133static inline void igb_set_rah_pool(struct e1000_hw *, int , int);
134static void igb_vmm_control(struct igb_adapter *); 128static void igb_vmm_control(struct igb_adapter *);
135static int igb_set_vf_mac(struct igb_adapter *adapter, int, unsigned char *); 129static int igb_set_vf_mac(struct igb_adapter *, int, unsigned char *);
136static void igb_restore_vf_multicasts(struct igb_adapter *adapter); 130static void igb_restore_vf_multicasts(struct igb_adapter *adapter);
137 131
138static inline void igb_set_vmolr(struct e1000_hw *hw, int vfn)
139{
140 u32 reg_data;
141
142 reg_data = rd32(E1000_VMOLR(vfn));
143 reg_data |= E1000_VMOLR_BAM | /* Accept broadcast */
144 E1000_VMOLR_ROPE | /* Accept packets matched in UTA */
145 E1000_VMOLR_ROMPE | /* Accept packets matched in MTA */
146 E1000_VMOLR_AUPE | /* Accept untagged packets */
147 E1000_VMOLR_STRVLAN; /* Strip vlan tags */
148 wr32(E1000_VMOLR(vfn), reg_data);
149}
150
151static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
152 int vfn)
153{
154 struct e1000_hw *hw = &adapter->hw;
155 u32 vmolr;
156
157 /* if it isn't the PF check to see if VFs are enabled and
158 * increase the size to support vlan tags */
159 if (vfn < adapter->vfs_allocated_count &&
160 adapter->vf_data[vfn].vlans_enabled)
161 size += VLAN_TAG_SIZE;
162
163 vmolr = rd32(E1000_VMOLR(vfn));
164 vmolr &= ~E1000_VMOLR_RLPML_MASK;
165 vmolr |= size | E1000_VMOLR_LPE;
166 wr32(E1000_VMOLR(vfn), vmolr);
167
168 return 0;
169}
170
171static inline void igb_set_rah_pool(struct e1000_hw *hw, int pool, int entry)
172{
173 u32 reg_data;
174
175 reg_data = rd32(E1000_RAH(entry));
176 reg_data &= ~E1000_RAH_POOL_MASK;
177 reg_data |= E1000_RAH_POOL_1 << pool;;
178 wr32(E1000_RAH(entry), reg_data);
179}
180
181#ifdef CONFIG_PM 132#ifdef CONFIG_PM
182static int igb_suspend(struct pci_dev *, pm_message_t); 133static int igb_suspend(struct pci_dev *, pm_message_t);
183static int igb_resume(struct pci_dev *); 134static int igb_resume(struct pci_dev *);
@@ -228,46 +179,12 @@ static struct pci_driver igb_driver = {
228 .err_handler = &igb_err_handler 179 .err_handler = &igb_err_handler
229}; 180};
230 181
231static int global_quad_port_a; /* global quad port a indication */
232
233MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>"); 182MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
234MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver"); 183MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
235MODULE_LICENSE("GPL"); 184MODULE_LICENSE("GPL");
236MODULE_VERSION(DRV_VERSION); 185MODULE_VERSION(DRV_VERSION);
237 186
238/** 187/**
239 * Scale the NIC clock cycle by a large factor so that
240 * relatively small clock corrections can be added or
241 * substracted at each clock tick. The drawbacks of a
242 * large factor are a) that the clock register overflows
243 * more quickly (not such a big deal) and b) that the
244 * increment per tick has to fit into 24 bits.
245 *
246 * Note that
247 * TIMINCA = IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS *
248 * IGB_TSYNC_SCALE
249 * TIMINCA += TIMINCA * adjustment [ppm] / 1e9
250 *
251 * The base scale factor is intentionally a power of two
252 * so that the division in %struct timecounter can be done with
253 * a shift.
254 */
255#define IGB_TSYNC_SHIFT (19)
256#define IGB_TSYNC_SCALE (1<<IGB_TSYNC_SHIFT)
257
258/**
259 * The duration of one clock cycle of the NIC.
260 *
261 * @todo This hard-coded value is part of the specification and might change
262 * in future hardware revisions. Add revision check.
263 */
264#define IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS 16
265
266#if (IGB_TSYNC_SCALE * IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS) >= (1<<24)
267# error IGB_TSYNC_SCALE and/or IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS are too large to fit into TIMINCA
268#endif
269
270/**
271 * igb_read_clock - read raw cycle counter (to be used by time counter) 188 * igb_read_clock - read raw cycle counter (to be used by time counter)
272 */ 189 */
273static cycle_t igb_read_clock(const struct cyclecounter *tc) 190static cycle_t igb_read_clock(const struct cyclecounter *tc)
@@ -275,11 +192,11 @@ static cycle_t igb_read_clock(const struct cyclecounter *tc)
275 struct igb_adapter *adapter = 192 struct igb_adapter *adapter =
276 container_of(tc, struct igb_adapter, cycles); 193 container_of(tc, struct igb_adapter, cycles);
277 struct e1000_hw *hw = &adapter->hw; 194 struct e1000_hw *hw = &adapter->hw;
278 u64 stamp; 195 u64 stamp = 0;
279 196 int shift = 0;
280 stamp = rd32(E1000_SYSTIML);
281 stamp |= (u64)rd32(E1000_SYSTIMH) << 32ULL;
282 197
198 stamp |= (u64)rd32(E1000_SYSTIML) << shift;
199 stamp |= (u64)rd32(E1000_SYSTIMH) << (shift + 32);
283 return stamp; 200 return stamp;
284} 201}
285 202
@@ -320,17 +237,6 @@ static char *igb_get_time_str(struct igb_adapter *adapter,
320#endif 237#endif
321 238
322/** 239/**
323 * igb_desc_unused - calculate if we have unused descriptors
324 **/
325static int igb_desc_unused(struct igb_ring *ring)
326{
327 if (ring->next_to_clean > ring->next_to_use)
328 return ring->next_to_clean - ring->next_to_use - 1;
329
330 return ring->count + ring->next_to_clean - ring->next_to_use - 1;
331}
332
333/**
334 * igb_init_module - Driver Registration Routine 240 * igb_init_module - Driver Registration Routine
335 * 241 *
336 * igb_init_module is the first routine called when the driver is 242 * igb_init_module is the first routine called when the driver is
@@ -344,12 +250,9 @@ static int __init igb_init_module(void)
344 250
345 printk(KERN_INFO "%s\n", igb_copyright); 251 printk(KERN_INFO "%s\n", igb_copyright);
346 252
347 global_quad_port_a = 0;
348
349#ifdef CONFIG_IGB_DCA 253#ifdef CONFIG_IGB_DCA
350 dca_register_notify(&dca_notifier); 254 dca_register_notify(&dca_notifier);
351#endif 255#endif
352
353 ret = pci_register_driver(&igb_driver); 256 ret = pci_register_driver(&igb_driver);
354 return ret; 257 return ret;
355} 258}
@@ -382,8 +285,8 @@ module_exit(igb_exit_module);
382 **/ 285 **/
383static void igb_cache_ring_register(struct igb_adapter *adapter) 286static void igb_cache_ring_register(struct igb_adapter *adapter)
384{ 287{
385 int i; 288 int i = 0, j = 0;
386 unsigned int rbase_offset = adapter->vfs_allocated_count; 289 u32 rbase_offset = adapter->vfs_allocated_count;
387 290
388 switch (adapter->hw.mac.type) { 291 switch (adapter->hw.mac.type) {
389 case e1000_82576: 292 case e1000_82576:
@@ -392,23 +295,36 @@ static void igb_cache_ring_register(struct igb_adapter *adapter)
392 * In order to avoid collision we start at the first free queue 295 * In order to avoid collision we start at the first free queue
393 * and continue consuming queues in the same sequence 296 * and continue consuming queues in the same sequence
394 */ 297 */
395 for (i = 0; i < adapter->num_rx_queues; i++) 298 if (adapter->vfs_allocated_count) {
396 adapter->rx_ring[i].reg_idx = rbase_offset + 299 for (; i < adapter->rss_queues; i++)
397 Q_IDX_82576(i); 300 adapter->rx_ring[i].reg_idx = rbase_offset +
398 for (i = 0; i < adapter->num_tx_queues; i++) 301 Q_IDX_82576(i);
399 adapter->tx_ring[i].reg_idx = rbase_offset + 302 for (; j < adapter->rss_queues; j++)
400 Q_IDX_82576(i); 303 adapter->tx_ring[j].reg_idx = rbase_offset +
401 break; 304 Q_IDX_82576(j);
305 }
402 case e1000_82575: 306 case e1000_82575:
403 default: 307 default:
404 for (i = 0; i < adapter->num_rx_queues; i++) 308 for (; i < adapter->num_rx_queues; i++)
405 adapter->rx_ring[i].reg_idx = i; 309 adapter->rx_ring[i].reg_idx = rbase_offset + i;
406 for (i = 0; i < adapter->num_tx_queues; i++) 310 for (; j < adapter->num_tx_queues; j++)
407 adapter->tx_ring[i].reg_idx = i; 311 adapter->tx_ring[j].reg_idx = rbase_offset + j;
408 break; 312 break;
409 } 313 }
410} 314}
411 315
316static void igb_free_queues(struct igb_adapter *adapter)
317{
318 kfree(adapter->tx_ring);
319 kfree(adapter->rx_ring);
320
321 adapter->tx_ring = NULL;
322 adapter->rx_ring = NULL;
323
324 adapter->num_rx_queues = 0;
325 adapter->num_tx_queues = 0;
326}
327
412/** 328/**
413 * igb_alloc_queues - Allocate memory for all rings 329 * igb_alloc_queues - Allocate memory for all rings
414 * @adapter: board private structure to initialize 330 * @adapter: board private structure to initialize
@@ -423,59 +339,61 @@ static int igb_alloc_queues(struct igb_adapter *adapter)
423 adapter->tx_ring = kcalloc(adapter->num_tx_queues, 339 adapter->tx_ring = kcalloc(adapter->num_tx_queues,
424 sizeof(struct igb_ring), GFP_KERNEL); 340 sizeof(struct igb_ring), GFP_KERNEL);
425 if (!adapter->tx_ring) 341 if (!adapter->tx_ring)
426 return -ENOMEM; 342 goto err;
427 343
428 adapter->rx_ring = kcalloc(adapter->num_rx_queues, 344 adapter->rx_ring = kcalloc(adapter->num_rx_queues,
429 sizeof(struct igb_ring), GFP_KERNEL); 345 sizeof(struct igb_ring), GFP_KERNEL);
430 if (!adapter->rx_ring) { 346 if (!adapter->rx_ring)
431 kfree(adapter->tx_ring); 347 goto err;
432 return -ENOMEM;
433 }
434
435 adapter->rx_ring->buddy = adapter->tx_ring;
436 348
437 for (i = 0; i < adapter->num_tx_queues; i++) { 349 for (i = 0; i < adapter->num_tx_queues; i++) {
438 struct igb_ring *ring = &(adapter->tx_ring[i]); 350 struct igb_ring *ring = &(adapter->tx_ring[i]);
439 ring->count = adapter->tx_ring_count; 351 ring->count = adapter->tx_ring_count;
440 ring->adapter = adapter;
441 ring->queue_index = i; 352 ring->queue_index = i;
353 ring->pdev = adapter->pdev;
354 ring->netdev = adapter->netdev;
355 /* For 82575, context index must be unique per ring. */
356 if (adapter->hw.mac.type == e1000_82575)
357 ring->flags = IGB_RING_FLAG_TX_CTX_IDX;
442 } 358 }
359
443 for (i = 0; i < adapter->num_rx_queues; i++) { 360 for (i = 0; i < adapter->num_rx_queues; i++) {
444 struct igb_ring *ring = &(adapter->rx_ring[i]); 361 struct igb_ring *ring = &(adapter->rx_ring[i]);
445 ring->count = adapter->rx_ring_count; 362 ring->count = adapter->rx_ring_count;
446 ring->adapter = adapter;
447 ring->queue_index = i; 363 ring->queue_index = i;
448 ring->itr_register = E1000_ITR; 364 ring->pdev = adapter->pdev;
449 365 ring->netdev = adapter->netdev;
450 /* set a default napi handler for each rx_ring */ 366 ring->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
451 netif_napi_add(adapter->netdev, &ring->napi, igb_poll, 64); 367 ring->flags = IGB_RING_FLAG_RX_CSUM; /* enable rx checksum */
368 /* set flag indicating ring supports SCTP checksum offload */
369 if (adapter->hw.mac.type >= e1000_82576)
370 ring->flags |= IGB_RING_FLAG_RX_SCTP_CSUM;
452 } 371 }
453 372
454 igb_cache_ring_register(adapter); 373 igb_cache_ring_register(adapter);
455 return 0;
456}
457
458static void igb_free_queues(struct igb_adapter *adapter)
459{
460 int i;
461 374
462 for (i = 0; i < adapter->num_rx_queues; i++) 375 return 0;
463 netif_napi_del(&adapter->rx_ring[i].napi);
464 376
465 adapter->num_rx_queues = 0; 377err:
466 adapter->num_tx_queues = 0; 378 igb_free_queues(adapter);
467 379
468 kfree(adapter->tx_ring); 380 return -ENOMEM;
469 kfree(adapter->rx_ring);
470} 381}
471 382
472#define IGB_N0_QUEUE -1 383#define IGB_N0_QUEUE -1
473static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue, 384static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
474 int tx_queue, int msix_vector)
475{ 385{
476 u32 msixbm = 0; 386 u32 msixbm = 0;
387 struct igb_adapter *adapter = q_vector->adapter;
477 struct e1000_hw *hw = &adapter->hw; 388 struct e1000_hw *hw = &adapter->hw;
478 u32 ivar, index; 389 u32 ivar, index;
390 int rx_queue = IGB_N0_QUEUE;
391 int tx_queue = IGB_N0_QUEUE;
392
393 if (q_vector->rx_ring)
394 rx_queue = q_vector->rx_ring->reg_idx;
395 if (q_vector->tx_ring)
396 tx_queue = q_vector->tx_ring->reg_idx;
479 397
480 switch (hw->mac.type) { 398 switch (hw->mac.type) {
481 case e1000_82575: 399 case e1000_82575:
@@ -483,16 +401,12 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
483 bitmask for the EICR/EIMS/EIMC registers. To assign one 401 bitmask for the EICR/EIMS/EIMC registers. To assign one
484 or more queues to a vector, we write the appropriate bits 402 or more queues to a vector, we write the appropriate bits
485 into the MSIXBM register for that vector. */ 403 into the MSIXBM register for that vector. */
486 if (rx_queue > IGB_N0_QUEUE) { 404 if (rx_queue > IGB_N0_QUEUE)
487 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue; 405 msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
488 adapter->rx_ring[rx_queue].eims_value = msixbm; 406 if (tx_queue > IGB_N0_QUEUE)
489 }
490 if (tx_queue > IGB_N0_QUEUE) {
491 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue; 407 msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
492 adapter->tx_ring[tx_queue].eims_value =
493 E1000_EICR_TX_QUEUE0 << tx_queue;
494 }
495 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm); 408 array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
409 q_vector->eims_value = msixbm;
496 break; 410 break;
497 case e1000_82576: 411 case e1000_82576:
498 /* 82576 uses a table-based method for assigning vectors. 412 /* 82576 uses a table-based method for assigning vectors.
@@ -500,35 +414,34 @@ static void igb_assign_vector(struct igb_adapter *adapter, int rx_queue,
500 a vector number along with a "valid" bit. Sadly, the layout 414 a vector number along with a "valid" bit. Sadly, the layout
501 of the table is somewhat counterintuitive. */ 415 of the table is somewhat counterintuitive. */
502 if (rx_queue > IGB_N0_QUEUE) { 416 if (rx_queue > IGB_N0_QUEUE) {
503 index = (rx_queue >> 1) + adapter->vfs_allocated_count; 417 index = (rx_queue & 0x7);
504 ivar = array_rd32(E1000_IVAR0, index); 418 ivar = array_rd32(E1000_IVAR0, index);
505 if (rx_queue & 0x1) { 419 if (rx_queue < 8) {
506 /* vector goes into third byte of register */
507 ivar = ivar & 0xFF00FFFF;
508 ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
509 } else {
510 /* vector goes into low byte of register */ 420 /* vector goes into low byte of register */
511 ivar = ivar & 0xFFFFFF00; 421 ivar = ivar & 0xFFFFFF00;
512 ivar |= msix_vector | E1000_IVAR_VALID; 422 ivar |= msix_vector | E1000_IVAR_VALID;
423 } else {
424 /* vector goes into third byte of register */
425 ivar = ivar & 0xFF00FFFF;
426 ivar |= (msix_vector | E1000_IVAR_VALID) << 16;
513 } 427 }
514 adapter->rx_ring[rx_queue].eims_value= 1 << msix_vector;
515 array_wr32(E1000_IVAR0, index, ivar); 428 array_wr32(E1000_IVAR0, index, ivar);
516 } 429 }
517 if (tx_queue > IGB_N0_QUEUE) { 430 if (tx_queue > IGB_N0_QUEUE) {
518 index = (tx_queue >> 1) + adapter->vfs_allocated_count; 431 index = (tx_queue & 0x7);
519 ivar = array_rd32(E1000_IVAR0, index); 432 ivar = array_rd32(E1000_IVAR0, index);
520 if (tx_queue & 0x1) { 433 if (tx_queue < 8) {
521 /* vector goes into high byte of register */
522 ivar = ivar & 0x00FFFFFF;
523 ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
524 } else {
525 /* vector goes into second byte of register */ 434 /* vector goes into second byte of register */
526 ivar = ivar & 0xFFFF00FF; 435 ivar = ivar & 0xFFFF00FF;
527 ivar |= (msix_vector | E1000_IVAR_VALID) << 8; 436 ivar |= (msix_vector | E1000_IVAR_VALID) << 8;
437 } else {
438 /* vector goes into high byte of register */
439 ivar = ivar & 0x00FFFFFF;
440 ivar |= (msix_vector | E1000_IVAR_VALID) << 24;
528 } 441 }
529 adapter->tx_ring[tx_queue].eims_value= 1 << msix_vector;
530 array_wr32(E1000_IVAR0, index, ivar); 442 array_wr32(E1000_IVAR0, index, ivar);
531 } 443 }
444 q_vector->eims_value = 1 << msix_vector;
532 break; 445 break;
533 default: 446 default:
534 BUG(); 447 BUG();
@@ -549,43 +462,10 @@ static void igb_configure_msix(struct igb_adapter *adapter)
549 struct e1000_hw *hw = &adapter->hw; 462 struct e1000_hw *hw = &adapter->hw;
550 463
551 adapter->eims_enable_mask = 0; 464 adapter->eims_enable_mask = 0;
552 if (hw->mac.type == e1000_82576)
553 /* Turn on MSI-X capability first, or our settings
554 * won't stick. And it will take days to debug. */
555 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
556 E1000_GPIE_PBA | E1000_GPIE_EIAME |
557 E1000_GPIE_NSICR);
558
559 for (i = 0; i < adapter->num_tx_queues; i++) {
560 struct igb_ring *tx_ring = &adapter->tx_ring[i];
561 igb_assign_vector(adapter, IGB_N0_QUEUE, i, vector++);
562 adapter->eims_enable_mask |= tx_ring->eims_value;
563 if (tx_ring->itr_val)
564 writel(tx_ring->itr_val,
565 hw->hw_addr + tx_ring->itr_register);
566 else
567 writel(1, hw->hw_addr + tx_ring->itr_register);
568 }
569
570 for (i = 0; i < adapter->num_rx_queues; i++) {
571 struct igb_ring *rx_ring = &adapter->rx_ring[i];
572 rx_ring->buddy = NULL;
573 igb_assign_vector(adapter, i, IGB_N0_QUEUE, vector++);
574 adapter->eims_enable_mask |= rx_ring->eims_value;
575 if (rx_ring->itr_val)
576 writel(rx_ring->itr_val,
577 hw->hw_addr + rx_ring->itr_register);
578 else
579 writel(1, hw->hw_addr + rx_ring->itr_register);
580 }
581
582 465
583 /* set vector for other causes, i.e. link changes */ 466 /* set vector for other causes, i.e. link changes */
584 switch (hw->mac.type) { 467 switch (hw->mac.type) {
585 case e1000_82575: 468 case e1000_82575:
586 array_wr32(E1000_MSIXBM(0), vector++,
587 E1000_EIMS_OTHER);
588
589 tmp = rd32(E1000_CTRL_EXT); 469 tmp = rd32(E1000_CTRL_EXT);
590 /* enable MSI-X PBA support*/ 470 /* enable MSI-X PBA support*/
591 tmp |= E1000_CTRL_EXT_PBA_CLR; 471 tmp |= E1000_CTRL_EXT_PBA_CLR;
@@ -595,22 +475,40 @@ static void igb_configure_msix(struct igb_adapter *adapter)
595 tmp |= E1000_CTRL_EXT_IRCA; 475 tmp |= E1000_CTRL_EXT_IRCA;
596 476
597 wr32(E1000_CTRL_EXT, tmp); 477 wr32(E1000_CTRL_EXT, tmp);
598 adapter->eims_enable_mask |= E1000_EIMS_OTHER; 478
479 /* enable msix_other interrupt */
480 array_wr32(E1000_MSIXBM(0), vector++,
481 E1000_EIMS_OTHER);
599 adapter->eims_other = E1000_EIMS_OTHER; 482 adapter->eims_other = E1000_EIMS_OTHER;
600 483
601 break; 484 break;
602 485
603 case e1000_82576: 486 case e1000_82576:
487 /* Turn on MSI-X capability first, or our settings
488 * won't stick. And it will take days to debug. */
489 wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
490 E1000_GPIE_PBA | E1000_GPIE_EIAME |
491 E1000_GPIE_NSICR);
492
493 /* enable msix_other interrupt */
494 adapter->eims_other = 1 << vector;
604 tmp = (vector++ | E1000_IVAR_VALID) << 8; 495 tmp = (vector++ | E1000_IVAR_VALID) << 8;
605 wr32(E1000_IVAR_MISC, tmp);
606 496
607 adapter->eims_enable_mask = (1 << (vector)) - 1; 497 wr32(E1000_IVAR_MISC, tmp);
608 adapter->eims_other = 1 << (vector - 1);
609 break; 498 break;
610 default: 499 default:
611 /* do nothing, since nothing else supports MSI-X */ 500 /* do nothing, since nothing else supports MSI-X */
612 break; 501 break;
613 } /* switch (hw->mac.type) */ 502 } /* switch (hw->mac.type) */
503
504 adapter->eims_enable_mask |= adapter->eims_other;
505
506 for (i = 0; i < adapter->num_q_vectors; i++) {
507 struct igb_q_vector *q_vector = adapter->q_vector[i];
508 igb_assign_vector(q_vector, vector++);
509 adapter->eims_enable_mask |= q_vector->eims_value;
510 }
511
614 wrfl(); 512 wrfl();
615} 513}
616 514
@@ -623,43 +521,40 @@ static void igb_configure_msix(struct igb_adapter *adapter)
623static int igb_request_msix(struct igb_adapter *adapter) 521static int igb_request_msix(struct igb_adapter *adapter)
624{ 522{
625 struct net_device *netdev = adapter->netdev; 523 struct net_device *netdev = adapter->netdev;
524 struct e1000_hw *hw = &adapter->hw;
626 int i, err = 0, vector = 0; 525 int i, err = 0, vector = 0;
627 526
628 vector = 0; 527 err = request_irq(adapter->msix_entries[vector].vector,
629 528 &igb_msix_other, 0, netdev->name, adapter);
630 for (i = 0; i < adapter->num_tx_queues; i++) { 529 if (err)
631 struct igb_ring *ring = &(adapter->tx_ring[i]); 530 goto out;
632 sprintf(ring->name, "%s-tx-%d", netdev->name, i); 531 vector++;
633 err = request_irq(adapter->msix_entries[vector].vector, 532
634 &igb_msix_tx, 0, ring->name, 533 for (i = 0; i < adapter->num_q_vectors; i++) {
635 &(adapter->tx_ring[i])); 534 struct igb_q_vector *q_vector = adapter->q_vector[i];
636 if (err) 535
637 goto out; 536 q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
638 ring->itr_register = E1000_EITR(0) + (vector << 2); 537
639 ring->itr_val = 976; /* ~4000 ints/sec */ 538 if (q_vector->rx_ring && q_vector->tx_ring)
640 vector++; 539 sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
641 } 540 q_vector->rx_ring->queue_index);
642 for (i = 0; i < adapter->num_rx_queues; i++) { 541 else if (q_vector->tx_ring)
643 struct igb_ring *ring = &(adapter->rx_ring[i]); 542 sprintf(q_vector->name, "%s-tx-%u", netdev->name,
644 if (strlen(netdev->name) < (IFNAMSIZ - 5)) 543 q_vector->tx_ring->queue_index);
645 sprintf(ring->name, "%s-rx-%d", netdev->name, i); 544 else if (q_vector->rx_ring)
545 sprintf(q_vector->name, "%s-rx-%u", netdev->name,
546 q_vector->rx_ring->queue_index);
646 else 547 else
647 memcpy(ring->name, netdev->name, IFNAMSIZ); 548 sprintf(q_vector->name, "%s-unused", netdev->name);
549
648 err = request_irq(adapter->msix_entries[vector].vector, 550 err = request_irq(adapter->msix_entries[vector].vector,
649 &igb_msix_rx, 0, ring->name, 551 &igb_msix_ring, 0, q_vector->name,
650 &(adapter->rx_ring[i])); 552 q_vector);
651 if (err) 553 if (err)
652 goto out; 554 goto out;
653 ring->itr_register = E1000_EITR(0) + (vector << 2);
654 ring->itr_val = adapter->itr;
655 vector++; 555 vector++;
656 } 556 }
657 557
658 err = request_irq(adapter->msix_entries[vector].vector,
659 &igb_msix_other, 0, netdev->name, netdev);
660 if (err)
661 goto out;
662
663 igb_configure_msix(adapter); 558 igb_configure_msix(adapter);
664 return 0; 559 return 0;
665out: 560out:
@@ -672,11 +567,44 @@ static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
672 pci_disable_msix(adapter->pdev); 567 pci_disable_msix(adapter->pdev);
673 kfree(adapter->msix_entries); 568 kfree(adapter->msix_entries);
674 adapter->msix_entries = NULL; 569 adapter->msix_entries = NULL;
675 } else if (adapter->flags & IGB_FLAG_HAS_MSI) 570 } else if (adapter->flags & IGB_FLAG_HAS_MSI) {
676 pci_disable_msi(adapter->pdev); 571 pci_disable_msi(adapter->pdev);
677 return; 572 }
678} 573}
679 574
575/**
576 * igb_free_q_vectors - Free memory allocated for interrupt vectors
577 * @adapter: board private structure to initialize
578 *
579 * This function frees the memory allocated to the q_vectors. In addition if
580 * NAPI is enabled it will delete any references to the NAPI struct prior
581 * to freeing the q_vector.
582 **/
583static void igb_free_q_vectors(struct igb_adapter *adapter)
584{
585 int v_idx;
586
587 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
588 struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
589 adapter->q_vector[v_idx] = NULL;
590 netif_napi_del(&q_vector->napi);
591 kfree(q_vector);
592 }
593 adapter->num_q_vectors = 0;
594}
595
596/**
597 * igb_clear_interrupt_scheme - reset the device to a state of no interrupts
598 *
599 * This function resets the device so that it has 0 rx queues, tx queues, and
600 * MSI-X interrupts allocated.
601 */
602static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
603{
604 igb_free_queues(adapter);
605 igb_free_q_vectors(adapter);
606 igb_reset_interrupt_capability(adapter);
607}
680 608
681/** 609/**
682 * igb_set_interrupt_capability - set MSI or MSI-X if supported 610 * igb_set_interrupt_capability - set MSI or MSI-X if supported
@@ -690,11 +618,21 @@ static void igb_set_interrupt_capability(struct igb_adapter *adapter)
690 int numvecs, i; 618 int numvecs, i;
691 619
692 /* Number of supported queues. */ 620 /* Number of supported queues. */
693 /* Having more queues than CPUs doesn't make sense. */ 621 adapter->num_rx_queues = adapter->rss_queues;
694 adapter->num_rx_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus()); 622 adapter->num_tx_queues = adapter->rss_queues;
695 adapter->num_tx_queues = min_t(u32, IGB_MAX_TX_QUEUES, num_online_cpus()); 623
624 /* start with one vector for every rx queue */
625 numvecs = adapter->num_rx_queues;
626
627 /* if tx handler is seperate add 1 for every tx queue */
628 if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
629 numvecs += adapter->num_tx_queues;
696 630
697 numvecs = adapter->num_tx_queues + adapter->num_rx_queues + 1; 631 /* store the number of vectors reserved for queues */
632 adapter->num_q_vectors = numvecs;
633
634 /* add 1 vector for link status interrupts */
635 numvecs++;
698 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry), 636 adapter->msix_entries = kcalloc(numvecs, sizeof(struct msix_entry),
699 GFP_KERNEL); 637 GFP_KERNEL);
700 if (!adapter->msix_entries) 638 if (!adapter->msix_entries)
@@ -728,8 +666,12 @@ msi_only:
728 dev_info(&adapter->pdev->dev, "IOV Disabled\n"); 666 dev_info(&adapter->pdev->dev, "IOV Disabled\n");
729 } 667 }
730#endif 668#endif
669 adapter->vfs_allocated_count = 0;
670 adapter->rss_queues = 1;
671 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
731 adapter->num_rx_queues = 1; 672 adapter->num_rx_queues = 1;
732 adapter->num_tx_queues = 1; 673 adapter->num_tx_queues = 1;
674 adapter->num_q_vectors = 1;
733 if (!pci_enable_msi(adapter->pdev)) 675 if (!pci_enable_msi(adapter->pdev))
734 adapter->flags |= IGB_FLAG_HAS_MSI; 676 adapter->flags |= IGB_FLAG_HAS_MSI;
735out: 677out:
@@ -739,6 +681,143 @@ out:
739} 681}
740 682
741/** 683/**
684 * igb_alloc_q_vectors - Allocate memory for interrupt vectors
685 * @adapter: board private structure to initialize
686 *
687 * We allocate one q_vector per queue interrupt. If allocation fails we
688 * return -ENOMEM.
689 **/
690static int igb_alloc_q_vectors(struct igb_adapter *adapter)
691{
692 struct igb_q_vector *q_vector;
693 struct e1000_hw *hw = &adapter->hw;
694 int v_idx;
695
696 for (v_idx = 0; v_idx < adapter->num_q_vectors; v_idx++) {
697 q_vector = kzalloc(sizeof(struct igb_q_vector), GFP_KERNEL);
698 if (!q_vector)
699 goto err_out;
700 q_vector->adapter = adapter;
701 q_vector->itr_shift = (hw->mac.type == e1000_82575) ? 16 : 0;
702 q_vector->itr_register = hw->hw_addr + E1000_EITR(0);
703 q_vector->itr_val = IGB_START_ITR;
704 q_vector->set_itr = 1;
705 netif_napi_add(adapter->netdev, &q_vector->napi, igb_poll, 64);
706 adapter->q_vector[v_idx] = q_vector;
707 }
708 return 0;
709
710err_out:
711 while (v_idx) {
712 v_idx--;
713 q_vector = adapter->q_vector[v_idx];
714 netif_napi_del(&q_vector->napi);
715 kfree(q_vector);
716 adapter->q_vector[v_idx] = NULL;
717 }
718 return -ENOMEM;
719}
720
721static void igb_map_rx_ring_to_vector(struct igb_adapter *adapter,
722 int ring_idx, int v_idx)
723{
724 struct igb_q_vector *q_vector;
725
726 q_vector = adapter->q_vector[v_idx];
727 q_vector->rx_ring = &adapter->rx_ring[ring_idx];
728 q_vector->rx_ring->q_vector = q_vector;
729 q_vector->itr_val = adapter->rx_itr_setting;
730 if (q_vector->itr_val && q_vector->itr_val <= 3)
731 q_vector->itr_val = IGB_START_ITR;
732}
733
734static void igb_map_tx_ring_to_vector(struct igb_adapter *adapter,
735 int ring_idx, int v_idx)
736{
737 struct igb_q_vector *q_vector;
738
739 q_vector = adapter->q_vector[v_idx];
740 q_vector->tx_ring = &adapter->tx_ring[ring_idx];
741 q_vector->tx_ring->q_vector = q_vector;
742 q_vector->itr_val = adapter->tx_itr_setting;
743 if (q_vector->itr_val && q_vector->itr_val <= 3)
744 q_vector->itr_val = IGB_START_ITR;
745}
746
747/**
748 * igb_map_ring_to_vector - maps allocated queues to vectors
749 *
750 * This function maps the recently allocated queues to vectors.
751 **/
752static int igb_map_ring_to_vector(struct igb_adapter *adapter)
753{
754 int i;
755 int v_idx = 0;
756
757 if ((adapter->num_q_vectors < adapter->num_rx_queues) ||
758 (adapter->num_q_vectors < adapter->num_tx_queues))
759 return -ENOMEM;
760
761 if (adapter->num_q_vectors >=
762 (adapter->num_rx_queues + adapter->num_tx_queues)) {
763 for (i = 0; i < adapter->num_rx_queues; i++)
764 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
765 for (i = 0; i < adapter->num_tx_queues; i++)
766 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
767 } else {
768 for (i = 0; i < adapter->num_rx_queues; i++) {
769 if (i < adapter->num_tx_queues)
770 igb_map_tx_ring_to_vector(adapter, i, v_idx);
771 igb_map_rx_ring_to_vector(adapter, i, v_idx++);
772 }
773 for (; i < adapter->num_tx_queues; i++)
774 igb_map_tx_ring_to_vector(adapter, i, v_idx++);
775 }
776 return 0;
777}
778
779/**
780 * igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
781 *
782 * This function initializes the interrupts and allocates all of the queues.
783 **/
784static int igb_init_interrupt_scheme(struct igb_adapter *adapter)
785{
786 struct pci_dev *pdev = adapter->pdev;
787 int err;
788
789 igb_set_interrupt_capability(adapter);
790
791 err = igb_alloc_q_vectors(adapter);
792 if (err) {
793 dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
794 goto err_alloc_q_vectors;
795 }
796
797 err = igb_alloc_queues(adapter);
798 if (err) {
799 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
800 goto err_alloc_queues;
801 }
802
803 err = igb_map_ring_to_vector(adapter);
804 if (err) {
805 dev_err(&pdev->dev, "Invalid q_vector to ring mapping\n");
806 goto err_map_queues;
807 }
808
809
810 return 0;
811err_map_queues:
812 igb_free_queues(adapter);
813err_alloc_queues:
814 igb_free_q_vectors(adapter);
815err_alloc_q_vectors:
816 igb_reset_interrupt_capability(adapter);
817 return err;
818}
819
820/**
742 * igb_request_irq - initialize interrupts 821 * igb_request_irq - initialize interrupts
743 * 822 *
744 * Attempts to configure interrupts using the best available 823 * Attempts to configure interrupts using the best available
@@ -747,6 +826,7 @@ out:
747static int igb_request_irq(struct igb_adapter *adapter) 826static int igb_request_irq(struct igb_adapter *adapter)
748{ 827{
749 struct net_device *netdev = adapter->netdev; 828 struct net_device *netdev = adapter->netdev;
829 struct pci_dev *pdev = adapter->pdev;
750 struct e1000_hw *hw = &adapter->hw; 830 struct e1000_hw *hw = &adapter->hw;
751 int err = 0; 831 int err = 0;
752 832
@@ -755,18 +835,36 @@ static int igb_request_irq(struct igb_adapter *adapter)
755 if (!err) 835 if (!err)
756 goto request_done; 836 goto request_done;
757 /* fall back to MSI */ 837 /* fall back to MSI */
758 igb_reset_interrupt_capability(adapter); 838 igb_clear_interrupt_scheme(adapter);
759 if (!pci_enable_msi(adapter->pdev)) 839 if (!pci_enable_msi(adapter->pdev))
760 adapter->flags |= IGB_FLAG_HAS_MSI; 840 adapter->flags |= IGB_FLAG_HAS_MSI;
761 igb_free_all_tx_resources(adapter); 841 igb_free_all_tx_resources(adapter);
762 igb_free_all_rx_resources(adapter); 842 igb_free_all_rx_resources(adapter);
843 adapter->num_tx_queues = 1;
763 adapter->num_rx_queues = 1; 844 adapter->num_rx_queues = 1;
764 igb_alloc_queues(adapter); 845 adapter->num_q_vectors = 1;
846 err = igb_alloc_q_vectors(adapter);
847 if (err) {
848 dev_err(&pdev->dev,
849 "Unable to allocate memory for vectors\n");
850 goto request_done;
851 }
852 err = igb_alloc_queues(adapter);
853 if (err) {
854 dev_err(&pdev->dev,
855 "Unable to allocate memory for queues\n");
856 igb_free_q_vectors(adapter);
857 goto request_done;
858 }
859 igb_setup_all_tx_resources(adapter);
860 igb_setup_all_rx_resources(adapter);
765 } else { 861 } else {
766 switch (hw->mac.type) { 862 switch (hw->mac.type) {
767 case e1000_82575: 863 case e1000_82575:
768 wr32(E1000_MSIXBM(0), 864 wr32(E1000_MSIXBM(0),
769 (E1000_EICR_RX_QUEUE0 | E1000_EIMS_OTHER)); 865 (E1000_EICR_RX_QUEUE0 |
866 E1000_EICR_TX_QUEUE0 |
867 E1000_EIMS_OTHER));
770 break; 868 break;
771 case e1000_82576: 869 case e1000_82576:
772 wr32(E1000_IVAR0, E1000_IVAR_VALID); 870 wr32(E1000_IVAR0, E1000_IVAR_VALID);
@@ -778,16 +876,17 @@ static int igb_request_irq(struct igb_adapter *adapter)
778 876
779 if (adapter->flags & IGB_FLAG_HAS_MSI) { 877 if (adapter->flags & IGB_FLAG_HAS_MSI) {
780 err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0, 878 err = request_irq(adapter->pdev->irq, &igb_intr_msi, 0,
781 netdev->name, netdev); 879 netdev->name, adapter);
782 if (!err) 880 if (!err)
783 goto request_done; 881 goto request_done;
882
784 /* fall back to legacy interrupts */ 883 /* fall back to legacy interrupts */
785 igb_reset_interrupt_capability(adapter); 884 igb_reset_interrupt_capability(adapter);
786 adapter->flags &= ~IGB_FLAG_HAS_MSI; 885 adapter->flags &= ~IGB_FLAG_HAS_MSI;
787 } 886 }
788 887
789 err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED, 888 err = request_irq(adapter->pdev->irq, &igb_intr, IRQF_SHARED,
790 netdev->name, netdev); 889 netdev->name, adapter);
791 890
792 if (err) 891 if (err)
793 dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n", 892 dev_err(&adapter->pdev->dev, "Error %d getting interrupt\n",
@@ -799,23 +898,19 @@ request_done:
799 898
800static void igb_free_irq(struct igb_adapter *adapter) 899static void igb_free_irq(struct igb_adapter *adapter)
801{ 900{
802 struct net_device *netdev = adapter->netdev;
803
804 if (adapter->msix_entries) { 901 if (adapter->msix_entries) {
805 int vector = 0, i; 902 int vector = 0, i;
806 903
807 for (i = 0; i < adapter->num_tx_queues; i++) 904 free_irq(adapter->msix_entries[vector++].vector, adapter);
808 free_irq(adapter->msix_entries[vector++].vector,
809 &(adapter->tx_ring[i]));
810 for (i = 0; i < adapter->num_rx_queues; i++)
811 free_irq(adapter->msix_entries[vector++].vector,
812 &(adapter->rx_ring[i]));
813 905
814 free_irq(adapter->msix_entries[vector++].vector, netdev); 906 for (i = 0; i < adapter->num_q_vectors; i++) {
815 return; 907 struct igb_q_vector *q_vector = adapter->q_vector[i];
908 free_irq(adapter->msix_entries[vector++].vector,
909 q_vector);
910 }
911 } else {
912 free_irq(adapter->pdev->irq, adapter);
816 } 913 }
817
818 free_irq(adapter->pdev->irq, netdev);
819} 914}
820 915
821/** 916/**
@@ -826,6 +921,11 @@ static void igb_irq_disable(struct igb_adapter *adapter)
826{ 921{
827 struct e1000_hw *hw = &adapter->hw; 922 struct e1000_hw *hw = &adapter->hw;
828 923
924 /*
925 * we need to be careful when disabling interrupts. The VFs are also
926 * mapped into these registers and so clearing the bits can cause
927 * issues on the VF drivers so we only need to clear what we set
928 */
829 if (adapter->msix_entries) { 929 if (adapter->msix_entries) {
830 u32 regval = rd32(E1000_EIAM); 930 u32 regval = rd32(E1000_EIAM);
831 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask); 931 wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
@@ -849,15 +949,17 @@ static void igb_irq_enable(struct igb_adapter *adapter)
849 struct e1000_hw *hw = &adapter->hw; 949 struct e1000_hw *hw = &adapter->hw;
850 950
851 if (adapter->msix_entries) { 951 if (adapter->msix_entries) {
952 u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC;
852 u32 regval = rd32(E1000_EIAC); 953 u32 regval = rd32(E1000_EIAC);
853 wr32(E1000_EIAC, regval | adapter->eims_enable_mask); 954 wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
854 regval = rd32(E1000_EIAM); 955 regval = rd32(E1000_EIAM);
855 wr32(E1000_EIAM, regval | adapter->eims_enable_mask); 956 wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
856 wr32(E1000_EIMS, adapter->eims_enable_mask); 957 wr32(E1000_EIMS, adapter->eims_enable_mask);
857 if (adapter->vfs_allocated_count) 958 if (adapter->vfs_allocated_count) {
858 wr32(E1000_MBVFIMR, 0xFF); 959 wr32(E1000_MBVFIMR, 0xFF);
859 wr32(E1000_IMS, (E1000_IMS_LSC | E1000_IMS_VMMB | 960 ims |= E1000_IMS_VMMB;
860 E1000_IMS_DOUTSYNC)); 961 }
962 wr32(E1000_IMS, ims);
861 } else { 963 } else {
862 wr32(E1000_IMS, IMS_ENABLE_MASK); 964 wr32(E1000_IMS, IMS_ENABLE_MASK);
863 wr32(E1000_IAM, IMS_ENABLE_MASK); 965 wr32(E1000_IAM, IMS_ENABLE_MASK);
@@ -866,24 +968,23 @@ static void igb_irq_enable(struct igb_adapter *adapter)
866 968
867static void igb_update_mng_vlan(struct igb_adapter *adapter) 969static void igb_update_mng_vlan(struct igb_adapter *adapter)
868{ 970{
869 struct net_device *netdev = adapter->netdev; 971 struct e1000_hw *hw = &adapter->hw;
870 u16 vid = adapter->hw.mng_cookie.vlan_id; 972 u16 vid = adapter->hw.mng_cookie.vlan_id;
871 u16 old_vid = adapter->mng_vlan_id; 973 u16 old_vid = adapter->mng_vlan_id;
872 if (adapter->vlgrp) {
873 if (!vlan_group_get_device(adapter->vlgrp, vid)) {
874 if (adapter->hw.mng_cookie.status &
875 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
876 igb_vlan_rx_add_vid(netdev, vid);
877 adapter->mng_vlan_id = vid;
878 } else
879 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
880 974
881 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) && 975 if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
882 (vid != old_vid) && 976 /* add VID to filter table */
883 !vlan_group_get_device(adapter->vlgrp, old_vid)) 977 igb_vfta_set(hw, vid, true);
884 igb_vlan_rx_kill_vid(netdev, old_vid); 978 adapter->mng_vlan_id = vid;
885 } else 979 } else {
886 adapter->mng_vlan_id = vid; 980 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
981 }
982
983 if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
984 (vid != old_vid) &&
985 !vlan_group_get_device(adapter->vlgrp, old_vid)) {
986 /* remove VID from filter table */
987 igb_vfta_set(hw, old_vid, false);
887 } 988 }
888} 989}
889 990
@@ -907,7 +1008,6 @@ static void igb_release_hw_control(struct igb_adapter *adapter)
907 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD); 1008 ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
908} 1009}
909 1010
910
911/** 1011/**
912 * igb_get_hw_control - get control of the h/w from f/w 1012 * igb_get_hw_control - get control of the h/w from f/w
913 * @adapter: address of board private structure 1013 * @adapter: address of board private structure
@@ -942,8 +1042,11 @@ static void igb_configure(struct igb_adapter *adapter)
942 1042
943 igb_restore_vlan(adapter); 1043 igb_restore_vlan(adapter);
944 1044
945 igb_configure_tx(adapter); 1045 igb_setup_tctl(adapter);
1046 igb_setup_mrqc(adapter);
946 igb_setup_rctl(adapter); 1047 igb_setup_rctl(adapter);
1048
1049 igb_configure_tx(adapter);
947 igb_configure_rx(adapter); 1050 igb_configure_rx(adapter);
948 1051
949 igb_rx_fifo_flush_82575(&adapter->hw); 1052 igb_rx_fifo_flush_82575(&adapter->hw);
@@ -965,7 +1068,6 @@ static void igb_configure(struct igb_adapter *adapter)
965 * igb_up - Open the interface and prepare it to handle traffic 1068 * igb_up - Open the interface and prepare it to handle traffic
966 * @adapter: board private structure 1069 * @adapter: board private structure
967 **/ 1070 **/
968
969int igb_up(struct igb_adapter *adapter) 1071int igb_up(struct igb_adapter *adapter)
970{ 1072{
971 struct e1000_hw *hw = &adapter->hw; 1073 struct e1000_hw *hw = &adapter->hw;
@@ -976,30 +1078,37 @@ int igb_up(struct igb_adapter *adapter)
976 1078
977 clear_bit(__IGB_DOWN, &adapter->state); 1079 clear_bit(__IGB_DOWN, &adapter->state);
978 1080
979 for (i = 0; i < adapter->num_rx_queues; i++) 1081 for (i = 0; i < adapter->num_q_vectors; i++) {
980 napi_enable(&adapter->rx_ring[i].napi); 1082 struct igb_q_vector *q_vector = adapter->q_vector[i];
1083 napi_enable(&q_vector->napi);
1084 }
981 if (adapter->msix_entries) 1085 if (adapter->msix_entries)
982 igb_configure_msix(adapter); 1086 igb_configure_msix(adapter);
983 1087
984 igb_vmm_control(adapter);
985 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
986 igb_set_vmolr(hw, adapter->vfs_allocated_count);
987
988 /* Clear any pending interrupts. */ 1088 /* Clear any pending interrupts. */
989 rd32(E1000_ICR); 1089 rd32(E1000_ICR);
990 igb_irq_enable(adapter); 1090 igb_irq_enable(adapter);
991 1091
1092 /* notify VFs that reset has been completed */
1093 if (adapter->vfs_allocated_count) {
1094 u32 reg_data = rd32(E1000_CTRL_EXT);
1095 reg_data |= E1000_CTRL_EXT_PFRSTD;
1096 wr32(E1000_CTRL_EXT, reg_data);
1097 }
1098
992 netif_tx_start_all_queues(adapter->netdev); 1099 netif_tx_start_all_queues(adapter->netdev);
993 1100
994 /* Fire a link change interrupt to start the watchdog. */ 1101 /* start the watchdog. */
995 wr32(E1000_ICS, E1000_ICS_LSC); 1102 hw->mac.get_link_status = 1;
1103 schedule_work(&adapter->watchdog_task);
1104
996 return 0; 1105 return 0;
997} 1106}
998 1107
999void igb_down(struct igb_adapter *adapter) 1108void igb_down(struct igb_adapter *adapter)
1000{ 1109{
1001 struct e1000_hw *hw = &adapter->hw;
1002 struct net_device *netdev = adapter->netdev; 1110 struct net_device *netdev = adapter->netdev;
1111 struct e1000_hw *hw = &adapter->hw;
1003 u32 tctl, rctl; 1112 u32 tctl, rctl;
1004 int i; 1113 int i;
1005 1114
@@ -1022,8 +1131,10 @@ void igb_down(struct igb_adapter *adapter)
1022 wrfl(); 1131 wrfl();
1023 msleep(10); 1132 msleep(10);
1024 1133
1025 for (i = 0; i < adapter->num_rx_queues; i++) 1134 for (i = 0; i < adapter->num_q_vectors; i++) {
1026 napi_disable(&adapter->rx_ring[i].napi); 1135 struct igb_q_vector *q_vector = adapter->q_vector[i];
1136 napi_disable(&q_vector->napi);
1137 }
1027 1138
1028 igb_irq_disable(adapter); 1139 igb_irq_disable(adapter);
1029 1140
@@ -1062,6 +1173,7 @@ void igb_reinit_locked(struct igb_adapter *adapter)
1062 1173
1063void igb_reset(struct igb_adapter *adapter) 1174void igb_reset(struct igb_adapter *adapter)
1064{ 1175{
1176 struct pci_dev *pdev = adapter->pdev;
1065 struct e1000_hw *hw = &adapter->hw; 1177 struct e1000_hw *hw = &adapter->hw;
1066 struct e1000_mac_info *mac = &hw->mac; 1178 struct e1000_mac_info *mac = &hw->mac;
1067 struct e1000_fc_info *fc = &hw->fc; 1179 struct e1000_fc_info *fc = &hw->fc;
@@ -1073,7 +1185,8 @@ void igb_reset(struct igb_adapter *adapter)
1073 */ 1185 */
1074 switch (mac->type) { 1186 switch (mac->type) {
1075 case e1000_82576: 1187 case e1000_82576:
1076 pba = E1000_PBA_64K; 1188 pba = rd32(E1000_RXPBS);
1189 pba &= E1000_RXPBS_SIZE_MASK_82576;
1077 break; 1190 break;
1078 case e1000_82575: 1191 case e1000_82575:
1079 default: 1192 default:
@@ -1148,10 +1261,10 @@ void igb_reset(struct igb_adapter *adapter)
1148 if (adapter->vfs_allocated_count) { 1261 if (adapter->vfs_allocated_count) {
1149 int i; 1262 int i;
1150 for (i = 0 ; i < adapter->vfs_allocated_count; i++) 1263 for (i = 0 ; i < adapter->vfs_allocated_count; i++)
1151 adapter->vf_data[i].clear_to_send = false; 1264 adapter->vf_data[i].flags = 0;
1152 1265
1153 /* ping all the active vfs to let them know we are going down */ 1266 /* ping all the active vfs to let them know we are going down */
1154 igb_ping_all_vfs(adapter); 1267 igb_ping_all_vfs(adapter);
1155 1268
1156 /* disable transmits and receives */ 1269 /* disable transmits and receives */
1157 wr32(E1000_VFRE, 0); 1270 wr32(E1000_VFRE, 0);
@@ -1159,23 +1272,23 @@ void igb_reset(struct igb_adapter *adapter)
1159 } 1272 }
1160 1273
1161 /* Allow time for pending master requests to run */ 1274 /* Allow time for pending master requests to run */
1162 adapter->hw.mac.ops.reset_hw(&adapter->hw); 1275 hw->mac.ops.reset_hw(hw);
1163 wr32(E1000_WUC, 0); 1276 wr32(E1000_WUC, 0);
1164 1277
1165 if (adapter->hw.mac.ops.init_hw(&adapter->hw)) 1278 if (hw->mac.ops.init_hw(hw))
1166 dev_err(&adapter->pdev->dev, "Hardware Error\n"); 1279 dev_err(&pdev->dev, "Hardware Error\n");
1167 1280
1168 igb_update_mng_vlan(adapter); 1281 igb_update_mng_vlan(adapter);
1169 1282
1170 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */ 1283 /* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
1171 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE); 1284 wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
1172 1285
1173 igb_reset_adaptive(&adapter->hw); 1286 igb_reset_adaptive(hw);
1174 igb_get_phy_info(&adapter->hw); 1287 igb_get_phy_info(hw);
1175} 1288}
1176 1289
1177static const struct net_device_ops igb_netdev_ops = { 1290static const struct net_device_ops igb_netdev_ops = {
1178 .ndo_open = igb_open, 1291 .ndo_open = igb_open,
1179 .ndo_stop = igb_close, 1292 .ndo_stop = igb_close,
1180 .ndo_start_xmit = igb_xmit_frame_adv, 1293 .ndo_start_xmit = igb_xmit_frame_adv,
1181 .ndo_get_stats = igb_get_stats, 1294 .ndo_get_stats = igb_get_stats,
@@ -1211,10 +1324,11 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1211 struct net_device *netdev; 1324 struct net_device *netdev;
1212 struct igb_adapter *adapter; 1325 struct igb_adapter *adapter;
1213 struct e1000_hw *hw; 1326 struct e1000_hw *hw;
1327 u16 eeprom_data = 0;
1328 static int global_quad_port_a; /* global quad port a indication */
1214 const struct e1000_info *ei = igb_info_tbl[ent->driver_data]; 1329 const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
1215 unsigned long mmio_start, mmio_len; 1330 unsigned long mmio_start, mmio_len;
1216 int err, pci_using_dac; 1331 int err, pci_using_dac;
1217 u16 eeprom_data = 0;
1218 u16 eeprom_apme_mask = IGB_EEPROM_APME; 1332 u16 eeprom_apme_mask = IGB_EEPROM_APME;
1219 u32 part_num; 1333 u32 part_num;
1220 1334
@@ -1291,8 +1405,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1291 hw->subsystem_vendor_id = pdev->subsystem_vendor; 1405 hw->subsystem_vendor_id = pdev->subsystem_vendor;
1292 hw->subsystem_device_id = pdev->subsystem_device; 1406 hw->subsystem_device_id = pdev->subsystem_device;
1293 1407
1294 /* setup the private structure */
1295 hw->back = adapter;
1296 /* Copy the default MAC, PHY and NVM function pointers */ 1408 /* Copy the default MAC, PHY and NVM function pointers */
1297 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops)); 1409 memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
1298 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops)); 1410 memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
@@ -1302,46 +1414,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1302 if (err) 1414 if (err)
1303 goto err_sw_init; 1415 goto err_sw_init;
1304 1416
1305#ifdef CONFIG_PCI_IOV
1306 /* since iov functionality isn't critical to base device function we
1307 * can accept failure. If it fails we don't allow iov to be enabled */
1308 if (hw->mac.type == e1000_82576) {
1309 /* 82576 supports a maximum of 7 VFs in addition to the PF */
1310 unsigned int num_vfs = (max_vfs > 7) ? 7 : max_vfs;
1311 int i;
1312 unsigned char mac_addr[ETH_ALEN];
1313
1314 if (num_vfs) {
1315 adapter->vf_data = kcalloc(num_vfs,
1316 sizeof(struct vf_data_storage),
1317 GFP_KERNEL);
1318 if (!adapter->vf_data) {
1319 dev_err(&pdev->dev,
1320 "Could not allocate VF private data - "
1321 "IOV enable failed\n");
1322 } else {
1323 err = pci_enable_sriov(pdev, num_vfs);
1324 if (!err) {
1325 adapter->vfs_allocated_count = num_vfs;
1326 dev_info(&pdev->dev,
1327 "%d vfs allocated\n",
1328 num_vfs);
1329 for (i = 0;
1330 i < adapter->vfs_allocated_count;
1331 i++) {
1332 random_ether_addr(mac_addr);
1333 igb_set_vf_mac(adapter, i,
1334 mac_addr);
1335 }
1336 } else {
1337 kfree(adapter->vf_data);
1338 adapter->vf_data = NULL;
1339 }
1340 }
1341 }
1342 }
1343
1344#endif
1345 /* setup the private structure */ 1417 /* setup the private structure */
1346 err = igb_sw_init(adapter); 1418 err = igb_sw_init(adapter);
1347 if (err) 1419 if (err)
@@ -1349,16 +1421,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1349 1421
1350 igb_get_bus_info_pcie(hw); 1422 igb_get_bus_info_pcie(hw);
1351 1423
1352 /* set flags */
1353 switch (hw->mac.type) {
1354 case e1000_82575:
1355 adapter->flags |= IGB_FLAG_NEED_CTX_IDX;
1356 break;
1357 case e1000_82576:
1358 default:
1359 break;
1360 }
1361
1362 hw->phy.autoneg_wait_to_complete = false; 1424 hw->phy.autoneg_wait_to_complete = false;
1363 hw->mac.adaptive_ifs = true; 1425 hw->mac.adaptive_ifs = true;
1364 1426
@@ -1382,7 +1444,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1382 netdev->features |= NETIF_F_IPV6_CSUM; 1444 netdev->features |= NETIF_F_IPV6_CSUM;
1383 netdev->features |= NETIF_F_TSO; 1445 netdev->features |= NETIF_F_TSO;
1384 netdev->features |= NETIF_F_TSO6; 1446 netdev->features |= NETIF_F_TSO6;
1385
1386 netdev->features |= NETIF_F_GRO; 1447 netdev->features |= NETIF_F_GRO;
1387 1448
1388 netdev->vlan_features |= NETIF_F_TSO; 1449 netdev->vlan_features |= NETIF_F_TSO;
@@ -1394,10 +1455,10 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1394 if (pci_using_dac) 1455 if (pci_using_dac)
1395 netdev->features |= NETIF_F_HIGHDMA; 1456 netdev->features |= NETIF_F_HIGHDMA;
1396 1457
1397 if (adapter->hw.mac.type == e1000_82576) 1458 if (hw->mac.type >= e1000_82576)
1398 netdev->features |= NETIF_F_SCTP_CSUM; 1459 netdev->features |= NETIF_F_SCTP_CSUM;
1399 1460
1400 adapter->en_mng_pt = igb_enable_mng_pass_thru(&adapter->hw); 1461 adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
1401 1462
1402 /* before reading the NVM, reset the controller to put the device in a 1463 /* before reading the NVM, reset the controller to put the device in a
1403 * known good starting state */ 1464 * known good starting state */
@@ -1439,9 +1500,6 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1439 hw->fc.requested_mode = e1000_fc_default; 1500 hw->fc.requested_mode = e1000_fc_default;
1440 hw->fc.current_mode = e1000_fc_default; 1501 hw->fc.current_mode = e1000_fc_default;
1441 1502
1442 adapter->itr_setting = IGB_DEFAULT_ITR;
1443 adapter->itr = IGB_START_ITR;
1444
1445 igb_validate_mdi_setting(hw); 1503 igb_validate_mdi_setting(hw);
1446 1504
1447 /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM, 1505 /* Initial Wake on LAN setting If APM wake is enabled in the EEPROM,
@@ -1508,66 +1566,14 @@ static int __devinit igb_probe(struct pci_dev *pdev,
1508 dev_info(&pdev->dev, "DCA enabled\n"); 1566 dev_info(&pdev->dev, "DCA enabled\n");
1509 igb_setup_dca(adapter); 1567 igb_setup_dca(adapter);
1510 } 1568 }
1511#endif
1512
1513 /*
1514 * Initialize hardware timer: we keep it running just in case
1515 * that some program needs it later on.
1516 */
1517 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1518 adapter->cycles.read = igb_read_clock;
1519 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1520 adapter->cycles.mult = 1;
1521 adapter->cycles.shift = IGB_TSYNC_SHIFT;
1522 wr32(E1000_TIMINCA,
1523 (1<<24) |
1524 IGB_TSYNC_CYCLE_TIME_IN_NANOSECONDS * IGB_TSYNC_SCALE);
1525#if 0
1526 /*
1527 * Avoid rollover while we initialize by resetting the time counter.
1528 */
1529 wr32(E1000_SYSTIML, 0x00000000);
1530 wr32(E1000_SYSTIMH, 0x00000000);
1531#else
1532 /*
1533 * Set registers so that rollover occurs soon to test this.
1534 */
1535 wr32(E1000_SYSTIML, 0x00000000);
1536 wr32(E1000_SYSTIMH, 0xFF800000);
1537#endif
1538 wrfl();
1539 timecounter_init(&adapter->clock,
1540 &adapter->cycles,
1541 ktime_to_ns(ktime_get_real()));
1542
1543 /*
1544 * Synchronize our NIC clock against system wall clock. NIC
1545 * time stamp reading requires ~3us per sample, each sample
1546 * was pretty stable even under load => only require 10
1547 * samples for each offset comparison.
1548 */
1549 memset(&adapter->compare, 0, sizeof(adapter->compare));
1550 adapter->compare.source = &adapter->clock;
1551 adapter->compare.target = ktime_get_real;
1552 adapter->compare.num_samples = 10;
1553 timecompare_update(&adapter->compare, 0);
1554 1569
1555#ifdef DEBUG
1556 {
1557 char buffer[160];
1558 printk(KERN_DEBUG
1559 "igb: %s: hw %p initialized timer\n",
1560 igb_get_time_str(adapter, buffer),
1561 &adapter->hw);
1562 }
1563#endif 1570#endif
1564
1565 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n"); 1571 dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
1566 /* print bus type/speed/width info */ 1572 /* print bus type/speed/width info */
1567 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n", 1573 dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
1568 netdev->name, 1574 netdev->name,
1569 ((hw->bus.speed == e1000_bus_speed_2500) 1575 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
1570 ? "2.5Gb/s" : "unknown"), 1576 "unknown"),
1571 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" : 1577 ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
1572 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" : 1578 (hw->bus.width == e1000_bus_width_pcie_x2) ? "Width x2" :
1573 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" : 1579 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
@@ -1594,15 +1600,14 @@ err_eeprom:
1594 1600
1595 if (hw->flash_address) 1601 if (hw->flash_address)
1596 iounmap(hw->flash_address); 1602 iounmap(hw->flash_address);
1597
1598 igb_free_queues(adapter);
1599err_sw_init: 1603err_sw_init:
1604 igb_clear_interrupt_scheme(adapter);
1600 iounmap(hw->hw_addr); 1605 iounmap(hw->hw_addr);
1601err_ioremap: 1606err_ioremap:
1602 free_netdev(netdev); 1607 free_netdev(netdev);
1603err_alloc_etherdev: 1608err_alloc_etherdev:
1604 pci_release_selected_regions(pdev, pci_select_bars(pdev, 1609 pci_release_selected_regions(pdev,
1605 IORESOURCE_MEM)); 1610 pci_select_bars(pdev, IORESOURCE_MEM));
1606err_pci_reg: 1611err_pci_reg:
1607err_dma: 1612err_dma:
1608 pci_disable_device(pdev); 1613 pci_disable_device(pdev);
@@ -1647,12 +1652,10 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1647 1652
1648 unregister_netdev(netdev); 1653 unregister_netdev(netdev);
1649 1654
1650 if (!igb_check_reset_block(&adapter->hw)) 1655 if (!igb_check_reset_block(hw))
1651 igb_reset_phy(&adapter->hw); 1656 igb_reset_phy(hw);
1652
1653 igb_reset_interrupt_capability(adapter);
1654 1657
1655 igb_free_queues(adapter); 1658 igb_clear_interrupt_scheme(adapter);
1656 1659
1657#ifdef CONFIG_PCI_IOV 1660#ifdef CONFIG_PCI_IOV
1658 /* reclaim resources allocated to VFs */ 1661 /* reclaim resources allocated to VFs */
@@ -1668,11 +1671,12 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1668 dev_info(&pdev->dev, "IOV Disabled\n"); 1671 dev_info(&pdev->dev, "IOV Disabled\n");
1669 } 1672 }
1670#endif 1673#endif
1674
1671 iounmap(hw->hw_addr); 1675 iounmap(hw->hw_addr);
1672 if (hw->flash_address) 1676 if (hw->flash_address)
1673 iounmap(hw->flash_address); 1677 iounmap(hw->flash_address);
1674 pci_release_selected_regions(pdev, pci_select_bars(pdev, 1678 pci_release_selected_regions(pdev,
1675 IORESOURCE_MEM)); 1679 pci_select_bars(pdev, IORESOURCE_MEM));
1676 1680
1677 free_netdev(netdev); 1681 free_netdev(netdev);
1678 1682
@@ -1682,6 +1686,118 @@ static void __devexit igb_remove(struct pci_dev *pdev)
1682} 1686}
1683 1687
1684/** 1688/**
1689 * igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
1690 * @adapter: board private structure to initialize
1691 *
1692 * This function initializes the vf specific data storage and then attempts to
1693 * allocate the VFs. The reason for ordering it this way is because it is much
1694 * mor expensive time wise to disable SR-IOV than it is to allocate and free
1695 * the memory for the VFs.
1696 **/
1697static void __devinit igb_probe_vfs(struct igb_adapter * adapter)
1698{
1699#ifdef CONFIG_PCI_IOV
1700 struct pci_dev *pdev = adapter->pdev;
1701
1702 if (adapter->vfs_allocated_count > 7)
1703 adapter->vfs_allocated_count = 7;
1704
1705 if (adapter->vfs_allocated_count) {
1706 adapter->vf_data = kcalloc(adapter->vfs_allocated_count,
1707 sizeof(struct vf_data_storage),
1708 GFP_KERNEL);
1709 /* if allocation failed then we do not support SR-IOV */
1710 if (!adapter->vf_data) {
1711 adapter->vfs_allocated_count = 0;
1712 dev_err(&pdev->dev, "Unable to allocate memory for VF "
1713 "Data Storage\n");
1714 }
1715 }
1716
1717 if (pci_enable_sriov(pdev, adapter->vfs_allocated_count)) {
1718 kfree(adapter->vf_data);
1719 adapter->vf_data = NULL;
1720#endif /* CONFIG_PCI_IOV */
1721 adapter->vfs_allocated_count = 0;
1722#ifdef CONFIG_PCI_IOV
1723 } else {
1724 unsigned char mac_addr[ETH_ALEN];
1725 int i;
1726 dev_info(&pdev->dev, "%d vfs allocated\n",
1727 adapter->vfs_allocated_count);
1728 for (i = 0; i < adapter->vfs_allocated_count; i++) {
1729 random_ether_addr(mac_addr);
1730 igb_set_vf_mac(adapter, i, mac_addr);
1731 }
1732 }
1733#endif /* CONFIG_PCI_IOV */
1734}
1735
1736
1737/**
1738 * igb_init_hw_timer - Initialize hardware timer used with IEEE 1588 timestamp
1739 * @adapter: board private structure to initialize
1740 *
1741 * igb_init_hw_timer initializes the function pointer and values for the hw
1742 * timer found in hardware.
1743 **/
1744static void igb_init_hw_timer(struct igb_adapter *adapter)
1745{
1746 struct e1000_hw *hw = &adapter->hw;
1747
1748 switch (hw->mac.type) {
1749 case e1000_82576:
1750 /*
1751 * Initialize hardware timer: we keep it running just in case
1752 * that some program needs it later on.
1753 */
1754 memset(&adapter->cycles, 0, sizeof(adapter->cycles));
1755 adapter->cycles.read = igb_read_clock;
1756 adapter->cycles.mask = CLOCKSOURCE_MASK(64);
1757 adapter->cycles.mult = 1;
1758 /**
1759 * Scale the NIC clock cycle by a large factor so that
1760 * relatively small clock corrections can be added or
1761 * substracted at each clock tick. The drawbacks of a large
1762 * factor are a) that the clock register overflows more quickly
1763 * (not such a big deal) and b) that the increment per tick has
1764 * to fit into 24 bits. As a result we need to use a shift of
1765 * 19 so we can fit a value of 16 into the TIMINCA register.
1766 */
1767 adapter->cycles.shift = IGB_82576_TSYNC_SHIFT;
1768 wr32(E1000_TIMINCA,
1769 (1 << E1000_TIMINCA_16NS_SHIFT) |
1770 (16 << IGB_82576_TSYNC_SHIFT));
1771
1772 /* Set registers so that rollover occurs soon to test this. */
1773 wr32(E1000_SYSTIML, 0x00000000);
1774 wr32(E1000_SYSTIMH, 0xFF800000);
1775 wrfl();
1776
1777 timecounter_init(&adapter->clock,
1778 &adapter->cycles,
1779 ktime_to_ns(ktime_get_real()));
1780 /*
1781 * Synchronize our NIC clock against system wall clock. NIC
1782 * time stamp reading requires ~3us per sample, each sample
1783 * was pretty stable even under load => only require 10
1784 * samples for each offset comparison.
1785 */
1786 memset(&adapter->compare, 0, sizeof(adapter->compare));
1787 adapter->compare.source = &adapter->clock;
1788 adapter->compare.target = ktime_get_real;
1789 adapter->compare.num_samples = 10;
1790 timecompare_update(&adapter->compare, 0);
1791 break;
1792 case e1000_82575:
1793 /* 82575 does not support timesync */
1794 default:
1795 break;
1796 }
1797
1798}
1799
1800/**
1685 * igb_sw_init - Initialize general software structures (struct igb_adapter) 1801 * igb_sw_init - Initialize general software structures (struct igb_adapter)
1686 * @adapter: board private structure to initialize 1802 * @adapter: board private structure to initialize
1687 * 1803 *
@@ -1699,20 +1815,37 @@ static int __devinit igb_sw_init(struct igb_adapter *adapter)
1699 1815
1700 adapter->tx_ring_count = IGB_DEFAULT_TXD; 1816 adapter->tx_ring_count = IGB_DEFAULT_TXD;
1701 adapter->rx_ring_count = IGB_DEFAULT_RXD; 1817 adapter->rx_ring_count = IGB_DEFAULT_RXD;
1702 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE; 1818 adapter->rx_itr_setting = IGB_DEFAULT_ITR;
1703 adapter->rx_ps_hdr_size = 0; /* disable packet split */ 1819 adapter->tx_itr_setting = IGB_DEFAULT_ITR;
1820
1704 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN; 1821 adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
1705 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN; 1822 adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
1706 1823
1707 /* This call may decrease the number of queues depending on 1824#ifdef CONFIG_PCI_IOV
1708 * interrupt mode. */ 1825 if (hw->mac.type == e1000_82576)
1709 igb_set_interrupt_capability(adapter); 1826 adapter->vfs_allocated_count = max_vfs;
1827
1828#endif /* CONFIG_PCI_IOV */
1829 adapter->rss_queues = min_t(u32, IGB_MAX_RX_QUEUES, num_online_cpus());
1830
1831 /*
1832 * if rss_queues > 4 or vfs are going to be allocated with rss_queues
1833 * then we should combine the queues into a queue pair in order to
1834 * conserve interrupts due to limited supply
1835 */
1836 if ((adapter->rss_queues > 4) ||
1837 ((adapter->rss_queues > 1) && (adapter->vfs_allocated_count > 6)))
1838 adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
1710 1839
1711 if (igb_alloc_queues(adapter)) { 1840 /* This call may decrease the number of queues */
1841 if (igb_init_interrupt_scheme(adapter)) {
1712 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 1842 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
1713 return -ENOMEM; 1843 return -ENOMEM;
1714 } 1844 }
1715 1845
1846 igb_init_hw_timer(adapter);
1847 igb_probe_vfs(adapter);
1848
1716 /* Explicitly disable IRQ since the NIC can be in any state. */ 1849 /* Explicitly disable IRQ since the NIC can be in any state. */
1717 igb_irq_disable(adapter); 1850 igb_irq_disable(adapter);
1718 1851
@@ -1757,21 +1890,12 @@ static int igb_open(struct net_device *netdev)
1757 1890
1758 /* e1000_power_up_phy(adapter); */ 1891 /* e1000_power_up_phy(adapter); */
1759 1892
1760 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
1761 if ((adapter->hw.mng_cookie.status &
1762 E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
1763 igb_update_mng_vlan(adapter);
1764
1765 /* before we allocate an interrupt, we must be ready to handle it. 1893 /* before we allocate an interrupt, we must be ready to handle it.
1766 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt 1894 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
1767 * as soon as we call pci_request_irq, so we have to setup our 1895 * as soon as we call pci_request_irq, so we have to setup our
1768 * clean_rx handler before we do so. */ 1896 * clean_rx handler before we do so. */
1769 igb_configure(adapter); 1897 igb_configure(adapter);
1770 1898
1771 igb_vmm_control(adapter);
1772 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0);
1773 igb_set_vmolr(hw, adapter->vfs_allocated_count);
1774
1775 err = igb_request_irq(adapter); 1899 err = igb_request_irq(adapter);
1776 if (err) 1900 if (err)
1777 goto err_req_irq; 1901 goto err_req_irq;
@@ -1779,18 +1903,28 @@ static int igb_open(struct net_device *netdev)
1779 /* From here on the code is the same as igb_up() */ 1903 /* From here on the code is the same as igb_up() */
1780 clear_bit(__IGB_DOWN, &adapter->state); 1904 clear_bit(__IGB_DOWN, &adapter->state);
1781 1905
1782 for (i = 0; i < adapter->num_rx_queues; i++) 1906 for (i = 0; i < adapter->num_q_vectors; i++) {
1783 napi_enable(&adapter->rx_ring[i].napi); 1907 struct igb_q_vector *q_vector = adapter->q_vector[i];
1908 napi_enable(&q_vector->napi);
1909 }
1784 1910
1785 /* Clear any pending interrupts. */ 1911 /* Clear any pending interrupts. */
1786 rd32(E1000_ICR); 1912 rd32(E1000_ICR);
1787 1913
1788 igb_irq_enable(adapter); 1914 igb_irq_enable(adapter);
1789 1915
1916 /* notify VFs that reset has been completed */
1917 if (adapter->vfs_allocated_count) {
1918 u32 reg_data = rd32(E1000_CTRL_EXT);
1919 reg_data |= E1000_CTRL_EXT_PFRSTD;
1920 wr32(E1000_CTRL_EXT, reg_data);
1921 }
1922
1790 netif_tx_start_all_queues(netdev); 1923 netif_tx_start_all_queues(netdev);
1791 1924
1792 /* Fire a link status change interrupt to start the watchdog. */ 1925 /* start the watchdog. */
1793 wr32(E1000_ICS, E1000_ICS_LSC); 1926 hw->mac.get_link_status = 1;
1927 schedule_work(&adapter->watchdog_task);
1794 1928
1795 return 0; 1929 return 0;
1796 1930
@@ -1829,28 +1963,18 @@ static int igb_close(struct net_device *netdev)
1829 igb_free_all_tx_resources(adapter); 1963 igb_free_all_tx_resources(adapter);
1830 igb_free_all_rx_resources(adapter); 1964 igb_free_all_rx_resources(adapter);
1831 1965
1832 /* kill manageability vlan ID if supported, but not if a vlan with
1833 * the same ID is registered on the host OS (let 8021q kill it) */
1834 if ((adapter->hw.mng_cookie.status &
1835 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
1836 !(adapter->vlgrp &&
1837 vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id)))
1838 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
1839
1840 return 0; 1966 return 0;
1841} 1967}
1842 1968
1843/** 1969/**
1844 * igb_setup_tx_resources - allocate Tx resources (Descriptors) 1970 * igb_setup_tx_resources - allocate Tx resources (Descriptors)
1845 * @adapter: board private structure
1846 * @tx_ring: tx descriptor ring (for a specific queue) to setup 1971 * @tx_ring: tx descriptor ring (for a specific queue) to setup
1847 * 1972 *
1848 * Return 0 on success, negative on failure 1973 * Return 0 on success, negative on failure
1849 **/ 1974 **/
1850int igb_setup_tx_resources(struct igb_adapter *adapter, 1975int igb_setup_tx_resources(struct igb_ring *tx_ring)
1851 struct igb_ring *tx_ring)
1852{ 1976{
1853 struct pci_dev *pdev = adapter->pdev; 1977 struct pci_dev *pdev = tx_ring->pdev;
1854 int size; 1978 int size;
1855 1979
1856 size = sizeof(struct igb_buffer) * tx_ring->count; 1980 size = sizeof(struct igb_buffer) * tx_ring->count;
@@ -1863,20 +1987,20 @@ int igb_setup_tx_resources(struct igb_adapter *adapter,
1863 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc); 1987 tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
1864 tx_ring->size = ALIGN(tx_ring->size, 4096); 1988 tx_ring->size = ALIGN(tx_ring->size, 4096);
1865 1989
1866 tx_ring->desc = pci_alloc_consistent(pdev, tx_ring->size, 1990 tx_ring->desc = pci_alloc_consistent(pdev,
1991 tx_ring->size,
1867 &tx_ring->dma); 1992 &tx_ring->dma);
1868 1993
1869 if (!tx_ring->desc) 1994 if (!tx_ring->desc)
1870 goto err; 1995 goto err;
1871 1996
1872 tx_ring->adapter = adapter;
1873 tx_ring->next_to_use = 0; 1997 tx_ring->next_to_use = 0;
1874 tx_ring->next_to_clean = 0; 1998 tx_ring->next_to_clean = 0;
1875 return 0; 1999 return 0;
1876 2000
1877err: 2001err:
1878 vfree(tx_ring->buffer_info); 2002 vfree(tx_ring->buffer_info);
1879 dev_err(&adapter->pdev->dev, 2003 dev_err(&pdev->dev,
1880 "Unable to allocate memory for the transmit descriptor ring\n"); 2004 "Unable to allocate memory for the transmit descriptor ring\n");
1881 return -ENOMEM; 2005 return -ENOMEM;
1882} 2006}
@@ -1890,13 +2014,13 @@ err:
1890 **/ 2014 **/
1891static int igb_setup_all_tx_resources(struct igb_adapter *adapter) 2015static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
1892{ 2016{
2017 struct pci_dev *pdev = adapter->pdev;
1893 int i, err = 0; 2018 int i, err = 0;
1894 int r_idx;
1895 2019
1896 for (i = 0; i < adapter->num_tx_queues; i++) { 2020 for (i = 0; i < adapter->num_tx_queues; i++) {
1897 err = igb_setup_tx_resources(adapter, &adapter->tx_ring[i]); 2021 err = igb_setup_tx_resources(&adapter->tx_ring[i]);
1898 if (err) { 2022 if (err) {
1899 dev_err(&adapter->pdev->dev, 2023 dev_err(&pdev->dev,
1900 "Allocation for Tx Queue %u failed\n", i); 2024 "Allocation for Tx Queue %u failed\n", i);
1901 for (i--; i >= 0; i--) 2025 for (i--; i >= 0; i--)
1902 igb_free_tx_resources(&adapter->tx_ring[i]); 2026 igb_free_tx_resources(&adapter->tx_ring[i]);
@@ -1904,57 +2028,24 @@ static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
1904 } 2028 }
1905 } 2029 }
1906 2030
1907 for (i = 0; i < IGB_MAX_TX_QUEUES; i++) { 2031 for (i = 0; i < IGB_ABS_MAX_TX_QUEUES; i++) {
1908 r_idx = i % adapter->num_tx_queues; 2032 int r_idx = i % adapter->num_tx_queues;
1909 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx]; 2033 adapter->multi_tx_table[i] = &adapter->tx_ring[r_idx];
1910 } 2034 }
1911 return err; 2035 return err;
1912} 2036}
1913 2037
1914/** 2038/**
1915 * igb_configure_tx - Configure transmit Unit after Reset 2039 * igb_setup_tctl - configure the transmit control registers
1916 * @adapter: board private structure 2040 * @adapter: Board private structure
1917 *
1918 * Configure the Tx unit of the MAC after a reset.
1919 **/ 2041 **/
1920static void igb_configure_tx(struct igb_adapter *adapter) 2042void igb_setup_tctl(struct igb_adapter *adapter)
1921{ 2043{
1922 u64 tdba;
1923 struct e1000_hw *hw = &adapter->hw; 2044 struct e1000_hw *hw = &adapter->hw;
1924 u32 tctl; 2045 u32 tctl;
1925 u32 txdctl, txctrl;
1926 int i, j;
1927
1928 for (i = 0; i < adapter->num_tx_queues; i++) {
1929 struct igb_ring *ring = &adapter->tx_ring[i];
1930 j = ring->reg_idx;
1931 wr32(E1000_TDLEN(j),
1932 ring->count * sizeof(union e1000_adv_tx_desc));
1933 tdba = ring->dma;
1934 wr32(E1000_TDBAL(j),
1935 tdba & 0x00000000ffffffffULL);
1936 wr32(E1000_TDBAH(j), tdba >> 32);
1937
1938 ring->head = E1000_TDH(j);
1939 ring->tail = E1000_TDT(j);
1940 writel(0, hw->hw_addr + ring->tail);
1941 writel(0, hw->hw_addr + ring->head);
1942 txdctl = rd32(E1000_TXDCTL(j));
1943 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
1944 wr32(E1000_TXDCTL(j), txdctl);
1945
1946 /* Turn off Relaxed Ordering on head write-backs. The
1947 * writebacks MUST be delivered in order or it will
1948 * completely screw up our bookeeping.
1949 */
1950 txctrl = rd32(E1000_DCA_TXCTRL(j));
1951 txctrl &= ~E1000_DCA_TXCTRL_TX_WB_RO_EN;
1952 wr32(E1000_DCA_TXCTRL(j), txctrl);
1953 }
1954 2046
1955 /* disable queue 0 to prevent tail bump w/o re-configuration */ 2047 /* disable queue 0 which is enabled by default on 82575 and 82576 */
1956 if (adapter->vfs_allocated_count) 2048 wr32(E1000_TXDCTL(0), 0);
1957 wr32(E1000_TXDCTL(0), 0);
1958 2049
1959 /* Program the Transmit Control Register */ 2050 /* Program the Transmit Control Register */
1960 tctl = rd32(E1000_TCTL); 2051 tctl = rd32(E1000_TCTL);
@@ -1964,9 +2055,6 @@ static void igb_configure_tx(struct igb_adapter *adapter)
1964 2055
1965 igb_config_collision_dist(hw); 2056 igb_config_collision_dist(hw);
1966 2057
1967 /* Setup Transmit Descriptor Settings for eop descriptor */
1968 adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_RS;
1969
1970 /* Enable transmits */ 2058 /* Enable transmits */
1971 tctl |= E1000_TCTL_EN; 2059 tctl |= E1000_TCTL_EN;
1972 2060
@@ -1974,16 +2062,69 @@ static void igb_configure_tx(struct igb_adapter *adapter)
1974} 2062}
1975 2063
1976/** 2064/**
1977 * igb_setup_rx_resources - allocate Rx resources (Descriptors) 2065 * igb_configure_tx_ring - Configure transmit ring after Reset
1978 * @adapter: board private structure 2066 * @adapter: board private structure
2067 * @ring: tx ring to configure
2068 *
2069 * Configure a transmit ring after a reset.
2070 **/
2071void igb_configure_tx_ring(struct igb_adapter *adapter,
2072 struct igb_ring *ring)
2073{
2074 struct e1000_hw *hw = &adapter->hw;
2075 u32 txdctl;
2076 u64 tdba = ring->dma;
2077 int reg_idx = ring->reg_idx;
2078
2079 /* disable the queue */
2080 txdctl = rd32(E1000_TXDCTL(reg_idx));
2081 wr32(E1000_TXDCTL(reg_idx),
2082 txdctl & ~E1000_TXDCTL_QUEUE_ENABLE);
2083 wrfl();
2084 mdelay(10);
2085
2086 wr32(E1000_TDLEN(reg_idx),
2087 ring->count * sizeof(union e1000_adv_tx_desc));
2088 wr32(E1000_TDBAL(reg_idx),
2089 tdba & 0x00000000ffffffffULL);
2090 wr32(E1000_TDBAH(reg_idx), tdba >> 32);
2091
2092 ring->head = hw->hw_addr + E1000_TDH(reg_idx);
2093 ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
2094 writel(0, ring->head);
2095 writel(0, ring->tail);
2096
2097 txdctl |= IGB_TX_PTHRESH;
2098 txdctl |= IGB_TX_HTHRESH << 8;
2099 txdctl |= IGB_TX_WTHRESH << 16;
2100
2101 txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
2102 wr32(E1000_TXDCTL(reg_idx), txdctl);
2103}
2104
2105/**
2106 * igb_configure_tx - Configure transmit Unit after Reset
2107 * @adapter: board private structure
2108 *
2109 * Configure the Tx unit of the MAC after a reset.
2110 **/
2111static void igb_configure_tx(struct igb_adapter *adapter)
2112{
2113 int i;
2114
2115 for (i = 0; i < adapter->num_tx_queues; i++)
2116 igb_configure_tx_ring(adapter, &adapter->tx_ring[i]);
2117}
2118
2119/**
2120 * igb_setup_rx_resources - allocate Rx resources (Descriptors)
1979 * @rx_ring: rx descriptor ring (for a specific queue) to setup 2121 * @rx_ring: rx descriptor ring (for a specific queue) to setup
1980 * 2122 *
1981 * Returns 0 on success, negative on failure 2123 * Returns 0 on success, negative on failure
1982 **/ 2124 **/
1983int igb_setup_rx_resources(struct igb_adapter *adapter, 2125int igb_setup_rx_resources(struct igb_ring *rx_ring)
1984 struct igb_ring *rx_ring)
1985{ 2126{
1986 struct pci_dev *pdev = adapter->pdev; 2127 struct pci_dev *pdev = rx_ring->pdev;
1987 int size, desc_len; 2128 int size, desc_len;
1988 2129
1989 size = sizeof(struct igb_buffer) * rx_ring->count; 2130 size = sizeof(struct igb_buffer) * rx_ring->count;
@@ -2007,13 +2148,12 @@ int igb_setup_rx_resources(struct igb_adapter *adapter,
2007 rx_ring->next_to_clean = 0; 2148 rx_ring->next_to_clean = 0;
2008 rx_ring->next_to_use = 0; 2149 rx_ring->next_to_use = 0;
2009 2150
2010 rx_ring->adapter = adapter;
2011
2012 return 0; 2151 return 0;
2013 2152
2014err: 2153err:
2015 vfree(rx_ring->buffer_info); 2154 vfree(rx_ring->buffer_info);
2016 dev_err(&adapter->pdev->dev, "Unable to allocate memory for " 2155 rx_ring->buffer_info = NULL;
2156 dev_err(&pdev->dev, "Unable to allocate memory for "
2017 "the receive descriptor ring\n"); 2157 "the receive descriptor ring\n");
2018 return -ENOMEM; 2158 return -ENOMEM;
2019} 2159}
@@ -2027,12 +2167,13 @@ err:
2027 **/ 2167 **/
2028static int igb_setup_all_rx_resources(struct igb_adapter *adapter) 2168static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2029{ 2169{
2170 struct pci_dev *pdev = adapter->pdev;
2030 int i, err = 0; 2171 int i, err = 0;
2031 2172
2032 for (i = 0; i < adapter->num_rx_queues; i++) { 2173 for (i = 0; i < adapter->num_rx_queues; i++) {
2033 err = igb_setup_rx_resources(adapter, &adapter->rx_ring[i]); 2174 err = igb_setup_rx_resources(&adapter->rx_ring[i]);
2034 if (err) { 2175 if (err) {
2035 dev_err(&adapter->pdev->dev, 2176 dev_err(&pdev->dev,
2036 "Allocation for Rx Queue %u failed\n", i); 2177 "Allocation for Rx Queue %u failed\n", i);
2037 for (i--; i >= 0; i--) 2178 for (i--; i >= 0; i--)
2038 igb_free_rx_resources(&adapter->rx_ring[i]); 2179 igb_free_rx_resources(&adapter->rx_ring[i]);
@@ -2044,15 +2185,118 @@ static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
2044} 2185}
2045 2186
2046/** 2187/**
2188 * igb_setup_mrqc - configure the multiple receive queue control registers
2189 * @adapter: Board private structure
2190 **/
2191static void igb_setup_mrqc(struct igb_adapter *adapter)
2192{
2193 struct e1000_hw *hw = &adapter->hw;
2194 u32 mrqc, rxcsum;
2195 u32 j, num_rx_queues, shift = 0, shift2 = 0;
2196 union e1000_reta {
2197 u32 dword;
2198 u8 bytes[4];
2199 } reta;
2200 static const u8 rsshash[40] = {
2201 0x6d, 0x5a, 0x56, 0xda, 0x25, 0x5b, 0x0e, 0xc2, 0x41, 0x67,
2202 0x25, 0x3d, 0x43, 0xa3, 0x8f, 0xb0, 0xd0, 0xca, 0x2b, 0xcb,
2203 0xae, 0x7b, 0x30, 0xb4, 0x77, 0xcb, 0x2d, 0xa3, 0x80, 0x30,
2204 0xf2, 0x0c, 0x6a, 0x42, 0xb7, 0x3b, 0xbe, 0xac, 0x01, 0xfa };
2205
2206 /* Fill out hash function seeds */
2207 for (j = 0; j < 10; j++) {
2208 u32 rsskey = rsshash[(j * 4)];
2209 rsskey |= rsshash[(j * 4) + 1] << 8;
2210 rsskey |= rsshash[(j * 4) + 2] << 16;
2211 rsskey |= rsshash[(j * 4) + 3] << 24;
2212 array_wr32(E1000_RSSRK(0), j, rsskey);
2213 }
2214
2215 num_rx_queues = adapter->rss_queues;
2216
2217 if (adapter->vfs_allocated_count) {
2218 /* 82575 and 82576 supports 2 RSS queues for VMDq */
2219 switch (hw->mac.type) {
2220 case e1000_82576:
2221 shift = 3;
2222 num_rx_queues = 2;
2223 break;
2224 case e1000_82575:
2225 shift = 2;
2226 shift2 = 6;
2227 default:
2228 break;
2229 }
2230 } else {
2231 if (hw->mac.type == e1000_82575)
2232 shift = 6;
2233 }
2234
2235 for (j = 0; j < (32 * 4); j++) {
2236 reta.bytes[j & 3] = (j % num_rx_queues) << shift;
2237 if (shift2)
2238 reta.bytes[j & 3] |= num_rx_queues << shift2;
2239 if ((j & 3) == 3)
2240 wr32(E1000_RETA(j >> 2), reta.dword);
2241 }
2242
2243 /*
2244 * Disable raw packet checksumming so that RSS hash is placed in
2245 * descriptor on writeback. No need to enable TCP/UDP/IP checksum
2246 * offloads as they are enabled by default
2247 */
2248 rxcsum = rd32(E1000_RXCSUM);
2249 rxcsum |= E1000_RXCSUM_PCSD;
2250
2251 if (adapter->hw.mac.type >= e1000_82576)
2252 /* Enable Receive Checksum Offload for SCTP */
2253 rxcsum |= E1000_RXCSUM_CRCOFL;
2254
2255 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2256 wr32(E1000_RXCSUM, rxcsum);
2257
2258 /* If VMDq is enabled then we set the appropriate mode for that, else
2259 * we default to RSS so that an RSS hash is calculated per packet even
2260 * if we are only using one queue */
2261 if (adapter->vfs_allocated_count) {
2262 if (hw->mac.type > e1000_82575) {
2263 /* Set the default pool for the PF's first queue */
2264 u32 vtctl = rd32(E1000_VT_CTL);
2265 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK |
2266 E1000_VT_CTL_DISABLE_DEF_POOL);
2267 vtctl |= adapter->vfs_allocated_count <<
2268 E1000_VT_CTL_DEFAULT_POOL_SHIFT;
2269 wr32(E1000_VT_CTL, vtctl);
2270 }
2271 if (adapter->rss_queues > 1)
2272 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2273 else
2274 mrqc = E1000_MRQC_ENABLE_VMDQ;
2275 } else {
2276 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2277 }
2278 igb_vmm_control(adapter);
2279
2280 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2281 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2282 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2283 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2284 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2285 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2286 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2287 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2288
2289 wr32(E1000_MRQC, mrqc);
2290}
2291
2292/**
2047 * igb_setup_rctl - configure the receive control registers 2293 * igb_setup_rctl - configure the receive control registers
2048 * @adapter: Board private structure 2294 * @adapter: Board private structure
2049 **/ 2295 **/
2050static void igb_setup_rctl(struct igb_adapter *adapter) 2296void igb_setup_rctl(struct igb_adapter *adapter)
2051{ 2297{
2052 struct e1000_hw *hw = &adapter->hw; 2298 struct e1000_hw *hw = &adapter->hw;
2053 u32 rctl; 2299 u32 rctl;
2054 u32 srrctl = 0;
2055 int i;
2056 2300
2057 rctl = rd32(E1000_RCTL); 2301 rctl = rd32(E1000_RCTL);
2058 2302
@@ -2069,75 +2313,45 @@ static void igb_setup_rctl(struct igb_adapter *adapter)
2069 */ 2313 */
2070 rctl |= E1000_RCTL_SECRC; 2314 rctl |= E1000_RCTL_SECRC;
2071 2315
2072 /* 2316 /* disable store bad packets and clear size bits. */
2073 * disable store bad packets and clear size bits.
2074 */
2075 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256); 2317 rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
2076 2318
2077 /* enable LPE when to prevent packets larger than max_frame_size */ 2319 /* enable LPE to prevent packets larger than max_frame_size */
2078 rctl |= E1000_RCTL_LPE; 2320 rctl |= E1000_RCTL_LPE;
2079 2321
2080 /* Setup buffer sizes */ 2322 /* disable queue 0 to prevent tail write w/o re-config */
2081 switch (adapter->rx_buffer_len) { 2323 wr32(E1000_RXDCTL(0), 0);
2082 case IGB_RXBUFFER_256:
2083 rctl |= E1000_RCTL_SZ_256;
2084 break;
2085 case IGB_RXBUFFER_512:
2086 rctl |= E1000_RCTL_SZ_512;
2087 break;
2088 default:
2089 srrctl = ALIGN(adapter->rx_buffer_len, 1024)
2090 >> E1000_SRRCTL_BSIZEPKT_SHIFT;
2091 break;
2092 }
2093
2094 /* 82575 and greater support packet-split where the protocol
2095 * header is placed in skb->data and the packet data is
2096 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
2097 * In the case of a non-split, skb->data is linearly filled,
2098 * followed by the page buffers. Therefore, skb->data is
2099 * sized to hold the largest protocol header.
2100 */
2101 /* allocations using alloc_page take too long for regular MTU
2102 * so only enable packet split for jumbo frames */
2103 if (adapter->netdev->mtu > ETH_DATA_LEN) {
2104 adapter->rx_ps_hdr_size = IGB_RXBUFFER_128;
2105 srrctl |= adapter->rx_ps_hdr_size <<
2106 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2107 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2108 } else {
2109 adapter->rx_ps_hdr_size = 0;
2110 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2111 }
2112 2324
2113 /* Attention!!! For SR-IOV PF driver operations you must enable 2325 /* Attention!!! For SR-IOV PF driver operations you must enable
2114 * queue drop for all VF and PF queues to prevent head of line blocking 2326 * queue drop for all VF and PF queues to prevent head of line blocking
2115 * if an un-trusted VF does not provide descriptors to hardware. 2327 * if an un-trusted VF does not provide descriptors to hardware.
2116 */ 2328 */
2117 if (adapter->vfs_allocated_count) { 2329 if (adapter->vfs_allocated_count) {
2118 u32 vmolr;
2119
2120 /* set all queue drop enable bits */ 2330 /* set all queue drop enable bits */
2121 wr32(E1000_QDE, ALL_QUEUES); 2331 wr32(E1000_QDE, ALL_QUEUES);
2122 srrctl |= E1000_SRRCTL_DROP_EN; 2332 }
2123 2333
2124 /* disable queue 0 to prevent tail write w/o re-config */ 2334 wr32(E1000_RCTL, rctl);
2125 wr32(E1000_RXDCTL(0), 0); 2335}
2126 2336
2127 vmolr = rd32(E1000_VMOLR(adapter->vfs_allocated_count)); 2337static inline int igb_set_vf_rlpml(struct igb_adapter *adapter, int size,
2128 if (rctl & E1000_RCTL_LPE) 2338 int vfn)
2129 vmolr |= E1000_VMOLR_LPE; 2339{
2130 if (adapter->num_rx_queues > 1) 2340 struct e1000_hw *hw = &adapter->hw;
2131 vmolr |= E1000_VMOLR_RSSE; 2341 u32 vmolr;
2132 wr32(E1000_VMOLR(adapter->vfs_allocated_count), vmolr);
2133 }
2134 2342
2135 for (i = 0; i < adapter->num_rx_queues; i++) { 2343 /* if it isn't the PF check to see if VFs are enabled and
2136 int j = adapter->rx_ring[i].reg_idx; 2344 * increase the size to support vlan tags */
2137 wr32(E1000_SRRCTL(j), srrctl); 2345 if (vfn < adapter->vfs_allocated_count &&
2138 } 2346 adapter->vf_data[vfn].vlans_enabled)
2347 size += VLAN_TAG_SIZE;
2139 2348
2140 wr32(E1000_RCTL, rctl); 2349 vmolr = rd32(E1000_VMOLR(vfn));
2350 vmolr &= ~E1000_VMOLR_RLPML_MASK;
2351 vmolr |= size | E1000_VMOLR_LPE;
2352 wr32(E1000_VMOLR(vfn), vmolr);
2353
2354 return 0;
2141} 2355}
2142 2356
2143/** 2357/**
@@ -2159,33 +2373,107 @@ static void igb_rlpml_set(struct igb_adapter *adapter)
2159 * size and set the VMOLR RLPML to the size we need */ 2373 * size and set the VMOLR RLPML to the size we need */
2160 if (pf_id) { 2374 if (pf_id) {
2161 igb_set_vf_rlpml(adapter, max_frame_size, pf_id); 2375 igb_set_vf_rlpml(adapter, max_frame_size, pf_id);
2162 max_frame_size = MAX_STD_JUMBO_FRAME_SIZE + VLAN_TAG_SIZE; 2376 max_frame_size = MAX_JUMBO_FRAME_SIZE;
2163 } 2377 }
2164 2378
2165 wr32(E1000_RLPML, max_frame_size); 2379 wr32(E1000_RLPML, max_frame_size);
2166} 2380}
2167 2381
2382static inline void igb_set_vmolr(struct igb_adapter *adapter, int vfn)
2383{
2384 struct e1000_hw *hw = &adapter->hw;
2385 u32 vmolr;
2386
2387 /*
2388 * This register exists only on 82576 and newer so if we are older then
2389 * we should exit and do nothing
2390 */
2391 if (hw->mac.type < e1000_82576)
2392 return;
2393
2394 vmolr = rd32(E1000_VMOLR(vfn));
2395 vmolr |= E1000_VMOLR_AUPE | /* Accept untagged packets */
2396 E1000_VMOLR_STRVLAN; /* Strip vlan tags */
2397
2398 /* clear all bits that might not be set */
2399 vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
2400
2401 if (adapter->rss_queues > 1 && vfn == adapter->vfs_allocated_count)
2402 vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
2403 /*
2404 * for VMDq only allow the VFs and pool 0 to accept broadcast and
2405 * multicast packets
2406 */
2407 if (vfn <= adapter->vfs_allocated_count)
2408 vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
2409
2410 wr32(E1000_VMOLR(vfn), vmolr);
2411}
2412
2168/** 2413/**
2169 * igb_configure_vt_default_pool - Configure VT default pool 2414 * igb_configure_rx_ring - Configure a receive ring after Reset
2170 * @adapter: board private structure 2415 * @adapter: board private structure
2416 * @ring: receive ring to be configured
2171 * 2417 *
2172 * Configure the default pool 2418 * Configure the Rx unit of the MAC after a reset.
2173 **/ 2419 **/
2174static void igb_configure_vt_default_pool(struct igb_adapter *adapter) 2420void igb_configure_rx_ring(struct igb_adapter *adapter,
2421 struct igb_ring *ring)
2175{ 2422{
2176 struct e1000_hw *hw = &adapter->hw; 2423 struct e1000_hw *hw = &adapter->hw;
2177 u16 pf_id = adapter->vfs_allocated_count; 2424 u64 rdba = ring->dma;
2178 u32 vtctl; 2425 int reg_idx = ring->reg_idx;
2426 u32 srrctl, rxdctl;
2427
2428 /* disable the queue */
2429 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2430 wr32(E1000_RXDCTL(reg_idx),
2431 rxdctl & ~E1000_RXDCTL_QUEUE_ENABLE);
2432
2433 /* Set DMA base address registers */
2434 wr32(E1000_RDBAL(reg_idx),
2435 rdba & 0x00000000ffffffffULL);
2436 wr32(E1000_RDBAH(reg_idx), rdba >> 32);
2437 wr32(E1000_RDLEN(reg_idx),
2438 ring->count * sizeof(union e1000_adv_rx_desc));
2439
2440 /* initialize head and tail */
2441 ring->head = hw->hw_addr + E1000_RDH(reg_idx);
2442 ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
2443 writel(0, ring->head);
2444 writel(0, ring->tail);
2445
2446 /* set descriptor configuration */
2447 if (ring->rx_buffer_len < IGB_RXBUFFER_1024) {
2448 srrctl = ALIGN(ring->rx_buffer_len, 64) <<
2449 E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
2450#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384
2451 srrctl |= IGB_RXBUFFER_16384 >>
2452 E1000_SRRCTL_BSIZEPKT_SHIFT;
2453#else
2454 srrctl |= (PAGE_SIZE / 2) >>
2455 E1000_SRRCTL_BSIZEPKT_SHIFT;
2456#endif
2457 srrctl |= E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS;
2458 } else {
2459 srrctl = ALIGN(ring->rx_buffer_len, 1024) >>
2460 E1000_SRRCTL_BSIZEPKT_SHIFT;
2461 srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
2462 }
2179 2463
2180 /* not in sr-iov mode - do nothing */ 2464 wr32(E1000_SRRCTL(reg_idx), srrctl);
2181 if (!pf_id) 2465
2182 return; 2466 /* set filtering for VMDQ pools */
2467 igb_set_vmolr(adapter, reg_idx & 0x7);
2183 2468
2184 vtctl = rd32(E1000_VT_CTL); 2469 /* enable receive descriptor fetching */
2185 vtctl &= ~(E1000_VT_CTL_DEFAULT_POOL_MASK | 2470 rxdctl = rd32(E1000_RXDCTL(reg_idx));
2186 E1000_VT_CTL_DISABLE_DEF_POOL); 2471 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2187 vtctl |= pf_id << E1000_VT_CTL_DEFAULT_POOL_SHIFT; 2472 rxdctl &= 0xFFF00000;
2188 wr32(E1000_VT_CTL, vtctl); 2473 rxdctl |= IGB_RX_PTHRESH;
2474 rxdctl |= IGB_RX_HTHRESH << 8;
2475 rxdctl |= IGB_RX_WTHRESH << 16;
2476 wr32(E1000_RXDCTL(reg_idx), rxdctl);
2189} 2477}
2190 2478
2191/** 2479/**
@@ -2196,112 +2484,19 @@ static void igb_configure_vt_default_pool(struct igb_adapter *adapter)
2196 **/ 2484 **/
2197static void igb_configure_rx(struct igb_adapter *adapter) 2485static void igb_configure_rx(struct igb_adapter *adapter)
2198{ 2486{
2199 u64 rdba;
2200 struct e1000_hw *hw = &adapter->hw;
2201 u32 rctl, rxcsum;
2202 u32 rxdctl;
2203 int i; 2487 int i;
2204 2488
2205 /* disable receives while setting up the descriptors */ 2489 /* set UTA to appropriate mode */
2206 rctl = rd32(E1000_RCTL); 2490 igb_set_uta(adapter);
2207 wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
2208 wrfl();
2209 mdelay(10);
2210 2491
2211 if (adapter->itr_setting > 3) 2492 /* set the correct pool for the PF default MAC address in entry 0 */
2212 wr32(E1000_ITR, adapter->itr); 2493 igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0,
2494 adapter->vfs_allocated_count);
2213 2495
2214 /* Setup the HW Rx Head and Tail Descriptor Pointers and 2496 /* Setup the HW Rx Head and Tail Descriptor Pointers and
2215 * the Base and Length of the Rx Descriptor Ring */ 2497 * the Base and Length of the Rx Descriptor Ring */
2216 for (i = 0; i < adapter->num_rx_queues; i++) { 2498 for (i = 0; i < adapter->num_rx_queues; i++)
2217 struct igb_ring *ring = &adapter->rx_ring[i]; 2499 igb_configure_rx_ring(adapter, &adapter->rx_ring[i]);
2218 int j = ring->reg_idx;
2219 rdba = ring->dma;
2220 wr32(E1000_RDBAL(j),
2221 rdba & 0x00000000ffffffffULL);
2222 wr32(E1000_RDBAH(j), rdba >> 32);
2223 wr32(E1000_RDLEN(j),
2224 ring->count * sizeof(union e1000_adv_rx_desc));
2225
2226 ring->head = E1000_RDH(j);
2227 ring->tail = E1000_RDT(j);
2228 writel(0, hw->hw_addr + ring->tail);
2229 writel(0, hw->hw_addr + ring->head);
2230
2231 rxdctl = rd32(E1000_RXDCTL(j));
2232 rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
2233 rxdctl &= 0xFFF00000;
2234 rxdctl |= IGB_RX_PTHRESH;
2235 rxdctl |= IGB_RX_HTHRESH << 8;
2236 rxdctl |= IGB_RX_WTHRESH << 16;
2237 wr32(E1000_RXDCTL(j), rxdctl);
2238 }
2239
2240 if (adapter->num_rx_queues > 1) {
2241 u32 random[10];
2242 u32 mrqc;
2243 u32 j, shift;
2244 union e1000_reta {
2245 u32 dword;
2246 u8 bytes[4];
2247 } reta;
2248
2249 get_random_bytes(&random[0], 40);
2250
2251 if (hw->mac.type >= e1000_82576)
2252 shift = 0;
2253 else
2254 shift = 6;
2255 for (j = 0; j < (32 * 4); j++) {
2256 reta.bytes[j & 3] =
2257 adapter->rx_ring[(j % adapter->num_rx_queues)].reg_idx << shift;
2258 if ((j & 3) == 3)
2259 writel(reta.dword,
2260 hw->hw_addr + E1000_RETA(0) + (j & ~3));
2261 }
2262 if (adapter->vfs_allocated_count)
2263 mrqc = E1000_MRQC_ENABLE_VMDQ_RSS_2Q;
2264 else
2265 mrqc = E1000_MRQC_ENABLE_RSS_4Q;
2266
2267 /* Fill out hash function seeds */
2268 for (j = 0; j < 10; j++)
2269 array_wr32(E1000_RSSRK(0), j, random[j]);
2270
2271 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
2272 E1000_MRQC_RSS_FIELD_IPV4_TCP);
2273 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6 |
2274 E1000_MRQC_RSS_FIELD_IPV6_TCP);
2275 mrqc |= (E1000_MRQC_RSS_FIELD_IPV4_UDP |
2276 E1000_MRQC_RSS_FIELD_IPV6_UDP);
2277 mrqc |= (E1000_MRQC_RSS_FIELD_IPV6_UDP_EX |
2278 E1000_MRQC_RSS_FIELD_IPV6_TCP_EX);
2279
2280 wr32(E1000_MRQC, mrqc);
2281 } else if (adapter->vfs_allocated_count) {
2282 /* Enable multi-queue for sr-iov */
2283 wr32(E1000_MRQC, E1000_MRQC_ENABLE_VMDQ);
2284 }
2285
2286 /* Enable Receive Checksum Offload for TCP and UDP */
2287 rxcsum = rd32(E1000_RXCSUM);
2288 /* Disable raw packet checksumming */
2289 rxcsum |= E1000_RXCSUM_PCSD;
2290
2291 if (adapter->hw.mac.type == e1000_82576)
2292 /* Enable Receive Checksum Offload for SCTP */
2293 rxcsum |= E1000_RXCSUM_CRCOFL;
2294
2295 /* Don't need to set TUOFL or IPOFL, they default to 1 */
2296 wr32(E1000_RXCSUM, rxcsum);
2297
2298 /* Set the default pool for the PF's first queue */
2299 igb_configure_vt_default_pool(adapter);
2300
2301 igb_rlpml_set(adapter);
2302
2303 /* Enable Receives */
2304 wr32(E1000_RCTL, rctl);
2305} 2500}
2306 2501
2307/** 2502/**
@@ -2312,14 +2507,17 @@ static void igb_configure_rx(struct igb_adapter *adapter)
2312 **/ 2507 **/
2313void igb_free_tx_resources(struct igb_ring *tx_ring) 2508void igb_free_tx_resources(struct igb_ring *tx_ring)
2314{ 2509{
2315 struct pci_dev *pdev = tx_ring->adapter->pdev;
2316
2317 igb_clean_tx_ring(tx_ring); 2510 igb_clean_tx_ring(tx_ring);
2318 2511
2319 vfree(tx_ring->buffer_info); 2512 vfree(tx_ring->buffer_info);
2320 tx_ring->buffer_info = NULL; 2513 tx_ring->buffer_info = NULL;
2321 2514
2322 pci_free_consistent(pdev, tx_ring->size, tx_ring->desc, tx_ring->dma); 2515 /* if not set, then don't free */
2516 if (!tx_ring->desc)
2517 return;
2518
2519 pci_free_consistent(tx_ring->pdev, tx_ring->size,
2520 tx_ring->desc, tx_ring->dma);
2323 2521
2324 tx_ring->desc = NULL; 2522 tx_ring->desc = NULL;
2325} 2523}
@@ -2338,12 +2536,13 @@ static void igb_free_all_tx_resources(struct igb_adapter *adapter)
2338 igb_free_tx_resources(&adapter->tx_ring[i]); 2536 igb_free_tx_resources(&adapter->tx_ring[i]);
2339} 2537}
2340 2538
2341static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter, 2539void igb_unmap_and_free_tx_resource(struct igb_ring *tx_ring,
2342 struct igb_buffer *buffer_info) 2540 struct igb_buffer *buffer_info)
2343{ 2541{
2344 buffer_info->dma = 0; 2542 buffer_info->dma = 0;
2345 if (buffer_info->skb) { 2543 if (buffer_info->skb) {
2346 skb_dma_unmap(&adapter->pdev->dev, buffer_info->skb, 2544 skb_dma_unmap(&tx_ring->pdev->dev,
2545 buffer_info->skb,
2347 DMA_TO_DEVICE); 2546 DMA_TO_DEVICE);
2348 dev_kfree_skb_any(buffer_info->skb); 2547 dev_kfree_skb_any(buffer_info->skb);
2349 buffer_info->skb = NULL; 2548 buffer_info->skb = NULL;
@@ -2358,7 +2557,6 @@ static void igb_unmap_and_free_tx_resource(struct igb_adapter *adapter,
2358 **/ 2557 **/
2359static void igb_clean_tx_ring(struct igb_ring *tx_ring) 2558static void igb_clean_tx_ring(struct igb_ring *tx_ring)
2360{ 2559{
2361 struct igb_adapter *adapter = tx_ring->adapter;
2362 struct igb_buffer *buffer_info; 2560 struct igb_buffer *buffer_info;
2363 unsigned long size; 2561 unsigned long size;
2364 unsigned int i; 2562 unsigned int i;
@@ -2369,21 +2567,17 @@ static void igb_clean_tx_ring(struct igb_ring *tx_ring)
2369 2567
2370 for (i = 0; i < tx_ring->count; i++) { 2568 for (i = 0; i < tx_ring->count; i++) {
2371 buffer_info = &tx_ring->buffer_info[i]; 2569 buffer_info = &tx_ring->buffer_info[i];
2372 igb_unmap_and_free_tx_resource(adapter, buffer_info); 2570 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
2373 } 2571 }
2374 2572
2375 size = sizeof(struct igb_buffer) * tx_ring->count; 2573 size = sizeof(struct igb_buffer) * tx_ring->count;
2376 memset(tx_ring->buffer_info, 0, size); 2574 memset(tx_ring->buffer_info, 0, size);
2377 2575
2378 /* Zero out the descriptor ring */ 2576 /* Zero out the descriptor ring */
2379
2380 memset(tx_ring->desc, 0, tx_ring->size); 2577 memset(tx_ring->desc, 0, tx_ring->size);
2381 2578
2382 tx_ring->next_to_use = 0; 2579 tx_ring->next_to_use = 0;
2383 tx_ring->next_to_clean = 0; 2580 tx_ring->next_to_clean = 0;
2384
2385 writel(0, adapter->hw.hw_addr + tx_ring->head);
2386 writel(0, adapter->hw.hw_addr + tx_ring->tail);
2387} 2581}
2388 2582
2389/** 2583/**
@@ -2406,14 +2600,17 @@ static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
2406 **/ 2600 **/
2407void igb_free_rx_resources(struct igb_ring *rx_ring) 2601void igb_free_rx_resources(struct igb_ring *rx_ring)
2408{ 2602{
2409 struct pci_dev *pdev = rx_ring->adapter->pdev;
2410
2411 igb_clean_rx_ring(rx_ring); 2603 igb_clean_rx_ring(rx_ring);
2412 2604
2413 vfree(rx_ring->buffer_info); 2605 vfree(rx_ring->buffer_info);
2414 rx_ring->buffer_info = NULL; 2606 rx_ring->buffer_info = NULL;
2415 2607
2416 pci_free_consistent(pdev, rx_ring->size, rx_ring->desc, rx_ring->dma); 2608 /* if not set, then don't free */
2609 if (!rx_ring->desc)
2610 return;
2611
2612 pci_free_consistent(rx_ring->pdev, rx_ring->size,
2613 rx_ring->desc, rx_ring->dma);
2417 2614
2418 rx_ring->desc = NULL; 2615 rx_ring->desc = NULL;
2419} 2616}
@@ -2438,26 +2635,21 @@ static void igb_free_all_rx_resources(struct igb_adapter *adapter)
2438 **/ 2635 **/
2439static void igb_clean_rx_ring(struct igb_ring *rx_ring) 2636static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2440{ 2637{
2441 struct igb_adapter *adapter = rx_ring->adapter;
2442 struct igb_buffer *buffer_info; 2638 struct igb_buffer *buffer_info;
2443 struct pci_dev *pdev = adapter->pdev;
2444 unsigned long size; 2639 unsigned long size;
2445 unsigned int i; 2640 unsigned int i;
2446 2641
2447 if (!rx_ring->buffer_info) 2642 if (!rx_ring->buffer_info)
2448 return; 2643 return;
2644
2449 /* Free all the Rx ring sk_buffs */ 2645 /* Free all the Rx ring sk_buffs */
2450 for (i = 0; i < rx_ring->count; i++) { 2646 for (i = 0; i < rx_ring->count; i++) {
2451 buffer_info = &rx_ring->buffer_info[i]; 2647 buffer_info = &rx_ring->buffer_info[i];
2452 if (buffer_info->dma) { 2648 if (buffer_info->dma) {
2453 if (adapter->rx_ps_hdr_size) 2649 pci_unmap_single(rx_ring->pdev,
2454 pci_unmap_single(pdev, buffer_info->dma, 2650 buffer_info->dma,
2455 adapter->rx_ps_hdr_size, 2651 rx_ring->rx_buffer_len,
2456 PCI_DMA_FROMDEVICE); 2652 PCI_DMA_FROMDEVICE);
2457 else
2458 pci_unmap_single(pdev, buffer_info->dma,
2459 adapter->rx_buffer_len,
2460 PCI_DMA_FROMDEVICE);
2461 buffer_info->dma = 0; 2653 buffer_info->dma = 0;
2462 } 2654 }
2463 2655
@@ -2465,14 +2657,16 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2465 dev_kfree_skb(buffer_info->skb); 2657 dev_kfree_skb(buffer_info->skb);
2466 buffer_info->skb = NULL; 2658 buffer_info->skb = NULL;
2467 } 2659 }
2660 if (buffer_info->page_dma) {
2661 pci_unmap_page(rx_ring->pdev,
2662 buffer_info->page_dma,
2663 PAGE_SIZE / 2,
2664 PCI_DMA_FROMDEVICE);
2665 buffer_info->page_dma = 0;
2666 }
2468 if (buffer_info->page) { 2667 if (buffer_info->page) {
2469 if (buffer_info->page_dma)
2470 pci_unmap_page(pdev, buffer_info->page_dma,
2471 PAGE_SIZE / 2,
2472 PCI_DMA_FROMDEVICE);
2473 put_page(buffer_info->page); 2668 put_page(buffer_info->page);
2474 buffer_info->page = NULL; 2669 buffer_info->page = NULL;
2475 buffer_info->page_dma = 0;
2476 buffer_info->page_offset = 0; 2670 buffer_info->page_offset = 0;
2477 } 2671 }
2478 } 2672 }
@@ -2485,9 +2679,6 @@ static void igb_clean_rx_ring(struct igb_ring *rx_ring)
2485 2679
2486 rx_ring->next_to_clean = 0; 2680 rx_ring->next_to_clean = 0;
2487 rx_ring->next_to_use = 0; 2681 rx_ring->next_to_use = 0;
2488
2489 writel(0, adapter->hw.hw_addr + rx_ring->head);
2490 writel(0, adapter->hw.hw_addr + rx_ring->tail);
2491} 2682}
2492 2683
2493/** 2684/**
@@ -2521,61 +2712,90 @@ static int igb_set_mac(struct net_device *netdev, void *p)
2521 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len); 2712 memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2522 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len); 2713 memcpy(hw->mac.addr, addr->sa_data, netdev->addr_len);
2523 2714
2524 igb_rar_set(hw, hw->mac.addr, 0); 2715 /* set the correct pool for the new PF MAC address in entry 0 */
2525 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 0); 2716 igb_rar_set_qsel(adapter, hw->mac.addr, 0,
2717 adapter->vfs_allocated_count);
2526 2718
2527 return 0; 2719 return 0;
2528} 2720}
2529 2721
2530/** 2722/**
2531 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set 2723 * igb_write_mc_addr_list - write multicast addresses to MTA
2532 * @netdev: network interface device structure 2724 * @netdev: network interface device structure
2533 * 2725 *
2534 * The set_rx_mode entry point is called whenever the unicast or multicast 2726 * Writes multicast address list to the MTA hash table.
2535 * address lists or the network interface flags are updated. This routine is 2727 * Returns: -ENOMEM on failure
2536 * responsible for configuring the hardware for proper unicast, multicast, 2728 * 0 on no addresses written
2537 * promiscuous mode, and all-multi behavior. 2729 * X on writing X addresses to MTA
2538 **/ 2730 **/
2539static void igb_set_rx_mode(struct net_device *netdev) 2731static int igb_write_mc_addr_list(struct net_device *netdev)
2540{ 2732{
2541 struct igb_adapter *adapter = netdev_priv(netdev); 2733 struct igb_adapter *adapter = netdev_priv(netdev);
2542 struct e1000_hw *hw = &adapter->hw; 2734 struct e1000_hw *hw = &adapter->hw;
2543 unsigned int rar_entries = hw->mac.rar_entry_count -
2544 (adapter->vfs_allocated_count + 1);
2545 struct dev_mc_list *mc_ptr = netdev->mc_list; 2735 struct dev_mc_list *mc_ptr = netdev->mc_list;
2546 u8 *mta_list = NULL; 2736 u8 *mta_list;
2547 u32 rctl; 2737 u32 vmolr = 0;
2548 int i; 2738 int i;
2549 2739
2550 /* Check for Promiscuous and All Multicast modes */ 2740 if (!netdev->mc_count) {
2551 rctl = rd32(E1000_RCTL); 2741 /* nothing to program, so clear mc list */
2742 igb_update_mc_addr_list(hw, NULL, 0);
2743 igb_restore_vf_multicasts(adapter);
2744 return 0;
2745 }
2552 2746
2553 if (netdev->flags & IFF_PROMISC) { 2747 mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC);
2554 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE); 2748 if (!mta_list)
2555 rctl &= ~E1000_RCTL_VFE; 2749 return -ENOMEM;
2556 } else {
2557 if (netdev->flags & IFF_ALLMULTI)
2558 rctl |= E1000_RCTL_MPE;
2559 else
2560 rctl &= ~E1000_RCTL_MPE;
2561 2750
2562 if (netdev->uc.count > rar_entries) 2751 /* set vmolr receive overflow multicast bit */
2563 rctl |= E1000_RCTL_UPE; 2752 vmolr |= E1000_VMOLR_ROMPE;
2564 else 2753
2565 rctl &= ~E1000_RCTL_UPE; 2754 /* The shared function expects a packed array of only addresses. */
2566 rctl |= E1000_RCTL_VFE; 2755 mc_ptr = netdev->mc_list;
2756
2757 for (i = 0; i < netdev->mc_count; i++) {
2758 if (!mc_ptr)
2759 break;
2760 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2761 mc_ptr = mc_ptr->next;
2567 } 2762 }
2568 wr32(E1000_RCTL, rctl); 2763 igb_update_mc_addr_list(hw, mta_list, i);
2764 kfree(mta_list);
2765
2766 return netdev->mc_count;
2767}
2768
2769/**
2770 * igb_write_uc_addr_list - write unicast addresses to RAR table
2771 * @netdev: network interface device structure
2772 *
2773 * Writes unicast address list to the RAR table.
2774 * Returns: -ENOMEM on failure/insufficient address space
2775 * 0 on no addresses written
2776 * X on writing X addresses to the RAR table
2777 **/
2778static int igb_write_uc_addr_list(struct net_device *netdev)
2779{
2780 struct igb_adapter *adapter = netdev_priv(netdev);
2781 struct e1000_hw *hw = &adapter->hw;
2782 unsigned int vfn = adapter->vfs_allocated_count;
2783 unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
2784 int count = 0;
2785
2786 /* return ENOMEM indicating insufficient memory for addresses */
2787 if (netdev->uc.count > rar_entries)
2788 return -ENOMEM;
2569 2789
2570 if (netdev->uc.count && rar_entries) { 2790 if (netdev->uc.count && rar_entries) {
2571 struct netdev_hw_addr *ha; 2791 struct netdev_hw_addr *ha;
2572 list_for_each_entry(ha, &netdev->uc.list, list) { 2792 list_for_each_entry(ha, &netdev->uc.list, list) {
2573 if (!rar_entries) 2793 if (!rar_entries)
2574 break; 2794 break;
2575 igb_rar_set(hw, ha->addr, rar_entries); 2795 igb_rar_set_qsel(adapter, ha->addr,
2576 igb_set_rah_pool(hw, adapter->vfs_allocated_count, 2796 rar_entries--,
2577 rar_entries); 2797 vfn);
2578 rar_entries--; 2798 count++;
2579 } 2799 }
2580 } 2800 }
2581 /* write the addresses in reverse order to avoid write combining */ 2801 /* write the addresses in reverse order to avoid write combining */
@@ -2585,29 +2805,79 @@ static void igb_set_rx_mode(struct net_device *netdev)
2585 } 2805 }
2586 wrfl(); 2806 wrfl();
2587 2807
2588 if (!netdev->mc_count) { 2808 return count;
2589 /* nothing to program, so clear mc list */ 2809}
2590 igb_update_mc_addr_list(hw, NULL, 0); 2810
2591 igb_restore_vf_multicasts(adapter); 2811/**
2592 return; 2812 * igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
2813 * @netdev: network interface device structure
2814 *
2815 * The set_rx_mode entry point is called whenever the unicast or multicast
2816 * address lists or the network interface flags are updated. This routine is
2817 * responsible for configuring the hardware for proper unicast, multicast,
2818 * promiscuous mode, and all-multi behavior.
2819 **/
2820static void igb_set_rx_mode(struct net_device *netdev)
2821{
2822 struct igb_adapter *adapter = netdev_priv(netdev);
2823 struct e1000_hw *hw = &adapter->hw;
2824 unsigned int vfn = adapter->vfs_allocated_count;
2825 u32 rctl, vmolr = 0;
2826 int count;
2827
2828 /* Check for Promiscuous and All Multicast modes */
2829 rctl = rd32(E1000_RCTL);
2830
2831 /* clear the effected bits */
2832 rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
2833
2834 if (netdev->flags & IFF_PROMISC) {
2835 rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
2836 vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
2837 } else {
2838 if (netdev->flags & IFF_ALLMULTI) {
2839 rctl |= E1000_RCTL_MPE;
2840 vmolr |= E1000_VMOLR_MPME;
2841 } else {
2842 /*
2843 * Write addresses to the MTA, if the attempt fails
2844 * then we should just turn on promiscous mode so
2845 * that we can at least receive multicast traffic
2846 */
2847 count = igb_write_mc_addr_list(netdev);
2848 if (count < 0) {
2849 rctl |= E1000_RCTL_MPE;
2850 vmolr |= E1000_VMOLR_MPME;
2851 } else if (count) {
2852 vmolr |= E1000_VMOLR_ROMPE;
2853 }
2854 }
2855 /*
2856 * Write addresses to available RAR registers, if there is not
2857 * sufficient space to store all the addresses then enable
2858 * unicast promiscous mode
2859 */
2860 count = igb_write_uc_addr_list(netdev);
2861 if (count < 0) {
2862 rctl |= E1000_RCTL_UPE;
2863 vmolr |= E1000_VMOLR_ROPE;
2864 }
2865 rctl |= E1000_RCTL_VFE;
2593 } 2866 }
2867 wr32(E1000_RCTL, rctl);
2594 2868
2595 mta_list = kzalloc(netdev->mc_count * 6, GFP_ATOMIC); 2869 /*
2596 if (!mta_list) { 2870 * In order to support SR-IOV and eventually VMDq it is necessary to set
2597 dev_err(&adapter->pdev->dev, 2871 * the VMOLR to enable the appropriate modes. Without this workaround
2598 "failed to allocate multicast filter list\n"); 2872 * we will have issues with VLAN tag stripping not being done for frames
2873 * that are only arriving because we are the default pool
2874 */
2875 if (hw->mac.type < e1000_82576)
2599 return; 2876 return;
2600 }
2601 2877
2602 /* The shared function expects a packed array of only addresses. */ 2878 vmolr |= rd32(E1000_VMOLR(vfn)) &
2603 for (i = 0; i < netdev->mc_count; i++) { 2879 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
2604 if (!mc_ptr) 2880 wr32(E1000_VMOLR(vfn), vmolr);
2605 break;
2606 memcpy(mta_list + (i*ETH_ALEN), mc_ptr->dmi_addr, ETH_ALEN);
2607 mc_ptr = mc_ptr->next;
2608 }
2609 igb_update_mc_addr_list(hw, mta_list, i);
2610 kfree(mta_list);
2611 igb_restore_vf_multicasts(adapter); 2881 igb_restore_vf_multicasts(adapter);
2612} 2882}
2613 2883
@@ -2669,37 +2939,33 @@ static void igb_watchdog(unsigned long data)
2669static void igb_watchdog_task(struct work_struct *work) 2939static void igb_watchdog_task(struct work_struct *work)
2670{ 2940{
2671 struct igb_adapter *adapter = container_of(work, 2941 struct igb_adapter *adapter = container_of(work,
2672 struct igb_adapter, watchdog_task); 2942 struct igb_adapter,
2943 watchdog_task);
2673 struct e1000_hw *hw = &adapter->hw; 2944 struct e1000_hw *hw = &adapter->hw;
2674 struct net_device *netdev = adapter->netdev; 2945 struct net_device *netdev = adapter->netdev;
2675 struct igb_ring *tx_ring = adapter->tx_ring;
2676 u32 link; 2946 u32 link;
2677 u32 eics = 0;
2678 int i; 2947 int i;
2679 2948
2680 link = igb_has_link(adapter); 2949 link = igb_has_link(adapter);
2681 if ((netif_carrier_ok(netdev)) && link)
2682 goto link_up;
2683
2684 if (link) { 2950 if (link) {
2685 if (!netif_carrier_ok(netdev)) { 2951 if (!netif_carrier_ok(netdev)) {
2686 u32 ctrl; 2952 u32 ctrl;
2687 hw->mac.ops.get_speed_and_duplex(&adapter->hw, 2953 hw->mac.ops.get_speed_and_duplex(hw,
2688 &adapter->link_speed, 2954 &adapter->link_speed,
2689 &adapter->link_duplex); 2955 &adapter->link_duplex);
2690 2956
2691 ctrl = rd32(E1000_CTRL); 2957 ctrl = rd32(E1000_CTRL);
2692 /* Links status message must follow this format */ 2958 /* Links status message must follow this format */
2693 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, " 2959 printk(KERN_INFO "igb: %s NIC Link is Up %d Mbps %s, "
2694 "Flow Control: %s\n", 2960 "Flow Control: %s\n",
2695 netdev->name, 2961 netdev->name,
2696 adapter->link_speed, 2962 adapter->link_speed,
2697 adapter->link_duplex == FULL_DUPLEX ? 2963 adapter->link_duplex == FULL_DUPLEX ?
2698 "Full Duplex" : "Half Duplex", 2964 "Full Duplex" : "Half Duplex",
2699 ((ctrl & E1000_CTRL_TFCE) && (ctrl & 2965 ((ctrl & E1000_CTRL_TFCE) &&
2700 E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl & 2966 (ctrl & E1000_CTRL_RFCE)) ? "RX/TX" :
2701 E1000_CTRL_RFCE) ? "RX" : ((ctrl & 2967 ((ctrl & E1000_CTRL_RFCE) ? "RX" :
2702 E1000_CTRL_TFCE) ? "TX" : "None"))); 2968 ((ctrl & E1000_CTRL_TFCE) ? "TX" : "None")));
2703 2969
2704 /* tweak tx_queue_len according to speed/duplex and 2970 /* tweak tx_queue_len according to speed/duplex and
2705 * adjust the timeout factor */ 2971 * adjust the timeout factor */
@@ -2743,46 +3009,40 @@ static void igb_watchdog_task(struct work_struct *work)
2743 } 3009 }
2744 } 3010 }
2745 3011
2746link_up:
2747 igb_update_stats(adapter); 3012 igb_update_stats(adapter);
3013 igb_update_adaptive(hw);
2748 3014
2749 hw->mac.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old; 3015 for (i = 0; i < adapter->num_tx_queues; i++) {
2750 adapter->tpt_old = adapter->stats.tpt; 3016 struct igb_ring *tx_ring = &adapter->tx_ring[i];
2751 hw->mac.collision_delta = adapter->stats.colc - adapter->colc_old; 3017 if (!netif_carrier_ok(netdev)) {
2752 adapter->colc_old = adapter->stats.colc;
2753
2754 adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
2755 adapter->gorc_old = adapter->stats.gorc;
2756 adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
2757 adapter->gotc_old = adapter->stats.gotc;
2758
2759 igb_update_adaptive(&adapter->hw);
2760
2761 if (!netif_carrier_ok(netdev)) {
2762 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
2763 /* We've lost link, so the controller stops DMA, 3018 /* We've lost link, so the controller stops DMA,
2764 * but we've got queued Tx work that's never going 3019 * but we've got queued Tx work that's never going
2765 * to get done, so reset controller to flush Tx. 3020 * to get done, so reset controller to flush Tx.
2766 * (Do the reset outside of interrupt context). */ 3021 * (Do the reset outside of interrupt context). */
2767 adapter->tx_timeout_count++; 3022 if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
2768 schedule_work(&adapter->reset_task); 3023 adapter->tx_timeout_count++;
2769 /* return immediately since reset is imminent */ 3024 schedule_work(&adapter->reset_task);
2770 return; 3025 /* return immediately since reset is imminent */
3026 return;
3027 }
2771 } 3028 }
3029
3030 /* Force detection of hung controller every watchdog period */
3031 tx_ring->detect_tx_hung = true;
2772 } 3032 }
2773 3033
2774 /* Cause software interrupt to ensure rx ring is cleaned */ 3034 /* Cause software interrupt to ensure rx ring is cleaned */
2775 if (adapter->msix_entries) { 3035 if (adapter->msix_entries) {
2776 for (i = 0; i < adapter->num_rx_queues; i++) 3036 u32 eics = 0;
2777 eics |= adapter->rx_ring[i].eims_value; 3037 for (i = 0; i < adapter->num_q_vectors; i++) {
3038 struct igb_q_vector *q_vector = adapter->q_vector[i];
3039 eics |= q_vector->eims_value;
3040 }
2778 wr32(E1000_EICS, eics); 3041 wr32(E1000_EICS, eics);
2779 } else { 3042 } else {
2780 wr32(E1000_ICS, E1000_ICS_RXDMT0); 3043 wr32(E1000_ICS, E1000_ICS_RXDMT0);
2781 } 3044 }
2782 3045
2783 /* Force detection of hung controller every watchdog period */
2784 tx_ring->detect_tx_hung = true;
2785
2786 /* Reset the timer */ 3046 /* Reset the timer */
2787 if (!test_bit(__IGB_DOWN, &adapter->state)) 3047 if (!test_bit(__IGB_DOWN, &adapter->state))
2788 mod_timer(&adapter->watchdog_timer, 3048 mod_timer(&adapter->watchdog_timer,
@@ -2796,7 +3056,6 @@ enum latency_range {
2796 latency_invalid = 255 3056 latency_invalid = 255
2797}; 3057};
2798 3058
2799
2800/** 3059/**
2801 * igb_update_ring_itr - update the dynamic ITR value based on packet size 3060 * igb_update_ring_itr - update the dynamic ITR value based on packet size
2802 * 3061 *
@@ -2811,25 +3070,37 @@ enum latency_range {
2811 * parameter (see igb_param.c) 3070 * parameter (see igb_param.c)
2812 * NOTE: This function is called only when operating in a multiqueue 3071 * NOTE: This function is called only when operating in a multiqueue
2813 * receive environment. 3072 * receive environment.
2814 * @rx_ring: pointer to ring 3073 * @q_vector: pointer to q_vector
2815 **/ 3074 **/
2816static void igb_update_ring_itr(struct igb_ring *rx_ring) 3075static void igb_update_ring_itr(struct igb_q_vector *q_vector)
2817{ 3076{
2818 int new_val = rx_ring->itr_val; 3077 int new_val = q_vector->itr_val;
2819 int avg_wire_size = 0; 3078 int avg_wire_size = 0;
2820 struct igb_adapter *adapter = rx_ring->adapter; 3079 struct igb_adapter *adapter = q_vector->adapter;
2821
2822 if (!rx_ring->total_packets)
2823 goto clear_counts; /* no packets, so don't do anything */
2824 3080
2825 /* For non-gigabit speeds, just fix the interrupt rate at 4000 3081 /* For non-gigabit speeds, just fix the interrupt rate at 4000
2826 * ints/sec - ITR timer value of 120 ticks. 3082 * ints/sec - ITR timer value of 120 ticks.
2827 */ 3083 */
2828 if (adapter->link_speed != SPEED_1000) { 3084 if (adapter->link_speed != SPEED_1000) {
2829 new_val = 120; 3085 new_val = 976;
2830 goto set_itr_val; 3086 goto set_itr_val;
2831 } 3087 }
2832 avg_wire_size = rx_ring->total_bytes / rx_ring->total_packets; 3088
3089 if (q_vector->rx_ring && q_vector->rx_ring->total_packets) {
3090 struct igb_ring *ring = q_vector->rx_ring;
3091 avg_wire_size = ring->total_bytes / ring->total_packets;
3092 }
3093
3094 if (q_vector->tx_ring && q_vector->tx_ring->total_packets) {
3095 struct igb_ring *ring = q_vector->tx_ring;
3096 avg_wire_size = max_t(u32, avg_wire_size,
3097 (ring->total_bytes /
3098 ring->total_packets));
3099 }
3100
3101 /* if avg_wire_size isn't set no work was done */
3102 if (!avg_wire_size)
3103 goto clear_counts;
2833 3104
2834 /* Add 24 bytes to size to account for CRC, preamble, and gap */ 3105 /* Add 24 bytes to size to account for CRC, preamble, and gap */
2835 avg_wire_size += 24; 3106 avg_wire_size += 24;
@@ -2844,13 +3115,19 @@ static void igb_update_ring_itr(struct igb_ring *rx_ring)
2844 new_val = avg_wire_size / 2; 3115 new_val = avg_wire_size / 2;
2845 3116
2846set_itr_val: 3117set_itr_val:
2847 if (new_val != rx_ring->itr_val) { 3118 if (new_val != q_vector->itr_val) {
2848 rx_ring->itr_val = new_val; 3119 q_vector->itr_val = new_val;
2849 rx_ring->set_itr = 1; 3120 q_vector->set_itr = 1;
2850 } 3121 }
2851clear_counts: 3122clear_counts:
2852 rx_ring->total_bytes = 0; 3123 if (q_vector->rx_ring) {
2853 rx_ring->total_packets = 0; 3124 q_vector->rx_ring->total_bytes = 0;
3125 q_vector->rx_ring->total_packets = 0;
3126 }
3127 if (q_vector->tx_ring) {
3128 q_vector->tx_ring->total_bytes = 0;
3129 q_vector->tx_ring->total_packets = 0;
3130 }
2854} 3131}
2855 3132
2856/** 3133/**
@@ -2867,7 +3144,7 @@ clear_counts:
2867 * NOTE: These calculations are only valid when operating in a single- 3144 * NOTE: These calculations are only valid when operating in a single-
2868 * queue environment. 3145 * queue environment.
2869 * @adapter: pointer to adapter 3146 * @adapter: pointer to adapter
2870 * @itr_setting: current adapter->itr 3147 * @itr_setting: current q_vector->itr_val
2871 * @packets: the number of packets during this measurement interval 3148 * @packets: the number of packets during this measurement interval
2872 * @bytes: the number of bytes during this measurement interval 3149 * @bytes: the number of bytes during this measurement interval
2873 **/ 3150 **/
@@ -2919,8 +3196,9 @@ update_itr_done:
2919 3196
2920static void igb_set_itr(struct igb_adapter *adapter) 3197static void igb_set_itr(struct igb_adapter *adapter)
2921{ 3198{
3199 struct igb_q_vector *q_vector = adapter->q_vector[0];
2922 u16 current_itr; 3200 u16 current_itr;
2923 u32 new_itr = adapter->itr; 3201 u32 new_itr = q_vector->itr_val;
2924 3202
2925 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */ 3203 /* for non-gigabit speeds, just fix the interrupt rate at 4000 */
2926 if (adapter->link_speed != SPEED_1000) { 3204 if (adapter->link_speed != SPEED_1000) {
@@ -2934,18 +3212,14 @@ static void igb_set_itr(struct igb_adapter *adapter)
2934 adapter->rx_ring->total_packets, 3212 adapter->rx_ring->total_packets,
2935 adapter->rx_ring->total_bytes); 3213 adapter->rx_ring->total_bytes);
2936 3214
2937 if (adapter->rx_ring->buddy) { 3215 adapter->tx_itr = igb_update_itr(adapter,
2938 adapter->tx_itr = igb_update_itr(adapter, 3216 adapter->tx_itr,
2939 adapter->tx_itr, 3217 adapter->tx_ring->total_packets,
2940 adapter->tx_ring->total_packets, 3218 adapter->tx_ring->total_bytes);
2941 adapter->tx_ring->total_bytes); 3219 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2942 current_itr = max(adapter->rx_itr, adapter->tx_itr);
2943 } else {
2944 current_itr = adapter->rx_itr;
2945 }
2946 3220
2947 /* conservative mode (itr 3) eliminates the lowest_latency setting */ 3221 /* conservative mode (itr 3) eliminates the lowest_latency setting */
2948 if (adapter->itr_setting == 3 && current_itr == lowest_latency) 3222 if (adapter->rx_itr_setting == 3 && current_itr == lowest_latency)
2949 current_itr = low_latency; 3223 current_itr = low_latency;
2950 3224
2951 switch (current_itr) { 3225 switch (current_itr) {
@@ -2966,18 +3240,17 @@ static void igb_set_itr(struct igb_adapter *adapter)
2966set_itr_now: 3240set_itr_now:
2967 adapter->rx_ring->total_bytes = 0; 3241 adapter->rx_ring->total_bytes = 0;
2968 adapter->rx_ring->total_packets = 0; 3242 adapter->rx_ring->total_packets = 0;
2969 if (adapter->rx_ring->buddy) { 3243 adapter->tx_ring->total_bytes = 0;
2970 adapter->rx_ring->buddy->total_bytes = 0; 3244 adapter->tx_ring->total_packets = 0;
2971 adapter->rx_ring->buddy->total_packets = 0;
2972 }
2973 3245
2974 if (new_itr != adapter->itr) { 3246 if (new_itr != q_vector->itr_val) {
2975 /* this attempts to bias the interrupt rate towards Bulk 3247 /* this attempts to bias the interrupt rate towards Bulk
2976 * by adding intermediate steps when interrupt rate is 3248 * by adding intermediate steps when interrupt rate is
2977 * increasing */ 3249 * increasing */
2978 new_itr = new_itr > adapter->itr ? 3250 new_itr = new_itr > q_vector->itr_val ?
2979 max((new_itr * adapter->itr) / 3251 max((new_itr * q_vector->itr_val) /
2980 (new_itr + (adapter->itr >> 2)), new_itr) : 3252 (new_itr + (q_vector->itr_val >> 2)),
3253 new_itr) :
2981 new_itr; 3254 new_itr;
2982 /* Don't write the value here; it resets the adapter's 3255 /* Don't write the value here; it resets the adapter's
2983 * internal timer, and causes us to delay far longer than 3256 * internal timer, and causes us to delay far longer than
@@ -2985,25 +3258,22 @@ set_itr_now:
2985 * value at the beginning of the next interrupt so the timing 3258 * value at the beginning of the next interrupt so the timing
2986 * ends up being correct. 3259 * ends up being correct.
2987 */ 3260 */
2988 adapter->itr = new_itr; 3261 q_vector->itr_val = new_itr;
2989 adapter->rx_ring->itr_val = new_itr; 3262 q_vector->set_itr = 1;
2990 adapter->rx_ring->set_itr = 1;
2991 } 3263 }
2992 3264
2993 return; 3265 return;
2994} 3266}
2995 3267
2996
2997#define IGB_TX_FLAGS_CSUM 0x00000001 3268#define IGB_TX_FLAGS_CSUM 0x00000001
2998#define IGB_TX_FLAGS_VLAN 0x00000002 3269#define IGB_TX_FLAGS_VLAN 0x00000002
2999#define IGB_TX_FLAGS_TSO 0x00000004 3270#define IGB_TX_FLAGS_TSO 0x00000004
3000#define IGB_TX_FLAGS_IPV4 0x00000008 3271#define IGB_TX_FLAGS_IPV4 0x00000008
3001#define IGB_TX_FLAGS_TSTAMP 0x00000010 3272#define IGB_TX_FLAGS_TSTAMP 0x00000010
3002#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000 3273#define IGB_TX_FLAGS_VLAN_MASK 0xffff0000
3003#define IGB_TX_FLAGS_VLAN_SHIFT 16 3274#define IGB_TX_FLAGS_VLAN_SHIFT 16
3004 3275
3005static inline int igb_tso_adv(struct igb_adapter *adapter, 3276static inline int igb_tso_adv(struct igb_ring *tx_ring,
3006 struct igb_ring *tx_ring,
3007 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len) 3277 struct sk_buff *skb, u32 tx_flags, u8 *hdr_len)
3008{ 3278{
3009 struct e1000_adv_tx_context_desc *context_desc; 3279 struct e1000_adv_tx_context_desc *context_desc;
@@ -3065,8 +3335,8 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
3065 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT); 3335 mss_l4len_idx |= (l4len << E1000_ADVTXD_L4LEN_SHIFT);
3066 3336
3067 /* For 82575, context index must be unique per ring. */ 3337 /* For 82575, context index must be unique per ring. */
3068 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX) 3338 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3069 mss_l4len_idx |= tx_ring->queue_index << 4; 3339 mss_l4len_idx |= tx_ring->reg_idx << 4;
3070 3340
3071 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx); 3341 context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);
3072 context_desc->seqnum_seed = 0; 3342 context_desc->seqnum_seed = 0;
@@ -3083,14 +3353,14 @@ static inline int igb_tso_adv(struct igb_adapter *adapter,
3083 return true; 3353 return true;
3084} 3354}
3085 3355
3086static inline bool igb_tx_csum_adv(struct igb_adapter *adapter, 3356static inline bool igb_tx_csum_adv(struct igb_ring *tx_ring,
3087 struct igb_ring *tx_ring, 3357 struct sk_buff *skb, u32 tx_flags)
3088 struct sk_buff *skb, u32 tx_flags)
3089{ 3358{
3090 struct e1000_adv_tx_context_desc *context_desc; 3359 struct e1000_adv_tx_context_desc *context_desc;
3091 unsigned int i; 3360 struct pci_dev *pdev = tx_ring->pdev;
3092 struct igb_buffer *buffer_info; 3361 struct igb_buffer *buffer_info;
3093 u32 info = 0, tu_cmd = 0; 3362 u32 info = 0, tu_cmd = 0;
3363 unsigned int i;
3094 3364
3095 if ((skb->ip_summed == CHECKSUM_PARTIAL) || 3365 if ((skb->ip_summed == CHECKSUM_PARTIAL) ||
3096 (tx_flags & IGB_TX_FLAGS_VLAN)) { 3366 (tx_flags & IGB_TX_FLAGS_VLAN)) {
@@ -3100,6 +3370,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
3100 3370
3101 if (tx_flags & IGB_TX_FLAGS_VLAN) 3371 if (tx_flags & IGB_TX_FLAGS_VLAN)
3102 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK); 3372 info |= (tx_flags & IGB_TX_FLAGS_VLAN_MASK);
3373
3103 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT); 3374 info |= (skb_network_offset(skb) << E1000_ADVTXD_MACLEN_SHIFT);
3104 if (skb->ip_summed == CHECKSUM_PARTIAL) 3375 if (skb->ip_summed == CHECKSUM_PARTIAL)
3105 info |= skb_network_header_len(skb); 3376 info |= skb_network_header_len(skb);
@@ -3137,7 +3408,7 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
3137 break; 3408 break;
3138 default: 3409 default:
3139 if (unlikely(net_ratelimit())) 3410 if (unlikely(net_ratelimit()))
3140 dev_warn(&adapter->pdev->dev, 3411 dev_warn(&pdev->dev,
3141 "partial checksum but proto=%x!\n", 3412 "partial checksum but proto=%x!\n",
3142 skb->protocol); 3413 skb->protocol);
3143 break; 3414 break;
@@ -3146,11 +3417,9 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
3146 3417
3147 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd); 3418 context_desc->type_tucmd_mlhl = cpu_to_le32(tu_cmd);
3148 context_desc->seqnum_seed = 0; 3419 context_desc->seqnum_seed = 0;
3149 if (adapter->flags & IGB_FLAG_NEED_CTX_IDX) 3420 if (tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX)
3150 context_desc->mss_l4len_idx = 3421 context_desc->mss_l4len_idx =
3151 cpu_to_le32(tx_ring->queue_index << 4); 3422 cpu_to_le32(tx_ring->reg_idx << 4);
3152 else
3153 context_desc->mss_l4len_idx = 0;
3154 3423
3155 buffer_info->time_stamp = jiffies; 3424 buffer_info->time_stamp = jiffies;
3156 buffer_info->next_to_watch = i; 3425 buffer_info->next_to_watch = i;
@@ -3169,11 +3438,11 @@ static inline bool igb_tx_csum_adv(struct igb_adapter *adapter,
3169#define IGB_MAX_TXD_PWR 16 3438#define IGB_MAX_TXD_PWR 16
3170#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR) 3439#define IGB_MAX_DATA_PER_TXD (1<<IGB_MAX_TXD_PWR)
3171 3440
3172static inline int igb_tx_map_adv(struct igb_adapter *adapter, 3441static inline int igb_tx_map_adv(struct igb_ring *tx_ring, struct sk_buff *skb,
3173 struct igb_ring *tx_ring, struct sk_buff *skb,
3174 unsigned int first) 3442 unsigned int first)
3175{ 3443{
3176 struct igb_buffer *buffer_info; 3444 struct igb_buffer *buffer_info;
3445 struct pci_dev *pdev = tx_ring->pdev;
3177 unsigned int len = skb_headlen(skb); 3446 unsigned int len = skb_headlen(skb);
3178 unsigned int count = 0, i; 3447 unsigned int count = 0, i;
3179 unsigned int f; 3448 unsigned int f;
@@ -3181,8 +3450,8 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
3181 3450
3182 i = tx_ring->next_to_use; 3451 i = tx_ring->next_to_use;
3183 3452
3184 if (skb_dma_map(&adapter->pdev->dev, skb, DMA_TO_DEVICE)) { 3453 if (skb_dma_map(&pdev->dev, skb, DMA_TO_DEVICE)) {
3185 dev_err(&adapter->pdev->dev, "TX DMA map failed\n"); 3454 dev_err(&pdev->dev, "TX DMA map failed\n");
3186 return 0; 3455 return 0;
3187 } 3456 }
3188 3457
@@ -3218,18 +3487,17 @@ static inline int igb_tx_map_adv(struct igb_adapter *adapter,
3218 tx_ring->buffer_info[i].skb = skb; 3487 tx_ring->buffer_info[i].skb = skb;
3219 tx_ring->buffer_info[first].next_to_watch = i; 3488 tx_ring->buffer_info[first].next_to_watch = i;
3220 3489
3221 return count + 1; 3490 return ++count;
3222} 3491}
3223 3492
3224static inline void igb_tx_queue_adv(struct igb_adapter *adapter, 3493static inline void igb_tx_queue_adv(struct igb_ring *tx_ring,
3225 struct igb_ring *tx_ring,
3226 int tx_flags, int count, u32 paylen, 3494 int tx_flags, int count, u32 paylen,
3227 u8 hdr_len) 3495 u8 hdr_len)
3228{ 3496{
3229 union e1000_adv_tx_desc *tx_desc = NULL; 3497 union e1000_adv_tx_desc *tx_desc;
3230 struct igb_buffer *buffer_info; 3498 struct igb_buffer *buffer_info;
3231 u32 olinfo_status = 0, cmd_type_len; 3499 u32 olinfo_status = 0, cmd_type_len;
3232 unsigned int i; 3500 unsigned int i = tx_ring->next_to_use;
3233 3501
3234 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS | 3502 cmd_type_len = (E1000_ADVTXD_DTYP_DATA | E1000_ADVTXD_DCMD_IFCS |
3235 E1000_ADVTXD_DCMD_DEXT); 3503 E1000_ADVTXD_DCMD_DEXT);
@@ -3254,27 +3522,28 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
3254 olinfo_status |= E1000_TXD_POPTS_TXSM << 8; 3522 olinfo_status |= E1000_TXD_POPTS_TXSM << 8;
3255 } 3523 }
3256 3524
3257 if ((adapter->flags & IGB_FLAG_NEED_CTX_IDX) && 3525 if ((tx_ring->flags & IGB_RING_FLAG_TX_CTX_IDX) &&
3258 (tx_flags & (IGB_TX_FLAGS_CSUM | IGB_TX_FLAGS_TSO | 3526 (tx_flags & (IGB_TX_FLAGS_CSUM |
3527 IGB_TX_FLAGS_TSO |
3259 IGB_TX_FLAGS_VLAN))) 3528 IGB_TX_FLAGS_VLAN)))
3260 olinfo_status |= tx_ring->queue_index << 4; 3529 olinfo_status |= tx_ring->reg_idx << 4;
3261 3530
3262 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT); 3531 olinfo_status |= ((paylen - hdr_len) << E1000_ADVTXD_PAYLEN_SHIFT);
3263 3532
3264 i = tx_ring->next_to_use; 3533 do {
3265 while (count--) {
3266 buffer_info = &tx_ring->buffer_info[i]; 3534 buffer_info = &tx_ring->buffer_info[i];
3267 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i); 3535 tx_desc = E1000_TX_DESC_ADV(*tx_ring, i);
3268 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma); 3536 tx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
3269 tx_desc->read.cmd_type_len = 3537 tx_desc->read.cmd_type_len =
3270 cpu_to_le32(cmd_type_len | buffer_info->length); 3538 cpu_to_le32(cmd_type_len | buffer_info->length);
3271 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status); 3539 tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
3540 count--;
3272 i++; 3541 i++;
3273 if (i == tx_ring->count) 3542 if (i == tx_ring->count)
3274 i = 0; 3543 i = 0;
3275 } 3544 } while (count > 0);
3276 3545
3277 tx_desc->read.cmd_type_len |= cpu_to_le32(adapter->txd_cmd); 3546 tx_desc->read.cmd_type_len |= cpu_to_le32(IGB_ADVTXD_DCMD);
3278 /* Force memory writes to complete before letting h/w 3547 /* Force memory writes to complete before letting h/w
3279 * know there are new descriptors to fetch. (Only 3548 * know there are new descriptors to fetch. (Only
3280 * applicable for weak-ordered memory model archs, 3549 * applicable for weak-ordered memory model archs,
@@ -3282,16 +3551,15 @@ static inline void igb_tx_queue_adv(struct igb_adapter *adapter,
3282 wmb(); 3551 wmb();
3283 3552
3284 tx_ring->next_to_use = i; 3553 tx_ring->next_to_use = i;
3285 writel(i, adapter->hw.hw_addr + tx_ring->tail); 3554 writel(i, tx_ring->tail);
3286 /* we need this if more than one processor can write to our tail 3555 /* we need this if more than one processor can write to our tail
3287 * at a time, it syncronizes IO on IA64/Altix systems */ 3556 * at a time, it syncronizes IO on IA64/Altix systems */
3288 mmiowb(); 3557 mmiowb();
3289} 3558}
3290 3559
3291static int __igb_maybe_stop_tx(struct net_device *netdev, 3560static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
3292 struct igb_ring *tx_ring, int size)
3293{ 3561{
3294 struct igb_adapter *adapter = netdev_priv(netdev); 3562 struct net_device *netdev = tx_ring->netdev;
3295 3563
3296 netif_stop_subqueue(netdev, tx_ring->queue_index); 3564 netif_stop_subqueue(netdev, tx_ring->queue_index);
3297 3565
@@ -3307,66 +3575,43 @@ static int __igb_maybe_stop_tx(struct net_device *netdev,
3307 3575
3308 /* A reprieve! */ 3576 /* A reprieve! */
3309 netif_wake_subqueue(netdev, tx_ring->queue_index); 3577 netif_wake_subqueue(netdev, tx_ring->queue_index);
3310 ++adapter->restart_queue; 3578 tx_ring->tx_stats.restart_queue++;
3311 return 0; 3579 return 0;
3312} 3580}
3313 3581
3314static int igb_maybe_stop_tx(struct net_device *netdev, 3582static int igb_maybe_stop_tx(struct igb_ring *tx_ring, int size)
3315 struct igb_ring *tx_ring, int size)
3316{ 3583{
3317 if (igb_desc_unused(tx_ring) >= size) 3584 if (igb_desc_unused(tx_ring) >= size)
3318 return 0; 3585 return 0;
3319 return __igb_maybe_stop_tx(netdev, tx_ring, size); 3586 return __igb_maybe_stop_tx(tx_ring, size);
3320} 3587}
3321 3588
3322static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb, 3589netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
3323 struct net_device *netdev, 3590 struct igb_ring *tx_ring)
3324 struct igb_ring *tx_ring)
3325{ 3591{
3326 struct igb_adapter *adapter = netdev_priv(netdev); 3592 struct igb_adapter *adapter = netdev_priv(tx_ring->netdev);
3327 unsigned int first; 3593 unsigned int first;
3328 unsigned int tx_flags = 0; 3594 unsigned int tx_flags = 0;
3329 u8 hdr_len = 0; 3595 u8 hdr_len = 0;
3330 int count = 0; 3596 int tso = 0, count;
3331 int tso = 0; 3597 union skb_shared_tx *shtx = skb_tx(skb);
3332 union skb_shared_tx *shtx;
3333
3334 if (test_bit(__IGB_DOWN, &adapter->state)) {
3335 dev_kfree_skb_any(skb);
3336 return NETDEV_TX_OK;
3337 }
3338
3339 if (skb->len <= 0) {
3340 dev_kfree_skb_any(skb);
3341 return NETDEV_TX_OK;
3342 }
3343 3598
3344 /* need: 1 descriptor per page, 3599 /* need: 1 descriptor per page,
3345 * + 2 desc gap to keep tail from touching head, 3600 * + 2 desc gap to keep tail from touching head,
3346 * + 1 desc for skb->data, 3601 * + 1 desc for skb->data,
3347 * + 1 desc for context descriptor, 3602 * + 1 desc for context descriptor,
3348 * otherwise try next time */ 3603 * otherwise try next time */
3349 if (igb_maybe_stop_tx(netdev, tx_ring, skb_shinfo(skb)->nr_frags + 4)) { 3604 if (igb_maybe_stop_tx(tx_ring, skb_shinfo(skb)->nr_frags + 4)) {
3350 /* this is a hard error */ 3605 /* this is a hard error */
3351 return NETDEV_TX_BUSY; 3606 return NETDEV_TX_BUSY;
3352 } 3607 }
3353 3608
3354 /*
3355 * TODO: check that there currently is no other packet with
3356 * time stamping in the queue
3357 *
3358 * When doing time stamping, keep the connection to the socket
3359 * a while longer: it is still needed by skb_hwtstamp_tx(),
3360 * called either in igb_tx_hwtstamp() or by our caller when
3361 * doing software time stamping.
3362 */
3363 shtx = skb_tx(skb);
3364 if (unlikely(shtx->hardware)) { 3609 if (unlikely(shtx->hardware)) {
3365 shtx->in_progress = 1; 3610 shtx->in_progress = 1;
3366 tx_flags |= IGB_TX_FLAGS_TSTAMP; 3611 tx_flags |= IGB_TX_FLAGS_TSTAMP;
3367 } 3612 }
3368 3613
3369 if (adapter->vlgrp && vlan_tx_tag_present(skb)) { 3614 if (vlan_tx_tag_present(skb) && adapter->vlgrp) {
3370 tx_flags |= IGB_TX_FLAGS_VLAN; 3615 tx_flags |= IGB_TX_FLAGS_VLAN;
3371 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT); 3616 tx_flags |= (vlan_tx_tag_get(skb) << IGB_TX_FLAGS_VLAN_SHIFT);
3372 } 3617 }
@@ -3375,37 +3620,38 @@ static netdev_tx_t igb_xmit_frame_ring_adv(struct sk_buff *skb,
3375 tx_flags |= IGB_TX_FLAGS_IPV4; 3620 tx_flags |= IGB_TX_FLAGS_IPV4;
3376 3621
3377 first = tx_ring->next_to_use; 3622 first = tx_ring->next_to_use;
3378 tso = skb_is_gso(skb) ? igb_tso_adv(adapter, tx_ring, skb, tx_flags, 3623 if (skb_is_gso(skb)) {
3379 &hdr_len) : 0; 3624 tso = igb_tso_adv(tx_ring, skb, tx_flags, &hdr_len);
3380 3625
3381 if (tso < 0) { 3626 if (tso < 0) {
3382 dev_kfree_skb_any(skb); 3627 dev_kfree_skb_any(skb);
3383 return NETDEV_TX_OK; 3628 return NETDEV_TX_OK;
3629 }
3384 } 3630 }
3385 3631
3386 if (tso) 3632 if (tso)
3387 tx_flags |= IGB_TX_FLAGS_TSO; 3633 tx_flags |= IGB_TX_FLAGS_TSO;
3388 else if (igb_tx_csum_adv(adapter, tx_ring, skb, tx_flags) && 3634 else if (igb_tx_csum_adv(tx_ring, skb, tx_flags) &&
3389 (skb->ip_summed == CHECKSUM_PARTIAL)) 3635 (skb->ip_summed == CHECKSUM_PARTIAL))
3390 tx_flags |= IGB_TX_FLAGS_CSUM; 3636 tx_flags |= IGB_TX_FLAGS_CSUM;
3391 3637
3392 /* 3638 /*
3393 * count reflects descriptors mapped, if 0 then mapping error 3639 * count reflects descriptors mapped, if 0 or less then mapping error
3394 * has occured and we need to rewind the descriptor queue 3640 * has occured and we need to rewind the descriptor queue
3395 */ 3641 */
3396 count = igb_tx_map_adv(adapter, tx_ring, skb, first); 3642 count = igb_tx_map_adv(tx_ring, skb, first);
3397 3643 if (count <= 0) {
3398 if (count) {
3399 igb_tx_queue_adv(adapter, tx_ring, tx_flags, count,
3400 skb->len, hdr_len);
3401 /* Make sure there is space in the ring for the next send. */
3402 igb_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 4);
3403 } else {
3404 dev_kfree_skb_any(skb); 3644 dev_kfree_skb_any(skb);
3405 tx_ring->buffer_info[first].time_stamp = 0; 3645 tx_ring->buffer_info[first].time_stamp = 0;
3406 tx_ring->next_to_use = first; 3646 tx_ring->next_to_use = first;
3647 return NETDEV_TX_OK;
3407 } 3648 }
3408 3649
3650 igb_tx_queue_adv(tx_ring, tx_flags, count, skb->len, hdr_len);
3651
3652 /* Make sure there is space in the ring for the next send. */
3653 igb_maybe_stop_tx(tx_ring, MAX_SKB_FRAGS + 4);
3654
3409 return NETDEV_TX_OK; 3655 return NETDEV_TX_OK;
3410} 3656}
3411 3657
@@ -3414,8 +3660,18 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
3414{ 3660{
3415 struct igb_adapter *adapter = netdev_priv(netdev); 3661 struct igb_adapter *adapter = netdev_priv(netdev);
3416 struct igb_ring *tx_ring; 3662 struct igb_ring *tx_ring;
3417
3418 int r_idx = 0; 3663 int r_idx = 0;
3664
3665 if (test_bit(__IGB_DOWN, &adapter->state)) {
3666 dev_kfree_skb_any(skb);
3667 return NETDEV_TX_OK;
3668 }
3669
3670 if (skb->len <= 0) {
3671 dev_kfree_skb_any(skb);
3672 return NETDEV_TX_OK;
3673 }
3674
3419 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1); 3675 r_idx = skb->queue_mapping & (IGB_ABS_MAX_TX_QUEUES - 1);
3420 tx_ring = adapter->multi_tx_table[r_idx]; 3676 tx_ring = adapter->multi_tx_table[r_idx];
3421 3677
@@ -3423,7 +3679,7 @@ static netdev_tx_t igb_xmit_frame_adv(struct sk_buff *skb,
3423 * to a flow. Right now, performance is impacted slightly negatively 3679 * to a flow. Right now, performance is impacted slightly negatively
3424 * if using multiple tx queues. If the stack breaks away from a 3680 * if using multiple tx queues. If the stack breaks away from a
3425 * single qdisc implementation, we can look at this again. */ 3681 * single qdisc implementation, we can look at this again. */
3426 return igb_xmit_frame_ring_adv(skb, netdev, tx_ring); 3682 return igb_xmit_frame_ring_adv(skb, tx_ring);
3427} 3683}
3428 3684
3429/** 3685/**
@@ -3437,6 +3693,7 @@ static void igb_tx_timeout(struct net_device *netdev)
3437 3693
3438 /* Do the reset outside of interrupt context */ 3694 /* Do the reset outside of interrupt context */
3439 adapter->tx_timeout_count++; 3695 adapter->tx_timeout_count++;
3696
3440 schedule_work(&adapter->reset_task); 3697 schedule_work(&adapter->reset_task);
3441 wr32(E1000_EICS, 3698 wr32(E1000_EICS,
3442 (adapter->eims_enable_mask & ~adapter->eims_other)); 3699 (adapter->eims_enable_mask & ~adapter->eims_other));
@@ -3459,10 +3716,8 @@ static void igb_reset_task(struct work_struct *work)
3459 **/ 3716 **/
3460static struct net_device_stats *igb_get_stats(struct net_device *netdev) 3717static struct net_device_stats *igb_get_stats(struct net_device *netdev)
3461{ 3718{
3462 struct igb_adapter *adapter = netdev_priv(netdev);
3463
3464 /* only return the current stats */ 3719 /* only return the current stats */
3465 return &adapter->net_stats; 3720 return &netdev->stats;
3466} 3721}
3467 3722
3468/** 3723/**
@@ -3475,16 +3730,17 @@ static struct net_device_stats *igb_get_stats(struct net_device *netdev)
3475static int igb_change_mtu(struct net_device *netdev, int new_mtu) 3730static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3476{ 3731{
3477 struct igb_adapter *adapter = netdev_priv(netdev); 3732 struct igb_adapter *adapter = netdev_priv(netdev);
3733 struct pci_dev *pdev = adapter->pdev;
3478 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN; 3734 int max_frame = new_mtu + ETH_HLEN + ETH_FCS_LEN;
3735 u32 rx_buffer_len, i;
3479 3736
3480 if ((max_frame < ETH_ZLEN + ETH_FCS_LEN) || 3737 if ((new_mtu < 68) || (max_frame > MAX_JUMBO_FRAME_SIZE)) {
3481 (max_frame > MAX_JUMBO_FRAME_SIZE)) { 3738 dev_err(&pdev->dev, "Invalid MTU setting\n");
3482 dev_err(&adapter->pdev->dev, "Invalid MTU setting\n");
3483 return -EINVAL; 3739 return -EINVAL;
3484 } 3740 }
3485 3741
3486 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) { 3742 if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
3487 dev_err(&adapter->pdev->dev, "MTU > 9216 not supported.\n"); 3743 dev_err(&pdev->dev, "MTU > 9216 not supported.\n");
3488 return -EINVAL; 3744 return -EINVAL;
3489 } 3745 }
3490 3746
@@ -3493,8 +3749,6 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3493 3749
3494 /* igb_down has a dependency on max_frame_size */ 3750 /* igb_down has a dependency on max_frame_size */
3495 adapter->max_frame_size = max_frame; 3751 adapter->max_frame_size = max_frame;
3496 if (netif_running(netdev))
3497 igb_down(adapter);
3498 3752
3499 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN 3753 /* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
3500 * means we reserve 2 more, this pushes us to allocate from the next 3754 * means we reserve 2 more, this pushes us to allocate from the next
@@ -3502,35 +3756,23 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3502 * i.e. RXBUFFER_2048 --> size-4096 slab 3756 * i.e. RXBUFFER_2048 --> size-4096 slab
3503 */ 3757 */
3504 3758
3505 if (max_frame <= IGB_RXBUFFER_256) 3759 if (max_frame <= IGB_RXBUFFER_1024)
3506 adapter->rx_buffer_len = IGB_RXBUFFER_256; 3760 rx_buffer_len = IGB_RXBUFFER_1024;
3507 else if (max_frame <= IGB_RXBUFFER_512) 3761 else if (max_frame <= MAXIMUM_ETHERNET_VLAN_SIZE)
3508 adapter->rx_buffer_len = IGB_RXBUFFER_512; 3762 rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3509 else if (max_frame <= IGB_RXBUFFER_1024)
3510 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
3511 else if (max_frame <= IGB_RXBUFFER_2048)
3512 adapter->rx_buffer_len = IGB_RXBUFFER_2048;
3513 else 3763 else
3514#if (PAGE_SIZE / 2) > IGB_RXBUFFER_16384 3764 rx_buffer_len = IGB_RXBUFFER_128;
3515 adapter->rx_buffer_len = IGB_RXBUFFER_16384;
3516#else
3517 adapter->rx_buffer_len = PAGE_SIZE / 2;
3518#endif
3519
3520 /* if sr-iov is enabled we need to force buffer size to 1K or larger */
3521 if (adapter->vfs_allocated_count &&
3522 (adapter->rx_buffer_len < IGB_RXBUFFER_1024))
3523 adapter->rx_buffer_len = IGB_RXBUFFER_1024;
3524 3765
3525 /* adjust allocation if LPE protects us, and we aren't using SBP */ 3766 if (netif_running(netdev))
3526 if ((max_frame == ETH_FRAME_LEN + ETH_FCS_LEN) || 3767 igb_down(adapter);
3527 (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE))
3528 adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
3529 3768
3530 dev_info(&adapter->pdev->dev, "changing MTU from %d to %d\n", 3769 dev_info(&pdev->dev, "changing MTU from %d to %d\n",
3531 netdev->mtu, new_mtu); 3770 netdev->mtu, new_mtu);
3532 netdev->mtu = new_mtu; 3771 netdev->mtu = new_mtu;
3533 3772
3773 for (i = 0; i < adapter->num_rx_queues; i++)
3774 adapter->rx_ring[i].rx_buffer_len = rx_buffer_len;
3775
3534 if (netif_running(netdev)) 3776 if (netif_running(netdev))
3535 igb_up(adapter); 3777 igb_up(adapter);
3536 else 3778 else
@@ -3548,9 +3790,13 @@ static int igb_change_mtu(struct net_device *netdev, int new_mtu)
3548 3790
3549void igb_update_stats(struct igb_adapter *adapter) 3791void igb_update_stats(struct igb_adapter *adapter)
3550{ 3792{
3793 struct net_device_stats *net_stats = igb_get_stats(adapter->netdev);
3551 struct e1000_hw *hw = &adapter->hw; 3794 struct e1000_hw *hw = &adapter->hw;
3552 struct pci_dev *pdev = adapter->pdev; 3795 struct pci_dev *pdev = adapter->pdev;
3796 u32 rnbc;
3553 u16 phy_tmp; 3797 u16 phy_tmp;
3798 int i;
3799 u64 bytes, packets;
3554 3800
3555#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF 3801#define PHY_IDLE_ERROR_COUNT_MASK 0x00FF
3556 3802
@@ -3563,6 +3809,29 @@ void igb_update_stats(struct igb_adapter *adapter)
3563 if (pci_channel_offline(pdev)) 3809 if (pci_channel_offline(pdev))
3564 return; 3810 return;
3565 3811
3812 bytes = 0;
3813 packets = 0;
3814 for (i = 0; i < adapter->num_rx_queues; i++) {
3815 u32 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0x0FFF;
3816 adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
3817 net_stats->rx_fifo_errors += rqdpc_tmp;
3818 bytes += adapter->rx_ring[i].rx_stats.bytes;
3819 packets += adapter->rx_ring[i].rx_stats.packets;
3820 }
3821
3822 net_stats->rx_bytes = bytes;
3823 net_stats->rx_packets = packets;
3824
3825 bytes = 0;
3826 packets = 0;
3827 for (i = 0; i < adapter->num_tx_queues; i++) {
3828 bytes += adapter->tx_ring[i].tx_stats.bytes;
3829 packets += adapter->tx_ring[i].tx_stats.packets;
3830 }
3831 net_stats->tx_bytes = bytes;
3832 net_stats->tx_packets = packets;
3833
3834 /* read stats registers */
3566 adapter->stats.crcerrs += rd32(E1000_CRCERRS); 3835 adapter->stats.crcerrs += rd32(E1000_CRCERRS);
3567 adapter->stats.gprc += rd32(E1000_GPRC); 3836 adapter->stats.gprc += rd32(E1000_GPRC);
3568 adapter->stats.gorc += rd32(E1000_GORCL); 3837 adapter->stats.gorc += rd32(E1000_GORCL);
@@ -3595,7 +3864,9 @@ void igb_update_stats(struct igb_adapter *adapter)
3595 adapter->stats.gptc += rd32(E1000_GPTC); 3864 adapter->stats.gptc += rd32(E1000_GPTC);
3596 adapter->stats.gotc += rd32(E1000_GOTCL); 3865 adapter->stats.gotc += rd32(E1000_GOTCL);
3597 rd32(E1000_GOTCH); /* clear GOTCL */ 3866 rd32(E1000_GOTCH); /* clear GOTCL */
3598 adapter->stats.rnbc += rd32(E1000_RNBC); 3867 rnbc = rd32(E1000_RNBC);
3868 adapter->stats.rnbc += rnbc;
3869 net_stats->rx_fifo_errors += rnbc;
3599 adapter->stats.ruc += rd32(E1000_RUC); 3870 adapter->stats.ruc += rd32(E1000_RUC);
3600 adapter->stats.rfc += rd32(E1000_RFC); 3871 adapter->stats.rfc += rd32(E1000_RFC);
3601 adapter->stats.rjc += rd32(E1000_RJC); 3872 adapter->stats.rjc += rd32(E1000_RJC);
@@ -3614,7 +3885,6 @@ void igb_update_stats(struct igb_adapter *adapter)
3614 adapter->stats.bptc += rd32(E1000_BPTC); 3885 adapter->stats.bptc += rd32(E1000_BPTC);
3615 3886
3616 /* used for adaptive IFS */ 3887 /* used for adaptive IFS */
3617
3618 hw->mac.tx_packet_delta = rd32(E1000_TPT); 3888 hw->mac.tx_packet_delta = rd32(E1000_TPT);
3619 adapter->stats.tpt += hw->mac.tx_packet_delta; 3889 adapter->stats.tpt += hw->mac.tx_packet_delta;
3620 hw->mac.collision_delta = rd32(E1000_COLC); 3890 hw->mac.collision_delta = rd32(E1000_COLC);
@@ -3637,56 +3907,29 @@ void igb_update_stats(struct igb_adapter *adapter)
3637 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC); 3907 adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
3638 3908
3639 /* Fill out the OS statistics structure */ 3909 /* Fill out the OS statistics structure */
3640 adapter->net_stats.multicast = adapter->stats.mprc; 3910 net_stats->multicast = adapter->stats.mprc;
3641 adapter->net_stats.collisions = adapter->stats.colc; 3911 net_stats->collisions = adapter->stats.colc;
3642 3912
3643 /* Rx Errors */ 3913 /* Rx Errors */
3644 3914
3645 if (hw->mac.type != e1000_82575) {
3646 u32 rqdpc_tmp;
3647 u64 rqdpc_total = 0;
3648 int i;
3649 /* Read out drops stats per RX queue. Notice RQDPC (Receive
3650 * Queue Drop Packet Count) stats only gets incremented, if
3651 * the DROP_EN but it set (in the SRRCTL register for that
3652 * queue). If DROP_EN bit is NOT set, then the some what
3653 * equivalent count is stored in RNBC (not per queue basis).
3654 * Also note the drop count is due to lack of available
3655 * descriptors.
3656 */
3657 for (i = 0; i < adapter->num_rx_queues; i++) {
3658 rqdpc_tmp = rd32(E1000_RQDPC(i)) & 0xFFF;
3659 adapter->rx_ring[i].rx_stats.drops += rqdpc_tmp;
3660 rqdpc_total += adapter->rx_ring[i].rx_stats.drops;
3661 }
3662 adapter->net_stats.rx_fifo_errors = rqdpc_total;
3663 }
3664
3665 /* Note RNBC (Receive No Buffers Count) is an not an exact
3666 * drop count as the hardware FIFO might save the day. Thats
3667 * one of the reason for saving it in rx_fifo_errors, as its
3668 * potentially not a true drop.
3669 */
3670 adapter->net_stats.rx_fifo_errors += adapter->stats.rnbc;
3671
3672 /* RLEC on some newer hardware can be incorrect so build 3915 /* RLEC on some newer hardware can be incorrect so build
3673 * our own version based on RUC and ROC */ 3916 * our own version based on RUC and ROC */
3674 adapter->net_stats.rx_errors = adapter->stats.rxerrc + 3917 net_stats->rx_errors = adapter->stats.rxerrc +
3675 adapter->stats.crcerrs + adapter->stats.algnerrc + 3918 adapter->stats.crcerrs + adapter->stats.algnerrc +
3676 adapter->stats.ruc + adapter->stats.roc + 3919 adapter->stats.ruc + adapter->stats.roc +
3677 adapter->stats.cexterr; 3920 adapter->stats.cexterr;
3678 adapter->net_stats.rx_length_errors = adapter->stats.ruc + 3921 net_stats->rx_length_errors = adapter->stats.ruc +
3679 adapter->stats.roc; 3922 adapter->stats.roc;
3680 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 3923 net_stats->rx_crc_errors = adapter->stats.crcerrs;
3681 adapter->net_stats.rx_frame_errors = adapter->stats.algnerrc; 3924 net_stats->rx_frame_errors = adapter->stats.algnerrc;
3682 adapter->net_stats.rx_missed_errors = adapter->stats.mpc; 3925 net_stats->rx_missed_errors = adapter->stats.mpc;
3683 3926
3684 /* Tx Errors */ 3927 /* Tx Errors */
3685 adapter->net_stats.tx_errors = adapter->stats.ecol + 3928 net_stats->tx_errors = adapter->stats.ecol +
3686 adapter->stats.latecol; 3929 adapter->stats.latecol;
3687 adapter->net_stats.tx_aborted_errors = adapter->stats.ecol; 3930 net_stats->tx_aborted_errors = adapter->stats.ecol;
3688 adapter->net_stats.tx_window_errors = adapter->stats.latecol; 3931 net_stats->tx_window_errors = adapter->stats.latecol;
3689 adapter->net_stats.tx_carrier_errors = adapter->stats.tncrs; 3932 net_stats->tx_carrier_errors = adapter->stats.tncrs;
3690 3933
3691 /* Tx Dropped needs to be maintained elsewhere */ 3934 /* Tx Dropped needs to be maintained elsewhere */
3692 3935
@@ -3707,14 +3950,12 @@ void igb_update_stats(struct igb_adapter *adapter)
3707 3950
3708static irqreturn_t igb_msix_other(int irq, void *data) 3951static irqreturn_t igb_msix_other(int irq, void *data)
3709{ 3952{
3710 struct net_device *netdev = data; 3953 struct igb_adapter *adapter = data;
3711 struct igb_adapter *adapter = netdev_priv(netdev);
3712 struct e1000_hw *hw = &adapter->hw; 3954 struct e1000_hw *hw = &adapter->hw;
3713 u32 icr = rd32(E1000_ICR); 3955 u32 icr = rd32(E1000_ICR);
3714
3715 /* reading ICR causes bit 31 of EICR to be cleared */ 3956 /* reading ICR causes bit 31 of EICR to be cleared */
3716 3957
3717 if(icr & E1000_ICR_DOUTSYNC) { 3958 if (icr & E1000_ICR_DOUTSYNC) {
3718 /* HW is reporting DMA is out of sync */ 3959 /* HW is reporting DMA is out of sync */
3719 adapter->stats.doosync++; 3960 adapter->stats.doosync++;
3720 } 3961 }
@@ -3730,125 +3971,90 @@ static irqreturn_t igb_msix_other(int irq, void *data)
3730 mod_timer(&adapter->watchdog_timer, jiffies + 1); 3971 mod_timer(&adapter->watchdog_timer, jiffies + 1);
3731 } 3972 }
3732 3973
3733 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_VMMB); 3974 if (adapter->vfs_allocated_count)
3975 wr32(E1000_IMS, E1000_IMS_LSC |
3976 E1000_IMS_VMMB |
3977 E1000_IMS_DOUTSYNC);
3978 else
3979 wr32(E1000_IMS, E1000_IMS_LSC | E1000_IMS_DOUTSYNC);
3734 wr32(E1000_EIMS, adapter->eims_other); 3980 wr32(E1000_EIMS, adapter->eims_other);
3735 3981
3736 return IRQ_HANDLED; 3982 return IRQ_HANDLED;
3737} 3983}
3738 3984
3739static irqreturn_t igb_msix_tx(int irq, void *data) 3985static void igb_write_itr(struct igb_q_vector *q_vector)
3740{ 3986{
3741 struct igb_ring *tx_ring = data; 3987 u32 itr_val = q_vector->itr_val & 0x7FFC;
3742 struct igb_adapter *adapter = tx_ring->adapter;
3743 struct e1000_hw *hw = &adapter->hw;
3744 3988
3745#ifdef CONFIG_IGB_DCA 3989 if (!q_vector->set_itr)
3746 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 3990 return;
3747 igb_update_tx_dca(tx_ring);
3748#endif
3749 3991
3750 tx_ring->total_bytes = 0; 3992 if (!itr_val)
3751 tx_ring->total_packets = 0; 3993 itr_val = 0x4;
3752 3994
3753 /* auto mask will automatically reenable the interrupt when we write 3995 if (q_vector->itr_shift)
3754 * EICS */ 3996 itr_val |= itr_val << q_vector->itr_shift;
3755 if (!igb_clean_tx_irq(tx_ring))
3756 /* Ring was not completely cleaned, so fire another interrupt */
3757 wr32(E1000_EICS, tx_ring->eims_value);
3758 else 3997 else
3759 wr32(E1000_EIMS, tx_ring->eims_value); 3998 itr_val |= 0x8000000;
3760 3999
3761 return IRQ_HANDLED; 4000 writel(itr_val, q_vector->itr_register);
4001 q_vector->set_itr = 0;
3762} 4002}
3763 4003
3764static void igb_write_itr(struct igb_ring *ring) 4004static irqreturn_t igb_msix_ring(int irq, void *data)
3765{ 4005{
3766 struct e1000_hw *hw = &ring->adapter->hw; 4006 struct igb_q_vector *q_vector = data;
3767 if ((ring->adapter->itr_setting & 3) && ring->set_itr) {
3768 switch (hw->mac.type) {
3769 case e1000_82576:
3770 wr32(ring->itr_register, ring->itr_val |
3771 0x80000000);
3772 break;
3773 default:
3774 wr32(ring->itr_register, ring->itr_val |
3775 (ring->itr_val << 16));
3776 break;
3777 }
3778 ring->set_itr = 0;
3779 }
3780}
3781 4007
3782static irqreturn_t igb_msix_rx(int irq, void *data) 4008 /* Write the ITR value calculated from the previous interrupt. */
3783{ 4009 igb_write_itr(q_vector);
3784 struct igb_ring *rx_ring = data;
3785 4010
3786 /* Write the ITR value calculated at the end of the 4011 napi_schedule(&q_vector->napi);
3787 * previous interrupt.
3788 */
3789
3790 igb_write_itr(rx_ring);
3791 4012
3792 if (napi_schedule_prep(&rx_ring->napi)) 4013 return IRQ_HANDLED;
3793 __napi_schedule(&rx_ring->napi);
3794
3795#ifdef CONFIG_IGB_DCA
3796 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED)
3797 igb_update_rx_dca(rx_ring);
3798#endif
3799 return IRQ_HANDLED;
3800} 4014}
3801 4015
3802#ifdef CONFIG_IGB_DCA 4016#ifdef CONFIG_IGB_DCA
3803static void igb_update_rx_dca(struct igb_ring *rx_ring) 4017static void igb_update_dca(struct igb_q_vector *q_vector)
3804{ 4018{
3805 u32 dca_rxctrl; 4019 struct igb_adapter *adapter = q_vector->adapter;
3806 struct igb_adapter *adapter = rx_ring->adapter;
3807 struct e1000_hw *hw = &adapter->hw; 4020 struct e1000_hw *hw = &adapter->hw;
3808 int cpu = get_cpu(); 4021 int cpu = get_cpu();
3809 int q = rx_ring->reg_idx;
3810 4022
3811 if (rx_ring->cpu != cpu) { 4023 if (q_vector->cpu == cpu)
3812 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q)); 4024 goto out_no_update;
3813 if (hw->mac.type == e1000_82576) { 4025
3814 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576; 4026 if (q_vector->tx_ring) {
3815 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) << 4027 int q = q_vector->tx_ring->reg_idx;
3816 E1000_DCA_RXCTRL_CPUID_SHIFT; 4028 u32 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
4029 if (hw->mac.type == e1000_82575) {
4030 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
4031 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
3817 } else { 4032 } else {
4033 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
4034 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4035 E1000_DCA_TXCTRL_CPUID_SHIFT;
4036 }
4037 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
4038 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
4039 }
4040 if (q_vector->rx_ring) {
4041 int q = q_vector->rx_ring->reg_idx;
4042 u32 dca_rxctrl = rd32(E1000_DCA_RXCTRL(q));
4043 if (hw->mac.type == e1000_82575) {
3818 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK; 4044 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK;
3819 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu); 4045 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
4046 } else {
4047 dca_rxctrl &= ~E1000_DCA_RXCTRL_CPUID_MASK_82576;
4048 dca_rxctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
4049 E1000_DCA_RXCTRL_CPUID_SHIFT;
3820 } 4050 }
3821 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN; 4051 dca_rxctrl |= E1000_DCA_RXCTRL_DESC_DCA_EN;
3822 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN; 4052 dca_rxctrl |= E1000_DCA_RXCTRL_HEAD_DCA_EN;
3823 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN; 4053 dca_rxctrl |= E1000_DCA_RXCTRL_DATA_DCA_EN;
3824 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl); 4054 wr32(E1000_DCA_RXCTRL(q), dca_rxctrl);
3825 rx_ring->cpu = cpu;
3826 }
3827 put_cpu();
3828}
3829
3830static void igb_update_tx_dca(struct igb_ring *tx_ring)
3831{
3832 u32 dca_txctrl;
3833 struct igb_adapter *adapter = tx_ring->adapter;
3834 struct e1000_hw *hw = &adapter->hw;
3835 int cpu = get_cpu();
3836 int q = tx_ring->reg_idx;
3837
3838 if (tx_ring->cpu != cpu) {
3839 dca_txctrl = rd32(E1000_DCA_TXCTRL(q));
3840 if (hw->mac.type == e1000_82576) {
3841 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK_82576;
3842 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu) <<
3843 E1000_DCA_TXCTRL_CPUID_SHIFT;
3844 } else {
3845 dca_txctrl &= ~E1000_DCA_TXCTRL_CPUID_MASK;
3846 dca_txctrl |= dca3_get_tag(&adapter->pdev->dev, cpu);
3847 }
3848 dca_txctrl |= E1000_DCA_TXCTRL_DESC_DCA_EN;
3849 wr32(E1000_DCA_TXCTRL(q), dca_txctrl);
3850 tx_ring->cpu = cpu;
3851 } 4055 }
4056 q_vector->cpu = cpu;
4057out_no_update:
3852 put_cpu(); 4058 put_cpu();
3853} 4059}
3854 4060
@@ -3863,13 +4069,10 @@ static void igb_setup_dca(struct igb_adapter *adapter)
3863 /* Always use CB2 mode, difference is masked in the CB driver. */ 4069 /* Always use CB2 mode, difference is masked in the CB driver. */
3864 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2); 4070 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
3865 4071
3866 for (i = 0; i < adapter->num_tx_queues; i++) { 4072 for (i = 0; i < adapter->num_q_vectors; i++) {
3867 adapter->tx_ring[i].cpu = -1; 4073 struct igb_q_vector *q_vector = adapter->q_vector[i];
3868 igb_update_tx_dca(&adapter->tx_ring[i]); 4074 q_vector->cpu = -1;
3869 } 4075 igb_update_dca(q_vector);
3870 for (i = 0; i < adapter->num_rx_queues; i++) {
3871 adapter->rx_ring[i].cpu = -1;
3872 igb_update_rx_dca(&adapter->rx_ring[i]);
3873 } 4076 }
3874} 4077}
3875 4078
@@ -3877,6 +4080,7 @@ static int __igb_notify_dca(struct device *dev, void *data)
3877{ 4080{
3878 struct net_device *netdev = dev_get_drvdata(dev); 4081 struct net_device *netdev = dev_get_drvdata(dev);
3879 struct igb_adapter *adapter = netdev_priv(netdev); 4082 struct igb_adapter *adapter = netdev_priv(netdev);
4083 struct pci_dev *pdev = adapter->pdev;
3880 struct e1000_hw *hw = &adapter->hw; 4084 struct e1000_hw *hw = &adapter->hw;
3881 unsigned long event = *(unsigned long *)data; 4085 unsigned long event = *(unsigned long *)data;
3882 4086
@@ -3885,12 +4089,9 @@ static int __igb_notify_dca(struct device *dev, void *data)
3885 /* if already enabled, don't do it again */ 4089 /* if already enabled, don't do it again */
3886 if (adapter->flags & IGB_FLAG_DCA_ENABLED) 4090 if (adapter->flags & IGB_FLAG_DCA_ENABLED)
3887 break; 4091 break;
3888 /* Always use CB2 mode, difference is masked
3889 * in the CB driver. */
3890 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_CB2);
3891 if (dca_add_requester(dev) == 0) { 4092 if (dca_add_requester(dev) == 0) {
3892 adapter->flags |= IGB_FLAG_DCA_ENABLED; 4093 adapter->flags |= IGB_FLAG_DCA_ENABLED;
3893 dev_info(&adapter->pdev->dev, "DCA enabled\n"); 4094 dev_info(&pdev->dev, "DCA enabled\n");
3894 igb_setup_dca(adapter); 4095 igb_setup_dca(adapter);
3895 break; 4096 break;
3896 } 4097 }
@@ -3898,9 +4099,9 @@ static int __igb_notify_dca(struct device *dev, void *data)
3898 case DCA_PROVIDER_REMOVE: 4099 case DCA_PROVIDER_REMOVE:
3899 if (adapter->flags & IGB_FLAG_DCA_ENABLED) { 4100 if (adapter->flags & IGB_FLAG_DCA_ENABLED) {
3900 /* without this a class_device is left 4101 /* without this a class_device is left
3901 * hanging around in the sysfs model */ 4102 * hanging around in the sysfs model */
3902 dca_remove_requester(dev); 4103 dca_remove_requester(dev);
3903 dev_info(&adapter->pdev->dev, "DCA disabled\n"); 4104 dev_info(&pdev->dev, "DCA disabled\n");
3904 adapter->flags &= ~IGB_FLAG_DCA_ENABLED; 4105 adapter->flags &= ~IGB_FLAG_DCA_ENABLED;
3905 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE); 4106 wr32(E1000_DCA_CTRL, E1000_DCA_CTRL_DCA_MODE_DISABLE);
3906 } 4107 }
@@ -3930,12 +4131,51 @@ static void igb_ping_all_vfs(struct igb_adapter *adapter)
3930 4131
3931 for (i = 0 ; i < adapter->vfs_allocated_count; i++) { 4132 for (i = 0 ; i < adapter->vfs_allocated_count; i++) {
3932 ping = E1000_PF_CONTROL_MSG; 4133 ping = E1000_PF_CONTROL_MSG;
3933 if (adapter->vf_data[i].clear_to_send) 4134 if (adapter->vf_data[i].flags & IGB_VF_FLAG_CTS)
3934 ping |= E1000_VT_MSGTYPE_CTS; 4135 ping |= E1000_VT_MSGTYPE_CTS;
3935 igb_write_mbx(hw, &ping, 1, i); 4136 igb_write_mbx(hw, &ping, 1, i);
3936 } 4137 }
3937} 4138}
3938 4139
4140static int igb_set_vf_promisc(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4141{
4142 struct e1000_hw *hw = &adapter->hw;
4143 u32 vmolr = rd32(E1000_VMOLR(vf));
4144 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4145
4146 vf_data->flags |= ~(IGB_VF_FLAG_UNI_PROMISC |
4147 IGB_VF_FLAG_MULTI_PROMISC);
4148 vmolr &= ~(E1000_VMOLR_ROPE | E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4149
4150 if (*msgbuf & E1000_VF_SET_PROMISC_MULTICAST) {
4151 vmolr |= E1000_VMOLR_MPME;
4152 *msgbuf &= ~E1000_VF_SET_PROMISC_MULTICAST;
4153 } else {
4154 /*
4155 * if we have hashes and we are clearing a multicast promisc
4156 * flag we need to write the hashes to the MTA as this step
4157 * was previously skipped
4158 */
4159 if (vf_data->num_vf_mc_hashes > 30) {
4160 vmolr |= E1000_VMOLR_MPME;
4161 } else if (vf_data->num_vf_mc_hashes) {
4162 int j;
4163 vmolr |= E1000_VMOLR_ROMPE;
4164 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4165 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4166 }
4167 }
4168
4169 wr32(E1000_VMOLR(vf), vmolr);
4170
4171 /* there are flags left unprocessed, likely not supported */
4172 if (*msgbuf & E1000_VT_MSGINFO_MASK)
4173 return -EINVAL;
4174
4175 return 0;
4176
4177}
4178
3939static int igb_set_vf_multicasts(struct igb_adapter *adapter, 4179static int igb_set_vf_multicasts(struct igb_adapter *adapter,
3940 u32 *msgbuf, u32 vf) 4180 u32 *msgbuf, u32 vf)
3941{ 4181{
@@ -3944,18 +4184,17 @@ static int igb_set_vf_multicasts(struct igb_adapter *adapter,
3944 struct vf_data_storage *vf_data = &adapter->vf_data[vf]; 4184 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
3945 int i; 4185 int i;
3946 4186
3947 /* only up to 30 hash values supported */ 4187 /* salt away the number of multicast addresses assigned
3948 if (n > 30)
3949 n = 30;
3950
3951 /* salt away the number of multi cast addresses assigned
3952 * to this VF for later use to restore when the PF multi cast 4188 * to this VF for later use to restore when the PF multi cast
3953 * list changes 4189 * list changes
3954 */ 4190 */
3955 vf_data->num_vf_mc_hashes = n; 4191 vf_data->num_vf_mc_hashes = n;
3956 4192
3957 /* VFs are limited to using the MTA hash table for their multicast 4193 /* only up to 30 hash values supported */
3958 * addresses */ 4194 if (n > 30)
4195 n = 30;
4196
4197 /* store the hashes for later use */
3959 for (i = 0; i < n; i++) 4198 for (i = 0; i < n; i++)
3960 vf_data->vf_mc_hashes[i] = hash_list[i]; 4199 vf_data->vf_mc_hashes[i] = hash_list[i];
3961 4200
@@ -3972,9 +4211,20 @@ static void igb_restore_vf_multicasts(struct igb_adapter *adapter)
3972 int i, j; 4211 int i, j;
3973 4212
3974 for (i = 0; i < adapter->vfs_allocated_count; i++) { 4213 for (i = 0; i < adapter->vfs_allocated_count; i++) {
4214 u32 vmolr = rd32(E1000_VMOLR(i));
4215 vmolr &= ~(E1000_VMOLR_ROMPE | E1000_VMOLR_MPME);
4216
3975 vf_data = &adapter->vf_data[i]; 4217 vf_data = &adapter->vf_data[i];
3976 for (j = 0; j < vf_data->num_vf_mc_hashes; j++) 4218
3977 igb_mta_set(hw, vf_data->vf_mc_hashes[j]); 4219 if ((vf_data->num_vf_mc_hashes > 30) ||
4220 (vf_data->flags & IGB_VF_FLAG_MULTI_PROMISC)) {
4221 vmolr |= E1000_VMOLR_MPME;
4222 } else if (vf_data->num_vf_mc_hashes) {
4223 vmolr |= E1000_VMOLR_ROMPE;
4224 for (j = 0; j < vf_data->num_vf_mc_hashes; j++)
4225 igb_mta_set(hw, vf_data->vf_mc_hashes[j]);
4226 }
4227 wr32(E1000_VMOLR(i), vmolr);
3978 } 4228 }
3979} 4229}
3980 4230
@@ -4012,7 +4262,11 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4012 struct e1000_hw *hw = &adapter->hw; 4262 struct e1000_hw *hw = &adapter->hw;
4013 u32 reg, i; 4263 u32 reg, i;
4014 4264
4015 /* It is an error to call this function when VFs are not enabled */ 4265 /* The vlvf table only exists on 82576 hardware and newer */
4266 if (hw->mac.type < e1000_82576)
4267 return -1;
4268
4269 /* we only need to do this if VMDq is enabled */
4016 if (!adapter->vfs_allocated_count) 4270 if (!adapter->vfs_allocated_count)
4017 return -1; 4271 return -1;
4018 4272
@@ -4042,16 +4296,12 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4042 4296
4043 /* if !enabled we need to set this up in vfta */ 4297 /* if !enabled we need to set this up in vfta */
4044 if (!(reg & E1000_VLVF_VLANID_ENABLE)) { 4298 if (!(reg & E1000_VLVF_VLANID_ENABLE)) {
4045 /* add VID to filter table, if bit already set 4299 /* add VID to filter table */
4046 * PF must have added it outside of table */ 4300 igb_vfta_set(hw, vid, true);
4047 if (igb_vfta_set(hw, vid, true))
4048 reg |= 1 << (E1000_VLVF_POOLSEL_SHIFT +
4049 adapter->vfs_allocated_count);
4050 reg |= E1000_VLVF_VLANID_ENABLE; 4301 reg |= E1000_VLVF_VLANID_ENABLE;
4051 } 4302 }
4052 reg &= ~E1000_VLVF_VLANID_MASK; 4303 reg &= ~E1000_VLVF_VLANID_MASK;
4053 reg |= vid; 4304 reg |= vid;
4054
4055 wr32(E1000_VLVF(i), reg); 4305 wr32(E1000_VLVF(i), reg);
4056 4306
4057 /* do not modify RLPML for PF devices */ 4307 /* do not modify RLPML for PF devices */
@@ -4067,8 +4317,8 @@ static s32 igb_vlvf_set(struct igb_adapter *adapter, u32 vid, bool add, u32 vf)
4067 reg |= size; 4317 reg |= size;
4068 wr32(E1000_VMOLR(vf), reg); 4318 wr32(E1000_VMOLR(vf), reg);
4069 } 4319 }
4070 adapter->vf_data[vf].vlans_enabled++;
4071 4320
4321 adapter->vf_data[vf].vlans_enabled++;
4072 return 0; 4322 return 0;
4073 } 4323 }
4074 } else { 4324 } else {
@@ -4110,15 +4360,14 @@ static int igb_set_vf_vlan(struct igb_adapter *adapter, u32 *msgbuf, u32 vf)
4110 return igb_vlvf_set(adapter, vid, add, vf); 4360 return igb_vlvf_set(adapter, vid, add, vf);
4111} 4361}
4112 4362
4113static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf) 4363static inline void igb_vf_reset(struct igb_adapter *adapter, u32 vf)
4114{ 4364{
4115 struct e1000_hw *hw = &adapter->hw; 4365 /* clear all flags */
4116 4366 adapter->vf_data[vf].flags = 0;
4117 /* disable mailbox functionality for vf */ 4367 adapter->vf_data[vf].last_nack = jiffies;
4118 adapter->vf_data[vf].clear_to_send = false;
4119 4368
4120 /* reset offloads to defaults */ 4369 /* reset offloads to defaults */
4121 igb_set_vmolr(hw, vf); 4370 igb_set_vmolr(adapter, vf);
4122 4371
4123 /* reset vlans for device */ 4372 /* reset vlans for device */
4124 igb_clear_vf_vfta(adapter, vf); 4373 igb_clear_vf_vfta(adapter, vf);
@@ -4130,7 +4379,18 @@ static inline void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
4130 igb_set_rx_mode(adapter->netdev); 4379 igb_set_rx_mode(adapter->netdev);
4131} 4380}
4132 4381
4133static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf) 4382static void igb_vf_reset_event(struct igb_adapter *adapter, u32 vf)
4383{
4384 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
4385
4386 /* generate a new mac address as we were hotplug removed/added */
4387 random_ether_addr(vf_mac);
4388
4389 /* process remaining reset events */
4390 igb_vf_reset(adapter, vf);
4391}
4392
4393static void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4134{ 4394{
4135 struct e1000_hw *hw = &adapter->hw; 4395 struct e1000_hw *hw = &adapter->hw;
4136 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses; 4396 unsigned char *vf_mac = adapter->vf_data[vf].vf_mac_addresses;
@@ -4139,11 +4399,10 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4139 u8 *addr = (u8 *)(&msgbuf[1]); 4399 u8 *addr = (u8 *)(&msgbuf[1]);
4140 4400
4141 /* process all the same items cleared in a function level reset */ 4401 /* process all the same items cleared in a function level reset */
4142 igb_vf_reset_event(adapter, vf); 4402 igb_vf_reset(adapter, vf);
4143 4403
4144 /* set vf mac address */ 4404 /* set vf mac address */
4145 igb_rar_set(hw, vf_mac, rar_entry); 4405 igb_rar_set_qsel(adapter, vf_mac, rar_entry, vf);
4146 igb_set_rah_pool(hw, vf, rar_entry);
4147 4406
4148 /* enable transmit and receive for vf */ 4407 /* enable transmit and receive for vf */
4149 reg = rd32(E1000_VFTE); 4408 reg = rd32(E1000_VFTE);
@@ -4151,8 +4410,7 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4151 reg = rd32(E1000_VFRE); 4410 reg = rd32(E1000_VFRE);
4152 wr32(E1000_VFRE, reg | (1 << vf)); 4411 wr32(E1000_VFRE, reg | (1 << vf));
4153 4412
4154 /* enable mailbox functionality for vf */ 4413 adapter->vf_data[vf].flags = IGB_VF_FLAG_CTS;
4155 adapter->vf_data[vf].clear_to_send = true;
4156 4414
4157 /* reply to reset with ack and vf mac address */ 4415 /* reply to reset with ack and vf mac address */
4158 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK; 4416 msgbuf[0] = E1000_VF_RESET | E1000_VT_MSGTYPE_ACK;
@@ -4162,66 +4420,45 @@ static inline void igb_vf_reset_msg(struct igb_adapter *adapter, u32 vf)
4162 4420
4163static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf) 4421static int igb_set_vf_mac_addr(struct igb_adapter *adapter, u32 *msg, int vf)
4164{ 4422{
4165 unsigned char *addr = (char *)&msg[1]; 4423 unsigned char *addr = (char *)&msg[1];
4166 int err = -1; 4424 int err = -1;
4167
4168 if (is_valid_ether_addr(addr))
4169 err = igb_set_vf_mac(adapter, vf, addr);
4170 4425
4171 return err; 4426 if (is_valid_ether_addr(addr))
4427 err = igb_set_vf_mac(adapter, vf, addr);
4172 4428
4429 return err;
4173} 4430}
4174 4431
4175static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf) 4432static void igb_rcv_ack_from_vf(struct igb_adapter *adapter, u32 vf)
4176{ 4433{
4177 struct e1000_hw *hw = &adapter->hw; 4434 struct e1000_hw *hw = &adapter->hw;
4435 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4178 u32 msg = E1000_VT_MSGTYPE_NACK; 4436 u32 msg = E1000_VT_MSGTYPE_NACK;
4179 4437
4180 /* if device isn't clear to send it shouldn't be reading either */ 4438 /* if device isn't clear to send it shouldn't be reading either */
4181 if (!adapter->vf_data[vf].clear_to_send) 4439 if (!(vf_data->flags & IGB_VF_FLAG_CTS) &&
4440 time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
4182 igb_write_mbx(hw, &msg, 1, vf); 4441 igb_write_mbx(hw, &msg, 1, vf);
4183} 4442 vf_data->last_nack = jiffies;
4184
4185
4186static void igb_msg_task(struct igb_adapter *adapter)
4187{
4188 struct e1000_hw *hw = &adapter->hw;
4189 u32 vf;
4190
4191 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
4192 /* process any reset requests */
4193 if (!igb_check_for_rst(hw, vf)) {
4194 adapter->vf_data[vf].clear_to_send = false;
4195 igb_vf_reset_event(adapter, vf);
4196 }
4197
4198 /* process any messages pending */
4199 if (!igb_check_for_msg(hw, vf))
4200 igb_rcv_msg_from_vf(adapter, vf);
4201
4202 /* process any acks */
4203 if (!igb_check_for_ack(hw, vf))
4204 igb_rcv_ack_from_vf(adapter, vf);
4205
4206 } 4443 }
4207} 4444}
4208 4445
4209static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf) 4446static void igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4210{ 4447{
4211 u32 mbx_size = E1000_VFMAILBOX_SIZE; 4448 struct pci_dev *pdev = adapter->pdev;
4212 u32 msgbuf[mbx_size]; 4449 u32 msgbuf[E1000_VFMAILBOX_SIZE];
4213 struct e1000_hw *hw = &adapter->hw; 4450 struct e1000_hw *hw = &adapter->hw;
4451 struct vf_data_storage *vf_data = &adapter->vf_data[vf];
4214 s32 retval; 4452 s32 retval;
4215 4453
4216 retval = igb_read_mbx(hw, msgbuf, mbx_size, vf); 4454 retval = igb_read_mbx(hw, msgbuf, E1000_VFMAILBOX_SIZE, vf);
4217 4455
4218 if (retval) 4456 if (retval)
4219 dev_err(&adapter->pdev->dev, 4457 dev_err(&pdev->dev, "Error receiving message from VF\n");
4220 "Error receiving message from VF\n");
4221 4458
4222 /* this is a message we already processed, do nothing */ 4459 /* this is a message we already processed, do nothing */
4223 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK)) 4460 if (msgbuf[0] & (E1000_VT_MSGTYPE_ACK | E1000_VT_MSGTYPE_NACK))
4224 return retval; 4461 return;
4225 4462
4226 /* 4463 /*
4227 * until the vf completes a reset it should not be 4464 * until the vf completes a reset it should not be
@@ -4230,20 +4467,25 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4230 4467
4231 if (msgbuf[0] == E1000_VF_RESET) { 4468 if (msgbuf[0] == E1000_VF_RESET) {
4232 igb_vf_reset_msg(adapter, vf); 4469 igb_vf_reset_msg(adapter, vf);
4233 4470 return;
4234 return retval;
4235 } 4471 }
4236 4472
4237 if (!adapter->vf_data[vf].clear_to_send) { 4473 if (!(vf_data->flags & IGB_VF_FLAG_CTS)) {
4238 msgbuf[0] |= E1000_VT_MSGTYPE_NACK; 4474 msgbuf[0] = E1000_VT_MSGTYPE_NACK;
4239 igb_write_mbx(hw, msgbuf, 1, vf); 4475 if (time_after(jiffies, vf_data->last_nack + (2 * HZ))) {
4240 return retval; 4476 igb_write_mbx(hw, msgbuf, 1, vf);
4477 vf_data->last_nack = jiffies;
4478 }
4479 return;
4241 } 4480 }
4242 4481
4243 switch ((msgbuf[0] & 0xFFFF)) { 4482 switch ((msgbuf[0] & 0xFFFF)) {
4244 case E1000_VF_SET_MAC_ADDR: 4483 case E1000_VF_SET_MAC_ADDR:
4245 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf); 4484 retval = igb_set_vf_mac_addr(adapter, msgbuf, vf);
4246 break; 4485 break;
4486 case E1000_VF_SET_PROMISC:
4487 retval = igb_set_vf_promisc(adapter, msgbuf, vf);
4488 break;
4247 case E1000_VF_SET_MULTICAST: 4489 case E1000_VF_SET_MULTICAST:
4248 retval = igb_set_vf_multicasts(adapter, msgbuf, vf); 4490 retval = igb_set_vf_multicasts(adapter, msgbuf, vf);
4249 break; 4491 break;
@@ -4254,7 +4496,7 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4254 retval = igb_set_vf_vlan(adapter, msgbuf, vf); 4496 retval = igb_set_vf_vlan(adapter, msgbuf, vf);
4255 break; 4497 break;
4256 default: 4498 default:
4257 dev_err(&adapter->pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]); 4499 dev_err(&pdev->dev, "Unhandled Msg %08x\n", msgbuf[0]);
4258 retval = -1; 4500 retval = -1;
4259 break; 4501 break;
4260 } 4502 }
@@ -4268,8 +4510,53 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4268 msgbuf[0] |= E1000_VT_MSGTYPE_CTS; 4510 msgbuf[0] |= E1000_VT_MSGTYPE_CTS;
4269 4511
4270 igb_write_mbx(hw, msgbuf, 1, vf); 4512 igb_write_mbx(hw, msgbuf, 1, vf);
4513}
4271 4514
4272 return retval; 4515static void igb_msg_task(struct igb_adapter *adapter)
4516{
4517 struct e1000_hw *hw = &adapter->hw;
4518 u32 vf;
4519
4520 for (vf = 0; vf < adapter->vfs_allocated_count; vf++) {
4521 /* process any reset requests */
4522 if (!igb_check_for_rst(hw, vf))
4523 igb_vf_reset_event(adapter, vf);
4524
4525 /* process any messages pending */
4526 if (!igb_check_for_msg(hw, vf))
4527 igb_rcv_msg_from_vf(adapter, vf);
4528
4529 /* process any acks */
4530 if (!igb_check_for_ack(hw, vf))
4531 igb_rcv_ack_from_vf(adapter, vf);
4532 }
4533}
4534
4535/**
4536 * igb_set_uta - Set unicast filter table address
4537 * @adapter: board private structure
4538 *
4539 * The unicast table address is a register array of 32-bit registers.
4540 * The table is meant to be used in a way similar to how the MTA is used
4541 * however due to certain limitations in the hardware it is necessary to
4542 * set all the hash bits to 1 and use the VMOLR ROPE bit as a promiscous
4543 * enable bit to allow vlan tag stripping when promiscous mode is enabled
4544 **/
4545static void igb_set_uta(struct igb_adapter *adapter)
4546{
4547 struct e1000_hw *hw = &adapter->hw;
4548 int i;
4549
4550 /* The UTA table only exists on 82576 hardware and newer */
4551 if (hw->mac.type < e1000_82576)
4552 return;
4553
4554 /* we only need to do this if VMDq is enabled */
4555 if (!adapter->vfs_allocated_count)
4556 return;
4557
4558 for (i = 0; i < hw->mac.uta_reg_count; i++)
4559 array_wr32(E1000_UTA, i, ~0);
4273} 4560}
4274 4561
4275/** 4562/**
@@ -4279,15 +4566,15 @@ static int igb_rcv_msg_from_vf(struct igb_adapter *adapter, u32 vf)
4279 **/ 4566 **/
4280static irqreturn_t igb_intr_msi(int irq, void *data) 4567static irqreturn_t igb_intr_msi(int irq, void *data)
4281{ 4568{
4282 struct net_device *netdev = data; 4569 struct igb_adapter *adapter = data;
4283 struct igb_adapter *adapter = netdev_priv(netdev); 4570 struct igb_q_vector *q_vector = adapter->q_vector[0];
4284 struct e1000_hw *hw = &adapter->hw; 4571 struct e1000_hw *hw = &adapter->hw;
4285 /* read ICR disables interrupts using IAM */ 4572 /* read ICR disables interrupts using IAM */
4286 u32 icr = rd32(E1000_ICR); 4573 u32 icr = rd32(E1000_ICR);
4287 4574
4288 igb_write_itr(adapter->rx_ring); 4575 igb_write_itr(q_vector);
4289 4576
4290 if(icr & E1000_ICR_DOUTSYNC) { 4577 if (icr & E1000_ICR_DOUTSYNC) {
4291 /* HW is reporting DMA is out of sync */ 4578 /* HW is reporting DMA is out of sync */
4292 adapter->stats.doosync++; 4579 adapter->stats.doosync++;
4293 } 4580 }
@@ -4298,7 +4585,7 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
4298 mod_timer(&adapter->watchdog_timer, jiffies + 1); 4585 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4299 } 4586 }
4300 4587
4301 napi_schedule(&adapter->rx_ring[0].napi); 4588 napi_schedule(&q_vector->napi);
4302 4589
4303 return IRQ_HANDLED; 4590 return IRQ_HANDLED;
4304} 4591}
@@ -4310,8 +4597,8 @@ static irqreturn_t igb_intr_msi(int irq, void *data)
4310 **/ 4597 **/
4311static irqreturn_t igb_intr(int irq, void *data) 4598static irqreturn_t igb_intr(int irq, void *data)
4312{ 4599{
4313 struct net_device *netdev = data; 4600 struct igb_adapter *adapter = data;
4314 struct igb_adapter *adapter = netdev_priv(netdev); 4601 struct igb_q_vector *q_vector = adapter->q_vector[0];
4315 struct e1000_hw *hw = &adapter->hw; 4602 struct e1000_hw *hw = &adapter->hw;
4316 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No 4603 /* Interrupt Auto-Mask...upon reading ICR, interrupts are masked. No
4317 * need for the IMC write */ 4604 * need for the IMC write */
@@ -4319,14 +4606,14 @@ static irqreturn_t igb_intr(int irq, void *data)
4319 if (!icr) 4606 if (!icr)
4320 return IRQ_NONE; /* Not our interrupt */ 4607 return IRQ_NONE; /* Not our interrupt */
4321 4608
4322 igb_write_itr(adapter->rx_ring); 4609 igb_write_itr(q_vector);
4323 4610
4324 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is 4611 /* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
4325 * not set, then the adapter didn't send an interrupt */ 4612 * not set, then the adapter didn't send an interrupt */
4326 if (!(icr & E1000_ICR_INT_ASSERTED)) 4613 if (!(icr & E1000_ICR_INT_ASSERTED))
4327 return IRQ_NONE; 4614 return IRQ_NONE;
4328 4615
4329 if(icr & E1000_ICR_DOUTSYNC) { 4616 if (icr & E1000_ICR_DOUTSYNC) {
4330 /* HW is reporting DMA is out of sync */ 4617 /* HW is reporting DMA is out of sync */
4331 adapter->stats.doosync++; 4618 adapter->stats.doosync++;
4332 } 4619 }
@@ -4338,26 +4625,27 @@ static irqreturn_t igb_intr(int irq, void *data)
4338 mod_timer(&adapter->watchdog_timer, jiffies + 1); 4625 mod_timer(&adapter->watchdog_timer, jiffies + 1);
4339 } 4626 }
4340 4627
4341 napi_schedule(&adapter->rx_ring[0].napi); 4628 napi_schedule(&q_vector->napi);
4342 4629
4343 return IRQ_HANDLED; 4630 return IRQ_HANDLED;
4344} 4631}
4345 4632
4346static inline void igb_rx_irq_enable(struct igb_ring *rx_ring) 4633static inline void igb_ring_irq_enable(struct igb_q_vector *q_vector)
4347{ 4634{
4348 struct igb_adapter *adapter = rx_ring->adapter; 4635 struct igb_adapter *adapter = q_vector->adapter;
4349 struct e1000_hw *hw = &adapter->hw; 4636 struct e1000_hw *hw = &adapter->hw;
4350 4637
4351 if (adapter->itr_setting & 3) { 4638 if ((q_vector->rx_ring && (adapter->rx_itr_setting & 3)) ||
4352 if (adapter->num_rx_queues == 1) 4639 (!q_vector->rx_ring && (adapter->tx_itr_setting & 3))) {
4640 if (!adapter->msix_entries)
4353 igb_set_itr(adapter); 4641 igb_set_itr(adapter);
4354 else 4642 else
4355 igb_update_ring_itr(rx_ring); 4643 igb_update_ring_itr(q_vector);
4356 } 4644 }
4357 4645
4358 if (!test_bit(__IGB_DOWN, &adapter->state)) { 4646 if (!test_bit(__IGB_DOWN, &adapter->state)) {
4359 if (adapter->msix_entries) 4647 if (adapter->msix_entries)
4360 wr32(E1000_EIMS, rx_ring->eims_value); 4648 wr32(E1000_EIMS, q_vector->eims_value);
4361 else 4649 else
4362 igb_irq_enable(adapter); 4650 igb_irq_enable(adapter);
4363 } 4651 }
@@ -4370,76 +4658,94 @@ static inline void igb_rx_irq_enable(struct igb_ring *rx_ring)
4370 **/ 4658 **/
4371static int igb_poll(struct napi_struct *napi, int budget) 4659static int igb_poll(struct napi_struct *napi, int budget)
4372{ 4660{
4373 struct igb_ring *rx_ring = container_of(napi, struct igb_ring, napi); 4661 struct igb_q_vector *q_vector = container_of(napi,
4374 int work_done = 0; 4662 struct igb_q_vector,
4663 napi);
4664 int tx_clean_complete = 1, work_done = 0;
4375 4665
4376#ifdef CONFIG_IGB_DCA 4666#ifdef CONFIG_IGB_DCA
4377 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) 4667 if (q_vector->adapter->flags & IGB_FLAG_DCA_ENABLED)
4378 igb_update_rx_dca(rx_ring); 4668 igb_update_dca(q_vector);
4379#endif 4669#endif
4380 igb_clean_rx_irq_adv(rx_ring, &work_done, budget); 4670 if (q_vector->tx_ring)
4671 tx_clean_complete = igb_clean_tx_irq(q_vector);
4381 4672
4382 if (rx_ring->buddy) { 4673 if (q_vector->rx_ring)
4383#ifdef CONFIG_IGB_DCA 4674 igb_clean_rx_irq_adv(q_vector, &work_done, budget);
4384 if (rx_ring->adapter->flags & IGB_FLAG_DCA_ENABLED) 4675
4385 igb_update_tx_dca(rx_ring->buddy); 4676 if (!tx_clean_complete)
4386#endif 4677 work_done = budget;
4387 if (!igb_clean_tx_irq(rx_ring->buddy))
4388 work_done = budget;
4389 }
4390 4678
4391 /* If not enough Rx work done, exit the polling mode */ 4679 /* If not enough Rx work done, exit the polling mode */
4392 if (work_done < budget) { 4680 if (work_done < budget) {
4393 napi_complete(napi); 4681 napi_complete(napi);
4394 igb_rx_irq_enable(rx_ring); 4682 igb_ring_irq_enable(q_vector);
4395 } 4683 }
4396 4684
4397 return work_done; 4685 return work_done;
4398} 4686}
4399 4687
4400/** 4688/**
4401 * igb_hwtstamp - utility function which checks for TX time stamp 4689 * igb_systim_to_hwtstamp - convert system time value to hw timestamp
4402 * @adapter: board private structure 4690 * @adapter: board private structure
4691 * @shhwtstamps: timestamp structure to update
4692 * @regval: unsigned 64bit system time value.
4693 *
4694 * We need to convert the system time value stored in the RX/TXSTMP registers
4695 * into a hwtstamp which can be used by the upper level timestamping functions
4696 */
4697static void igb_systim_to_hwtstamp(struct igb_adapter *adapter,
4698 struct skb_shared_hwtstamps *shhwtstamps,
4699 u64 regval)
4700{
4701 u64 ns;
4702
4703 ns = timecounter_cyc2time(&adapter->clock, regval);
4704 timecompare_update(&adapter->compare, ns);
4705 memset(shhwtstamps, 0, sizeof(struct skb_shared_hwtstamps));
4706 shhwtstamps->hwtstamp = ns_to_ktime(ns);
4707 shhwtstamps->syststamp = timecompare_transform(&adapter->compare, ns);
4708}
4709
4710/**
4711 * igb_tx_hwtstamp - utility function which checks for TX time stamp
4712 * @q_vector: pointer to q_vector containing needed info
4403 * @skb: packet that was just sent 4713 * @skb: packet that was just sent
4404 * 4714 *
4405 * If we were asked to do hardware stamping and such a time stamp is 4715 * If we were asked to do hardware stamping and such a time stamp is
4406 * available, then it must have been for this skb here because we only 4716 * available, then it must have been for this skb here because we only
4407 * allow only one such packet into the queue. 4717 * allow only one such packet into the queue.
4408 */ 4718 */
4409static void igb_tx_hwtstamp(struct igb_adapter *adapter, struct sk_buff *skb) 4719static void igb_tx_hwtstamp(struct igb_q_vector *q_vector, struct sk_buff *skb)
4410{ 4720{
4721 struct igb_adapter *adapter = q_vector->adapter;
4411 union skb_shared_tx *shtx = skb_tx(skb); 4722 union skb_shared_tx *shtx = skb_tx(skb);
4412 struct e1000_hw *hw = &adapter->hw; 4723 struct e1000_hw *hw = &adapter->hw;
4724 struct skb_shared_hwtstamps shhwtstamps;
4725 u64 regval;
4413 4726
4414 if (unlikely(shtx->hardware)) { 4727 /* if skb does not support hw timestamp or TX stamp not valid exit */
4415 u32 valid = rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID; 4728 if (likely(!shtx->hardware) ||
4416 if (valid) { 4729 !(rd32(E1000_TSYNCTXCTL) & E1000_TSYNCTXCTL_VALID))
4417 u64 regval = rd32(E1000_TXSTMPL); 4730 return;
4418 u64 ns; 4731
4419 struct skb_shared_hwtstamps shhwtstamps; 4732 regval = rd32(E1000_TXSTMPL);
4420 4733 regval |= (u64)rd32(E1000_TXSTMPH) << 32;
4421 memset(&shhwtstamps, 0, sizeof(shhwtstamps)); 4734
4422 regval |= (u64)rd32(E1000_TXSTMPH) << 32; 4735 igb_systim_to_hwtstamp(adapter, &shhwtstamps, regval);
4423 ns = timecounter_cyc2time(&adapter->clock, 4736 skb_tstamp_tx(skb, &shhwtstamps);
4424 regval);
4425 timecompare_update(&adapter->compare, ns);
4426 shhwtstamps.hwtstamp = ns_to_ktime(ns);
4427 shhwtstamps.syststamp =
4428 timecompare_transform(&adapter->compare, ns);
4429 skb_tstamp_tx(skb, &shhwtstamps);
4430 }
4431 }
4432} 4737}
4433 4738
4434/** 4739/**
4435 * igb_clean_tx_irq - Reclaim resources after transmit completes 4740 * igb_clean_tx_irq - Reclaim resources after transmit completes
4436 * @adapter: board private structure 4741 * @q_vector: pointer to q_vector containing needed info
4437 * returns true if ring is completely cleaned 4742 * returns true if ring is completely cleaned
4438 **/ 4743 **/
4439static bool igb_clean_tx_irq(struct igb_ring *tx_ring) 4744static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
4440{ 4745{
4441 struct igb_adapter *adapter = tx_ring->adapter; 4746 struct igb_adapter *adapter = q_vector->adapter;
4442 struct net_device *netdev = adapter->netdev; 4747 struct igb_ring *tx_ring = q_vector->tx_ring;
4748 struct net_device *netdev = tx_ring->netdev;
4443 struct e1000_hw *hw = &adapter->hw; 4749 struct e1000_hw *hw = &adapter->hw;
4444 struct igb_buffer *buffer_info; 4750 struct igb_buffer *buffer_info;
4445 struct sk_buff *skb; 4751 struct sk_buff *skb;
@@ -4470,10 +4776,10 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4470 total_packets += segs; 4776 total_packets += segs;
4471 total_bytes += bytecount; 4777 total_bytes += bytecount;
4472 4778
4473 igb_tx_hwtstamp(adapter, skb); 4779 igb_tx_hwtstamp(q_vector, skb);
4474 } 4780 }
4475 4781
4476 igb_unmap_and_free_tx_resource(adapter, buffer_info); 4782 igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
4477 tx_desc->wb.status = 0; 4783 tx_desc->wb.status = 0;
4478 4784
4479 i++; 4785 i++;
@@ -4496,7 +4802,7 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4496 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) && 4802 if (__netif_subqueue_stopped(netdev, tx_ring->queue_index) &&
4497 !(test_bit(__IGB_DOWN, &adapter->state))) { 4803 !(test_bit(__IGB_DOWN, &adapter->state))) {
4498 netif_wake_subqueue(netdev, tx_ring->queue_index); 4804 netif_wake_subqueue(netdev, tx_ring->queue_index);
4499 ++adapter->restart_queue; 4805 tx_ring->tx_stats.restart_queue++;
4500 } 4806 }
4501 } 4807 }
4502 4808
@@ -4511,7 +4817,7 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4511 E1000_STATUS_TXOFF)) { 4817 E1000_STATUS_TXOFF)) {
4512 4818
4513 /* detected Tx unit hang */ 4819 /* detected Tx unit hang */
4514 dev_err(&adapter->pdev->dev, 4820 dev_err(&tx_ring->pdev->dev,
4515 "Detected Tx Unit Hang\n" 4821 "Detected Tx Unit Hang\n"
4516 " Tx Queue <%d>\n" 4822 " Tx Queue <%d>\n"
4517 " TDH <%x>\n" 4823 " TDH <%x>\n"
@@ -4524,11 +4830,11 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4524 " jiffies <%lx>\n" 4830 " jiffies <%lx>\n"
4525 " desc.status <%x>\n", 4831 " desc.status <%x>\n",
4526 tx_ring->queue_index, 4832 tx_ring->queue_index,
4527 readl(adapter->hw.hw_addr + tx_ring->head), 4833 readl(tx_ring->head),
4528 readl(adapter->hw.hw_addr + tx_ring->tail), 4834 readl(tx_ring->tail),
4529 tx_ring->next_to_use, 4835 tx_ring->next_to_use,
4530 tx_ring->next_to_clean, 4836 tx_ring->next_to_clean,
4531 tx_ring->buffer_info[i].time_stamp, 4837 tx_ring->buffer_info[eop].time_stamp,
4532 eop, 4838 eop,
4533 jiffies, 4839 jiffies,
4534 eop_desc->wb.status); 4840 eop_desc->wb.status);
@@ -4539,43 +4845,38 @@ static bool igb_clean_tx_irq(struct igb_ring *tx_ring)
4539 tx_ring->total_packets += total_packets; 4845 tx_ring->total_packets += total_packets;
4540 tx_ring->tx_stats.bytes += total_bytes; 4846 tx_ring->tx_stats.bytes += total_bytes;
4541 tx_ring->tx_stats.packets += total_packets; 4847 tx_ring->tx_stats.packets += total_packets;
4542 adapter->net_stats.tx_bytes += total_bytes;
4543 adapter->net_stats.tx_packets += total_packets;
4544 return (count < tx_ring->count); 4848 return (count < tx_ring->count);
4545} 4849}
4546 4850
4547/** 4851/**
4548 * igb_receive_skb - helper function to handle rx indications 4852 * igb_receive_skb - helper function to handle rx indications
4549 * @ring: pointer to receive ring receving this packet 4853 * @q_vector: structure containing interrupt and ring information
4550 * @status: descriptor status field as written by hardware 4854 * @skb: packet to send up
4551 * @rx_desc: receive descriptor containing vlan and type information. 4855 * @vlan_tag: vlan tag for packet
4552 * @skb: pointer to sk_buff to be indicated to stack
4553 **/ 4856 **/
4554static void igb_receive_skb(struct igb_ring *ring, u8 status, 4857static void igb_receive_skb(struct igb_q_vector *q_vector,
4555 union e1000_adv_rx_desc * rx_desc, 4858 struct sk_buff *skb,
4556 struct sk_buff *skb) 4859 u16 vlan_tag)
4557{ 4860{
4558 struct igb_adapter * adapter = ring->adapter; 4861 struct igb_adapter *adapter = q_vector->adapter;
4559 bool vlan_extracted = (adapter->vlgrp && (status & E1000_RXD_STAT_VP)); 4862
4560 4863 if (vlan_tag)
4561 skb_record_rx_queue(skb, ring->queue_index); 4864 vlan_gro_receive(&q_vector->napi, adapter->vlgrp,
4562 if (vlan_extracted) 4865 vlan_tag, skb);
4563 vlan_gro_receive(&ring->napi, adapter->vlgrp,
4564 le16_to_cpu(rx_desc->wb.upper.vlan),
4565 skb);
4566 else 4866 else
4567 napi_gro_receive(&ring->napi, skb); 4867 napi_gro_receive(&q_vector->napi, skb);
4568} 4868}
4569 4869
4570static inline void igb_rx_checksum_adv(struct igb_adapter *adapter, 4870static inline void igb_rx_checksum_adv(struct igb_ring *ring,
4571 u32 status_err, struct sk_buff *skb) 4871 u32 status_err, struct sk_buff *skb)
4572{ 4872{
4573 skb->ip_summed = CHECKSUM_NONE; 4873 skb->ip_summed = CHECKSUM_NONE;
4574 4874
4575 /* Ignore Checksum bit is set or checksum is disabled through ethtool */ 4875 /* Ignore Checksum bit is set or checksum is disabled through ethtool */
4576 if ((status_err & E1000_RXD_STAT_IXSM) || 4876 if (!(ring->flags & IGB_RING_FLAG_RX_CSUM) ||
4577 (adapter->flags & IGB_FLAG_RX_CSUM_DISABLED)) 4877 (status_err & E1000_RXD_STAT_IXSM))
4578 return; 4878 return;
4879
4579 /* TCP/UDP checksum error bit is set */ 4880 /* TCP/UDP checksum error bit is set */
4580 if (status_err & 4881 if (status_err &
4581 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) { 4882 (E1000_RXDEXT_STATERR_TCPE | E1000_RXDEXT_STATERR_IPE)) {
@@ -4584,9 +4885,10 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
4584 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc) 4885 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
4585 * packets, (aka let the stack check the crc32c) 4886 * packets, (aka let the stack check the crc32c)
4586 */ 4887 */
4587 if (!((adapter->hw.mac.type == e1000_82576) && 4888 if ((skb->len == 60) &&
4588 (skb->len == 60))) 4889 (ring->flags & IGB_RING_FLAG_RX_SCTP_CSUM))
4589 adapter->hw_csum_err++; 4890 ring->rx_stats.csum_err++;
4891
4590 /* let the stack verify checksum errors */ 4892 /* let the stack verify checksum errors */
4591 return; 4893 return;
4592 } 4894 }
@@ -4594,11 +4896,38 @@ static inline void igb_rx_checksum_adv(struct igb_adapter *adapter,
4594 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)) 4896 if (status_err & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS))
4595 skb->ip_summed = CHECKSUM_UNNECESSARY; 4897 skb->ip_summed = CHECKSUM_UNNECESSARY;
4596 4898
4597 dev_dbg(&adapter->pdev->dev, "cksum success: bits %08X\n", status_err); 4899 dev_dbg(&ring->pdev->dev, "cksum success: bits %08X\n", status_err);
4598 adapter->hw_csum_good++;
4599} 4900}
4600 4901
4601static inline u16 igb_get_hlen(struct igb_adapter *adapter, 4902static inline void igb_rx_hwtstamp(struct igb_q_vector *q_vector, u32 staterr,
4903 struct sk_buff *skb)
4904{
4905 struct igb_adapter *adapter = q_vector->adapter;
4906 struct e1000_hw *hw = &adapter->hw;
4907 u64 regval;
4908
4909 /*
4910 * If this bit is set, then the RX registers contain the time stamp. No
4911 * other packet will be time stamped until we read these registers, so
4912 * read the registers to make them available again. Because only one
4913 * packet can be time stamped at a time, we know that the register
4914 * values must belong to this one here and therefore we don't need to
4915 * compare any of the additional attributes stored for it.
4916 *
4917 * If nothing went wrong, then it should have a skb_shared_tx that we
4918 * can turn into a skb_shared_hwtstamps.
4919 */
4920 if (likely(!(staterr & E1000_RXDADV_STAT_TS)))
4921 return;
4922 if (!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID))
4923 return;
4924
4925 regval = rd32(E1000_RXSTMPL);
4926 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
4927
4928 igb_systim_to_hwtstamp(adapter, skb_hwtstamps(skb), regval);
4929}
4930static inline u16 igb_get_hlen(struct igb_ring *rx_ring,
4602 union e1000_adv_rx_desc *rx_desc) 4931 union e1000_adv_rx_desc *rx_desc)
4603{ 4932{
4604 /* HW will not DMA in data larger than the given buffer, even if it 4933 /* HW will not DMA in data larger than the given buffer, even if it
@@ -4607,27 +4936,28 @@ static inline u16 igb_get_hlen(struct igb_adapter *adapter,
4607 */ 4936 */
4608 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) & 4937 u16 hlen = (le16_to_cpu(rx_desc->wb.lower.lo_dword.hdr_info) &
4609 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT; 4938 E1000_RXDADV_HDRBUFLEN_MASK) >> E1000_RXDADV_HDRBUFLEN_SHIFT;
4610 if (hlen > adapter->rx_ps_hdr_size) 4939 if (hlen > rx_ring->rx_buffer_len)
4611 hlen = adapter->rx_ps_hdr_size; 4940 hlen = rx_ring->rx_buffer_len;
4612 return hlen; 4941 return hlen;
4613} 4942}
4614 4943
4615static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring, 4944static bool igb_clean_rx_irq_adv(struct igb_q_vector *q_vector,
4616 int *work_done, int budget) 4945 int *work_done, int budget)
4617{ 4946{
4618 struct igb_adapter *adapter = rx_ring->adapter; 4947 struct igb_ring *rx_ring = q_vector->rx_ring;
4619 struct net_device *netdev = adapter->netdev; 4948 struct net_device *netdev = rx_ring->netdev;
4620 struct e1000_hw *hw = &adapter->hw; 4949 struct pci_dev *pdev = rx_ring->pdev;
4621 struct pci_dev *pdev = adapter->pdev;
4622 union e1000_adv_rx_desc *rx_desc , *next_rxd; 4950 union e1000_adv_rx_desc *rx_desc , *next_rxd;
4623 struct igb_buffer *buffer_info , *next_buffer; 4951 struct igb_buffer *buffer_info , *next_buffer;
4624 struct sk_buff *skb; 4952 struct sk_buff *skb;
4625 bool cleaned = false; 4953 bool cleaned = false;
4626 int cleaned_count = 0; 4954 int cleaned_count = 0;
4955 int current_node = numa_node_id();
4627 unsigned int total_bytes = 0, total_packets = 0; 4956 unsigned int total_bytes = 0, total_packets = 0;
4628 unsigned int i; 4957 unsigned int i;
4629 u32 staterr; 4958 u32 staterr;
4630 u16 length; 4959 u16 length;
4960 u16 vlan_tag;
4631 4961
4632 i = rx_ring->next_to_clean; 4962 i = rx_ring->next_to_clean;
4633 buffer_info = &rx_ring->buffer_info[i]; 4963 buffer_info = &rx_ring->buffer_info[i];
@@ -4646,6 +4976,7 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4646 i++; 4976 i++;
4647 if (i == rx_ring->count) 4977 if (i == rx_ring->count)
4648 i = 0; 4978 i = 0;
4979
4649 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i); 4980 next_rxd = E1000_RX_DESC_ADV(*rx_ring, i);
4650 prefetch(next_rxd); 4981 prefetch(next_rxd);
4651 next_buffer = &rx_ring->buffer_info[i]; 4982 next_buffer = &rx_ring->buffer_info[i];
@@ -4654,23 +4985,16 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4654 cleaned = true; 4985 cleaned = true;
4655 cleaned_count++; 4986 cleaned_count++;
4656 4987
4657 /* this is the fast path for the non-packet split case */
4658 if (!adapter->rx_ps_hdr_size) {
4659 pci_unmap_single(pdev, buffer_info->dma,
4660 adapter->rx_buffer_len,
4661 PCI_DMA_FROMDEVICE);
4662 buffer_info->dma = 0;
4663 skb_put(skb, length);
4664 goto send_up;
4665 }
4666
4667 if (buffer_info->dma) { 4988 if (buffer_info->dma) {
4668 u16 hlen = igb_get_hlen(adapter, rx_desc);
4669 pci_unmap_single(pdev, buffer_info->dma, 4989 pci_unmap_single(pdev, buffer_info->dma,
4670 adapter->rx_ps_hdr_size, 4990 rx_ring->rx_buffer_len,
4671 PCI_DMA_FROMDEVICE); 4991 PCI_DMA_FROMDEVICE);
4672 buffer_info->dma = 0; 4992 buffer_info->dma = 0;
4673 skb_put(skb, hlen); 4993 if (rx_ring->rx_buffer_len >= IGB_RXBUFFER_1024) {
4994 skb_put(skb, length);
4995 goto send_up;
4996 }
4997 skb_put(skb, igb_get_hlen(rx_ring, rx_desc));
4674 } 4998 }
4675 4999
4676 if (length) { 5000 if (length) {
@@ -4683,15 +5007,14 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4683 buffer_info->page_offset, 5007 buffer_info->page_offset,
4684 length); 5008 length);
4685 5009
4686 if ((adapter->rx_buffer_len > (PAGE_SIZE / 2)) || 5010 if ((page_count(buffer_info->page) != 1) ||
4687 (page_count(buffer_info->page) != 1)) 5011 (page_to_nid(buffer_info->page) != current_node))
4688 buffer_info->page = NULL; 5012 buffer_info->page = NULL;
4689 else 5013 else
4690 get_page(buffer_info->page); 5014 get_page(buffer_info->page);
4691 5015
4692 skb->len += length; 5016 skb->len += length;
4693 skb->data_len += length; 5017 skb->data_len += length;
4694
4695 skb->truesize += length; 5018 skb->truesize += length;
4696 } 5019 }
4697 5020
@@ -4703,60 +5026,24 @@ static bool igb_clean_rx_irq_adv(struct igb_ring *rx_ring,
4703 goto next_desc; 5026 goto next_desc;
4704 } 5027 }
4705send_up: 5028send_up:
4706 /*
4707 * If this bit is set, then the RX registers contain
4708 * the time stamp. No other packet will be time
4709 * stamped until we read these registers, so read the
4710 * registers to make them available again. Because
4711 * only one packet can be time stamped at a time, we
4712 * know that the register values must belong to this
4713 * one here and therefore we don't need to compare
4714 * any of the additional attributes stored for it.
4715 *
4716 * If nothing went wrong, then it should have a
4717 * skb_shared_tx that we can turn into a
4718 * skb_shared_hwtstamps.
4719 *
4720 * TODO: can time stamping be triggered (thus locking
4721 * the registers) without the packet reaching this point
4722 * here? In that case RX time stamping would get stuck.
4723 *
4724 * TODO: in "time stamp all packets" mode this bit is
4725 * not set. Need a global flag for this mode and then
4726 * always read the registers. Cannot be done without
4727 * a race condition.
4728 */
4729 if (unlikely(staterr & E1000_RXD_STAT_TS)) {
4730 u64 regval;
4731 u64 ns;
4732 struct skb_shared_hwtstamps *shhwtstamps =
4733 skb_hwtstamps(skb);
4734
4735 WARN(!(rd32(E1000_TSYNCRXCTL) & E1000_TSYNCRXCTL_VALID),
4736 "igb: no RX time stamp available for time stamped packet");
4737 regval = rd32(E1000_RXSTMPL);
4738 regval |= (u64)rd32(E1000_RXSTMPH) << 32;
4739 ns = timecounter_cyc2time(&adapter->clock, regval);
4740 timecompare_update(&adapter->compare, ns);
4741 memset(shhwtstamps, 0, sizeof(*shhwtstamps));
4742 shhwtstamps->hwtstamp = ns_to_ktime(ns);
4743 shhwtstamps->syststamp =
4744 timecompare_transform(&adapter->compare, ns);
4745 }
4746
4747 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) { 5029 if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
4748 dev_kfree_skb_irq(skb); 5030 dev_kfree_skb_irq(skb);
4749 goto next_desc; 5031 goto next_desc;
4750 } 5032 }
4751 5033
5034 igb_rx_hwtstamp(q_vector, staterr, skb);
4752 total_bytes += skb->len; 5035 total_bytes += skb->len;
4753 total_packets++; 5036 total_packets++;
4754 5037
4755 igb_rx_checksum_adv(adapter, staterr, skb); 5038 igb_rx_checksum_adv(rx_ring, staterr, skb);
4756 5039
4757 skb->protocol = eth_type_trans(skb, netdev); 5040 skb->protocol = eth_type_trans(skb, netdev);
5041 skb_record_rx_queue(skb, rx_ring->queue_index);
4758 5042
4759 igb_receive_skb(rx_ring, staterr, rx_desc, skb); 5043 vlan_tag = ((staterr & E1000_RXD_STAT_VP) ?
5044 le16_to_cpu(rx_desc->wb.upper.vlan) : 0);
5045
5046 igb_receive_skb(q_vector, skb, vlan_tag);
4760 5047
4761next_desc: 5048next_desc:
4762 rx_desc->wb.upper.status_error = 0; 5049 rx_desc->wb.upper.status_error = 0;
@@ -4783,8 +5070,6 @@ next_desc:
4783 rx_ring->total_bytes += total_bytes; 5070 rx_ring->total_bytes += total_bytes;
4784 rx_ring->rx_stats.packets += total_packets; 5071 rx_ring->rx_stats.packets += total_packets;
4785 rx_ring->rx_stats.bytes += total_bytes; 5072 rx_ring->rx_stats.bytes += total_bytes;
4786 adapter->net_stats.rx_bytes += total_bytes;
4787 adapter->net_stats.rx_packets += total_packets;
4788 return cleaned; 5073 return cleaned;
4789} 5074}
4790 5075
@@ -4792,12 +5077,9 @@ next_desc:
4792 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split 5077 * igb_alloc_rx_buffers_adv - Replace used receive buffers; packet split
4793 * @adapter: address of board private structure 5078 * @adapter: address of board private structure
4794 **/ 5079 **/
4795static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, 5080void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring, int cleaned_count)
4796 int cleaned_count)
4797{ 5081{
4798 struct igb_adapter *adapter = rx_ring->adapter; 5082 struct net_device *netdev = rx_ring->netdev;
4799 struct net_device *netdev = adapter->netdev;
4800 struct pci_dev *pdev = adapter->pdev;
4801 union e1000_adv_rx_desc *rx_desc; 5083 union e1000_adv_rx_desc *rx_desc;
4802 struct igb_buffer *buffer_info; 5084 struct igb_buffer *buffer_info;
4803 struct sk_buff *skb; 5085 struct sk_buff *skb;
@@ -4807,19 +5089,16 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
4807 i = rx_ring->next_to_use; 5089 i = rx_ring->next_to_use;
4808 buffer_info = &rx_ring->buffer_info[i]; 5090 buffer_info = &rx_ring->buffer_info[i];
4809 5091
4810 if (adapter->rx_ps_hdr_size) 5092 bufsz = rx_ring->rx_buffer_len;
4811 bufsz = adapter->rx_ps_hdr_size;
4812 else
4813 bufsz = adapter->rx_buffer_len;
4814 5093
4815 while (cleaned_count--) { 5094 while (cleaned_count--) {
4816 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i); 5095 rx_desc = E1000_RX_DESC_ADV(*rx_ring, i);
4817 5096
4818 if (adapter->rx_ps_hdr_size && !buffer_info->page_dma) { 5097 if ((bufsz < IGB_RXBUFFER_1024) && !buffer_info->page_dma) {
4819 if (!buffer_info->page) { 5098 if (!buffer_info->page) {
4820 buffer_info->page = alloc_page(GFP_ATOMIC); 5099 buffer_info->page = netdev_alloc_page(netdev);
4821 if (!buffer_info->page) { 5100 if (!buffer_info->page) {
4822 adapter->alloc_rx_buff_failed++; 5101 rx_ring->rx_stats.alloc_failed++;
4823 goto no_buffers; 5102 goto no_buffers;
4824 } 5103 }
4825 buffer_info->page_offset = 0; 5104 buffer_info->page_offset = 0;
@@ -4827,39 +5106,48 @@ static void igb_alloc_rx_buffers_adv(struct igb_ring *rx_ring,
4827 buffer_info->page_offset ^= PAGE_SIZE / 2; 5106 buffer_info->page_offset ^= PAGE_SIZE / 2;
4828 } 5107 }
4829 buffer_info->page_dma = 5108 buffer_info->page_dma =
4830 pci_map_page(pdev, buffer_info->page, 5109 pci_map_page(rx_ring->pdev, buffer_info->page,
4831 buffer_info->page_offset, 5110 buffer_info->page_offset,
4832 PAGE_SIZE / 2, 5111 PAGE_SIZE / 2,
4833 PCI_DMA_FROMDEVICE); 5112 PCI_DMA_FROMDEVICE);
5113 if (pci_dma_mapping_error(rx_ring->pdev,
5114 buffer_info->page_dma)) {
5115 buffer_info->page_dma = 0;
5116 rx_ring->rx_stats.alloc_failed++;
5117 goto no_buffers;
5118 }
4834 } 5119 }
4835 5120
4836 if (!buffer_info->skb) { 5121 skb = buffer_info->skb;
4837 skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN); 5122 if (!skb) {
5123 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
4838 if (!skb) { 5124 if (!skb) {
4839 adapter->alloc_rx_buff_failed++; 5125 rx_ring->rx_stats.alloc_failed++;
4840 goto no_buffers; 5126 goto no_buffers;
4841 } 5127 }
4842 5128
4843 /* Make buffer alignment 2 beyond a 16 byte boundary
4844 * this will result in a 16 byte aligned IP header after
4845 * the 14 byte MAC header is removed
4846 */
4847 skb_reserve(skb, NET_IP_ALIGN);
4848
4849 buffer_info->skb = skb; 5129 buffer_info->skb = skb;
4850 buffer_info->dma = pci_map_single(pdev, skb->data, 5130 }
5131 if (!buffer_info->dma) {
5132 buffer_info->dma = pci_map_single(rx_ring->pdev,
5133 skb->data,
4851 bufsz, 5134 bufsz,
4852 PCI_DMA_FROMDEVICE); 5135 PCI_DMA_FROMDEVICE);
5136 if (pci_dma_mapping_error(rx_ring->pdev,
5137 buffer_info->dma)) {
5138 buffer_info->dma = 0;
5139 rx_ring->rx_stats.alloc_failed++;
5140 goto no_buffers;
5141 }
4853 } 5142 }
4854 /* Refresh the desc even if buffer_addrs didn't change because 5143 /* Refresh the desc even if buffer_addrs didn't change because
4855 * each write-back erases this info. */ 5144 * each write-back erases this info. */
4856 if (adapter->rx_ps_hdr_size) { 5145 if (bufsz < IGB_RXBUFFER_1024) {
4857 rx_desc->read.pkt_addr = 5146 rx_desc->read.pkt_addr =
4858 cpu_to_le64(buffer_info->page_dma); 5147 cpu_to_le64(buffer_info->page_dma);
4859 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma); 5148 rx_desc->read.hdr_addr = cpu_to_le64(buffer_info->dma);
4860 } else { 5149 } else {
4861 rx_desc->read.pkt_addr = 5150 rx_desc->read.pkt_addr = cpu_to_le64(buffer_info->dma);
4862 cpu_to_le64(buffer_info->dma);
4863 rx_desc->read.hdr_addr = 0; 5151 rx_desc->read.hdr_addr = 0;
4864 } 5152 }
4865 5153
@@ -4882,7 +5170,7 @@ no_buffers:
4882 * applicable for weak-ordered memory model archs, 5170 * applicable for weak-ordered memory model archs,
4883 * such as IA-64). */ 5171 * such as IA-64). */
4884 wmb(); 5172 wmb();
4885 writel(i, adapter->hw.hw_addr + rx_ring->tail); 5173 writel(i, rx_ring->tail);
4886 } 5174 }
4887} 5175}
4888 5176
@@ -4941,13 +5229,11 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
4941 struct igb_adapter *adapter = netdev_priv(netdev); 5229 struct igb_adapter *adapter = netdev_priv(netdev);
4942 struct e1000_hw *hw = &adapter->hw; 5230 struct e1000_hw *hw = &adapter->hw;
4943 struct hwtstamp_config config; 5231 struct hwtstamp_config config;
4944 u32 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED; 5232 u32 tsync_tx_ctl = E1000_TSYNCTXCTL_ENABLED;
4945 u32 tsync_rx_ctl_bit = E1000_TSYNCRXCTL_ENABLED; 5233 u32 tsync_rx_ctl = E1000_TSYNCRXCTL_ENABLED;
4946 u32 tsync_rx_ctl_type = 0;
4947 u32 tsync_rx_cfg = 0; 5234 u32 tsync_rx_cfg = 0;
4948 int is_l4 = 0; 5235 bool is_l4 = false;
4949 int is_l2 = 0; 5236 bool is_l2 = false;
4950 short port = 319; /* PTP */
4951 u32 regval; 5237 u32 regval;
4952 5238
4953 if (copy_from_user(&config, ifr->ifr_data, sizeof(config))) 5239 if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
@@ -4959,10 +5245,8 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
4959 5245
4960 switch (config.tx_type) { 5246 switch (config.tx_type) {
4961 case HWTSTAMP_TX_OFF: 5247 case HWTSTAMP_TX_OFF:
4962 tsync_tx_ctl_bit = 0; 5248 tsync_tx_ctl = 0;
4963 break;
4964 case HWTSTAMP_TX_ON: 5249 case HWTSTAMP_TX_ON:
4965 tsync_tx_ctl_bit = E1000_TSYNCTXCTL_ENABLED;
4966 break; 5250 break;
4967 default: 5251 default:
4968 return -ERANGE; 5252 return -ERANGE;
@@ -4970,7 +5254,7 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
4970 5254
4971 switch (config.rx_filter) { 5255 switch (config.rx_filter) {
4972 case HWTSTAMP_FILTER_NONE: 5256 case HWTSTAMP_FILTER_NONE:
4973 tsync_rx_ctl_bit = 0; 5257 tsync_rx_ctl = 0;
4974 break; 5258 break;
4975 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT: 5259 case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
4976 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT: 5260 case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
@@ -4981,86 +5265,97 @@ static int igb_hwtstamp_ioctl(struct net_device *netdev,
4981 * possible to time stamp both Sync and Delay_Req messages 5265 * possible to time stamp both Sync and Delay_Req messages
4982 * => fall back to time stamping all packets 5266 * => fall back to time stamping all packets
4983 */ 5267 */
4984 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_ALL; 5268 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_ALL;
4985 config.rx_filter = HWTSTAMP_FILTER_ALL; 5269 config.rx_filter = HWTSTAMP_FILTER_ALL;
4986 break; 5270 break;
4987 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC: 5271 case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
4988 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1; 5272 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
4989 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE; 5273 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE;
4990 is_l4 = 1; 5274 is_l4 = true;
4991 break; 5275 break;
4992 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ: 5276 case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
4993 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L4_V1; 5277 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L4_V1;
4994 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE; 5278 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE;
4995 is_l4 = 1; 5279 is_l4 = true;
4996 break; 5280 break;
4997 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC: 5281 case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
4998 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC: 5282 case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
4999 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2; 5283 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
5000 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE; 5284 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE;
5001 is_l2 = 1; 5285 is_l2 = true;
5002 is_l4 = 1; 5286 is_l4 = true;
5003 config.rx_filter = HWTSTAMP_FILTER_SOME; 5287 config.rx_filter = HWTSTAMP_FILTER_SOME;
5004 break; 5288 break;
5005 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ: 5289 case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
5006 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ: 5290 case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
5007 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_L2_L4_V2; 5291 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_L2_L4_V2;
5008 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE; 5292 tsync_rx_cfg = E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE;
5009 is_l2 = 1; 5293 is_l2 = true;
5010 is_l4 = 1; 5294 is_l4 = true;
5011 config.rx_filter = HWTSTAMP_FILTER_SOME; 5295 config.rx_filter = HWTSTAMP_FILTER_SOME;
5012 break; 5296 break;
5013 case HWTSTAMP_FILTER_PTP_V2_EVENT: 5297 case HWTSTAMP_FILTER_PTP_V2_EVENT:
5014 case HWTSTAMP_FILTER_PTP_V2_SYNC: 5298 case HWTSTAMP_FILTER_PTP_V2_SYNC:
5015 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ: 5299 case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
5016 tsync_rx_ctl_type = E1000_TSYNCRXCTL_TYPE_EVENT_V2; 5300 tsync_rx_ctl |= E1000_TSYNCRXCTL_TYPE_EVENT_V2;
5017 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT; 5301 config.rx_filter = HWTSTAMP_FILTER_PTP_V2_EVENT;
5018 is_l2 = 1; 5302 is_l2 = true;
5019 break; 5303 break;
5020 default: 5304 default:
5021 return -ERANGE; 5305 return -ERANGE;
5022 } 5306 }
5023 5307
5308 if (hw->mac.type == e1000_82575) {
5309 if (tsync_rx_ctl | tsync_tx_ctl)
5310 return -EINVAL;
5311 return 0;
5312 }
5313
5024 /* enable/disable TX */ 5314 /* enable/disable TX */
5025 regval = rd32(E1000_TSYNCTXCTL); 5315 regval = rd32(E1000_TSYNCTXCTL);
5026 regval = (regval & ~E1000_TSYNCTXCTL_ENABLED) | tsync_tx_ctl_bit; 5316 regval &= ~E1000_TSYNCTXCTL_ENABLED;
5317 regval |= tsync_tx_ctl;
5027 wr32(E1000_TSYNCTXCTL, regval); 5318 wr32(E1000_TSYNCTXCTL, regval);
5028 5319
5029 /* enable/disable RX, define which PTP packets are time stamped */ 5320 /* enable/disable RX */
5030 regval = rd32(E1000_TSYNCRXCTL); 5321 regval = rd32(E1000_TSYNCRXCTL);
5031 regval = (regval & ~E1000_TSYNCRXCTL_ENABLED) | tsync_rx_ctl_bit; 5322 regval &= ~(E1000_TSYNCRXCTL_ENABLED | E1000_TSYNCRXCTL_TYPE_MASK);
5032 regval = (regval & ~0xE) | tsync_rx_ctl_type; 5323 regval |= tsync_rx_ctl;
5033 wr32(E1000_TSYNCRXCTL, regval); 5324 wr32(E1000_TSYNCRXCTL, regval);
5034 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
5035 5325
5036 /* 5326 /* define which PTP packets are time stamped */
5037 * Ethertype Filter Queue Filter[0][15:0] = 0x88F7 5327 wr32(E1000_TSYNCRXCFG, tsync_rx_cfg);
5038 * (Ethertype to filter on)
5039 * Ethertype Filter Queue Filter[0][26] = 0x1 (Enable filter)
5040 * Ethertype Filter Queue Filter[0][30] = 0x1 (Enable Timestamping)
5041 */
5042 wr32(E1000_ETQF0, is_l2 ? 0x440088f7 : 0);
5043
5044 /* L4 Queue Filter[0]: only filter by source and destination port */
5045 wr32(E1000_SPQF0, htons(port));
5046 wr32(E1000_IMIREXT(0), is_l4 ?
5047 ((1<<12) | (1<<19) /* bypass size and control flags */) : 0);
5048 wr32(E1000_IMIR(0), is_l4 ?
5049 (htons(port)
5050 | (0<<16) /* immediate interrupt disabled */
5051 | 0 /* (1<<17) bit cleared: do not bypass
5052 destination port check */)
5053 : 0);
5054 wr32(E1000_FTQF0, is_l4 ?
5055 (0x11 /* UDP */
5056 | (1<<15) /* VF not compared */
5057 | (1<<27) /* Enable Timestamping */
5058 | (7<<28) /* only source port filter enabled,
5059 source/target address and protocol
5060 masked */)
5061 : ((1<<15) | (15<<28) /* all mask bits set = filter not
5062 enabled */));
5063 5328
5329 /* define ethertype filter for timestamped packets */
5330 if (is_l2)
5331 wr32(E1000_ETQF(3),
5332 (E1000_ETQF_FILTER_ENABLE | /* enable filter */
5333 E1000_ETQF_1588 | /* enable timestamping */
5334 ETH_P_1588)); /* 1588 eth protocol type */
5335 else
5336 wr32(E1000_ETQF(3), 0);
5337
5338#define PTP_PORT 319
5339 /* L4 Queue Filter[3]: filter by destination port and protocol */
5340 if (is_l4) {
5341 u32 ftqf = (IPPROTO_UDP /* UDP */
5342 | E1000_FTQF_VF_BP /* VF not compared */
5343 | E1000_FTQF_1588_TIME_STAMP /* Enable Timestamping */
5344 | E1000_FTQF_MASK); /* mask all inputs */
5345 ftqf &= ~E1000_FTQF_MASK_PROTO_BP; /* enable protocol check */
5346
5347 wr32(E1000_IMIR(3), htons(PTP_PORT));
5348 wr32(E1000_IMIREXT(3),
5349 (E1000_IMIREXT_SIZE_BP | E1000_IMIREXT_CTRL_BP));
5350 if (hw->mac.type == e1000_82576) {
5351 /* enable source port check */
5352 wr32(E1000_SPQF(3), htons(PTP_PORT));
5353 ftqf &= ~E1000_FTQF_MASK_SOURCE_PORT_BP;
5354 }
5355 wr32(E1000_FTQF(3), ftqf);
5356 } else {
5357 wr32(E1000_FTQF(3), E1000_FTQF_MASK);
5358 }
5064 wrfl(); 5359 wrfl();
5065 5360
5066 adapter->hwtstamp_config = config; 5361 adapter->hwtstamp_config = config;
@@ -5137,21 +5432,15 @@ static void igb_vlan_rx_register(struct net_device *netdev,
5137 ctrl |= E1000_CTRL_VME; 5432 ctrl |= E1000_CTRL_VME;
5138 wr32(E1000_CTRL, ctrl); 5433 wr32(E1000_CTRL, ctrl);
5139 5434
5140 /* enable VLAN receive filtering */ 5435 /* Disable CFI check */
5141 rctl = rd32(E1000_RCTL); 5436 rctl = rd32(E1000_RCTL);
5142 rctl &= ~E1000_RCTL_CFIEN; 5437 rctl &= ~E1000_RCTL_CFIEN;
5143 wr32(E1000_RCTL, rctl); 5438 wr32(E1000_RCTL, rctl);
5144 igb_update_mng_vlan(adapter);
5145 } else { 5439 } else {
5146 /* disable VLAN tag insert/strip */ 5440 /* disable VLAN tag insert/strip */
5147 ctrl = rd32(E1000_CTRL); 5441 ctrl = rd32(E1000_CTRL);
5148 ctrl &= ~E1000_CTRL_VME; 5442 ctrl &= ~E1000_CTRL_VME;
5149 wr32(E1000_CTRL, ctrl); 5443 wr32(E1000_CTRL, ctrl);
5150
5151 if (adapter->mng_vlan_id != (u16)IGB_MNG_VLAN_NONE) {
5152 igb_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
5153 adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
5154 }
5155 } 5444 }
5156 5445
5157 igb_rlpml_set(adapter); 5446 igb_rlpml_set(adapter);
@@ -5166,16 +5455,11 @@ static void igb_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
5166 struct e1000_hw *hw = &adapter->hw; 5455 struct e1000_hw *hw = &adapter->hw;
5167 int pf_id = adapter->vfs_allocated_count; 5456 int pf_id = adapter->vfs_allocated_count;
5168 5457
5169 if ((hw->mng_cookie.status & 5458 /* attempt to add filter to vlvf array */
5170 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 5459 igb_vlvf_set(adapter, vid, true, pf_id);
5171 (vid == adapter->mng_vlan_id))
5172 return;
5173
5174 /* add vid to vlvf if sr-iov is enabled,
5175 * if that fails add directly to filter table */
5176 if (igb_vlvf_set(adapter, vid, true, pf_id))
5177 igb_vfta_set(hw, vid, true);
5178 5460
5461 /* add the filter since PF can receive vlans w/o entry in vlvf */
5462 igb_vfta_set(hw, vid, true);
5179} 5463}
5180 5464
5181static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid) 5465static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
@@ -5183,6 +5467,7 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
5183 struct igb_adapter *adapter = netdev_priv(netdev); 5467 struct igb_adapter *adapter = netdev_priv(netdev);
5184 struct e1000_hw *hw = &adapter->hw; 5468 struct e1000_hw *hw = &adapter->hw;
5185 int pf_id = adapter->vfs_allocated_count; 5469 int pf_id = adapter->vfs_allocated_count;
5470 s32 err;
5186 5471
5187 igb_irq_disable(adapter); 5472 igb_irq_disable(adapter);
5188 vlan_group_set_device(adapter->vlgrp, vid, NULL); 5473 vlan_group_set_device(adapter->vlgrp, vid, NULL);
@@ -5190,17 +5475,11 @@ static void igb_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
5190 if (!test_bit(__IGB_DOWN, &adapter->state)) 5475 if (!test_bit(__IGB_DOWN, &adapter->state))
5191 igb_irq_enable(adapter); 5476 igb_irq_enable(adapter);
5192 5477
5193 if ((adapter->hw.mng_cookie.status & 5478 /* remove vlan from VLVF table array */
5194 E1000_MNG_DHCP_COOKIE_STATUS_VLAN) && 5479 err = igb_vlvf_set(adapter, vid, false, pf_id);
5195 (vid == adapter->mng_vlan_id)) {
5196 /* release control to f/w */
5197 igb_release_hw_control(adapter);
5198 return;
5199 }
5200 5480
5201 /* remove vid from vlvf if sr-iov is enabled, 5481 /* if vid was not present in VLVF just remove it from table */
5202 * if not in vlvf remove from vfta */ 5482 if (err)
5203 if (igb_vlvf_set(adapter, vid, false, pf_id))
5204 igb_vfta_set(hw, vid, false); 5483 igb_vfta_set(hw, vid, false);
5205} 5484}
5206 5485
@@ -5220,6 +5499,7 @@ static void igb_restore_vlan(struct igb_adapter *adapter)
5220 5499
5221int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx) 5500int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
5222{ 5501{
5502 struct pci_dev *pdev = adapter->pdev;
5223 struct e1000_mac_info *mac = &adapter->hw.mac; 5503 struct e1000_mac_info *mac = &adapter->hw.mac;
5224 5504
5225 mac->autoneg = 0; 5505 mac->autoneg = 0;
@@ -5243,8 +5523,7 @@ int igb_set_spd_dplx(struct igb_adapter *adapter, u16 spddplx)
5243 break; 5523 break;
5244 case SPEED_1000 + DUPLEX_HALF: /* not supported */ 5524 case SPEED_1000 + DUPLEX_HALF: /* not supported */
5245 default: 5525 default:
5246 dev_err(&adapter->pdev->dev, 5526 dev_err(&pdev->dev, "Unsupported Speed/Duplex configuration\n");
5247 "Unsupported Speed/Duplex configuration\n");
5248 return -EINVAL; 5527 return -EINVAL;
5249 } 5528 }
5250 return 0; 5529 return 0;
@@ -5266,9 +5545,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
5266 if (netif_running(netdev)) 5545 if (netif_running(netdev))
5267 igb_close(netdev); 5546 igb_close(netdev);
5268 5547
5269 igb_reset_interrupt_capability(adapter); 5548 igb_clear_interrupt_scheme(adapter);
5270
5271 igb_free_queues(adapter);
5272 5549
5273#ifdef CONFIG_PM 5550#ifdef CONFIG_PM
5274 retval = pci_save_state(pdev); 5551 retval = pci_save_state(pdev);
@@ -5300,7 +5577,7 @@ static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake)
5300 wr32(E1000_CTRL, ctrl); 5577 wr32(E1000_CTRL, ctrl);
5301 5578
5302 /* Allow time for pending master requests to run */ 5579 /* Allow time for pending master requests to run */
5303 igb_disable_pcie_master(&adapter->hw); 5580 igb_disable_pcie_master(hw);
5304 5581
5305 wr32(E1000_WUC, E1000_WUC_PME_EN); 5582 wr32(E1000_WUC, E1000_WUC_PME_EN);
5306 wr32(E1000_WUFC, wufc); 5583 wr32(E1000_WUFC, wufc);
@@ -5363,9 +5640,7 @@ static int igb_resume(struct pci_dev *pdev)
5363 pci_enable_wake(pdev, PCI_D3hot, 0); 5640 pci_enable_wake(pdev, PCI_D3hot, 0);
5364 pci_enable_wake(pdev, PCI_D3cold, 0); 5641 pci_enable_wake(pdev, PCI_D3cold, 0);
5365 5642
5366 igb_set_interrupt_capability(adapter); 5643 if (igb_init_interrupt_scheme(adapter)) {
5367
5368 if (igb_alloc_queues(adapter)) {
5369 dev_err(&pdev->dev, "Unable to allocate memory for queues\n"); 5644 dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
5370 return -ENOMEM; 5645 return -ENOMEM;
5371 } 5646 }
@@ -5417,22 +5692,16 @@ static void igb_netpoll(struct net_device *netdev)
5417 int i; 5692 int i;
5418 5693
5419 if (!adapter->msix_entries) { 5694 if (!adapter->msix_entries) {
5695 struct igb_q_vector *q_vector = adapter->q_vector[0];
5420 igb_irq_disable(adapter); 5696 igb_irq_disable(adapter);
5421 napi_schedule(&adapter->rx_ring[0].napi); 5697 napi_schedule(&q_vector->napi);
5422 return; 5698 return;
5423 } 5699 }
5424 5700
5425 for (i = 0; i < adapter->num_tx_queues; i++) { 5701 for (i = 0; i < adapter->num_q_vectors; i++) {
5426 struct igb_ring *tx_ring = &adapter->tx_ring[i]; 5702 struct igb_q_vector *q_vector = adapter->q_vector[i];
5427 wr32(E1000_EIMC, tx_ring->eims_value); 5703 wr32(E1000_EIMC, q_vector->eims_value);
5428 igb_clean_tx_irq(tx_ring); 5704 napi_schedule(&q_vector->napi);
5429 wr32(E1000_EIMS, tx_ring->eims_value);
5430 }
5431
5432 for (i = 0; i < adapter->num_rx_queues; i++) {
5433 struct igb_ring *rx_ring = &adapter->rx_ring[i];
5434 wr32(E1000_EIMC, rx_ring->eims_value);
5435 napi_schedule(&rx_ring->napi);
5436 } 5705 }
5437} 5706}
5438#endif /* CONFIG_NET_POLL_CONTROLLER */ 5707#endif /* CONFIG_NET_POLL_CONTROLLER */
@@ -5532,6 +5801,33 @@ static void igb_io_resume(struct pci_dev *pdev)
5532 igb_get_hw_control(adapter); 5801 igb_get_hw_control(adapter);
5533} 5802}
5534 5803
5804static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
5805 u8 qsel)
5806{
5807 u32 rar_low, rar_high;
5808 struct e1000_hw *hw = &adapter->hw;
5809
5810 /* HW expects these in little endian so we reverse the byte order
5811 * from network order (big endian) to little endian
5812 */
5813 rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
5814 ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
5815 rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
5816
5817 /* Indicate to hardware the Address is Valid. */
5818 rar_high |= E1000_RAH_AV;
5819
5820 if (hw->mac.type == e1000_82575)
5821 rar_high |= E1000_RAH_POOL_1 * qsel;
5822 else
5823 rar_high |= E1000_RAH_POOL_1 << qsel;
5824
5825 wr32(E1000_RAL(index), rar_low);
5826 wrfl();
5827 wr32(E1000_RAH(index), rar_high);
5828 wrfl();
5829}
5830
5535static int igb_set_vf_mac(struct igb_adapter *adapter, 5831static int igb_set_vf_mac(struct igb_adapter *adapter,
5536 int vf, unsigned char *mac_addr) 5832 int vf, unsigned char *mac_addr)
5537{ 5833{
@@ -5542,8 +5838,7 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
5542 5838
5543 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN); 5839 memcpy(adapter->vf_data[vf].vf_mac_addresses, mac_addr, ETH_ALEN);
5544 5840
5545 igb_rar_set(hw, mac_addr, rar_entry); 5841 igb_rar_set_qsel(adapter, mac_addr, rar_entry, vf);
5546 igb_set_rah_pool(hw, vf, rar_entry);
5547 5842
5548 return 0; 5843 return 0;
5549} 5844}
@@ -5551,19 +5846,29 @@ static int igb_set_vf_mac(struct igb_adapter *adapter,
5551static void igb_vmm_control(struct igb_adapter *adapter) 5846static void igb_vmm_control(struct igb_adapter *adapter)
5552{ 5847{
5553 struct e1000_hw *hw = &adapter->hw; 5848 struct e1000_hw *hw = &adapter->hw;
5554 u32 reg_data; 5849 u32 reg;
5555 5850
5556 if (!adapter->vfs_allocated_count) 5851 /* replication is not supported for 82575 */
5852 if (hw->mac.type == e1000_82575)
5557 return; 5853 return;
5558 5854
5559 /* VF's need PF reset indication before they 5855 /* enable replication vlan tag stripping */
5560 * can send/receive mail */ 5856 reg = rd32(E1000_RPLOLR);
5561 reg_data = rd32(E1000_CTRL_EXT); 5857 reg |= E1000_RPLOLR_STRVLAN;
5562 reg_data |= E1000_CTRL_EXT_PFRSTD; 5858 wr32(E1000_RPLOLR, reg);
5563 wr32(E1000_CTRL_EXT, reg_data);
5564 5859
5565 igb_vmdq_set_loopback_pf(hw, true); 5860 /* notify HW that the MAC is adding vlan tags */
5566 igb_vmdq_set_replication_pf(hw, true); 5861 reg = rd32(E1000_DTXCTL);
5862 reg |= E1000_DTXCTL_VLAN_ADDED;
5863 wr32(E1000_DTXCTL, reg);
5864
5865 if (adapter->vfs_allocated_count) {
5866 igb_vmdq_set_loopback_pf(hw, true);
5867 igb_vmdq_set_replication_pf(hw, true);
5868 } else {
5869 igb_vmdq_set_loopback_pf(hw, false);
5870 igb_vmdq_set_replication_pf(hw, false);
5871 }
5567} 5872}
5568 5873
5569/* igb_main.c */ 5874/* igb_main.c */
diff --git a/drivers/net/igbvf/ethtool.c b/drivers/net/igbvf/ethtool.c
index c68265bd0d1a..8afff07ff559 100644
--- a/drivers/net/igbvf/ethtool.c
+++ b/drivers/net/igbvf/ethtool.c
@@ -367,16 +367,6 @@ static int igbvf_link_test(struct igbvf_adapter *adapter, u64 *data)
367 return *data; 367 return *data;
368} 368}
369 369
370static int igbvf_get_self_test_count(struct net_device *netdev)
371{
372 return IGBVF_TEST_LEN;
373}
374
375static int igbvf_get_stats_count(struct net_device *netdev)
376{
377 return IGBVF_GLOBAL_STATS_LEN;
378}
379
380static void igbvf_diag_test(struct net_device *netdev, 370static void igbvf_diag_test(struct net_device *netdev,
381 struct ethtool_test *eth_test, u64 *data) 371 struct ethtool_test *eth_test, u64 *data)
382{ 372{
@@ -484,6 +474,18 @@ static void igbvf_get_ethtool_stats(struct net_device *netdev,
484 474
485} 475}
486 476
477static int igbvf_get_sset_count(struct net_device *dev, int stringset)
478{
479 switch(stringset) {
480 case ETH_SS_TEST:
481 return IGBVF_TEST_LEN;
482 case ETH_SS_STATS:
483 return IGBVF_GLOBAL_STATS_LEN;
484 default:
485 return -EINVAL;
486 }
487}
488
487static void igbvf_get_strings(struct net_device *netdev, u32 stringset, 489static void igbvf_get_strings(struct net_device *netdev, u32 stringset,
488 u8 *data) 490 u8 *data)
489{ 491{
@@ -532,11 +534,10 @@ static const struct ethtool_ops igbvf_ethtool_ops = {
532 .get_tso = ethtool_op_get_tso, 534 .get_tso = ethtool_op_get_tso,
533 .set_tso = igbvf_set_tso, 535 .set_tso = igbvf_set_tso,
534 .self_test = igbvf_diag_test, 536 .self_test = igbvf_diag_test,
537 .get_sset_count = igbvf_get_sset_count,
535 .get_strings = igbvf_get_strings, 538 .get_strings = igbvf_get_strings,
536 .phys_id = igbvf_phys_id, 539 .phys_id = igbvf_phys_id,
537 .get_ethtool_stats = igbvf_get_ethtool_stats, 540 .get_ethtool_stats = igbvf_get_ethtool_stats,
538 .self_test_count = igbvf_get_self_test_count,
539 .get_stats_count = igbvf_get_stats_count,
540 .get_coalesce = igbvf_get_coalesce, 541 .get_coalesce = igbvf_get_coalesce,
541 .set_coalesce = igbvf_set_coalesce, 542 .set_coalesce = igbvf_set_coalesce,
542}; 543};
diff --git a/drivers/net/igbvf/netdev.c b/drivers/net/igbvf/netdev.c
index 91024a3cdad3..fad7f348dd1b 100644
--- a/drivers/net/igbvf/netdev.c
+++ b/drivers/net/igbvf/netdev.c
@@ -170,18 +170,12 @@ static void igbvf_alloc_rx_buffers(struct igbvf_ring *rx_ring,
170 } 170 }
171 171
172 if (!buffer_info->skb) { 172 if (!buffer_info->skb) {
173 skb = netdev_alloc_skb(netdev, bufsz + NET_IP_ALIGN); 173 skb = netdev_alloc_skb_ip_align(netdev, bufsz);
174 if (!skb) { 174 if (!skb) {
175 adapter->alloc_rx_buff_failed++; 175 adapter->alloc_rx_buff_failed++;
176 goto no_buffers; 176 goto no_buffers;
177 } 177 }
178 178
179 /* Make buffer alignment 2 beyond a 16 byte boundary
180 * this will result in a 16 byte aligned IP header after
181 * the 14 byte MAC header is removed
182 */
183 skb_reserve(skb, NET_IP_ALIGN);
184
185 buffer_info->skb = skb; 179 buffer_info->skb = skb;
186 buffer_info->dma = pci_map_single(pdev, skb->data, 180 buffer_info->dma = pci_map_single(pdev, skb->data,
187 bufsz, 181 bufsz,
diff --git a/drivers/net/ipg.c b/drivers/net/ipg.c
index 9f7b5d4172b8..63056e7b9e22 100644
--- a/drivers/net/ipg.c
+++ b/drivers/net/ipg.c
@@ -738,17 +738,12 @@ static int ipg_get_rxbuff(struct net_device *dev, int entry)
738 738
739 IPG_DEBUG_MSG("_get_rxbuff\n"); 739 IPG_DEBUG_MSG("_get_rxbuff\n");
740 740
741 skb = netdev_alloc_skb(dev, sp->rxsupport_size + NET_IP_ALIGN); 741 skb = netdev_alloc_skb_ip_align(dev, sp->rxsupport_size);
742 if (!skb) { 742 if (!skb) {
743 sp->rx_buff[entry] = NULL; 743 sp->rx_buff[entry] = NULL;
744 return -ENOMEM; 744 return -ENOMEM;
745 } 745 }
746 746
747 /* Adjust the data start location within the buffer to
748 * align IP address field to a 16 byte boundary.
749 */
750 skb_reserve(skb, NET_IP_ALIGN);
751
752 /* Associate the receive buffer with the IPG NIC. */ 747 /* Associate the receive buffer with the IPG NIC. */
753 skb->dev = dev; 748 skb->dev = dev;
754 749
diff --git a/drivers/net/irda/kingsun-sir.c b/drivers/net/irda/kingsun-sir.c
index 2fc30b449eea..cb90d640007a 100644
--- a/drivers/net/irda/kingsun-sir.c
+++ b/drivers/net/irda/kingsun-sir.c
@@ -66,7 +66,6 @@
66#include <linux/errno.h> 66#include <linux/errno.h>
67#include <linux/init.h> 67#include <linux/init.h>
68#include <linux/slab.h> 68#include <linux/slab.h>
69#include <linux/kref.h>
70#include <linux/usb.h> 69#include <linux/usb.h>
71#include <linux/device.h> 70#include <linux/device.h>
72#include <linux/crc32.h> 71#include <linux/crc32.h>
diff --git a/drivers/net/irda/ks959-sir.c b/drivers/net/irda/ks959-sir.c
index f4d13fc51cbc..b54d3b48045e 100644
--- a/drivers/net/irda/ks959-sir.c
+++ b/drivers/net/irda/ks959-sir.c
@@ -118,7 +118,6 @@
118#include <linux/errno.h> 118#include <linux/errno.h>
119#include <linux/init.h> 119#include <linux/init.h>
120#include <linux/slab.h> 120#include <linux/slab.h>
121#include <linux/kref.h>
122#include <linux/usb.h> 121#include <linux/usb.h>
123#include <linux/device.h> 122#include <linux/device.h>
124#include <linux/crc32.h> 123#include <linux/crc32.h>
diff --git a/drivers/net/irda/ksdazzle-sir.c b/drivers/net/irda/ksdazzle-sir.c
index 5f9d73353972..8d713ebac15b 100644
--- a/drivers/net/irda/ksdazzle-sir.c
+++ b/drivers/net/irda/ksdazzle-sir.c
@@ -82,7 +82,6 @@
82#include <linux/errno.h> 82#include <linux/errno.h>
83#include <linux/init.h> 83#include <linux/init.h>
84#include <linux/slab.h> 84#include <linux/slab.h>
85#include <linux/kref.h>
86#include <linux/usb.h> 85#include <linux/usb.h>
87#include <linux/device.h> 86#include <linux/device.h>
88#include <linux/crc32.h> 87#include <linux/crc32.h>
diff --git a/drivers/net/irda/mcs7780.c b/drivers/net/irda/mcs7780.c
index b3d30bcb88e7..c0e0bb9401d3 100644
--- a/drivers/net/irda/mcs7780.c
+++ b/drivers/net/irda/mcs7780.c
@@ -50,7 +50,6 @@
50#include <linux/errno.h> 50#include <linux/errno.h>
51#include <linux/init.h> 51#include <linux/init.h>
52#include <linux/slab.h> 52#include <linux/slab.h>
53#include <linux/kref.h>
54#include <linux/usb.h> 53#include <linux/usb.h>
55#include <linux/device.h> 54#include <linux/device.h>
56#include <linux/crc32.h> 55#include <linux/crc32.h>
diff --git a/drivers/net/irda/pxaficp_ir.c b/drivers/net/irda/pxaficp_ir.c
index 1445e5865196..84db145d2b59 100644
--- a/drivers/net/irda/pxaficp_ir.c
+++ b/drivers/net/irda/pxaficp_ir.c
@@ -17,6 +17,7 @@
17#include <linux/etherdevice.h> 17#include <linux/etherdevice.h>
18#include <linux/platform_device.h> 18#include <linux/platform_device.h>
19#include <linux/clk.h> 19#include <linux/clk.h>
20#include <linux/gpio.h>
20 21
21#include <net/irda/irda.h> 22#include <net/irda/irda.h>
22#include <net/irda/irmod.h> 23#include <net/irda/irmod.h>
@@ -163,6 +164,22 @@ inline static void pxa_irda_fir_dma_tx_start(struct pxa_irda *si)
163} 164}
164 165
165/* 166/*
167 * Set the IrDA communications mode.
168 */
169static void pxa_irda_set_mode(struct pxa_irda *si, int mode)
170{
171 if (si->pdata->transceiver_mode)
172 si->pdata->transceiver_mode(si->dev, mode);
173 else {
174 if (gpio_is_valid(si->pdata->gpio_pwdown))
175 gpio_set_value(si->pdata->gpio_pwdown,
176 !(mode & IR_OFF) ^
177 !si->pdata->gpio_pwdown_inverted);
178 pxa2xx_transceiver_mode(si->dev, mode);
179 }
180}
181
182/*
166 * Set the IrDA communications speed. 183 * Set the IrDA communications speed.
167 */ 184 */
168static int pxa_irda_set_speed(struct pxa_irda *si, int speed) 185static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
@@ -188,7 +205,7 @@ static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
188 pxa_irda_disable_clk(si); 205 pxa_irda_disable_clk(si);
189 206
190 /* set board transceiver to SIR mode */ 207 /* set board transceiver to SIR mode */
191 si->pdata->transceiver_mode(si->dev, IR_SIRMODE); 208 pxa_irda_set_mode(si, IR_SIRMODE);
192 209
193 /* enable the STUART clock */ 210 /* enable the STUART clock */
194 pxa_irda_enable_sirclk(si); 211 pxa_irda_enable_sirclk(si);
@@ -222,7 +239,7 @@ static int pxa_irda_set_speed(struct pxa_irda *si, int speed)
222 ICCR0 = 0; 239 ICCR0 = 0;
223 240
224 /* set board transceiver to FIR mode */ 241 /* set board transceiver to FIR mode */
225 si->pdata->transceiver_mode(si->dev, IR_FIRMODE); 242 pxa_irda_set_mode(si, IR_FIRMODE);
226 243
227 /* enable the FICP clock */ 244 /* enable the FICP clock */
228 pxa_irda_enable_firclk(si); 245 pxa_irda_enable_firclk(si);
@@ -641,7 +658,7 @@ static void pxa_irda_shutdown(struct pxa_irda *si)
641 local_irq_restore(flags); 658 local_irq_restore(flags);
642 659
643 /* power off board transceiver */ 660 /* power off board transceiver */
644 si->pdata->transceiver_mode(si->dev, IR_OFF); 661 pxa_irda_set_mode(si, IR_OFF);
645 662
646 printk(KERN_DEBUG "pxa_ir: irda shutdown\n"); 663 printk(KERN_DEBUG "pxa_ir: irda shutdown\n");
647} 664}
@@ -849,10 +866,26 @@ static int pxa_irda_probe(struct platform_device *pdev)
849 if (err) 866 if (err)
850 goto err_mem_5; 867 goto err_mem_5;
851 868
852 if (si->pdata->startup) 869 if (gpio_is_valid(si->pdata->gpio_pwdown)) {
870 err = gpio_request(si->pdata->gpio_pwdown, "IrDA switch");
871 if (err)
872 goto err_startup;
873 err = gpio_direction_output(si->pdata->gpio_pwdown,
874 !si->pdata->gpio_pwdown_inverted);
875 if (err) {
876 gpio_free(si->pdata->gpio_pwdown);
877 goto err_startup;
878 }
879 }
880
881 if (si->pdata->startup) {
853 err = si->pdata->startup(si->dev); 882 err = si->pdata->startup(si->dev);
854 if (err) 883 if (err)
855 goto err_startup; 884 goto err_startup;
885 }
886
887 if (gpio_is_valid(si->pdata->gpio_pwdown) && si->pdata->startup)
888 dev_warn(si->dev, "gpio_pwdown and startup() both defined!\n");
856 889
857 dev->netdev_ops = &pxa_irda_netdev_ops; 890 dev->netdev_ops = &pxa_irda_netdev_ops;
858 891
@@ -903,6 +936,8 @@ static int pxa_irda_remove(struct platform_device *_dev)
903 if (dev) { 936 if (dev) {
904 struct pxa_irda *si = netdev_priv(dev); 937 struct pxa_irda *si = netdev_priv(dev);
905 unregister_netdev(dev); 938 unregister_netdev(dev);
939 if (gpio_is_valid(si->pdata->gpio_pwdown))
940 gpio_free(si->pdata->gpio_pwdown);
906 if (si->pdata->shutdown) 941 if (si->pdata->shutdown)
907 si->pdata->shutdown(si->dev); 942 si->pdata->shutdown(si->dev);
908 kfree(si->tx_buff.head); 943 kfree(si->tx_buff.head);
diff --git a/drivers/net/ixgb/ixgb.h b/drivers/net/ixgb/ixgb.h
index d85717e3022a..e95d9b6f1f2d 100644
--- a/drivers/net/ixgb/ixgb.h
+++ b/drivers/net/ixgb/ixgb.h
@@ -183,7 +183,6 @@ struct ixgb_adapter {
183 struct napi_struct napi; 183 struct napi_struct napi;
184 struct net_device *netdev; 184 struct net_device *netdev;
185 struct pci_dev *pdev; 185 struct pci_dev *pdev;
186 struct net_device_stats net_stats;
187 186
188 /* structs defined in ixgb_hw.h */ 187 /* structs defined in ixgb_hw.h */
189 struct ixgb_hw hw; 188 struct ixgb_hw hw;
diff --git a/drivers/net/ixgb/ixgb_ethtool.c b/drivers/net/ixgb/ixgb_ethtool.c
index 288ee1d0f431..a4ed96caae69 100644
--- a/drivers/net/ixgb/ixgb_ethtool.c
+++ b/drivers/net/ixgb/ixgb_ethtool.c
@@ -34,38 +34,46 @@
34 34
35#define IXGB_ALL_RAR_ENTRIES 16 35#define IXGB_ALL_RAR_ENTRIES 16
36 36
37enum {NETDEV_STATS, IXGB_STATS};
38
37struct ixgb_stats { 39struct ixgb_stats {
38 char stat_string[ETH_GSTRING_LEN]; 40 char stat_string[ETH_GSTRING_LEN];
41 int type;
39 int sizeof_stat; 42 int sizeof_stat;
40 int stat_offset; 43 int stat_offset;
41}; 44};
42 45
43#define IXGB_STAT(m) FIELD_SIZEOF(struct ixgb_adapter, m), \ 46#define IXGB_STAT(m) IXGB_STATS, \
44 offsetof(struct ixgb_adapter, m) 47 FIELD_SIZEOF(struct ixgb_adapter, m), \
48 offsetof(struct ixgb_adapter, m)
49#define IXGB_NETDEV_STAT(m) NETDEV_STATS, \
50 FIELD_SIZEOF(struct net_device, m), \
51 offsetof(struct net_device, m)
52
45static struct ixgb_stats ixgb_gstrings_stats[] = { 53static struct ixgb_stats ixgb_gstrings_stats[] = {
46 {"rx_packets", IXGB_STAT(net_stats.rx_packets)}, 54 {"rx_packets", IXGB_NETDEV_STAT(stats.rx_packets)},
47 {"tx_packets", IXGB_STAT(net_stats.tx_packets)}, 55 {"tx_packets", IXGB_NETDEV_STAT(stats.tx_packets)},
48 {"rx_bytes", IXGB_STAT(net_stats.rx_bytes)}, 56 {"rx_bytes", IXGB_NETDEV_STAT(stats.rx_bytes)},
49 {"tx_bytes", IXGB_STAT(net_stats.tx_bytes)}, 57 {"tx_bytes", IXGB_NETDEV_STAT(stats.tx_bytes)},
50 {"rx_errors", IXGB_STAT(net_stats.rx_errors)}, 58 {"rx_errors", IXGB_NETDEV_STAT(stats.rx_errors)},
51 {"tx_errors", IXGB_STAT(net_stats.tx_errors)}, 59 {"tx_errors", IXGB_NETDEV_STAT(stats.tx_errors)},
52 {"rx_dropped", IXGB_STAT(net_stats.rx_dropped)}, 60 {"rx_dropped", IXGB_NETDEV_STAT(stats.rx_dropped)},
53 {"tx_dropped", IXGB_STAT(net_stats.tx_dropped)}, 61 {"tx_dropped", IXGB_NETDEV_STAT(stats.tx_dropped)},
54 {"multicast", IXGB_STAT(net_stats.multicast)}, 62 {"multicast", IXGB_NETDEV_STAT(stats.multicast)},
55 {"collisions", IXGB_STAT(net_stats.collisions)}, 63 {"collisions", IXGB_NETDEV_STAT(stats.collisions)},
56 64
57/* { "rx_length_errors", IXGB_STAT(net_stats.rx_length_errors) }, */ 65/* { "rx_length_errors", IXGB_NETDEV_STAT(stats.rx_length_errors) }, */
58 {"rx_over_errors", IXGB_STAT(net_stats.rx_over_errors)}, 66 {"rx_over_errors", IXGB_NETDEV_STAT(stats.rx_over_errors)},
59 {"rx_crc_errors", IXGB_STAT(net_stats.rx_crc_errors)}, 67 {"rx_crc_errors", IXGB_NETDEV_STAT(stats.rx_crc_errors)},
60 {"rx_frame_errors", IXGB_STAT(net_stats.rx_frame_errors)}, 68 {"rx_frame_errors", IXGB_NETDEV_STAT(stats.rx_frame_errors)},
61 {"rx_no_buffer_count", IXGB_STAT(stats.rnbc)}, 69 {"rx_no_buffer_count", IXGB_STAT(stats.rnbc)},
62 {"rx_fifo_errors", IXGB_STAT(net_stats.rx_fifo_errors)}, 70 {"rx_fifo_errors", IXGB_NETDEV_STAT(stats.rx_fifo_errors)},
63 {"rx_missed_errors", IXGB_STAT(net_stats.rx_missed_errors)}, 71 {"rx_missed_errors", IXGB_NETDEV_STAT(stats.rx_missed_errors)},
64 {"tx_aborted_errors", IXGB_STAT(net_stats.tx_aborted_errors)}, 72 {"tx_aborted_errors", IXGB_NETDEV_STAT(stats.tx_aborted_errors)},
65 {"tx_carrier_errors", IXGB_STAT(net_stats.tx_carrier_errors)}, 73 {"tx_carrier_errors", IXGB_NETDEV_STAT(stats.tx_carrier_errors)},
66 {"tx_fifo_errors", IXGB_STAT(net_stats.tx_fifo_errors)}, 74 {"tx_fifo_errors", IXGB_NETDEV_STAT(stats.tx_fifo_errors)},
67 {"tx_heartbeat_errors", IXGB_STAT(net_stats.tx_heartbeat_errors)}, 75 {"tx_heartbeat_errors", IXGB_NETDEV_STAT(stats.tx_heartbeat_errors)},
68 {"tx_window_errors", IXGB_STAT(net_stats.tx_window_errors)}, 76 {"tx_window_errors", IXGB_NETDEV_STAT(stats.tx_window_errors)},
69 {"tx_deferred_ok", IXGB_STAT(stats.dc)}, 77 {"tx_deferred_ok", IXGB_STAT(stats.dc)},
70 {"tx_timeout_count", IXGB_STAT(tx_timeout_count) }, 78 {"tx_timeout_count", IXGB_STAT(tx_timeout_count) },
71 {"tx_restart_queue", IXGB_STAT(restart_queue) }, 79 {"tx_restart_queue", IXGB_STAT(restart_queue) },
@@ -662,10 +670,21 @@ ixgb_get_ethtool_stats(struct net_device *netdev,
662{ 670{
663 struct ixgb_adapter *adapter = netdev_priv(netdev); 671 struct ixgb_adapter *adapter = netdev_priv(netdev);
664 int i; 672 int i;
673 char *p = NULL;
665 674
666 ixgb_update_stats(adapter); 675 ixgb_update_stats(adapter);
667 for (i = 0; i < IXGB_STATS_LEN; i++) { 676 for (i = 0; i < IXGB_STATS_LEN; i++) {
668 char *p = (char *)adapter+ixgb_gstrings_stats[i].stat_offset; 677 switch (ixgb_gstrings_stats[i].type) {
678 case NETDEV_STATS:
679 p = (char *) netdev +
680 ixgb_gstrings_stats[i].stat_offset;
681 break;
682 case IXGB_STATS:
683 p = (char *) adapter +
684 ixgb_gstrings_stats[i].stat_offset;
685 break;
686 }
687
669 data[i] = (ixgb_gstrings_stats[i].sizeof_stat == 688 data[i] = (ixgb_gstrings_stats[i].sizeof_stat ==
670 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 689 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
671 } 690 }
diff --git a/drivers/net/ixgb/ixgb_main.c b/drivers/net/ixgb/ixgb_main.c
index 8aa44dca57eb..1bd0ca1b0465 100644
--- a/drivers/net/ixgb/ixgb_main.c
+++ b/drivers/net/ixgb/ixgb_main.c
@@ -1537,9 +1537,7 @@ ixgb_tx_timeout_task(struct work_struct *work)
1537static struct net_device_stats * 1537static struct net_device_stats *
1538ixgb_get_stats(struct net_device *netdev) 1538ixgb_get_stats(struct net_device *netdev)
1539{ 1539{
1540 struct ixgb_adapter *adapter = netdev_priv(netdev); 1540 return &netdev->stats;
1541
1542 return &adapter->net_stats;
1543} 1541}
1544 1542
1545/** 1543/**
@@ -1676,16 +1674,16 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
1676 1674
1677 /* Fill out the OS statistics structure */ 1675 /* Fill out the OS statistics structure */
1678 1676
1679 adapter->net_stats.rx_packets = adapter->stats.gprcl; 1677 netdev->stats.rx_packets = adapter->stats.gprcl;
1680 adapter->net_stats.tx_packets = adapter->stats.gptcl; 1678 netdev->stats.tx_packets = adapter->stats.gptcl;
1681 adapter->net_stats.rx_bytes = adapter->stats.gorcl; 1679 netdev->stats.rx_bytes = adapter->stats.gorcl;
1682 adapter->net_stats.tx_bytes = adapter->stats.gotcl; 1680 netdev->stats.tx_bytes = adapter->stats.gotcl;
1683 adapter->net_stats.multicast = adapter->stats.mprcl; 1681 netdev->stats.multicast = adapter->stats.mprcl;
1684 adapter->net_stats.collisions = 0; 1682 netdev->stats.collisions = 0;
1685 1683
1686 /* ignore RLEC as it reports errors for padded (<64bytes) frames 1684 /* ignore RLEC as it reports errors for padded (<64bytes) frames
1687 * with a length in the type/len field */ 1685 * with a length in the type/len field */
1688 adapter->net_stats.rx_errors = 1686 netdev->stats.rx_errors =
1689 /* adapter->stats.rnbc + */ adapter->stats.crcerrs + 1687 /* adapter->stats.rnbc + */ adapter->stats.crcerrs +
1690 adapter->stats.ruc + 1688 adapter->stats.ruc +
1691 adapter->stats.roc /*+ adapter->stats.rlec */ + 1689 adapter->stats.roc /*+ adapter->stats.rlec */ +
@@ -1693,21 +1691,21 @@ ixgb_update_stats(struct ixgb_adapter *adapter)
1693 adapter->stats.ecbc + adapter->stats.mpc; 1691 adapter->stats.ecbc + adapter->stats.mpc;
1694 1692
1695 /* see above 1693 /* see above
1696 * adapter->net_stats.rx_length_errors = adapter->stats.rlec; 1694 * netdev->stats.rx_length_errors = adapter->stats.rlec;
1697 */ 1695 */
1698 1696
1699 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 1697 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
1700 adapter->net_stats.rx_fifo_errors = adapter->stats.mpc; 1698 netdev->stats.rx_fifo_errors = adapter->stats.mpc;
1701 adapter->net_stats.rx_missed_errors = adapter->stats.mpc; 1699 netdev->stats.rx_missed_errors = adapter->stats.mpc;
1702 adapter->net_stats.rx_over_errors = adapter->stats.mpc; 1700 netdev->stats.rx_over_errors = adapter->stats.mpc;
1703 1701
1704 adapter->net_stats.tx_errors = 0; 1702 netdev->stats.tx_errors = 0;
1705 adapter->net_stats.rx_frame_errors = 0; 1703 netdev->stats.rx_frame_errors = 0;
1706 adapter->net_stats.tx_aborted_errors = 0; 1704 netdev->stats.tx_aborted_errors = 0;
1707 adapter->net_stats.tx_carrier_errors = 0; 1705 netdev->stats.tx_carrier_errors = 0;
1708 adapter->net_stats.tx_fifo_errors = 0; 1706 netdev->stats.tx_fifo_errors = 0;
1709 adapter->net_stats.tx_heartbeat_errors = 0; 1707 netdev->stats.tx_heartbeat_errors = 0;
1710 adapter->net_stats.tx_window_errors = 0; 1708 netdev->stats.tx_window_errors = 0;
1711} 1709}
1712 1710
1713#define IXGB_MAX_INTR 10 1711#define IXGB_MAX_INTR 10
@@ -1974,9 +1972,8 @@ ixgb_clean_rx_irq(struct ixgb_adapter *adapter, int *work_done, int work_to_do)
1974 * of reassembly being done in the stack */ 1972 * of reassembly being done in the stack */
1975 if (length < copybreak) { 1973 if (length < copybreak) {
1976 struct sk_buff *new_skb = 1974 struct sk_buff *new_skb =
1977 netdev_alloc_skb(netdev, length + NET_IP_ALIGN); 1975 netdev_alloc_skb_ip_align(netdev, length);
1978 if (new_skb) { 1976 if (new_skb) {
1979 skb_reserve(new_skb, NET_IP_ALIGN);
1980 skb_copy_to_linear_data_offset(new_skb, 1977 skb_copy_to_linear_data_offset(new_skb,
1981 -NET_IP_ALIGN, 1978 -NET_IP_ALIGN,
1982 (skb->data - 1979 (skb->data -
@@ -2059,20 +2056,13 @@ ixgb_alloc_rx_buffers(struct ixgb_adapter *adapter, int cleaned_count)
2059 goto map_skb; 2056 goto map_skb;
2060 } 2057 }
2061 2058
2062 skb = netdev_alloc_skb(netdev, adapter->rx_buffer_len 2059 skb = netdev_alloc_skb_ip_align(netdev, adapter->rx_buffer_len);
2063 + NET_IP_ALIGN);
2064 if (unlikely(!skb)) { 2060 if (unlikely(!skb)) {
2065 /* Better luck next round */ 2061 /* Better luck next round */
2066 adapter->alloc_rx_buff_failed++; 2062 adapter->alloc_rx_buff_failed++;
2067 break; 2063 break;
2068 } 2064 }
2069 2065
2070 /* Make buffer alignment 2 beyond a 16 byte boundary
2071 * this will result in a 16 byte aligned IP header after
2072 * the 14 byte MAC header is removed
2073 */
2074 skb_reserve(skb, NET_IP_ALIGN);
2075
2076 buffer_info->skb = skb; 2066 buffer_info->skb = skb;
2077 buffer_info->length = adapter->rx_buffer_len; 2067 buffer_info->length = adapter->rx_buffer_len;
2078map_skb: 2068map_skb:
diff --git a/drivers/net/ixgbe/ixgbe.h b/drivers/net/ixgbe/ixgbe.h
index 385be6016667..7eb08a6d3f99 100644
--- a/drivers/net/ixgbe/ixgbe.h
+++ b/drivers/net/ixgbe/ixgbe.h
@@ -340,7 +340,6 @@ struct ixgbe_adapter {
340 /* OS defined structs */ 340 /* OS defined structs */
341 struct net_device *netdev; 341 struct net_device *netdev;
342 struct pci_dev *pdev; 342 struct pci_dev *pdev;
343 struct net_device_stats net_stats;
344 343
345 u32 test_icr; 344 u32 test_icr;
346 struct ixgbe_ring test_tx_ring; 345 struct ixgbe_ring test_tx_ring;
@@ -397,7 +396,7 @@ enum ixgbe_boards {
397extern struct ixgbe_info ixgbe_82598_info; 396extern struct ixgbe_info ixgbe_82598_info;
398extern struct ixgbe_info ixgbe_82599_info; 397extern struct ixgbe_info ixgbe_82599_info;
399#ifdef CONFIG_IXGBE_DCB 398#ifdef CONFIG_IXGBE_DCB
400extern struct dcbnl_rtnl_ops dcbnl_ops; 399extern const struct dcbnl_rtnl_ops dcbnl_ops;
401extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg, 400extern int ixgbe_copy_dcb_cfg(struct ixgbe_dcb_config *src_dcb_cfg,
402 struct ixgbe_dcb_config *dst_dcb_cfg, 401 struct ixgbe_dcb_config *dst_dcb_cfg,
403 int tc_max); 402 int tc_max);
@@ -458,6 +457,7 @@ extern int ixgbe_fcoe_disable(struct net_device *netdev);
458extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter); 457extern u8 ixgbe_fcoe_getapp(struct ixgbe_adapter *adapter);
459extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up); 458extern u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up);
460#endif /* CONFIG_IXGBE_DCB */ 459#endif /* CONFIG_IXGBE_DCB */
460extern int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type);
461#endif /* IXGBE_FCOE */ 461#endif /* IXGBE_FCOE */
462 462
463#endif /* _IXGBE_H_ */ 463#endif /* _IXGBE_H_ */
diff --git a/drivers/net/ixgbe/ixgbe_82599.c b/drivers/net/ixgbe/ixgbe_82599.c
index 34b04924c8a1..72106898a5cb 100644
--- a/drivers/net/ixgbe/ixgbe_82599.c
+++ b/drivers/net/ixgbe/ixgbe_82599.c
@@ -42,6 +42,10 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
42 ixgbe_link_speed speed, 42 ixgbe_link_speed speed,
43 bool autoneg, 43 bool autoneg,
44 bool autoneg_wait_to_complete); 44 bool autoneg_wait_to_complete);
45static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
46 ixgbe_link_speed speed,
47 bool autoneg,
48 bool autoneg_wait_to_complete);
45s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw, 49s32 ixgbe_start_mac_link_82599(struct ixgbe_hw *hw,
46 bool autoneg_wait_to_complete); 50 bool autoneg_wait_to_complete);
47s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw, 51s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
@@ -64,7 +68,13 @@ static void ixgbe_init_mac_link_ops_82599(struct ixgbe_hw *hw)
64 /* Set up dual speed SFP+ support */ 68 /* Set up dual speed SFP+ support */
65 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber; 69 mac->ops.setup_link = &ixgbe_setup_mac_link_multispeed_fiber;
66 } else { 70 } else {
67 mac->ops.setup_link = &ixgbe_setup_mac_link_82599; 71 if ((mac->ops.get_media_type(hw) ==
72 ixgbe_media_type_backplane) &&
73 (hw->phy.smart_speed == ixgbe_smart_speed_auto ||
74 hw->phy.smart_speed == ixgbe_smart_speed_on))
75 mac->ops.setup_link = &ixgbe_setup_mac_link_smartspeed;
76 else
77 mac->ops.setup_link = &ixgbe_setup_mac_link_82599;
68 } 78 }
69} 79}
70 80
@@ -337,6 +347,7 @@ static enum ixgbe_media_type ixgbe_get_media_type_82599(struct ixgbe_hw *hw)
337 media_type = ixgbe_media_type_backplane; 347 media_type = ixgbe_media_type_backplane;
338 break; 348 break;
339 case IXGBE_DEV_ID_82599_SFP: 349 case IXGBE_DEV_ID_82599_SFP:
350 case IXGBE_DEV_ID_82599_SFP_EM:
340 media_type = ixgbe_media_type_fiber; 351 media_type = ixgbe_media_type_fiber;
341 break; 352 break;
342 case IXGBE_DEV_ID_82599_CX4: 353 case IXGBE_DEV_ID_82599_CX4:
@@ -479,7 +490,12 @@ s32 ixgbe_setup_mac_link_multispeed_fiber(struct ixgbe_hw *hw,
479 hw->mac.autotry_restart = false; 490 hw->mac.autotry_restart = false;
480 } 491 }
481 492
482 /* The controller may take up to 500ms at 10g to acquire link */ 493 /*
494 * Wait for the controller to acquire link. Per IEEE 802.3ap,
495 * Section 73.10.2, we may have to wait up to 500ms if KR is
496 * attempted. 82599 uses the same timing for 10g SFI.
497 */
498
483 for (i = 0; i < 5; i++) { 499 for (i = 0; i < 5; i++) {
484 /* Wait for the link partner to also set speed */ 500 /* Wait for the link partner to also set speed */
485 msleep(100); 501 msleep(100);
@@ -567,6 +583,111 @@ out:
567} 583}
568 584
569/** 585/**
586 * ixgbe_setup_mac_link_smartspeed - Set MAC link speed using SmartSpeed
587 * @hw: pointer to hardware structure
588 * @speed: new link speed
589 * @autoneg: true if autonegotiation enabled
590 * @autoneg_wait_to_complete: true when waiting for completion is needed
591 *
592 * Implements the Intel SmartSpeed algorithm.
593 **/
594static s32 ixgbe_setup_mac_link_smartspeed(struct ixgbe_hw *hw,
595 ixgbe_link_speed speed, bool autoneg,
596 bool autoneg_wait_to_complete)
597{
598 s32 status = 0;
599 ixgbe_link_speed link_speed;
600 s32 i, j;
601 bool link_up = false;
602 u32 autoc_reg = IXGBE_READ_REG(hw, IXGBE_AUTOC);
603
604 hw_dbg(hw, "ixgbe_setup_mac_link_smartspeed.\n");
605
606 /* Set autoneg_advertised value based on input link speed */
607 hw->phy.autoneg_advertised = 0;
608
609 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
610 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_10GB_FULL;
611
612 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
613 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_1GB_FULL;
614
615 if (speed & IXGBE_LINK_SPEED_100_FULL)
616 hw->phy.autoneg_advertised |= IXGBE_LINK_SPEED_100_FULL;
617
618 /*
619 * Implement Intel SmartSpeed algorithm. SmartSpeed will reduce the
620 * autoneg advertisement if link is unable to be established at the
621 * highest negotiated rate. This can sometimes happen due to integrity
622 * issues with the physical media connection.
623 */
624
625 /* First, try to get link with full advertisement */
626 hw->phy.smart_speed_active = false;
627 for (j = 0; j < IXGBE_SMARTSPEED_MAX_RETRIES; j++) {
628 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
629 autoneg_wait_to_complete);
630 if (status)
631 goto out;
632
633 /*
634 * Wait for the controller to acquire link. Per IEEE 802.3ap,
635 * Section 73.10.2, we may have to wait up to 500ms if KR is
636 * attempted, or 200ms if KX/KX4/BX/BX4 is attempted, per
637 * Table 9 in the AN MAS.
638 */
639 for (i = 0; i < 5; i++) {
640 mdelay(100);
641
642 /* If we have link, just jump out */
643 hw->mac.ops.check_link(hw, &link_speed,
644 &link_up, false);
645 if (link_up)
646 goto out;
647 }
648 }
649
650 /*
651 * We didn't get link. If we advertised KR plus one of KX4/KX
652 * (or BX4/BX), then disable KR and try again.
653 */
654 if (((autoc_reg & IXGBE_AUTOC_KR_SUPP) == 0) ||
655 ((autoc_reg & IXGBE_AUTOC_KX4_KX_SUPP_MASK) == 0))
656 goto out;
657
658 /* Turn SmartSpeed on to disable KR support */
659 hw->phy.smart_speed_active = true;
660 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
661 autoneg_wait_to_complete);
662 if (status)
663 goto out;
664
665 /*
666 * Wait for the controller to acquire link. 600ms will allow for
667 * the AN link_fail_inhibit_timer as well for multiple cycles of
668 * parallel detect, both 10g and 1g. This allows for the maximum
669 * connect attempts as defined in the AN MAS table 73-7.
670 */
671 for (i = 0; i < 6; i++) {
672 mdelay(100);
673
674 /* If we have link, just jump out */
675 hw->mac.ops.check_link(hw, &link_speed,
676 &link_up, false);
677 if (link_up)
678 goto out;
679 }
680
681 /* We didn't get link. Turn SmartSpeed back off. */
682 hw->phy.smart_speed_active = false;
683 status = ixgbe_setup_mac_link_82599(hw, speed, autoneg,
684 autoneg_wait_to_complete);
685
686out:
687 return status;
688}
689
690/**
570 * ixgbe_check_mac_link_82599 - Determine link and speed status 691 * ixgbe_check_mac_link_82599 - Determine link and speed status
571 * @hw: pointer to hardware structure 692 * @hw: pointer to hardware structure
572 * @speed: pointer to link speed 693 * @speed: pointer to link speed
@@ -669,7 +790,8 @@ s32 ixgbe_setup_mac_link_82599(struct ixgbe_hw *hw,
669 if (speed & IXGBE_LINK_SPEED_10GB_FULL) 790 if (speed & IXGBE_LINK_SPEED_10GB_FULL)
670 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP) 791 if (orig_autoc & IXGBE_AUTOC_KX4_SUPP)
671 autoc |= IXGBE_AUTOC_KX4_SUPP; 792 autoc |= IXGBE_AUTOC_KX4_SUPP;
672 if (orig_autoc & IXGBE_AUTOC_KR_SUPP) 793 if ((orig_autoc & IXGBE_AUTOC_KR_SUPP) &&
794 (hw->phy.smart_speed_active == false))
673 autoc |= IXGBE_AUTOC_KR_SUPP; 795 autoc |= IXGBE_AUTOC_KR_SUPP;
674 if (speed & IXGBE_LINK_SPEED_1GB_FULL) 796 if (speed & IXGBE_LINK_SPEED_1GB_FULL)
675 autoc |= IXGBE_AUTOC_KX_SUPP; 797 autoc |= IXGBE_AUTOC_KX_SUPP;
@@ -878,6 +1000,10 @@ static s32 ixgbe_reset_hw_82599(struct ixgbe_hw *hw)
878 hw->mac.num_rar_entries--; 1000 hw->mac.num_rar_entries--;
879 } 1001 }
880 1002
1003 /* Store the alternative WWNN/WWPN prefix */
1004 hw->mac.ops.get_wwn_prefix(hw, &hw->mac.wwnn_prefix,
1005 &hw->mac.wwpn_prefix);
1006
881reset_hw_out: 1007reset_hw_out:
882 return status; 1008 return status;
883} 1009}
@@ -2414,6 +2540,51 @@ fw_version_out:
2414 return status; 2540 return status;
2415} 2541}
2416 2542
2543/**
2544 * ixgbe_get_wwn_prefix_82599 - Get alternative WWNN/WWPN prefix from
2545 * the EEPROM
2546 * @hw: pointer to hardware structure
2547 * @wwnn_prefix: the alternative WWNN prefix
2548 * @wwpn_prefix: the alternative WWPN prefix
2549 *
2550 * This function will read the EEPROM from the alternative SAN MAC address
2551 * block to check the support for the alternative WWNN/WWPN prefix support.
2552 **/
2553static s32 ixgbe_get_wwn_prefix_82599(struct ixgbe_hw *hw, u16 *wwnn_prefix,
2554 u16 *wwpn_prefix)
2555{
2556 u16 offset, caps;
2557 u16 alt_san_mac_blk_offset;
2558
2559 /* clear output first */
2560 *wwnn_prefix = 0xFFFF;
2561 *wwpn_prefix = 0xFFFF;
2562
2563 /* check if alternative SAN MAC is supported */
2564 hw->eeprom.ops.read(hw, IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR,
2565 &alt_san_mac_blk_offset);
2566
2567 if ((alt_san_mac_blk_offset == 0) ||
2568 (alt_san_mac_blk_offset == 0xFFFF))
2569 goto wwn_prefix_out;
2570
2571 /* check capability in alternative san mac address block */
2572 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET;
2573 hw->eeprom.ops.read(hw, offset, &caps);
2574 if (!(caps & IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN))
2575 goto wwn_prefix_out;
2576
2577 /* get the corresponding prefix for WWNN/WWPN */
2578 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET;
2579 hw->eeprom.ops.read(hw, offset, wwnn_prefix);
2580
2581 offset = alt_san_mac_blk_offset + IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET;
2582 hw->eeprom.ops.read(hw, offset, wwpn_prefix);
2583
2584wwn_prefix_out:
2585 return 0;
2586}
2587
2417static struct ixgbe_mac_operations mac_ops_82599 = { 2588static struct ixgbe_mac_operations mac_ops_82599 = {
2418 .init_hw = &ixgbe_init_hw_generic, 2589 .init_hw = &ixgbe_init_hw_generic,
2419 .reset_hw = &ixgbe_reset_hw_82599, 2590 .reset_hw = &ixgbe_reset_hw_82599,
@@ -2425,6 +2596,7 @@ static struct ixgbe_mac_operations mac_ops_82599 = {
2425 .get_mac_addr = &ixgbe_get_mac_addr_generic, 2596 .get_mac_addr = &ixgbe_get_mac_addr_generic,
2426 .get_san_mac_addr = &ixgbe_get_san_mac_addr_82599, 2597 .get_san_mac_addr = &ixgbe_get_san_mac_addr_82599,
2427 .get_device_caps = &ixgbe_get_device_caps_82599, 2598 .get_device_caps = &ixgbe_get_device_caps_82599,
2599 .get_wwn_prefix = &ixgbe_get_wwn_prefix_82599,
2428 .stop_adapter = &ixgbe_stop_adapter_generic, 2600 .stop_adapter = &ixgbe_stop_adapter_generic,
2429 .get_bus_info = &ixgbe_get_bus_info_generic, 2601 .get_bus_info = &ixgbe_get_bus_info_generic,
2430 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie, 2602 .set_lan_id = &ixgbe_set_lan_id_multi_port_pcie,
diff --git a/drivers/net/ixgbe/ixgbe_dcb_nl.c b/drivers/net/ixgbe/ixgbe_dcb_nl.c
index a6bc1ef28f92..3c7a79a7d7c6 100644
--- a/drivers/net/ixgbe/ixgbe_dcb_nl.c
+++ b/drivers/net/ixgbe/ixgbe_dcb_nl.c
@@ -563,7 +563,7 @@ static u8 ixgbe_dcbnl_setapp(struct net_device *netdev,
563 return rval; 563 return rval;
564} 564}
565 565
566struct dcbnl_rtnl_ops dcbnl_ops = { 566const struct dcbnl_rtnl_ops dcbnl_ops = {
567 .getstate = ixgbe_dcbnl_get_state, 567 .getstate = ixgbe_dcbnl_get_state,
568 .setstate = ixgbe_dcbnl_set_state, 568 .setstate = ixgbe_dcbnl_set_state,
569 .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr, 569 .getpermhwaddr = ixgbe_dcbnl_get_perm_hw_addr,
diff --git a/drivers/net/ixgbe/ixgbe_ethtool.c b/drivers/net/ixgbe/ixgbe_ethtool.c
index 856c18c207f3..9d2cc833691b 100644
--- a/drivers/net/ixgbe/ixgbe_ethtool.c
+++ b/drivers/net/ixgbe/ixgbe_ethtool.c
@@ -40,19 +40,27 @@
40 40
41#define IXGBE_ALL_RAR_ENTRIES 16 41#define IXGBE_ALL_RAR_ENTRIES 16
42 42
43enum {NETDEV_STATS, IXGBE_STATS};
44
43struct ixgbe_stats { 45struct ixgbe_stats {
44 char stat_string[ETH_GSTRING_LEN]; 46 char stat_string[ETH_GSTRING_LEN];
47 int type;
45 int sizeof_stat; 48 int sizeof_stat;
46 int stat_offset; 49 int stat_offset;
47}; 50};
48 51
49#define IXGBE_STAT(m) sizeof(((struct ixgbe_adapter *)0)->m), \ 52#define IXGBE_STAT(m) IXGBE_STATS, \
50 offsetof(struct ixgbe_adapter, m) 53 sizeof(((struct ixgbe_adapter *)0)->m), \
54 offsetof(struct ixgbe_adapter, m)
55#define IXGBE_NETDEV_STAT(m) NETDEV_STATS, \
56 sizeof(((struct net_device *)0)->m), \
57 offsetof(struct net_device, m)
58
51static struct ixgbe_stats ixgbe_gstrings_stats[] = { 59static struct ixgbe_stats ixgbe_gstrings_stats[] = {
52 {"rx_packets", IXGBE_STAT(net_stats.rx_packets)}, 60 {"rx_packets", IXGBE_NETDEV_STAT(stats.rx_packets)},
53 {"tx_packets", IXGBE_STAT(net_stats.tx_packets)}, 61 {"tx_packets", IXGBE_NETDEV_STAT(stats.tx_packets)},
54 {"rx_bytes", IXGBE_STAT(net_stats.rx_bytes)}, 62 {"rx_bytes", IXGBE_NETDEV_STAT(stats.rx_bytes)},
55 {"tx_bytes", IXGBE_STAT(net_stats.tx_bytes)}, 63 {"tx_bytes", IXGBE_NETDEV_STAT(stats.tx_bytes)},
56 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)}, 64 {"rx_pkts_nic", IXGBE_STAT(stats.gprc)},
57 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)}, 65 {"tx_pkts_nic", IXGBE_STAT(stats.gptc)},
58 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)}, 66 {"rx_bytes_nic", IXGBE_STAT(stats.gorc)},
@@ -60,26 +68,26 @@ static struct ixgbe_stats ixgbe_gstrings_stats[] = {
60 {"lsc_int", IXGBE_STAT(lsc_int)}, 68 {"lsc_int", IXGBE_STAT(lsc_int)},
61 {"tx_busy", IXGBE_STAT(tx_busy)}, 69 {"tx_busy", IXGBE_STAT(tx_busy)},
62 {"non_eop_descs", IXGBE_STAT(non_eop_descs)}, 70 {"non_eop_descs", IXGBE_STAT(non_eop_descs)},
63 {"rx_errors", IXGBE_STAT(net_stats.rx_errors)}, 71 {"rx_errors", IXGBE_NETDEV_STAT(stats.rx_errors)},
64 {"tx_errors", IXGBE_STAT(net_stats.tx_errors)}, 72 {"tx_errors", IXGBE_NETDEV_STAT(stats.tx_errors)},
65 {"rx_dropped", IXGBE_STAT(net_stats.rx_dropped)}, 73 {"rx_dropped", IXGBE_NETDEV_STAT(stats.rx_dropped)},
66 {"tx_dropped", IXGBE_STAT(net_stats.tx_dropped)}, 74 {"tx_dropped", IXGBE_NETDEV_STAT(stats.tx_dropped)},
67 {"multicast", IXGBE_STAT(net_stats.multicast)}, 75 {"multicast", IXGBE_NETDEV_STAT(stats.multicast)},
68 {"broadcast", IXGBE_STAT(stats.bprc)}, 76 {"broadcast", IXGBE_STAT(stats.bprc)},
69 {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) }, 77 {"rx_no_buffer_count", IXGBE_STAT(stats.rnbc[0]) },
70 {"collisions", IXGBE_STAT(net_stats.collisions)}, 78 {"collisions", IXGBE_NETDEV_STAT(stats.collisions)},
71 {"rx_over_errors", IXGBE_STAT(net_stats.rx_over_errors)}, 79 {"rx_over_errors", IXGBE_NETDEV_STAT(stats.rx_over_errors)},
72 {"rx_crc_errors", IXGBE_STAT(net_stats.rx_crc_errors)}, 80 {"rx_crc_errors", IXGBE_NETDEV_STAT(stats.rx_crc_errors)},
73 {"rx_frame_errors", IXGBE_STAT(net_stats.rx_frame_errors)}, 81 {"rx_frame_errors", IXGBE_NETDEV_STAT(stats.rx_frame_errors)},
74 {"hw_rsc_count", IXGBE_STAT(rsc_count)}, 82 {"hw_rsc_count", IXGBE_STAT(rsc_count)},
75 {"fdir_match", IXGBE_STAT(stats.fdirmatch)}, 83 {"fdir_match", IXGBE_STAT(stats.fdirmatch)},
76 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)}, 84 {"fdir_miss", IXGBE_STAT(stats.fdirmiss)},
77 {"rx_fifo_errors", IXGBE_STAT(net_stats.rx_fifo_errors)}, 85 {"rx_fifo_errors", IXGBE_NETDEV_STAT(stats.rx_fifo_errors)},
78 {"rx_missed_errors", IXGBE_STAT(net_stats.rx_missed_errors)}, 86 {"rx_missed_errors", IXGBE_NETDEV_STAT(stats.rx_missed_errors)},
79 {"tx_aborted_errors", IXGBE_STAT(net_stats.tx_aborted_errors)}, 87 {"tx_aborted_errors", IXGBE_NETDEV_STAT(stats.tx_aborted_errors)},
80 {"tx_carrier_errors", IXGBE_STAT(net_stats.tx_carrier_errors)}, 88 {"tx_carrier_errors", IXGBE_NETDEV_STAT(stats.tx_carrier_errors)},
81 {"tx_fifo_errors", IXGBE_STAT(net_stats.tx_fifo_errors)}, 89 {"tx_fifo_errors", IXGBE_NETDEV_STAT(stats.tx_fifo_errors)},
82 {"tx_heartbeat_errors", IXGBE_STAT(net_stats.tx_heartbeat_errors)}, 90 {"tx_heartbeat_errors", IXGBE_NETDEV_STAT(stats.tx_heartbeat_errors)},
83 {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)}, 91 {"tx_timeout_count", IXGBE_STAT(tx_timeout_count)},
84 {"tx_restart_queue", IXGBE_STAT(restart_queue)}, 92 {"tx_restart_queue", IXGBE_STAT(restart_queue)},
85 {"rx_long_length_errors", IXGBE_STAT(stats.roc)}, 93 {"rx_long_length_errors", IXGBE_STAT(stats.roc)},
@@ -933,10 +941,21 @@ static void ixgbe_get_ethtool_stats(struct net_device *netdev,
933 int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64); 941 int stat_count = sizeof(struct ixgbe_queue_stats) / sizeof(u64);
934 int j, k; 942 int j, k;
935 int i; 943 int i;
944 char *p = NULL;
936 945
937 ixgbe_update_stats(adapter); 946 ixgbe_update_stats(adapter);
938 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) { 947 for (i = 0; i < IXGBE_GLOBAL_STATS_LEN; i++) {
939 char *p = (char *)adapter + ixgbe_gstrings_stats[i].stat_offset; 948 switch (ixgbe_gstrings_stats[i].type) {
949 case NETDEV_STATS:
950 p = (char *) netdev +
951 ixgbe_gstrings_stats[i].stat_offset;
952 break;
953 case IXGBE_STATS:
954 p = (char *) adapter +
955 ixgbe_gstrings_stats[i].stat_offset;
956 break;
957 }
958
940 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat == 959 data[i] = (ixgbe_gstrings_stats[i].sizeof_stat ==
941 sizeof(u64)) ? *(u64 *)p : *(u32 *)p; 960 sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
942 } 961 }
diff --git a/drivers/net/ixgbe/ixgbe_fcoe.c b/drivers/net/ixgbe/ixgbe_fcoe.c
index a3c9f99515e2..edecdc853c14 100644
--- a/drivers/net/ixgbe/ixgbe_fcoe.c
+++ b/drivers/net/ixgbe/ixgbe_fcoe.c
@@ -718,3 +718,49 @@ u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up)
718 return 1; 718 return 1;
719} 719}
720#endif /* CONFIG_IXGBE_DCB */ 720#endif /* CONFIG_IXGBE_DCB */
721
722/**
723 * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
724 * @netdev : ixgbe adapter
725 * @wwn : the world wide name
726 * @type: the type of world wide name
727 *
728 * Returns the node or port world wide name if both the prefix and the san
729 * mac address are valid, then the wwn is formed based on the NAA-2 for
730 * IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3).
731 *
732 * Returns : 0 on success
733 */
734int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
735{
736 int rc = -EINVAL;
737 u16 prefix = 0xffff;
738 struct ixgbe_adapter *adapter = netdev_priv(netdev);
739 struct ixgbe_mac_info *mac = &adapter->hw.mac;
740
741 switch (type) {
742 case NETDEV_FCOE_WWNN:
743 prefix = mac->wwnn_prefix;
744 break;
745 case NETDEV_FCOE_WWPN:
746 prefix = mac->wwpn_prefix;
747 break;
748 default:
749 break;
750 }
751
752 if ((prefix != 0xffff) &&
753 is_valid_ether_addr(mac->san_addr)) {
754 *wwn = ((u64) prefix << 48) |
755 ((u64) mac->san_addr[0] << 40) |
756 ((u64) mac->san_addr[1] << 32) |
757 ((u64) mac->san_addr[2] << 24) |
758 ((u64) mac->san_addr[3] << 16) |
759 ((u64) mac->san_addr[4] << 8) |
760 ((u64) mac->san_addr[5]);
761 rc = 0;
762 }
763 return rc;
764}
765
766
diff --git a/drivers/net/ixgbe/ixgbe_main.c b/drivers/net/ixgbe/ixgbe_main.c
index a5036f7c1923..dceed80f16fb 100644
--- a/drivers/net/ixgbe/ixgbe_main.c
+++ b/drivers/net/ixgbe/ixgbe_main.c
@@ -98,6 +98,8 @@ static struct pci_device_id ixgbe_pci_tbl[] = {
98 board_82599 }, 98 board_82599 },
99 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP), 99 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP),
100 board_82599 }, 100 board_82599 },
101 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_SFP_EM),
102 board_82599 },
101 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ), 103 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_KX4_MEZZ),
102 board_82599 }, 104 board_82599 },
103 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4), 105 {PCI_VDEVICE(INTEL, IXGBE_DEV_ID_82599_CX4),
@@ -423,8 +425,8 @@ static bool ixgbe_clean_tx_irq(struct ixgbe_q_vector *q_vector,
423 tx_ring->total_packets += total_packets; 425 tx_ring->total_packets += total_packets;
424 tx_ring->stats.packets += total_packets; 426 tx_ring->stats.packets += total_packets;
425 tx_ring->stats.bytes += total_bytes; 427 tx_ring->stats.bytes += total_bytes;
426 adapter->net_stats.tx_bytes += total_bytes; 428 netdev->stats.tx_bytes += total_bytes;
427 adapter->net_stats.tx_packets += total_packets; 429 netdev->stats.tx_packets += total_packets;
428 return (count < tx_ring->work_limit); 430 return (count < tx_ring->work_limit);
429} 431}
430 432
@@ -669,22 +671,14 @@ static void ixgbe_alloc_rx_buffers(struct ixgbe_adapter *adapter,
669 671
670 if (!bi->skb) { 672 if (!bi->skb) {
671 struct sk_buff *skb; 673 struct sk_buff *skb;
672 skb = netdev_alloc_skb(adapter->netdev, 674 skb = netdev_alloc_skb_ip_align(adapter->netdev,
673 (rx_ring->rx_buf_len + 675 rx_ring->rx_buf_len);
674 NET_IP_ALIGN));
675 676
676 if (!skb) { 677 if (!skb) {
677 adapter->alloc_rx_buff_failed++; 678 adapter->alloc_rx_buff_failed++;
678 goto no_buffers; 679 goto no_buffers;
679 } 680 }
680 681
681 /*
682 * Make buffer alignment 2 beyond a 16 byte boundary
683 * this will result in a 16 byte aligned IP header after
684 * the 14 byte MAC header is removed
685 */
686 skb_reserve(skb, NET_IP_ALIGN);
687
688 bi->skb = skb; 682 bi->skb = skb;
689 bi->dma = pci_map_single(pdev, skb->data, 683 bi->dma = pci_map_single(pdev, skb->data,
690 rx_ring->rx_buf_len, 684 rx_ring->rx_buf_len,
@@ -764,6 +758,7 @@ static bool ixgbe_clean_rx_irq(struct ixgbe_q_vector *q_vector,
764 int *work_done, int work_to_do) 758 int *work_done, int work_to_do)
765{ 759{
766 struct ixgbe_adapter *adapter = q_vector->adapter; 760 struct ixgbe_adapter *adapter = q_vector->adapter;
761 struct net_device *netdev = adapter->netdev;
767 struct pci_dev *pdev = adapter->pdev; 762 struct pci_dev *pdev = adapter->pdev;
768 union ixgbe_adv_rx_desc *rx_desc, *next_rxd; 763 union ixgbe_adv_rx_desc *rx_desc, *next_rxd;
769 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer; 764 struct ixgbe_rx_buffer *rx_buffer_info, *next_buffer;
@@ -935,8 +930,8 @@ next_desc:
935 930
936 rx_ring->total_packets += total_rx_packets; 931 rx_ring->total_packets += total_rx_packets;
937 rx_ring->total_bytes += total_rx_bytes; 932 rx_ring->total_bytes += total_rx_bytes;
938 adapter->net_stats.rx_bytes += total_rx_bytes; 933 netdev->stats.rx_bytes += total_rx_bytes;
939 adapter->net_stats.rx_packets += total_rx_packets; 934 netdev->stats.rx_packets += total_rx_packets;
940 935
941 return cleaned; 936 return cleaned;
942} 937}
@@ -1209,6 +1204,7 @@ static void ixgbe_check_lsc(struct ixgbe_adapter *adapter)
1209 adapter->link_check_timeout = jiffies; 1204 adapter->link_check_timeout = jiffies;
1210 if (!test_bit(__IXGBE_DOWN, &adapter->state)) { 1205 if (!test_bit(__IXGBE_DOWN, &adapter->state)) {
1211 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC); 1206 IXGBE_WRITE_REG(hw, IXGBE_EIMC, IXGBE_EIMC_LSC);
1207 IXGBE_WRITE_FLUSH(hw);
1212 schedule_work(&adapter->watchdog_task); 1208 schedule_work(&adapter->watchdog_task);
1213 } 1209 }
1214} 1210}
@@ -1344,8 +1340,6 @@ static irqreturn_t ixgbe_msix_clean_rx(int irq, void *data)
1344 if (!q_vector->rxr_count) 1340 if (!q_vector->rxr_count)
1345 return IRQ_HANDLED; 1341 return IRQ_HANDLED;
1346 1342
1347 r_idx = find_first_bit(q_vector->rxr_idx, adapter->num_rx_queues);
1348 rx_ring = &(adapter->rx_ring[r_idx]);
1349 /* disable interrupts on this vector only */ 1343 /* disable interrupts on this vector only */
1350 ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx)); 1344 ixgbe_irq_disable_queues(adapter, ((u64)1 << q_vector->v_idx));
1351 napi_schedule(&q_vector->napi); 1345 napi_schedule(&q_vector->napi);
@@ -3632,10 +3626,10 @@ static int ixgbe_set_interrupt_capability(struct ixgbe_adapter *adapter)
3632 * It's easy to be greedy for MSI-X vectors, but it really 3626 * It's easy to be greedy for MSI-X vectors, but it really
3633 * doesn't do us much good if we have a lot more vectors 3627 * doesn't do us much good if we have a lot more vectors
3634 * than CPU's. So let's be conservative and only ask for 3628 * than CPU's. So let's be conservative and only ask for
3635 * (roughly) twice the number of vectors as there are CPU's. 3629 * (roughly) the same number of vectors as there are CPU's.
3636 */ 3630 */
3637 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues, 3631 v_budget = min(adapter->num_rx_queues + adapter->num_tx_queues,
3638 (int)(num_online_cpus() * 2)) + NON_Q_VECTORS; 3632 (int)num_online_cpus()) + NON_Q_VECTORS;
3639 3633
3640 /* 3634 /*
3641 * At the same time, hardware can only support a maximum of 3635 * At the same time, hardware can only support a maximum of
@@ -4475,6 +4469,7 @@ static void ixgbe_shutdown(struct pci_dev *pdev)
4475 **/ 4469 **/
4476void ixgbe_update_stats(struct ixgbe_adapter *adapter) 4470void ixgbe_update_stats(struct ixgbe_adapter *adapter)
4477{ 4471{
4472 struct net_device *netdev = adapter->netdev;
4478 struct ixgbe_hw *hw = &adapter->hw; 4473 struct ixgbe_hw *hw = &adapter->hw;
4479 u64 total_mpc = 0; 4474 u64 total_mpc = 0;
4480 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot; 4475 u32 i, missed_rx = 0, mpc, bprc, lxon, lxoff, xon_off_tot;
@@ -4594,15 +4589,15 @@ void ixgbe_update_stats(struct ixgbe_adapter *adapter)
4594 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC); 4589 adapter->stats.bptc += IXGBE_READ_REG(hw, IXGBE_BPTC);
4595 4590
4596 /* Fill out the OS statistics structure */ 4591 /* Fill out the OS statistics structure */
4597 adapter->net_stats.multicast = adapter->stats.mprc; 4592 netdev->stats.multicast = adapter->stats.mprc;
4598 4593
4599 /* Rx Errors */ 4594 /* Rx Errors */
4600 adapter->net_stats.rx_errors = adapter->stats.crcerrs + 4595 netdev->stats.rx_errors = adapter->stats.crcerrs +
4601 adapter->stats.rlec; 4596 adapter->stats.rlec;
4602 adapter->net_stats.rx_dropped = 0; 4597 netdev->stats.rx_dropped = 0;
4603 adapter->net_stats.rx_length_errors = adapter->stats.rlec; 4598 netdev->stats.rx_length_errors = adapter->stats.rlec;
4604 adapter->net_stats.rx_crc_errors = adapter->stats.crcerrs; 4599 netdev->stats.rx_crc_errors = adapter->stats.crcerrs;
4605 adapter->net_stats.rx_missed_errors = total_mpc; 4600 netdev->stats.rx_missed_errors = total_mpc;
4606} 4601}
4607 4602
4608/** 4603/**
@@ -5372,10 +5367,8 @@ static netdev_tx_t ixgbe_xmit_frame(struct sk_buff *skb,
5372 **/ 5367 **/
5373static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev) 5368static struct net_device_stats *ixgbe_get_stats(struct net_device *netdev)
5374{ 5369{
5375 struct ixgbe_adapter *adapter = netdev_priv(netdev);
5376
5377 /* only return the current stats */ 5370 /* only return the current stats */
5378 return &adapter->net_stats; 5371 return &netdev->stats;
5379} 5372}
5380 5373
5381/** 5374/**
@@ -5527,6 +5520,7 @@ static const struct net_device_ops ixgbe_netdev_ops = {
5527 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put, 5520 .ndo_fcoe_ddp_done = ixgbe_fcoe_ddp_put,
5528 .ndo_fcoe_enable = ixgbe_fcoe_enable, 5521 .ndo_fcoe_enable = ixgbe_fcoe_enable,
5529 .ndo_fcoe_disable = ixgbe_fcoe_disable, 5522 .ndo_fcoe_disable = ixgbe_fcoe_disable,
5523 .ndo_fcoe_get_wwn = ixgbe_fcoe_get_wwn,
5530#endif /* IXGBE_FCOE */ 5524#endif /* IXGBE_FCOE */
5531}; 5525};
5532 5526
diff --git a/drivers/net/ixgbe/ixgbe_type.h b/drivers/net/ixgbe/ixgbe_type.h
index ef4bdd58e016..21b6633da578 100644
--- a/drivers/net/ixgbe/ixgbe_type.h
+++ b/drivers/net/ixgbe/ixgbe_type.h
@@ -52,6 +52,7 @@
52#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514 52#define IXGBE_DEV_ID_82599_KX4_MEZZ 0x1514
53#define IXGBE_DEV_ID_82599_CX4 0x10F9 53#define IXGBE_DEV_ID_82599_CX4 0x10F9
54#define IXGBE_DEV_ID_82599_SFP 0x10FB 54#define IXGBE_DEV_ID_82599_SFP 0x10FB
55#define IXGBE_DEV_ID_82599_SFP_EM 0x1507
55#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC 56#define IXGBE_DEV_ID_82599_XAUI_LOM 0x10FC
56#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8 57#define IXGBE_DEV_ID_82599_COMBO_BACKPLANE 0x10F8
57 58
@@ -1538,6 +1539,16 @@
1538#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4 1539#define IXGBE_FW_PASSTHROUGH_PATCH_CONFIG_PTR 0x4
1539#define IXGBE_FW_PATCH_VERSION_4 0x7 1540#define IXGBE_FW_PATCH_VERSION_4 0x7
1540 1541
1542/* Alternative SAN MAC Address Block */
1543#define IXGBE_ALT_SAN_MAC_ADDR_BLK_PTR 0x27 /* Alt. SAN MAC block */
1544#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_OFFSET 0x0 /* Alt. SAN MAC capability */
1545#define IXGBE_ALT_SAN_MAC_ADDR_PORT0_OFFSET 0x1 /* Alt. SAN MAC 0 offset */
1546#define IXGBE_ALT_SAN_MAC_ADDR_PORT1_OFFSET 0x4 /* Alt. SAN MAC 1 offset */
1547#define IXGBE_ALT_SAN_MAC_ADDR_WWNN_OFFSET 0x7 /* Alt. WWNN prefix offset */
1548#define IXGBE_ALT_SAN_MAC_ADDR_WWPN_OFFSET 0x8 /* Alt. WWPN prefix offset */
1549#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_SANMAC 0x0 /* Alt. SAN MAC exists */
1550#define IXGBE_ALT_SAN_MAC_ADDR_CAPS_ALTWWN 0x1 /* Alt. WWN base exists */
1551
1541/* PCI Bus Info */ 1552/* PCI Bus Info */
1542#define IXGBE_PCI_LINK_STATUS 0xB2 1553#define IXGBE_PCI_LINK_STATUS 0xB2
1543#define IXGBE_PCI_DEVICE_CONTROL2 0xC8 1554#define IXGBE_PCI_DEVICE_CONTROL2 0xC8
@@ -2171,6 +2182,14 @@ enum ixgbe_fc_mode {
2171 ixgbe_fc_default 2182 ixgbe_fc_default
2172}; 2183};
2173 2184
2185/* Smart Speed Settings */
2186#define IXGBE_SMARTSPEED_MAX_RETRIES 3
2187enum ixgbe_smart_speed {
2188 ixgbe_smart_speed_auto = 0,
2189 ixgbe_smart_speed_on,
2190 ixgbe_smart_speed_off
2191};
2192
2174/* PCI bus types */ 2193/* PCI bus types */
2175enum ixgbe_bus_type { 2194enum ixgbe_bus_type {
2176 ixgbe_bus_type_unknown = 0, 2195 ixgbe_bus_type_unknown = 0,
@@ -2336,6 +2355,7 @@ struct ixgbe_mac_operations {
2336 s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *); 2355 s32 (*get_mac_addr)(struct ixgbe_hw *, u8 *);
2337 s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *); 2356 s32 (*get_san_mac_addr)(struct ixgbe_hw *, u8 *);
2338 s32 (*get_device_caps)(struct ixgbe_hw *, u16 *); 2357 s32 (*get_device_caps)(struct ixgbe_hw *, u16 *);
2358 s32 (*get_wwn_prefix)(struct ixgbe_hw *, u16 *, u16 *);
2339 s32 (*stop_adapter)(struct ixgbe_hw *); 2359 s32 (*stop_adapter)(struct ixgbe_hw *);
2340 s32 (*get_bus_info)(struct ixgbe_hw *); 2360 s32 (*get_bus_info)(struct ixgbe_hw *);
2341 void (*set_lan_id)(struct ixgbe_hw *); 2361 void (*set_lan_id)(struct ixgbe_hw *);
@@ -2407,6 +2427,10 @@ struct ixgbe_mac_info {
2407 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; 2427 u8 addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
2408 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; 2428 u8 perm_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
2409 u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS]; 2429 u8 san_addr[IXGBE_ETH_LENGTH_OF_ADDRESS];
2430 /* prefix for World Wide Node Name (WWNN) */
2431 u16 wwnn_prefix;
2432 /* prefix for World Wide Port Name (WWPN) */
2433 u16 wwpn_prefix;
2410 s32 mc_filter_type; 2434 s32 mc_filter_type;
2411 u32 mcft_size; 2435 u32 mcft_size;
2412 u32 vft_size; 2436 u32 vft_size;
@@ -2431,6 +2455,8 @@ struct ixgbe_phy_info {
2431 enum ixgbe_media_type media_type; 2455 enum ixgbe_media_type media_type;
2432 bool reset_disable; 2456 bool reset_disable;
2433 ixgbe_autoneg_advertised autoneg_advertised; 2457 ixgbe_autoneg_advertised autoneg_advertised;
2458 enum ixgbe_smart_speed smart_speed;
2459 bool smart_speed_active;
2434 bool multispeed_fiber; 2460 bool multispeed_fiber;
2435}; 2461};
2436 2462
diff --git a/drivers/net/ixp2000/ixpdev.c b/drivers/net/ixp2000/ixpdev.c
index 9aee0cc922c9..e9d9d595e1b7 100644
--- a/drivers/net/ixp2000/ixpdev.c
+++ b/drivers/net/ixp2000/ixpdev.c
@@ -109,9 +109,8 @@ static int ixpdev_rx(struct net_device *dev, int processed, int budget)
109 if (unlikely(!netif_running(nds[desc->channel]))) 109 if (unlikely(!netif_running(nds[desc->channel])))
110 goto err; 110 goto err;
111 111
112 skb = netdev_alloc_skb(dev, desc->pkt_length + 2); 112 skb = netdev_alloc_skb_ip_align(dev, desc->pkt_length);
113 if (likely(skb != NULL)) { 113 if (likely(skb != NULL)) {
114 skb_reserve(skb, 2);
115 skb_copy_to_linear_data(skb, buf, desc->pkt_length); 114 skb_copy_to_linear_data(skb, buf, desc->pkt_length);
116 skb_put(skb, desc->pkt_length); 115 skb_put(skb, desc->pkt_length);
117 skb->protocol = eth_type_trans(skb, nds[desc->channel]); 116 skb->protocol = eth_type_trans(skb, nds[desc->channel]);
diff --git a/drivers/net/korina.c b/drivers/net/korina.c
index 03199fa10003..a07a5972b57e 100644
--- a/drivers/net/korina.c
+++ b/drivers/net/korina.c
@@ -400,7 +400,7 @@ static int korina_rx(struct net_device *dev, int limit)
400 dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4); 400 dma_cache_inv((unsigned long)pkt_buf, pkt_len - 4);
401 401
402 /* Malloc up new buffer. */ 402 /* Malloc up new buffer. */
403 skb_new = netdev_alloc_skb(dev, KORINA_RBSIZE + 2); 403 skb_new = netdev_alloc_skb_ip_align(dev, KORINA_RBSIZE);
404 404
405 if (!skb_new) 405 if (!skb_new)
406 break; 406 break;
@@ -417,9 +417,6 @@ static int korina_rx(struct net_device *dev, int limit)
417 if (devcs & ETH_RX_MP) 417 if (devcs & ETH_RX_MP)
418 dev->stats.multicast++; 418 dev->stats.multicast++;
419 419
420 /* 16 bit align */
421 skb_reserve(skb_new, 2);
422
423 lp->rx_skb[lp->rx_next_done] = skb_new; 420 lp->rx_skb[lp->rx_next_done] = skb_new;
424 } 421 }
425 422
diff --git a/drivers/net/ks8842.c b/drivers/net/ks8842.c
index 99e954167fa6..5c45cb58d023 100644
--- a/drivers/net/ks8842.c
+++ b/drivers/net/ks8842.c
@@ -357,7 +357,7 @@ static void ks8842_rx_frame(struct net_device *netdev,
357 357
358 /* check the status */ 358 /* check the status */
359 if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) { 359 if ((status & RXSR_VALID) && !(status & RXSR_ERROR)) {
360 struct sk_buff *skb = netdev_alloc_skb(netdev, len + 2); 360 struct sk_buff *skb = netdev_alloc_skb_ip_align(netdev, len);
361 361
362 dev_dbg(&adapter->pdev->dev, "%s, got package, len: %d\n", 362 dev_dbg(&adapter->pdev->dev, "%s, got package, len: %d\n",
363 __func__, len); 363 __func__, len);
@@ -369,9 +369,6 @@ static void ks8842_rx_frame(struct net_device *netdev,
369 if (status & RXSR_MULTICAST) 369 if (status & RXSR_MULTICAST)
370 netdev->stats.multicast++; 370 netdev->stats.multicast++;
371 371
372 /* Align socket buffer in 4-byte boundary for
373 better performance. */
374 skb_reserve(skb, 2);
375 data = (u32 *)skb_put(skb, len); 372 data = (u32 *)skb_put(skb, len);
376 373
377 ks8842_select_bank(adapter, 17); 374 ks8842_select_bank(adapter, 17);
diff --git a/drivers/net/lib82596.c b/drivers/net/lib82596.c
index 51e11c3e53e1..5b24c67de25e 100644
--- a/drivers/net/lib82596.c
+++ b/drivers/net/lib82596.c
@@ -470,11 +470,11 @@ static inline int init_rx_bufs(struct net_device *dev)
470 470
471 for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) { 471 for (i = 0, rbd = dma->rbds; i < rx_ring_size; i++, rbd++) {
472 dma_addr_t dma_addr; 472 dma_addr_t dma_addr;
473 struct sk_buff *skb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4); 473 struct sk_buff *skb;
474 474
475 skb = netdev_alloc_skb_ip_align(dev, PKT_BUF_SZ);
475 if (skb == NULL) 476 if (skb == NULL)
476 return -1; 477 return -1;
477 skb_reserve(skb, 2);
478 dma_addr = dma_map_single(dev->dev.parent, skb->data, 478 dma_addr = dma_map_single(dev->dev.parent, skb->data,
479 PKT_BUF_SZ, DMA_FROM_DEVICE); 479 PKT_BUF_SZ, DMA_FROM_DEVICE);
480 rbd->v_next = rbd+1; 480 rbd->v_next = rbd+1;
@@ -697,12 +697,12 @@ static inline int i596_rx(struct net_device *dev)
697 (dma_addr_t)SWAP32(rbd->b_data), 697 (dma_addr_t)SWAP32(rbd->b_data),
698 PKT_BUF_SZ, DMA_FROM_DEVICE); 698 PKT_BUF_SZ, DMA_FROM_DEVICE);
699 /* Get fresh skbuff to replace filled one. */ 699 /* Get fresh skbuff to replace filled one. */
700 newskb = netdev_alloc_skb(dev, PKT_BUF_SZ + 4); 700 newskb = netdev_alloc_skb_ip_align(dev,
701 PKT_BUF_SZ);
701 if (newskb == NULL) { 702 if (newskb == NULL) {
702 skb = NULL; /* drop pkt */ 703 skb = NULL; /* drop pkt */
703 goto memory_squeeze; 704 goto memory_squeeze;
704 } 705 }
705 skb_reserve(newskb, 2);
706 706
707 /* Pass up the skb already on the Rx ring. */ 707 /* Pass up the skb already on the Rx ring. */
708 skb_put(skb, pkt_len); 708 skb_put(skb, pkt_len);
@@ -716,7 +716,7 @@ static inline int i596_rx(struct net_device *dev)
716 rbd->b_data = SWAP32(dma_addr); 716 rbd->b_data = SWAP32(dma_addr);
717 DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd)); 717 DMA_WBACK_INV(dev, rbd, sizeof(struct i596_rbd));
718 } else 718 } else
719 skb = netdev_alloc_skb(dev, pkt_len + 2); 719 skb = netdev_alloc_skb_ip_align(dev, pkt_len);
720memory_squeeze: 720memory_squeeze:
721 if (skb == NULL) { 721 if (skb == NULL) {
722 /* XXX tulip.c can defer packets here!! */ 722 /* XXX tulip.c can defer packets here!! */
@@ -730,7 +730,6 @@ memory_squeeze:
730 dma_sync_single_for_cpu(dev->dev.parent, 730 dma_sync_single_for_cpu(dev->dev.parent,
731 (dma_addr_t)SWAP32(rbd->b_data), 731 (dma_addr_t)SWAP32(rbd->b_data),
732 PKT_BUF_SZ, DMA_FROM_DEVICE); 732 PKT_BUF_SZ, DMA_FROM_DEVICE);
733 skb_reserve(skb, 2);
734 memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len); 733 memcpy(skb_put(skb, pkt_len), rbd->v_data, pkt_len);
735 dma_sync_single_for_device(dev->dev.parent, 734 dma_sync_single_for_device(dev->dev.parent,
736 (dma_addr_t)SWAP32(rbd->b_data), 735 (dma_addr_t)SWAP32(rbd->b_data),
diff --git a/drivers/net/macvlan.c b/drivers/net/macvlan.c
index 3aabfd9dd212..271aa7e1d033 100644
--- a/drivers/net/macvlan.c
+++ b/drivers/net/macvlan.c
@@ -202,7 +202,7 @@ static netdev_tx_t macvlan_start_xmit(struct sk_buff *skb,
202 } else 202 } else
203 txq->tx_dropped++; 203 txq->tx_dropped++;
204 204
205 return NETDEV_TX_OK; 205 return ret;
206} 206}
207 207
208static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev, 208static int macvlan_hard_header(struct sk_buff *skb, struct net_device *dev,
@@ -504,7 +504,7 @@ static int macvlan_get_tx_queues(struct net *net,
504 return 0; 504 return 0;
505} 505}
506 506
507static int macvlan_newlink(struct net_device *dev, 507static int macvlan_newlink(struct net *src_net, struct net_device *dev,
508 struct nlattr *tb[], struct nlattr *data[]) 508 struct nlattr *tb[], struct nlattr *data[])
509{ 509{
510 struct macvlan_dev *vlan = netdev_priv(dev); 510 struct macvlan_dev *vlan = netdev_priv(dev);
@@ -515,7 +515,7 @@ static int macvlan_newlink(struct net_device *dev,
515 if (!tb[IFLA_LINK]) 515 if (!tb[IFLA_LINK])
516 return -EINVAL; 516 return -EINVAL;
517 517
518 lowerdev = __dev_get_by_index(dev_net(dev), nla_get_u32(tb[IFLA_LINK])); 518 lowerdev = __dev_get_by_index(src_net, nla_get_u32(tb[IFLA_LINK]));
519 if (lowerdev == NULL) 519 if (lowerdev == NULL)
520 return -ENODEV; 520 return -ENODEV;
521 521
@@ -555,13 +555,13 @@ static int macvlan_newlink(struct net_device *dev,
555 return 0; 555 return 0;
556} 556}
557 557
558static void macvlan_dellink(struct net_device *dev) 558static void macvlan_dellink(struct net_device *dev, struct list_head *head)
559{ 559{
560 struct macvlan_dev *vlan = netdev_priv(dev); 560 struct macvlan_dev *vlan = netdev_priv(dev);
561 struct macvlan_port *port = vlan->port; 561 struct macvlan_port *port = vlan->port;
562 562
563 list_del(&vlan->list); 563 list_del(&vlan->list);
564 unregister_netdevice(dev); 564 unregister_netdevice_queue(dev, head);
565 565
566 if (list_empty(&port->vlans)) 566 if (list_empty(&port->vlans))
567 macvlan_port_destroy(port->dev); 567 macvlan_port_destroy(port->dev);
@@ -601,7 +601,7 @@ static int macvlan_device_event(struct notifier_block *unused,
601 break; 601 break;
602 case NETDEV_UNREGISTER: 602 case NETDEV_UNREGISTER:
603 list_for_each_entry_safe(vlan, next, &port->vlans, list) 603 list_for_each_entry_safe(vlan, next, &port->vlans, list)
604 macvlan_dellink(vlan->dev); 604 macvlan_dellink(vlan->dev, NULL);
605 break; 605 break;
606 } 606 }
607 return NOTIFY_DONE; 607 return NOTIFY_DONE;
diff --git a/drivers/net/mdio.c b/drivers/net/mdio.c
index 21f8754fcf4c..e85bf04cf813 100644
--- a/drivers/net/mdio.c
+++ b/drivers/net/mdio.c
@@ -162,6 +162,10 @@ static u32 mdio45_get_an(const struct mdio_if_info *mdio, u16 addr)
162 result |= ADVERTISED_100baseT_Half; 162 result |= ADVERTISED_100baseT_Half;
163 if (reg & ADVERTISE_100FULL) 163 if (reg & ADVERTISE_100FULL)
164 result |= ADVERTISED_100baseT_Full; 164 result |= ADVERTISED_100baseT_Full;
165 if (reg & ADVERTISE_PAUSE_CAP)
166 result |= ADVERTISED_Pause;
167 if (reg & ADVERTISE_PAUSE_ASYM)
168 result |= ADVERTISED_Asym_Pause;
165 return result; 169 return result;
166} 170}
167 171
@@ -344,11 +348,9 @@ void mdio45_ethtool_spauseparam_an(const struct mdio_if_info *mdio,
344 348
345 old_adv = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_AN, 349 old_adv = mdio->mdio_read(mdio->dev, mdio->prtad, MDIO_MMD_AN,
346 MDIO_AN_ADVERTISE); 350 MDIO_AN_ADVERTISE);
347 adv = old_adv & ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM); 351 adv = ((old_adv & ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) |
348 if (ecmd->autoneg) 352 mii_advertise_flowctrl((ecmd->rx_pause ? FLOW_CTRL_RX : 0) |
349 adv |= mii_advertise_flowctrl( 353 (ecmd->tx_pause ? FLOW_CTRL_TX : 0)));
350 (ecmd->rx_pause ? FLOW_CTRL_RX : 0) |
351 (ecmd->tx_pause ? FLOW_CTRL_TX : 0));
352 if (adv != old_adv) { 354 if (adv != old_adv) {
353 mdio->mdio_write(mdio->dev, mdio->prtad, MDIO_MMD_AN, 355 mdio->mdio_write(mdio->dev, mdio->prtad, MDIO_MMD_AN,
354 MDIO_AN_ADVERTISE, adv); 356 MDIO_AN_ADVERTISE, adv);
diff --git a/drivers/net/mlx4/fw.c b/drivers/net/mlx4/fw.c
index cee199ceba2f..3c16602172fc 100644
--- a/drivers/net/mlx4/fw.c
+++ b/drivers/net/mlx4/fw.c
@@ -33,6 +33,7 @@
33 */ 33 */
34 34
35#include <linux/mlx4/cmd.h> 35#include <linux/mlx4/cmd.h>
36#include <linux/cache.h>
36 37
37#include "fw.h" 38#include "fw.h"
38#include "icm.h" 39#include "icm.h"
@@ -698,6 +699,7 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
698#define INIT_HCA_IN_SIZE 0x200 699#define INIT_HCA_IN_SIZE 0x200
699#define INIT_HCA_VERSION_OFFSET 0x000 700#define INIT_HCA_VERSION_OFFSET 0x000
700#define INIT_HCA_VERSION 2 701#define INIT_HCA_VERSION 2
702#define INIT_HCA_CACHELINE_SZ_OFFSET 0x0e
701#define INIT_HCA_FLAGS_OFFSET 0x014 703#define INIT_HCA_FLAGS_OFFSET 0x014
702#define INIT_HCA_QPC_OFFSET 0x020 704#define INIT_HCA_QPC_OFFSET 0x020
703#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10) 705#define INIT_HCA_QPC_BASE_OFFSET (INIT_HCA_QPC_OFFSET + 0x10)
@@ -735,6 +737,9 @@ int mlx4_INIT_HCA(struct mlx4_dev *dev, struct mlx4_init_hca_param *param)
735 737
736 *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION; 738 *((u8 *) mailbox->buf + INIT_HCA_VERSION_OFFSET) = INIT_HCA_VERSION;
737 739
740 *((u8 *) mailbox->buf + INIT_HCA_CACHELINE_SZ_OFFSET) =
741 (ilog2(cache_line_size()) - 4) << 5;
742
738#if defined(__LITTLE_ENDIAN) 743#if defined(__LITTLE_ENDIAN)
739 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1); 744 *(inbox + INIT_HCA_FLAGS_OFFSET / 4) &= ~cpu_to_be32(1 << 1);
740#elif defined(__BIG_ENDIAN) 745#elif defined(__BIG_ENDIAN)
diff --git a/drivers/net/myri10ge/myri10ge.c b/drivers/net/myri10ge/myri10ge.c
index f3624517cb0e..85e1b6a3ac1b 100644
--- a/drivers/net/myri10ge/myri10ge.c
+++ b/drivers/net/myri10ge/myri10ge.c
@@ -207,7 +207,6 @@ struct myri10ge_priv {
207 int big_bytes; 207 int big_bytes;
208 int max_intr_slots; 208 int max_intr_slots;
209 struct net_device *dev; 209 struct net_device *dev;
210 struct net_device_stats stats;
211 spinlock_t stats_lock; 210 spinlock_t stats_lock;
212 u8 __iomem *sram; 211 u8 __iomem *sram;
213 int sram_size; 212 int sram_size;
@@ -264,6 +263,10 @@ static char *myri10ge_fw_unaligned = "myri10ge_ethp_z8e.dat";
264static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat"; 263static char *myri10ge_fw_aligned = "myri10ge_eth_z8e.dat";
265static char *myri10ge_fw_rss_unaligned = "myri10ge_rss_ethp_z8e.dat"; 264static char *myri10ge_fw_rss_unaligned = "myri10ge_rss_ethp_z8e.dat";
266static char *myri10ge_fw_rss_aligned = "myri10ge_rss_eth_z8e.dat"; 265static char *myri10ge_fw_rss_aligned = "myri10ge_rss_eth_z8e.dat";
266MODULE_FIRMWARE("myri10ge_ethp_z8e.dat");
267MODULE_FIRMWARE("myri10ge_eth_z8e.dat");
268MODULE_FIRMWARE("myri10ge_rss_ethp_z8e.dat");
269MODULE_FIRMWARE("myri10ge_rss_eth_z8e.dat");
267 270
268static char *myri10ge_fw_name = NULL; 271static char *myri10ge_fw_name = NULL;
269module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR); 272module_param(myri10ge_fw_name, charp, S_IRUGO | S_IWUSR);
@@ -1832,7 +1835,7 @@ myri10ge_get_ethtool_stats(struct net_device *netdev,
1832 /* force stats update */ 1835 /* force stats update */
1833 (void)myri10ge_get_stats(netdev); 1836 (void)myri10ge_get_stats(netdev);
1834 for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++) 1837 for (i = 0; i < MYRI10GE_NET_STATS_LEN; i++)
1835 data[i] = ((unsigned long *)&mgp->stats)[i]; 1838 data[i] = ((unsigned long *)&netdev->stats)[i];
1836 1839
1837 data[i++] = (unsigned int)mgp->tx_boundary; 1840 data[i++] = (unsigned int)mgp->tx_boundary;
1838 data[i++] = (unsigned int)mgp->wc_enabled; 1841 data[i++] = (unsigned int)mgp->wc_enabled;
@@ -3002,7 +3005,7 @@ static struct net_device_stats *myri10ge_get_stats(struct net_device *dev)
3002{ 3005{
3003 struct myri10ge_priv *mgp = netdev_priv(dev); 3006 struct myri10ge_priv *mgp = netdev_priv(dev);
3004 struct myri10ge_slice_netstats *slice_stats; 3007 struct myri10ge_slice_netstats *slice_stats;
3005 struct net_device_stats *stats = &mgp->stats; 3008 struct net_device_stats *stats = &dev->stats;
3006 int i; 3009 int i;
3007 3010
3008 spin_lock(&mgp->stats_lock); 3011 spin_lock(&mgp->stats_lock);
diff --git a/drivers/net/netx-eth.c b/drivers/net/netx-eth.c
index 9f4235466d59..a0d65f592a12 100644
--- a/drivers/net/netx-eth.c
+++ b/drivers/net/netx-eth.c
@@ -510,3 +510,6 @@ module_exit(netx_eth_cleanup);
510MODULE_AUTHOR("Sascha Hauer, Pengutronix"); 510MODULE_AUTHOR("Sascha Hauer, Pengutronix");
511MODULE_LICENSE("GPL"); 511MODULE_LICENSE("GPL");
512MODULE_ALIAS("platform:" CARDNAME); 512MODULE_ALIAS("platform:" CARDNAME);
513MODULE_FIRMWARE("xc0.bin");
514MODULE_FIRMWARE("xc1.bin");
515MODULE_FIRMWARE("xc2.bin");
diff --git a/drivers/net/netxen/netxen_nic.h b/drivers/net/netxen/netxen_nic.h
index 7384f59df615..645450d93f4e 100644
--- a/drivers/net/netxen/netxen_nic.h
+++ b/drivers/net/netxen/netxen_nic.h
@@ -53,8 +53,8 @@
53 53
54#define _NETXEN_NIC_LINUX_MAJOR 4 54#define _NETXEN_NIC_LINUX_MAJOR 4
55#define _NETXEN_NIC_LINUX_MINOR 0 55#define _NETXEN_NIC_LINUX_MINOR 0
56#define _NETXEN_NIC_LINUX_SUBVERSION 50 56#define _NETXEN_NIC_LINUX_SUBVERSION 65
57#define NETXEN_NIC_LINUX_VERSIONID "4.0.50" 57#define NETXEN_NIC_LINUX_VERSIONID "4.0.65"
58 58
59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c)) 59#define NETXEN_VERSION_CODE(a, b, c) (((a) << 24) + ((b) << 16) + (c))
60#define _major(v) (((v) >> 24) & 0xff) 60#define _major(v) (((v) >> 24) & 0xff)
@@ -74,8 +74,6 @@
74#define NETXEN_FLASH_TOTAL_SIZE (NETXEN_NUM_FLASH_SECTORS \ 74#define NETXEN_FLASH_TOTAL_SIZE (NETXEN_NUM_FLASH_SECTORS \
75 * NETXEN_FLASH_SECTOR_SIZE) 75 * NETXEN_FLASH_SECTOR_SIZE)
76 76
77#define PHAN_VENDOR_ID 0x4040
78
79#define RCV_DESC_RINGSIZE(rds_ring) \ 77#define RCV_DESC_RINGSIZE(rds_ring) \
80 (sizeof(struct rcv_desc) * (rds_ring)->num_desc) 78 (sizeof(struct rcv_desc) * (rds_ring)->num_desc)
81#define RCV_BUFF_RINGSIZE(rds_ring) \ 79#define RCV_BUFF_RINGSIZE(rds_ring) \
@@ -117,9 +115,11 @@
117#define NX_P3_B0 0x40 115#define NX_P3_B0 0x40
118#define NX_P3_B1 0x41 116#define NX_P3_B1 0x41
119#define NX_P3_B2 0x42 117#define NX_P3_B2 0x42
118#define NX_P3P_A0 0x50
120 119
121#define NX_IS_REVISION_P2(REVISION) (REVISION <= NX_P2_C1) 120#define NX_IS_REVISION_P2(REVISION) (REVISION <= NX_P2_C1)
122#define NX_IS_REVISION_P3(REVISION) (REVISION >= NX_P3_A0) 121#define NX_IS_REVISION_P3(REVISION) (REVISION >= NX_P3_A0)
122#define NX_IS_REVISION_P3P(REVISION) (REVISION >= NX_P3P_A0)
123 123
124#define FIRST_PAGE_GROUP_START 0 124#define FIRST_PAGE_GROUP_START 0
125#define FIRST_PAGE_GROUP_END 0x100000 125#define FIRST_PAGE_GROUP_END 0x100000
@@ -419,6 +419,34 @@ struct status_desc {
419 __le64 status_desc_data[2]; 419 __le64 status_desc_data[2];
420} __attribute__ ((aligned(16))); 420} __attribute__ ((aligned(16)));
421 421
422/* UNIFIED ROMIMAGE *************************/
423#define NX_UNI_FW_MIN_SIZE 0x3eb000
424#define NX_UNI_DIR_SECT_PRODUCT_TBL 0x0
425#define NX_UNI_DIR_SECT_BOOTLD 0x6
426#define NX_UNI_DIR_SECT_FW 0x7
427
428/*Offsets */
429#define NX_UNI_CHIP_REV_OFF 10
430#define NX_UNI_FLAGS_OFF 11
431#define NX_UNI_BIOS_VERSION_OFF 12
432#define NX_UNI_BOOTLD_IDX_OFF 27
433#define NX_UNI_FIRMWARE_IDX_OFF 29
434
435struct uni_table_desc{
436 uint32_t findex;
437 uint32_t num_entries;
438 uint32_t entry_size;
439 uint32_t reserved[5];
440};
441
442struct uni_data_desc{
443 uint32_t findex;
444 uint32_t size;
445 uint32_t reserved[5];
446};
447
448/* UNIFIED ROMIMAGE *************************/
449
422/* The version of the main data structure */ 450/* The version of the main data structure */
423#define NETXEN_BDINFO_VERSION 1 451#define NETXEN_BDINFO_VERSION 1
424 452
@@ -485,7 +513,15 @@ struct status_desc {
485#define NX_P2_MN_ROMIMAGE 0 513#define NX_P2_MN_ROMIMAGE 0
486#define NX_P3_CT_ROMIMAGE 1 514#define NX_P3_CT_ROMIMAGE 1
487#define NX_P3_MN_ROMIMAGE 2 515#define NX_P3_MN_ROMIMAGE 2
488#define NX_FLASH_ROMIMAGE 3 516#define NX_UNIFIED_ROMIMAGE 3
517#define NX_FLASH_ROMIMAGE 4
518#define NX_UNKNOWN_ROMIMAGE 0xff
519
520#define NX_P2_MN_ROMIMAGE_NAME "nxromimg.bin"
521#define NX_P3_CT_ROMIMAGE_NAME "nx3fwct.bin"
522#define NX_P3_MN_ROMIMAGE_NAME "nx3fwmn.bin"
523#define NX_UNIFIED_ROMIMAGE_NAME "phanfw.bin"
524#define NX_FLASH_ROMIMAGE_NAME "flash"
489 525
490extern char netxen_nic_driver_name[]; 526extern char netxen_nic_driver_name[];
491 527
@@ -543,13 +579,16 @@ struct netxen_hardware_context {
543 void __iomem *pci_base1; 579 void __iomem *pci_base1;
544 void __iomem *pci_base2; 580 void __iomem *pci_base2;
545 void __iomem *db_base; 581 void __iomem *db_base;
582 void __iomem *ocm_win_crb;
583
546 unsigned long db_len; 584 unsigned long db_len;
547 unsigned long pci_len0; 585 unsigned long pci_len0;
548 586
549 int qdr_sn_window; 587 u32 ocm_win;
550 int ddr_mn_window; 588 u32 crb_win;
551 u32 mn_win_crb; 589
552 u32 ms_win_crb; 590 rwlock_t crb_lock;
591 spinlock_t mem_lock;
553 592
554 u8 cut_through; 593 u8 cut_through;
555 u8 revision_id; 594 u8 revision_id;
@@ -1039,6 +1078,9 @@ typedef struct {
1039#define LINKEVENT_LINKSPEED_MBPS 0 1078#define LINKEVENT_LINKSPEED_MBPS 0
1040#define LINKEVENT_LINKSPEED_ENCODED 1 1079#define LINKEVENT_LINKSPEED_ENCODED 1
1041 1080
1081#define AUTO_FW_RESET_ENABLED 0xEF10AF12
1082#define AUTO_FW_RESET_DISABLED 0xDCBAAF12
1083
1042/* firmware response header: 1084/* firmware response header:
1043 * 63:58 - message type 1085 * 63:58 - message type
1044 * 57:56 - owner 1086 * 57:56 - owner
@@ -1086,6 +1128,7 @@ typedef struct {
1086#define NETXEN_NIC_MSIX_ENABLED 0x04 1128#define NETXEN_NIC_MSIX_ENABLED 0x04
1087#define NETXEN_NIC_LRO_ENABLED 0x08 1129#define NETXEN_NIC_LRO_ENABLED 0x08
1088#define NETXEN_NIC_BRIDGE_ENABLED 0X10 1130#define NETXEN_NIC_BRIDGE_ENABLED 0X10
1131#define NETXEN_NIC_DIAG_ENABLED 0x20
1089#define NETXEN_IS_MSI_FAMILY(adapter) \ 1132#define NETXEN_IS_MSI_FAMILY(adapter) \
1090 ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED)) 1133 ((adapter)->flags & (NETXEN_NIC_MSI_ENABLED | NETXEN_NIC_MSIX_ENABLED))
1091 1134
@@ -1115,10 +1158,6 @@ struct netxen_adapter {
1115 struct pci_dev *pdev; 1158 struct pci_dev *pdev;
1116 struct list_head mac_list; 1159 struct list_head mac_list;
1117 1160
1118 u32 curr_window;
1119 u32 crb_win;
1120 rwlock_t adapter_lock;
1121
1122 spinlock_t tx_clean_lock; 1161 spinlock_t tx_clean_lock;
1123 1162
1124 u16 num_txd; 1163 u16 num_txd;
@@ -1180,11 +1219,10 @@ struct netxen_adapter {
1180 u32 (*crb_read)(struct netxen_adapter *, ulong); 1219 u32 (*crb_read)(struct netxen_adapter *, ulong);
1181 int (*crb_write)(struct netxen_adapter *, ulong, u32); 1220 int (*crb_write)(struct netxen_adapter *, ulong, u32);
1182 1221
1183 int (*pci_mem_read)(struct netxen_adapter *, u64, void *, int); 1222 int (*pci_mem_read)(struct netxen_adapter *, u64, u64 *);
1184 int (*pci_mem_write)(struct netxen_adapter *, u64, void *, int); 1223 int (*pci_mem_write)(struct netxen_adapter *, u64, u64);
1185 1224
1186 unsigned long (*pci_set_window)(struct netxen_adapter *, 1225 int (*pci_set_window)(struct netxen_adapter *, u64, u32 *);
1187 unsigned long long);
1188 1226
1189 u32 (*io_read)(struct netxen_adapter *, void __iomem *); 1227 u32 (*io_read)(struct netxen_adapter *, void __iomem *);
1190 void (*io_write)(struct netxen_adapter *, void __iomem *, u32); 1228 void (*io_write)(struct netxen_adapter *, void __iomem *, u32);
@@ -1203,12 +1241,10 @@ struct netxen_adapter {
1203 1241
1204 struct work_struct tx_timeout_task; 1242 struct work_struct tx_timeout_task;
1205 1243
1206 struct net_device_stats net_stats;
1207
1208 nx_nic_intr_coalesce_t coal; 1244 nx_nic_intr_coalesce_t coal;
1209 1245
1210 unsigned long state; 1246 unsigned long state;
1211 u32 resv5; 1247 __le32 file_prd_off; /*File fw product offset*/
1212 u32 fw_version; 1248 u32 fw_version;
1213 const struct firmware *fw; 1249 const struct firmware *fw;
1214}; 1250};
@@ -1271,7 +1307,7 @@ int netxen_load_firmware(struct netxen_adapter *adapter);
1271int netxen_need_fw_reset(struct netxen_adapter *adapter); 1307int netxen_need_fw_reset(struct netxen_adapter *adapter);
1272void netxen_request_firmware(struct netxen_adapter *adapter); 1308void netxen_request_firmware(struct netxen_adapter *adapter);
1273void netxen_release_firmware(struct netxen_adapter *adapter); 1309void netxen_release_firmware(struct netxen_adapter *adapter);
1274int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose); 1310int netxen_pinit_from_rom(struct netxen_adapter *adapter);
1275 1311
1276int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp); 1312int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp);
1277int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr, 1313int netxen_rom_fast_read_words(struct netxen_adapter *adapter, int addr,
diff --git a/drivers/net/netxen/netxen_nic_ethtool.c b/drivers/net/netxen/netxen_nic_ethtool.c
index 714f38791a9a..c86095eb5d9e 100644
--- a/drivers/net/netxen/netxen_nic_ethtool.c
+++ b/drivers/net/netxen/netxen_nic_ethtool.c
@@ -85,11 +85,9 @@ netxen_nic_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *drvinfo)
85 85
86 strncpy(drvinfo->driver, netxen_nic_driver_name, 32); 86 strncpy(drvinfo->driver, netxen_nic_driver_name, 32);
87 strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32); 87 strncpy(drvinfo->version, NETXEN_NIC_LINUX_VERSIONID, 32);
88 read_lock(&adapter->adapter_lock);
89 fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); 88 fw_major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
90 fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR); 89 fw_minor = NXRD32(adapter, NETXEN_FW_VERSION_MINOR);
91 fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB); 90 fw_build = NXRD32(adapter, NETXEN_FW_VERSION_SUB);
92 read_unlock(&adapter->adapter_lock);
93 sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build); 91 sprintf(drvinfo->fw_version, "%d.%d.%d", fw_major, fw_minor, fw_build);
94 92
95 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32); 93 strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
@@ -690,8 +688,8 @@ static int netxen_nic_reg_test(struct net_device *dev)
690 u32 data_read, data_written; 688 u32 data_read, data_written;
691 689
692 data_read = NXRD32(adapter, NETXEN_PCIX_PH_REG(0)); 690 data_read = NXRD32(adapter, NETXEN_PCIX_PH_REG(0));
693 if ((data_read & 0xffff) != PHAN_VENDOR_ID) 691 if ((data_read & 0xffff) != adapter->pdev->vendor)
694 return 1; 692 return 1;
695 693
696 data_written = (u32)0xa5a5a5a5; 694 data_written = (u32)0xa5a5a5a5;
697 695
diff --git a/drivers/net/netxen/netxen_nic_hdr.h b/drivers/net/netxen/netxen_nic_hdr.h
index 1c46da632125..a39155d61bad 100644
--- a/drivers/net/netxen/netxen_nic_hdr.h
+++ b/drivers/net/netxen/netxen_nic_hdr.h
@@ -662,40 +662,51 @@ enum {
662#define NETXEN_NIU_AP_STATION_ADDR_0(I) (NETXEN_CRB_NIU+0xa0040+(I)*0x10000) 662#define NETXEN_NIU_AP_STATION_ADDR_0(I) (NETXEN_CRB_NIU+0xa0040+(I)*0x10000)
663#define NETXEN_NIU_AP_STATION_ADDR_1(I) (NETXEN_CRB_NIU+0xa0044+(I)*0x10000) 663#define NETXEN_NIU_AP_STATION_ADDR_1(I) (NETXEN_CRB_NIU+0xa0044+(I)*0x10000)
664 664
665
666#define TEST_AGT_CTRL (0x00)
667
668#define TA_CTL_START 1
669#define TA_CTL_ENABLE 2
670#define TA_CTL_WRITE 4
671#define TA_CTL_BUSY 8
672
665/* 673/*
666 * Register offsets for MN 674 * Register offsets for MN
667 */ 675 */
668#define MIU_CONTROL (0x000) 676#define MIU_TEST_AGT_BASE (0x90)
669#define MIU_TEST_AGT_CTRL (0x090) 677
670#define MIU_TEST_AGT_ADDR_LO (0x094) 678#define MIU_TEST_AGT_ADDR_LO (0x04)
671#define MIU_TEST_AGT_ADDR_HI (0x098) 679#define MIU_TEST_AGT_ADDR_HI (0x08)
672#define MIU_TEST_AGT_WRDATA_LO (0x0a0) 680#define MIU_TEST_AGT_WRDATA_LO (0x10)
673#define MIU_TEST_AGT_WRDATA_HI (0x0a4) 681#define MIU_TEST_AGT_WRDATA_HI (0x14)
674#define MIU_TEST_AGT_WRDATA(i) (0x0a0+(4*(i))) 682#define MIU_TEST_AGT_WRDATA_UPPER_LO (0x20)
675#define MIU_TEST_AGT_RDDATA_LO (0x0a8) 683#define MIU_TEST_AGT_WRDATA_UPPER_HI (0x24)
676#define MIU_TEST_AGT_RDDATA_HI (0x0ac) 684#define MIU_TEST_AGT_WRDATA(i) (0x10+(0x10*((i)>>1))+(4*((i)&1)))
677#define MIU_TEST_AGT_RDDATA(i) (0x0a8+(4*(i))) 685#define MIU_TEST_AGT_RDDATA_LO (0x18)
678#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8 686#define MIU_TEST_AGT_RDDATA_HI (0x1c)
679#define MIU_TEST_AGT_UPPER_ADDR(off) (0) 687#define MIU_TEST_AGT_RDDATA_UPPER_LO (0x28)
680 688#define MIU_TEST_AGT_RDDATA_UPPER_HI (0x2c)
681/* MIU_TEST_AGT_CTRL flags. work for SIU as well */ 689#define MIU_TEST_AGT_RDDATA(i) (0x18+(0x10*((i)>>1))+(4*((i)&1)))
682#define MIU_TA_CTL_START 1 690
683#define MIU_TA_CTL_ENABLE 2 691#define MIU_TEST_AGT_ADDR_MASK 0xfffffff8
684#define MIU_TA_CTL_WRITE 4 692#define MIU_TEST_AGT_UPPER_ADDR(off) (0)
685#define MIU_TA_CTL_BUSY 8 693
686 694/*
687#define SIU_TEST_AGT_CTRL (0x060) 695 * Register offsets for MS
688#define SIU_TEST_AGT_ADDR_LO (0x064) 696 */
689#define SIU_TEST_AGT_ADDR_HI (0x078) 697#define SIU_TEST_AGT_BASE (0x60)
690#define SIU_TEST_AGT_WRDATA_LO (0x068) 698
691#define SIU_TEST_AGT_WRDATA_HI (0x06c) 699#define SIU_TEST_AGT_ADDR_LO (0x04)
692#define SIU_TEST_AGT_WRDATA(i) (0x068+(4*(i))) 700#define SIU_TEST_AGT_ADDR_HI (0x18)
693#define SIU_TEST_AGT_RDDATA_LO (0x070) 701#define SIU_TEST_AGT_WRDATA_LO (0x08)
694#define SIU_TEST_AGT_RDDATA_HI (0x074) 702#define SIU_TEST_AGT_WRDATA_HI (0x0c)
695#define SIU_TEST_AGT_RDDATA(i) (0x070+(4*(i))) 703#define SIU_TEST_AGT_WRDATA(i) (0x08+(4*(i)))
696 704#define SIU_TEST_AGT_RDDATA_LO (0x10)
697#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8 705#define SIU_TEST_AGT_RDDATA_HI (0x14)
698#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22) 706#define SIU_TEST_AGT_RDDATA(i) (0x10+(4*(i)))
707
708#define SIU_TEST_AGT_ADDR_MASK 0x3ffff8
709#define SIU_TEST_AGT_UPPER_ADDR(off) ((off)>>22)
699 710
700/* XG Link status */ 711/* XG Link status */
701#define XG_LINK_UP 0x10 712#define XG_LINK_UP 0x10
@@ -857,6 +868,9 @@ enum {
857 (PCIX_SN_WINDOW_F0 + (0x20 * (func))) :\ 868 (PCIX_SN_WINDOW_F0 + (0x20 * (func))) :\
858 (PCIX_SN_WINDOW_F4 + (0x10 * ((func)-4)))) 869 (PCIX_SN_WINDOW_F4 + (0x10 * ((func)-4))))
859 870
871#define PCIX_OCM_WINDOW (0x10800)
872#define PCIX_OCM_WINDOW_REG(func) (PCIX_OCM_WINDOW + 0x20 * (func))
873
860#define PCIX_TARGET_STATUS (0x10118) 874#define PCIX_TARGET_STATUS (0x10118)
861#define PCIX_TARGET_STATUS_F1 (0x10160) 875#define PCIX_TARGET_STATUS_F1 (0x10160)
862#define PCIX_TARGET_STATUS_F2 (0x10164) 876#define PCIX_TARGET_STATUS_F2 (0x10164)
diff --git a/drivers/net/netxen/netxen_nic_hw.c b/drivers/net/netxen/netxen_nic_hw.c
index 3185a98b0917..b3054c6cc608 100644
--- a/drivers/net/netxen/netxen_nic_hw.c
+++ b/drivers/net/netxen/netxen_nic_hw.c
@@ -31,6 +31,7 @@
31#define MASK(n) ((1ULL<<(n))-1) 31#define MASK(n) ((1ULL<<(n))-1)
32#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff)) 32#define MN_WIN(addr) (((addr & 0x1fc0000) >> 1) | ((addr >> 25) & 0x3ff))
33#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff)) 33#define OCM_WIN(addr) (((addr & 0x1ff0000) >> 1) | ((addr >> 25) & 0x3ff))
34#define OCM_WIN_P3P(addr) (addr & 0xffc0000)
34#define MS_WIN(addr) (addr & 0x0ffc0000) 35#define MS_WIN(addr) (addr & 0x0ffc0000)
35 36
36#define GET_MEM_OFFS_2M(addr) (addr & MASK(18)) 37#define GET_MEM_OFFS_2M(addr) (addr & MASK(18))
@@ -41,6 +42,11 @@
41#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000)) 42#define CRB_HI(off) ((crb_hub_agt[CRB_BLK(off)] << 20) | ((off) & 0xf0000))
42#define CRB_INDIRECT_2M (0x1e0000UL) 43#define CRB_INDIRECT_2M (0x1e0000UL)
43 44
45static void netxen_nic_io_write_128M(struct netxen_adapter *adapter,
46 void __iomem *addr, u32 data);
47static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter,
48 void __iomem *addr);
49
44#ifndef readq 50#ifndef readq
45static inline u64 readq(void __iomem *addr) 51static inline u64 readq(void __iomem *addr)
46{ 52{
@@ -326,7 +332,7 @@ netxen_pcie_sem_lock(struct netxen_adapter *adapter, int sem, u32 id_reg)
326 if (done == 1) 332 if (done == 1)
327 break; 333 break;
328 if (++timeout >= NETXEN_PCIE_SEM_TIMEOUT) 334 if (++timeout >= NETXEN_PCIE_SEM_TIMEOUT)
329 return -1; 335 return -EIO;
330 msleep(1); 336 msleep(1);
331 } 337 }
332 338
@@ -1046,89 +1052,71 @@ int netxen_p3_get_mac_addr(struct netxen_adapter *adapter, __le64 *mac)
1046 * Changes the CRB window to the specified window. 1052 * Changes the CRB window to the specified window.
1047 */ 1053 */
1048static void 1054static void
1049netxen_nic_pci_change_crbwindow_128M(struct netxen_adapter *adapter, u32 wndw) 1055netxen_nic_pci_set_crbwindow_128M(struct netxen_adapter *adapter,
1056 u32 window)
1050{ 1057{
1051 void __iomem *offset; 1058 void __iomem *offset;
1052 u32 tmp; 1059 int count = 10;
1053 int count = 0; 1060 u8 func = adapter->ahw.pci_func;
1054 uint8_t func = adapter->ahw.pci_func;
1055 1061
1056 if (adapter->curr_window == wndw) 1062 if (adapter->ahw.crb_win == window)
1057 return; 1063 return;
1058 /* 1064
1059 * Move the CRB window.
1060 * We need to write to the "direct access" region of PCI
1061 * to avoid a race condition where the window register has
1062 * not been successfully written across CRB before the target
1063 * register address is received by PCI. The direct region bypasses
1064 * the CRB bus.
1065 */
1066 offset = PCI_OFFSET_SECOND_RANGE(adapter, 1065 offset = PCI_OFFSET_SECOND_RANGE(adapter,
1067 NETXEN_PCIX_PH_REG(PCIE_CRB_WINDOW_REG(func))); 1066 NETXEN_PCIX_PH_REG(PCIE_CRB_WINDOW_REG(func)));
1068 1067
1069 if (wndw & 0x1) 1068 writel(window, offset);
1070 wndw = NETXEN_WINDOW_ONE; 1069 do {
1070 if (window == readl(offset))
1071 break;
1071 1072
1072 writel(wndw, offset); 1073 if (printk_ratelimit())
1074 dev_warn(&adapter->pdev->dev,
1075 "failed to set CRB window to %d\n",
1076 (window == NETXEN_WINDOW_ONE));
1077 udelay(1);
1073 1078
1074 /* MUST make sure window is set before we forge on... */ 1079 } while (--count > 0);
1075 while ((tmp = readl(offset)) != wndw) {
1076 printk(KERN_WARNING "%s: %s WARNING: CRB window value not "
1077 "registered properly: 0x%08x.\n",
1078 netxen_nic_driver_name, __func__, tmp);
1079 mdelay(1);
1080 if (count >= 10)
1081 break;
1082 count++;
1083 }
1084 1080
1085 if (wndw == NETXEN_WINDOW_ONE) 1081 if (count > 0)
1086 adapter->curr_window = 1; 1082 adapter->ahw.crb_win = window;
1087 else
1088 adapter->curr_window = 0;
1089} 1083}
1090 1084
1091/* 1085/*
1092 * Return -1 if off is not valid, 1086 * Returns < 0 if off is not valid,
1093 * 1 if window access is needed. 'off' is set to offset from 1087 * 1 if window access is needed. 'off' is set to offset from
1094 * CRB space in 128M pci map 1088 * CRB space in 128M pci map
1095 * 0 if no window access is needed. 'off' is set to 2M addr 1089 * 0 if no window access is needed. 'off' is set to 2M addr
1096 * In: 'off' is offset from base in 128M pci map 1090 * In: 'off' is offset from base in 128M pci map
1097 */ 1091 */
1098static int 1092static int
1099netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter, ulong *off) 1093netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter,
1094 ulong off, void __iomem **addr)
1100{ 1095{
1101 crb_128M_2M_sub_block_map_t *m; 1096 crb_128M_2M_sub_block_map_t *m;
1102 1097
1103 1098
1104 if (*off >= NETXEN_CRB_MAX) 1099 if ((off >= NETXEN_CRB_MAX) || (off < NETXEN_PCI_CRBSPACE))
1105 return -1; 1100 return -EINVAL;
1106
1107 if (*off >= NETXEN_PCI_CAMQM && (*off < NETXEN_PCI_CAMQM_2M_END)) {
1108 *off = (*off - NETXEN_PCI_CAMQM) + NETXEN_PCI_CAMQM_2M_BASE +
1109 (ulong)adapter->ahw.pci_base0;
1110 return 0;
1111 }
1112
1113 if (*off < NETXEN_PCI_CRBSPACE)
1114 return -1;
1115 1101
1116 *off -= NETXEN_PCI_CRBSPACE; 1102 off -= NETXEN_PCI_CRBSPACE;
1117 1103
1118 /* 1104 /*
1119 * Try direct map 1105 * Try direct map
1120 */ 1106 */
1121 m = &crb_128M_2M_map[CRB_BLK(*off)].sub_block[CRB_SUBBLK(*off)]; 1107 m = &crb_128M_2M_map[CRB_BLK(off)].sub_block[CRB_SUBBLK(off)];
1122 1108
1123 if (m->valid && (m->start_128M <= *off) && (m->end_128M > *off)) { 1109 if (m->valid && (m->start_128M <= off) && (m->end_128M > off)) {
1124 *off = *off + m->start_2M - m->start_128M + 1110 *addr = adapter->ahw.pci_base0 + m->start_2M +
1125 (ulong)adapter->ahw.pci_base0; 1111 (off - m->start_128M);
1126 return 0; 1112 return 0;
1127 } 1113 }
1128 1114
1129 /* 1115 /*
1130 * Not in direct map, use crb window 1116 * Not in direct map, use crb window
1131 */ 1117 */
1118 *addr = adapter->ahw.pci_base0 + CRB_INDIRECT_2M +
1119 (off & MASK(16));
1132 return 1; 1120 return 1;
1133} 1121}
1134 1122
@@ -1138,24 +1126,26 @@ netxen_nic_pci_get_crb_addr_2M(struct netxen_adapter *adapter, ulong *off)
1138 * side effect: lock crb window 1126 * side effect: lock crb window
1139 */ 1127 */
1140static void 1128static void
1141netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong *off) 1129netxen_nic_pci_set_crbwindow_2M(struct netxen_adapter *adapter, ulong off)
1142{ 1130{
1143 u32 win_read; 1131 u32 window;
1132 void __iomem *addr = adapter->ahw.pci_base0 + CRB_WINDOW_2M;
1144 1133
1145 adapter->crb_win = CRB_HI(*off); 1134 off -= NETXEN_PCI_CRBSPACE;
1146 writel(adapter->crb_win, (adapter->ahw.pci_base0 + CRB_WINDOW_2M)); 1135
1147 /* 1136 window = CRB_HI(off);
1148 * Read back value to make sure write has gone through before trying 1137
1149 * to use it. 1138 if (adapter->ahw.crb_win == window)
1150 */ 1139 return;
1151 win_read = readl(adapter->ahw.pci_base0 + CRB_WINDOW_2M); 1140
1152 if (win_read != adapter->crb_win) { 1141 writel(window, addr);
1153 printk(KERN_ERR "%s: Written crbwin (0x%x) != " 1142 if (readl(addr) != window) {
1154 "Read crbwin (0x%x), off=0x%lx\n", 1143 if (printk_ratelimit())
1155 __func__, adapter->crb_win, win_read, *off); 1144 dev_warn(&adapter->pdev->dev,
1145 "failed to set CRB window to %d off 0x%lx\n",
1146 window, off);
1156 } 1147 }
1157 *off = (*off & MASK(16)) + CRB_INDIRECT_2M + 1148 adapter->ahw.crb_win = window;
1158 (ulong)adapter->ahw.pci_base0;
1159} 1149}
1160 1150
1161static int 1151static int
@@ -1172,16 +1162,15 @@ netxen_nic_hw_write_wx_128M(struct netxen_adapter *adapter, ulong off, u32 data)
1172 BUG_ON(!addr); 1162 BUG_ON(!addr);
1173 1163
1174 if (ADDR_IN_WINDOW1(off)) { /* Window 1 */ 1164 if (ADDR_IN_WINDOW1(off)) { /* Window 1 */
1175 read_lock(&adapter->adapter_lock); 1165 netxen_nic_io_write_128M(adapter, addr, data);
1176 writel(data, addr);
1177 read_unlock(&adapter->adapter_lock);
1178 } else { /* Window 0 */ 1166 } else { /* Window 0 */
1179 write_lock_irqsave(&adapter->adapter_lock, flags); 1167 write_lock_irqsave(&adapter->ahw.crb_lock, flags);
1180 addr = pci_base_offset(adapter, off); 1168 addr = pci_base_offset(adapter, off);
1181 netxen_nic_pci_change_crbwindow_128M(adapter, 0); 1169 netxen_nic_pci_set_crbwindow_128M(adapter, 0);
1182 writel(data, addr); 1170 writel(data, addr);
1183 netxen_nic_pci_change_crbwindow_128M(adapter, 1); 1171 netxen_nic_pci_set_crbwindow_128M(adapter,
1184 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1172 NETXEN_WINDOW_ONE);
1173 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
1185 } 1174 }
1186 1175
1187 return 0; 1176 return 0;
@@ -1202,15 +1191,14 @@ netxen_nic_hw_read_wx_128M(struct netxen_adapter *adapter, ulong off)
1202 BUG_ON(!addr); 1191 BUG_ON(!addr);
1203 1192
1204 if (ADDR_IN_WINDOW1(off)) { /* Window 1 */ 1193 if (ADDR_IN_WINDOW1(off)) { /* Window 1 */
1205 read_lock(&adapter->adapter_lock); 1194 data = netxen_nic_io_read_128M(adapter, addr);
1206 data = readl(addr);
1207 read_unlock(&adapter->adapter_lock);
1208 } else { /* Window 0 */ 1195 } else { /* Window 0 */
1209 write_lock_irqsave(&adapter->adapter_lock, flags); 1196 write_lock_irqsave(&adapter->ahw.crb_lock, flags);
1210 netxen_nic_pci_change_crbwindow_128M(adapter, 0); 1197 netxen_nic_pci_set_crbwindow_128M(adapter, 0);
1211 data = readl(addr); 1198 data = readl(addr);
1212 netxen_nic_pci_change_crbwindow_128M(adapter, 1); 1199 netxen_nic_pci_set_crbwindow_128M(adapter,
1213 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1200 NETXEN_WINDOW_ONE);
1201 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
1214 } 1202 }
1215 1203
1216 return data; 1204 return data;
@@ -1221,28 +1209,30 @@ netxen_nic_hw_write_wx_2M(struct netxen_adapter *adapter, ulong off, u32 data)
1221{ 1209{
1222 unsigned long flags; 1210 unsigned long flags;
1223 int rv; 1211 int rv;
1212 void __iomem *addr = NULL;
1224 1213
1225 rv = netxen_nic_pci_get_crb_addr_2M(adapter, &off); 1214 rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr);
1226 1215
1227 if (rv == -1) { 1216 if (rv == 0) {
1228 printk(KERN_ERR "%s: invalid offset: 0x%016lx\n", 1217 writel(data, addr);
1229 __func__, off); 1218 return 0;
1230 dump_stack();
1231 return -1;
1232 } 1219 }
1233 1220
1234 if (rv == 1) { 1221 if (rv > 0) {
1235 write_lock_irqsave(&adapter->adapter_lock, flags); 1222 /* indirect access */
1223 write_lock_irqsave(&adapter->ahw.crb_lock, flags);
1236 crb_win_lock(adapter); 1224 crb_win_lock(adapter);
1237 netxen_nic_pci_set_crbwindow_2M(adapter, &off); 1225 netxen_nic_pci_set_crbwindow_2M(adapter, off);
1238 writel(data, (void __iomem *)off); 1226 writel(data, addr);
1239 crb_win_unlock(adapter); 1227 crb_win_unlock(adapter);
1240 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1228 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
1241 } else 1229 return 0;
1242 writel(data, (void __iomem *)off); 1230 }
1243
1244 1231
1245 return 0; 1232 dev_err(&adapter->pdev->dev,
1233 "%s: invalid offset: 0x%016lx\n", __func__, off);
1234 dump_stack();
1235 return -EIO;
1246} 1236}
1247 1237
1248static u32 1238static u32
@@ -1251,102 +1241,37 @@ netxen_nic_hw_read_wx_2M(struct netxen_adapter *adapter, ulong off)
1251 unsigned long flags; 1241 unsigned long flags;
1252 int rv; 1242 int rv;
1253 u32 data; 1243 u32 data;
1244 void __iomem *addr = NULL;
1254 1245
1255 rv = netxen_nic_pci_get_crb_addr_2M(adapter, &off); 1246 rv = netxen_nic_pci_get_crb_addr_2M(adapter, off, &addr);
1256 1247
1257 if (rv == -1) { 1248 if (rv == 0)
1258 printk(KERN_ERR "%s: invalid offset: 0x%016lx\n", 1249 return readl(addr);
1259 __func__, off);
1260 dump_stack();
1261 return -1;
1262 }
1263 1250
1264 if (rv == 1) { 1251 if (rv > 0) {
1265 write_lock_irqsave(&adapter->adapter_lock, flags); 1252 /* indirect access */
1253 write_lock_irqsave(&adapter->ahw.crb_lock, flags);
1266 crb_win_lock(adapter); 1254 crb_win_lock(adapter);
1267 netxen_nic_pci_set_crbwindow_2M(adapter, &off); 1255 netxen_nic_pci_set_crbwindow_2M(adapter, off);
1268 data = readl((void __iomem *)off); 1256 data = readl(addr);
1269 crb_win_unlock(adapter); 1257 crb_win_unlock(adapter);
1270 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1258 write_unlock_irqrestore(&adapter->ahw.crb_lock, flags);
1271 } else 1259 return data;
1272 data = readl((void __iomem *)off);
1273
1274 return data;
1275}
1276
1277static int netxen_pci_set_window_warning_count;
1278
1279static unsigned long
1280netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter,
1281 unsigned long long addr)
1282{
1283 void __iomem *offset;
1284 int window;
1285 unsigned long long qdr_max;
1286 uint8_t func = adapter->ahw.pci_func;
1287
1288 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
1289 qdr_max = NETXEN_ADDR_QDR_NET_MAX_P2;
1290 } else {
1291 qdr_max = NETXEN_ADDR_QDR_NET_MAX_P3;
1292 } 1260 }
1293 1261
1294 if (ADDR_IN_RANGE(addr, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { 1262 dev_err(&adapter->pdev->dev,
1295 /* DDR network side */ 1263 "%s: invalid offset: 0x%016lx\n", __func__, off);
1296 addr -= NETXEN_ADDR_DDR_NET; 1264 dump_stack();
1297 window = (addr >> 25) & 0x3ff; 1265 return -1;
1298 if (adapter->ahw.ddr_mn_window != window) {
1299 adapter->ahw.ddr_mn_window = window;
1300 offset = PCI_OFFSET_SECOND_RANGE(adapter,
1301 NETXEN_PCIX_PH_REG(PCIE_MN_WINDOW_REG(func)));
1302 writel(window, offset);
1303 /* MUST make sure window is set before we forge on... */
1304 readl(offset);
1305 }
1306 addr -= (window * NETXEN_WINDOW_ONE);
1307 addr += NETXEN_PCI_DDR_NET;
1308 } else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) {
1309 addr -= NETXEN_ADDR_OCM0;
1310 addr += NETXEN_PCI_OCM0;
1311 } else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
1312 addr -= NETXEN_ADDR_OCM1;
1313 addr += NETXEN_PCI_OCM1;
1314 } else if (ADDR_IN_RANGE(addr, NETXEN_ADDR_QDR_NET, qdr_max)) {
1315 /* QDR network side */
1316 addr -= NETXEN_ADDR_QDR_NET;
1317 window = (addr >> 22) & 0x3f;
1318 if (adapter->ahw.qdr_sn_window != window) {
1319 adapter->ahw.qdr_sn_window = window;
1320 offset = PCI_OFFSET_SECOND_RANGE(adapter,
1321 NETXEN_PCIX_PH_REG(PCIE_SN_WINDOW_REG(func)));
1322 writel((window << 22), offset);
1323 /* MUST make sure window is set before we forge on... */
1324 readl(offset);
1325 }
1326 addr -= (window * 0x400000);
1327 addr += NETXEN_PCI_QDR_NET;
1328 } else {
1329 /*
1330 * peg gdb frequently accesses memory that doesn't exist,
1331 * this limits the chit chat so debugging isn't slowed down.
1332 */
1333 if ((netxen_pci_set_window_warning_count++ < 8)
1334 || (netxen_pci_set_window_warning_count % 64 == 0))
1335 printk("%s: Warning:netxen_nic_pci_set_window()"
1336 " Unknown address range!\n",
1337 netxen_nic_driver_name);
1338 addr = -1UL;
1339 }
1340 return addr;
1341} 1266}
1342 1267
1343/* window 1 registers only */ 1268/* window 1 registers only */
1344static void netxen_nic_io_write_128M(struct netxen_adapter *adapter, 1269static void netxen_nic_io_write_128M(struct netxen_adapter *adapter,
1345 void __iomem *addr, u32 data) 1270 void __iomem *addr, u32 data)
1346{ 1271{
1347 read_lock(&adapter->adapter_lock); 1272 read_lock(&adapter->ahw.crb_lock);
1348 writel(data, addr); 1273 writel(data, addr);
1349 read_unlock(&adapter->adapter_lock); 1274 read_unlock(&adapter->ahw.crb_lock);
1350} 1275}
1351 1276
1352static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter, 1277static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter,
@@ -1354,9 +1279,9 @@ static u32 netxen_nic_io_read_128M(struct netxen_adapter *adapter,
1354{ 1279{
1355 u32 val; 1280 u32 val;
1356 1281
1357 read_lock(&adapter->adapter_lock); 1282 read_lock(&adapter->ahw.crb_lock);
1358 val = readl(addr); 1283 val = readl(addr);
1359 read_unlock(&adapter->adapter_lock); 1284 read_unlock(&adapter->ahw.crb_lock);
1360 1285
1361 return val; 1286 return val;
1362} 1287}
@@ -1376,488 +1301,437 @@ static u32 netxen_nic_io_read_2M(struct netxen_adapter *adapter,
1376void __iomem * 1301void __iomem *
1377netxen_get_ioaddr(struct netxen_adapter *adapter, u32 offset) 1302netxen_get_ioaddr(struct netxen_adapter *adapter, u32 offset)
1378{ 1303{
1379 ulong off = offset; 1304 void __iomem *addr = NULL;
1380 1305
1381 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 1306 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) {
1382 if (offset < NETXEN_CRB_PCIX_HOST2 && 1307 if ((offset < NETXEN_CRB_PCIX_HOST2) &&
1383 offset > NETXEN_CRB_PCIX_HOST) 1308 (offset > NETXEN_CRB_PCIX_HOST))
1384 return PCI_OFFSET_SECOND_RANGE(adapter, offset); 1309 addr = PCI_OFFSET_SECOND_RANGE(adapter, offset);
1385 return NETXEN_CRB_NORMALIZE(adapter, offset); 1310 else
1311 addr = NETXEN_CRB_NORMALIZE(adapter, offset);
1312 } else {
1313 WARN_ON(netxen_nic_pci_get_crb_addr_2M(adapter,
1314 offset, &addr));
1386 } 1315 }
1387 1316
1388 BUG_ON(netxen_nic_pci_get_crb_addr_2M(adapter, &off)); 1317 return addr;
1389 return (void __iomem *)off;
1390} 1318}
1391 1319
1392static unsigned long 1320static int
1393netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter, 1321netxen_nic_pci_set_window_128M(struct netxen_adapter *adapter,
1394 unsigned long long addr) 1322 u64 addr, u32 *start)
1395{ 1323{
1396 int window; 1324 if (ADDR_IN_RANGE(addr, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) {
1397 u32 win_read; 1325 *start = (addr - NETXEN_ADDR_OCM0 + NETXEN_PCI_OCM0);
1398 1326 return 0;
1399 if (ADDR_IN_RANGE(addr, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
1400 /* DDR network side */
1401 window = MN_WIN(addr);
1402 adapter->ahw.ddr_mn_window = window;
1403 NXWR32(adapter, adapter->ahw.mn_win_crb, window);
1404 win_read = NXRD32(adapter, adapter->ahw.mn_win_crb);
1405 if ((win_read << 17) != window) {
1406 printk(KERN_INFO "Written MNwin (0x%x) != "
1407 "Read MNwin (0x%x)\n", window, win_read);
1408 }
1409 addr = GET_MEM_OFFS_2M(addr) + NETXEN_PCI_DDR_NET;
1410 } else if (ADDR_IN_RANGE(addr, 1327 } else if (ADDR_IN_RANGE(addr,
1411 NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) { 1328 NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
1412 if ((addr & 0x00ff800) == 0xff800) { 1329 *start = (addr - NETXEN_ADDR_OCM1 + NETXEN_PCI_OCM1);
1413 printk("%s: QM access not handled.\n", __func__); 1330 return 0;
1414 addr = -1UL; 1331 }
1415 } 1332
1333 return -EIO;
1334}
1335
1336static int
1337netxen_nic_pci_set_window_2M(struct netxen_adapter *adapter,
1338 u64 addr, u32 *start)
1339{
1340 u32 window;
1341 struct pci_dev *pdev = adapter->pdev;
1416 1342
1343 if ((addr & 0x00ff800) == 0xff800) {
1344 if (printk_ratelimit())
1345 dev_warn(&pdev->dev, "QM access not handled\n");
1346 return -EIO;
1347 }
1348
1349 if (NX_IS_REVISION_P3P(adapter->ahw.revision_id))
1350 window = OCM_WIN_P3P(addr);
1351 else
1417 window = OCM_WIN(addr); 1352 window = OCM_WIN(addr);
1418 adapter->ahw.ddr_mn_window = window;
1419 NXWR32(adapter, adapter->ahw.mn_win_crb, window);
1420 win_read = NXRD32(adapter, adapter->ahw.mn_win_crb);
1421 if ((win_read >> 7) != window) {
1422 printk(KERN_INFO "%s: Written OCMwin (0x%x) != "
1423 "Read OCMwin (0x%x)\n",
1424 __func__, window, win_read);
1425 }
1426 addr = GET_MEM_OFFS_2M(addr) + NETXEN_PCI_OCM0_2M;
1427 1353
1428 } else if (ADDR_IN_RANGE(addr, 1354 writel(window, adapter->ahw.ocm_win_crb);
1429 NETXEN_ADDR_QDR_NET, NETXEN_ADDR_QDR_NET_MAX_P3)) { 1355 /* read back to flush */
1430 /* QDR network side */ 1356 readl(adapter->ahw.ocm_win_crb);
1431 window = MS_WIN(addr);
1432 adapter->ahw.qdr_sn_window = window;
1433 NXWR32(adapter, adapter->ahw.ms_win_crb, window);
1434 win_read = NXRD32(adapter, adapter->ahw.ms_win_crb);
1435 if (win_read != window) {
1436 printk(KERN_INFO "%s: Written MSwin (0x%x) != "
1437 "Read MSwin (0x%x)\n",
1438 __func__, window, win_read);
1439 }
1440 addr = GET_MEM_OFFS_2M(addr) + NETXEN_PCI_QDR_NET;
1441 1357
1442 } else { 1358 adapter->ahw.ocm_win = window;
1443 /* 1359 *start = NETXEN_PCI_OCM0_2M + GET_MEM_OFFS_2M(addr);
1444 * peg gdb frequently accesses memory that doesn't exist, 1360 return 0;
1445 * this limits the chit chat so debugging isn't slowed down.
1446 */
1447 if ((netxen_pci_set_window_warning_count++ < 8)
1448 || (netxen_pci_set_window_warning_count%64 == 0)) {
1449 printk("%s: Warning:%s Unknown address range!\n",
1450 __func__, netxen_nic_driver_name);
1451} 1361}
1452 addr = -1UL; 1362
1363static int
1364netxen_nic_pci_mem_access_direct(struct netxen_adapter *adapter, u64 off,
1365 u64 *data, int op)
1366{
1367 void __iomem *addr, *mem_ptr = NULL;
1368 resource_size_t mem_base;
1369 int ret = -EIO;
1370 u32 start;
1371
1372 spin_lock(&adapter->ahw.mem_lock);
1373
1374 ret = adapter->pci_set_window(adapter, off, &start);
1375 if (ret != 0)
1376 goto unlock;
1377
1378 addr = pci_base_offset(adapter, start);
1379 if (addr)
1380 goto noremap;
1381
1382 mem_base = pci_resource_start(adapter->pdev, 0) + (start & PAGE_MASK);
1383
1384 mem_ptr = ioremap(mem_base, PAGE_SIZE);
1385 if (mem_ptr == NULL) {
1386 ret = -EIO;
1387 goto unlock;
1453 } 1388 }
1454 return addr; 1389
1390 addr = mem_ptr + (start & (PAGE_SIZE - 1));
1391
1392noremap:
1393 if (op == 0) /* read */
1394 *data = readq(addr);
1395 else /* write */
1396 writeq(*data, addr);
1397
1398unlock:
1399 spin_unlock(&adapter->ahw.mem_lock);
1400
1401 if (mem_ptr)
1402 iounmap(mem_ptr);
1403 return ret;
1455} 1404}
1456 1405
1457#define MAX_CTL_CHECK 1000 1406#define MAX_CTL_CHECK 1000
1458 1407
1459static int 1408static int
1460netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter, 1409netxen_nic_pci_mem_write_128M(struct netxen_adapter *adapter,
1461 u64 off, void *data, int size) 1410 u64 off, u64 data)
1462{ 1411{
1463 unsigned long flags; 1412 int j, ret;
1464 int i, j, ret = 0, loop, sz[2], off0; 1413 u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo;
1465 uint32_t temp;
1466 uint64_t off8, tmpw, word[2] = {0, 0};
1467 void __iomem *mem_crb; 1414 void __iomem *mem_crb;
1468 1415
1469 if (size != 8) 1416 /* Only 64-bit aligned access */
1417 if (off & 7)
1470 return -EIO; 1418 return -EIO;
1471 1419
1420 /* P2 has different SIU and MIU test agent base addr */
1472 if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, 1421 if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
1473 NETXEN_ADDR_QDR_NET_MAX_P2)) { 1422 NETXEN_ADDR_QDR_NET_MAX_P2)) {
1474 mem_crb = pci_base_offset(adapter, NETXEN_CRB_QDR_NET); 1423 mem_crb = pci_base_offset(adapter,
1424 NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE);
1425 addr_hi = SIU_TEST_AGT_ADDR_HI;
1426 data_lo = SIU_TEST_AGT_WRDATA_LO;
1427 data_hi = SIU_TEST_AGT_WRDATA_HI;
1428 off_lo = off & SIU_TEST_AGT_ADDR_MASK;
1429 off_hi = SIU_TEST_AGT_UPPER_ADDR(off);
1475 goto correct; 1430 goto correct;
1476 } 1431 }
1477 1432
1478 if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { 1433 if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
1479 mem_crb = pci_base_offset(adapter, NETXEN_CRB_DDR_NET); 1434 mem_crb = pci_base_offset(adapter,
1435 NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
1436 addr_hi = MIU_TEST_AGT_ADDR_HI;
1437 data_lo = MIU_TEST_AGT_WRDATA_LO;
1438 data_hi = MIU_TEST_AGT_WRDATA_HI;
1439 off_lo = off & MIU_TEST_AGT_ADDR_MASK;
1440 off_hi = 0;
1480 goto correct; 1441 goto correct;
1481 } 1442 }
1482 1443
1483 return -EIO; 1444 if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) ||
1484 1445 ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
1485correct: 1446 if (adapter->ahw.pci_len0 != 0) {
1486 off8 = off & 0xfffffff8; 1447 return netxen_nic_pci_mem_access_direct(adapter,
1487 off0 = off & 0x7; 1448 off, &data, 1);
1488 sz[0] = (size < (8 - off0)) ? size : (8 - off0);
1489 sz[1] = size - sz[0];
1490 loop = ((off0 + size - 1) >> 3) + 1;
1491
1492 if ((size != 8) || (off0 != 0)) {
1493 for (i = 0; i < loop; i++) {
1494 if (adapter->pci_mem_read(adapter,
1495 off8 + (i << 3), &word[i], 8))
1496 return -1;
1497 } 1449 }
1498 } 1450 }
1499 1451
1500 switch (size) { 1452 return -EIO;
1501 case 1:
1502 tmpw = *((uint8_t *)data);
1503 break;
1504 case 2:
1505 tmpw = *((uint16_t *)data);
1506 break;
1507 case 4:
1508 tmpw = *((uint32_t *)data);
1509 break;
1510 case 8:
1511 default:
1512 tmpw = *((uint64_t *)data);
1513 break;
1514 }
1515 word[0] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8));
1516 word[0] |= tmpw << (off0 * 8);
1517 1453
1518 if (loop == 2) { 1454correct:
1519 word[1] &= ~(~0ULL << (sz[1] * 8)); 1455 spin_lock(&adapter->ahw.mem_lock);
1520 word[1] |= tmpw >> (sz[0] * 8); 1456 netxen_nic_pci_set_crbwindow_128M(adapter, 0);
1457
1458 writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1459 writel(off_hi, (mem_crb + addr_hi));
1460 writel(data & 0xffffffff, (mem_crb + data_lo));
1461 writel((data >> 32) & 0xffffffff, (mem_crb + data_hi));
1462 writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
1463 writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
1464 (mem_crb + TEST_AGT_CTRL));
1465
1466 for (j = 0; j < MAX_CTL_CHECK; j++) {
1467 temp = readl((mem_crb + TEST_AGT_CTRL));
1468 if ((temp & TA_CTL_BUSY) == 0)
1469 break;
1521 } 1470 }
1522 1471
1523 write_lock_irqsave(&adapter->adapter_lock, flags); 1472 if (j >= MAX_CTL_CHECK) {
1524 netxen_nic_pci_change_crbwindow_128M(adapter, 0); 1473 if (printk_ratelimit())
1525 1474 dev_err(&adapter->pdev->dev,
1526 for (i = 0; i < loop; i++) {
1527 writel((uint32_t)(off8 + (i << 3)),
1528 (mem_crb+MIU_TEST_AGT_ADDR_LO));
1529 writel(0,
1530 (mem_crb+MIU_TEST_AGT_ADDR_HI));
1531 writel(word[i] & 0xffffffff,
1532 (mem_crb+MIU_TEST_AGT_WRDATA_LO));
1533 writel((word[i] >> 32) & 0xffffffff,
1534 (mem_crb+MIU_TEST_AGT_WRDATA_HI));
1535 writel(MIU_TA_CTL_ENABLE|MIU_TA_CTL_WRITE,
1536 (mem_crb+MIU_TEST_AGT_CTRL));
1537 writel(MIU_TA_CTL_START|MIU_TA_CTL_ENABLE|MIU_TA_CTL_WRITE,
1538 (mem_crb+MIU_TEST_AGT_CTRL));
1539
1540 for (j = 0; j < MAX_CTL_CHECK; j++) {
1541 temp = readl(
1542 (mem_crb+MIU_TEST_AGT_CTRL));
1543 if ((temp & MIU_TA_CTL_BUSY) == 0)
1544 break;
1545 }
1546
1547 if (j >= MAX_CTL_CHECK) {
1548 if (printk_ratelimit())
1549 dev_err(&adapter->pdev->dev,
1550 "failed to write through agent\n"); 1475 "failed to write through agent\n");
1551 ret = -1; 1476 ret = -EIO;
1552 break; 1477 } else
1553 } 1478 ret = 0;
1554 }
1555 1479
1556 netxen_nic_pci_change_crbwindow_128M(adapter, 1); 1480 netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE);
1557 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1481 spin_unlock(&adapter->ahw.mem_lock);
1558 return ret; 1482 return ret;
1559} 1483}
1560 1484
1561static int 1485static int
1562netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter, 1486netxen_nic_pci_mem_read_128M(struct netxen_adapter *adapter,
1563 u64 off, void *data, int size) 1487 u64 off, u64 *data)
1564{ 1488{
1565 unsigned long flags; 1489 int j, ret;
1566 int i, j = 0, k, start, end, loop, sz[2], off0[2]; 1490 u32 temp, off_lo, off_hi, addr_hi, data_hi, data_lo;
1567 uint32_t temp; 1491 u64 val;
1568 uint64_t off8, val, word[2] = {0, 0};
1569 void __iomem *mem_crb; 1492 void __iomem *mem_crb;
1570 1493
1571 if (size != 8) 1494 /* Only 64-bit aligned access */
1495 if (off & 7)
1572 return -EIO; 1496 return -EIO;
1573 1497
1498 /* P2 has different SIU and MIU test agent base addr */
1574 if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, 1499 if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
1575 NETXEN_ADDR_QDR_NET_MAX_P2)) { 1500 NETXEN_ADDR_QDR_NET_MAX_P2)) {
1576 mem_crb = pci_base_offset(adapter, NETXEN_CRB_QDR_NET); 1501 mem_crb = pci_base_offset(adapter,
1502 NETXEN_CRB_QDR_NET+SIU_TEST_AGT_BASE);
1503 addr_hi = SIU_TEST_AGT_ADDR_HI;
1504 data_lo = SIU_TEST_AGT_RDDATA_LO;
1505 data_hi = SIU_TEST_AGT_RDDATA_HI;
1506 off_lo = off & SIU_TEST_AGT_ADDR_MASK;
1507 off_hi = SIU_TEST_AGT_UPPER_ADDR(off);
1577 goto correct; 1508 goto correct;
1578 } 1509 }
1579 1510
1580 if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { 1511 if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
1581 mem_crb = pci_base_offset(adapter, NETXEN_CRB_DDR_NET); 1512 mem_crb = pci_base_offset(adapter,
1513 NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
1514 addr_hi = MIU_TEST_AGT_ADDR_HI;
1515 data_lo = MIU_TEST_AGT_RDDATA_LO;
1516 data_hi = MIU_TEST_AGT_RDDATA_HI;
1517 off_lo = off & MIU_TEST_AGT_ADDR_MASK;
1518 off_hi = 0;
1582 goto correct; 1519 goto correct;
1583 } 1520 }
1584 1521
1522 if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX) ||
1523 ADDR_IN_RANGE(off, NETXEN_ADDR_OCM1, NETXEN_ADDR_OCM1_MAX)) {
1524 if (adapter->ahw.pci_len0 != 0) {
1525 return netxen_nic_pci_mem_access_direct(adapter,
1526 off, data, 0);
1527 }
1528 }
1529
1585 return -EIO; 1530 return -EIO;
1586 1531
1587correct: 1532correct:
1588 off8 = off & 0xfffffff8; 1533 spin_lock(&adapter->ahw.mem_lock);
1589 off0[0] = off & 0x7; 1534 netxen_nic_pci_set_crbwindow_128M(adapter, 0);
1590 off0[1] = 0;
1591 sz[0] = (size < (8 - off0[0])) ? size : (8 - off0[0]);
1592 sz[1] = size - sz[0];
1593 loop = ((off0[0] + size - 1) >> 3) + 1;
1594
1595 write_lock_irqsave(&adapter->adapter_lock, flags);
1596 netxen_nic_pci_change_crbwindow_128M(adapter, 0);
1597
1598 for (i = 0; i < loop; i++) {
1599 writel((uint32_t)(off8 + (i << 3)),
1600 (mem_crb+MIU_TEST_AGT_ADDR_LO));
1601 writel(0,
1602 (mem_crb+MIU_TEST_AGT_ADDR_HI));
1603 writel(MIU_TA_CTL_ENABLE,
1604 (mem_crb+MIU_TEST_AGT_CTRL));
1605 writel(MIU_TA_CTL_START|MIU_TA_CTL_ENABLE,
1606 (mem_crb+MIU_TEST_AGT_CTRL));
1607 1535
1608 for (j = 0; j < MAX_CTL_CHECK; j++) { 1536 writel(off_lo, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1609 temp = readl( 1537 writel(off_hi, (mem_crb + addr_hi));
1610 (mem_crb+MIU_TEST_AGT_CTRL)); 1538 writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
1611 if ((temp & MIU_TA_CTL_BUSY) == 0) 1539 writel((TA_CTL_START|TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
1612 break;
1613 }
1614 1540
1615 if (j >= MAX_CTL_CHECK) { 1541 for (j = 0; j < MAX_CTL_CHECK; j++) {
1616 if (printk_ratelimit()) 1542 temp = readl(mem_crb + TEST_AGT_CTRL);
1617 dev_err(&adapter->pdev->dev, 1543 if ((temp & TA_CTL_BUSY) == 0)
1618 "failed to read through agent\n");
1619 break; 1544 break;
1620 }
1621
1622 start = off0[i] >> 2;
1623 end = (off0[i] + sz[i] - 1) >> 2;
1624 for (k = start; k <= end; k++) {
1625 word[i] |= ((uint64_t) readl(
1626 (mem_crb +
1627 MIU_TEST_AGT_RDDATA(k))) << (32*k));
1628 }
1629 } 1545 }
1630 1546
1631 netxen_nic_pci_change_crbwindow_128M(adapter, 1); 1547 if (j >= MAX_CTL_CHECK) {
1632 write_unlock_irqrestore(&adapter->adapter_lock, flags); 1548 if (printk_ratelimit())
1633 1549 dev_err(&adapter->pdev->dev,
1634 if (j >= MAX_CTL_CHECK) 1550 "failed to read through agent\n");
1635 return -1; 1551 ret = -EIO;
1636
1637 if (sz[0] == 8) {
1638 val = word[0];
1639 } else { 1552 } else {
1640 val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) |
1641 ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8));
1642 }
1643 1553
1644 switch (size) { 1554 temp = readl(mem_crb + data_hi);
1645 case 1: 1555 val = ((u64)temp << 32);
1646 *(uint8_t *)data = val; 1556 val |= readl(mem_crb + data_lo);
1647 break; 1557 *data = val;
1648 case 2: 1558 ret = 0;
1649 *(uint16_t *)data = val;
1650 break;
1651 case 4:
1652 *(uint32_t *)data = val;
1653 break;
1654 case 8:
1655 *(uint64_t *)data = val;
1656 break;
1657 } 1559 }
1658 return 0; 1560
1561 netxen_nic_pci_set_crbwindow_128M(adapter, NETXEN_WINDOW_ONE);
1562 spin_unlock(&adapter->ahw.mem_lock);
1563
1564 return ret;
1659} 1565}
1660 1566
1661static int 1567static int
1662netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter, 1568netxen_nic_pci_mem_write_2M(struct netxen_adapter *adapter,
1663 u64 off, void *data, int size) 1569 u64 off, u64 data)
1664{ 1570{
1665 int i, j, ret = 0, loop, sz[2], off0; 1571 int i, j, ret;
1666 uint32_t temp; 1572 u32 temp, off8;
1667 uint64_t off8, tmpw, word[2] = {0, 0}; 1573 u64 stride;
1668 void __iomem *mem_crb; 1574 void __iomem *mem_crb;
1669 1575
1670 if (size != 8) 1576 /* Only 64-bit aligned access */
1577 if (off & 7)
1671 return -EIO; 1578 return -EIO;
1672 1579
1580 /* P3 onward, test agent base for MIU and SIU is same */
1673 if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, 1581 if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
1674 NETXEN_ADDR_QDR_NET_MAX_P3)) { 1582 NETXEN_ADDR_QDR_NET_MAX_P3)) {
1675 mem_crb = netxen_get_ioaddr(adapter, NETXEN_CRB_QDR_NET); 1583 mem_crb = netxen_get_ioaddr(adapter,
1584 NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE);
1676 goto correct; 1585 goto correct;
1677 } 1586 }
1678 1587
1679 if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { 1588 if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
1680 mem_crb = netxen_get_ioaddr(adapter, NETXEN_CRB_DDR_NET); 1589 mem_crb = netxen_get_ioaddr(adapter,
1590 NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
1681 goto correct; 1591 goto correct;
1682 } 1592 }
1683 1593
1594 if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX))
1595 return netxen_nic_pci_mem_access_direct(adapter, off, &data, 1);
1596
1684 return -EIO; 1597 return -EIO;
1685 1598
1686correct: 1599correct:
1687 off8 = off & 0xfffffff8; 1600 stride = NX_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
1688 off0 = off & 0x7;
1689 sz[0] = (size < (8 - off0)) ? size : (8 - off0);
1690 sz[1] = size - sz[0];
1691 loop = ((off0 + size - 1) >> 3) + 1;
1692
1693 if ((size != 8) || (off0 != 0)) {
1694 for (i = 0; i < loop; i++) {
1695 if (adapter->pci_mem_read(adapter,
1696 off8 + (i << 3), &word[i], 8))
1697 return -1;
1698 }
1699 }
1700 1601
1701 switch (size) { 1602 off8 = off & ~(stride-1);
1702 case 1:
1703 tmpw = *((uint8_t *)data);
1704 break;
1705 case 2:
1706 tmpw = *((uint16_t *)data);
1707 break;
1708 case 4:
1709 tmpw = *((uint32_t *)data);
1710 break;
1711 case 8:
1712 default:
1713 tmpw = *((uint64_t *)data);
1714 break;
1715 }
1716 1603
1717 word[0] &= ~((~(~0ULL << (sz[0] * 8))) << (off0 * 8)); 1604 spin_lock(&adapter->ahw.mem_lock);
1718 word[0] |= tmpw << (off0 * 8);
1719 1605
1720 if (loop == 2) { 1606 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1721 word[1] &= ~(~0ULL << (sz[1] * 8)); 1607 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
1722 word[1] |= tmpw >> (sz[0] * 8);
1723 }
1724
1725 /*
1726 * don't lock here - write_wx gets the lock if each time
1727 * write_lock_irqsave(&adapter->adapter_lock, flags);
1728 * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
1729 */
1730 1608
1731 for (i = 0; i < loop; i++) { 1609 i = 0;
1732 writel(off8 + (i << 3), mem_crb+MIU_TEST_AGT_ADDR_LO); 1610 if (stride == 16) {
1733 writel(0, mem_crb+MIU_TEST_AGT_ADDR_HI); 1611 writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
1734 writel(word[i] & 0xffffffff, mem_crb+MIU_TEST_AGT_WRDATA_LO); 1612 writel((TA_CTL_START | TA_CTL_ENABLE),
1735 writel((word[i] >> 32) & 0xffffffff, 1613 (mem_crb + TEST_AGT_CTRL));
1736 mem_crb+MIU_TEST_AGT_WRDATA_HI);
1737 writel((MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE),
1738 mem_crb+MIU_TEST_AGT_CTRL);
1739 writel(MIU_TA_CTL_START | MIU_TA_CTL_ENABLE | MIU_TA_CTL_WRITE,
1740 mem_crb+MIU_TEST_AGT_CTRL);
1741 1614
1742 for (j = 0; j < MAX_CTL_CHECK; j++) { 1615 for (j = 0; j < MAX_CTL_CHECK; j++) {
1743 temp = readl(mem_crb + MIU_TEST_AGT_CTRL); 1616 temp = readl(mem_crb + TEST_AGT_CTRL);
1744 if ((temp & MIU_TA_CTL_BUSY) == 0) 1617 if ((temp & TA_CTL_BUSY) == 0)
1745 break; 1618 break;
1746 } 1619 }
1747 1620
1748 if (j >= MAX_CTL_CHECK) { 1621 if (j >= MAX_CTL_CHECK) {
1749 if (printk_ratelimit()) 1622 ret = -EIO;
1750 dev_err(&adapter->pdev->dev, 1623 goto done;
1751 "failed to write through agent\n");
1752 ret = -1;
1753 break;
1754 } 1624 }
1625
1626 i = (off & 0xf) ? 0 : 2;
1627 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i)),
1628 mem_crb + MIU_TEST_AGT_WRDATA(i));
1629 writel(readl(mem_crb + MIU_TEST_AGT_RDDATA(i+1)),
1630 mem_crb + MIU_TEST_AGT_WRDATA(i+1));
1631 i = (off & 0xf) ? 2 : 0;
1755 } 1632 }
1756 1633
1757 /* 1634 writel(data & 0xffffffff,
1758 * netxen_nic_pci_change_crbwindow_128M(adapter, 1); 1635 mem_crb + MIU_TEST_AGT_WRDATA(i));
1759 * write_unlock_irqrestore(&adapter->adapter_lock, flags); 1636 writel((data >> 32) & 0xffffffff,
1760 */ 1637 mem_crb + MIU_TEST_AGT_WRDATA(i+1));
1638
1639 writel((TA_CTL_ENABLE | TA_CTL_WRITE), (mem_crb + TEST_AGT_CTRL));
1640 writel((TA_CTL_START | TA_CTL_ENABLE | TA_CTL_WRITE),
1641 (mem_crb + TEST_AGT_CTRL));
1642
1643 for (j = 0; j < MAX_CTL_CHECK; j++) {
1644 temp = readl(mem_crb + TEST_AGT_CTRL);
1645 if ((temp & TA_CTL_BUSY) == 0)
1646 break;
1647 }
1648
1649 if (j >= MAX_CTL_CHECK) {
1650 if (printk_ratelimit())
1651 dev_err(&adapter->pdev->dev,
1652 "failed to write through agent\n");
1653 ret = -EIO;
1654 } else
1655 ret = 0;
1656
1657done:
1658 spin_unlock(&adapter->ahw.mem_lock);
1659
1761 return ret; 1660 return ret;
1762} 1661}
1763 1662
1764static int 1663static int
1765netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter, 1664netxen_nic_pci_mem_read_2M(struct netxen_adapter *adapter,
1766 u64 off, void *data, int size) 1665 u64 off, u64 *data)
1767{ 1666{
1768 int i, j = 0, k, start, end, loop, sz[2], off0[2]; 1667 int j, ret;
1769 uint32_t temp; 1668 u32 temp, off8;
1770 uint64_t off8, val, word[2] = {0, 0}; 1669 u64 val, stride;
1771 void __iomem *mem_crb; 1670 void __iomem *mem_crb;
1772 1671
1773 if (size != 8) 1672 /* Only 64-bit aligned access */
1673 if (off & 7)
1774 return -EIO; 1674 return -EIO;
1775 1675
1676 /* P3 onward, test agent base for MIU and SIU is same */
1776 if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET, 1677 if (ADDR_IN_RANGE(off, NETXEN_ADDR_QDR_NET,
1777 NETXEN_ADDR_QDR_NET_MAX_P3)) { 1678 NETXEN_ADDR_QDR_NET_MAX_P3)) {
1778 mem_crb = netxen_get_ioaddr(adapter, NETXEN_CRB_QDR_NET); 1679 mem_crb = netxen_get_ioaddr(adapter,
1680 NETXEN_CRB_QDR_NET+MIU_TEST_AGT_BASE);
1779 goto correct; 1681 goto correct;
1780 } 1682 }
1781 1683
1782 if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) { 1684 if (ADDR_IN_RANGE(off, NETXEN_ADDR_DDR_NET, NETXEN_ADDR_DDR_NET_MAX)) {
1783 mem_crb = netxen_get_ioaddr(adapter, NETXEN_CRB_DDR_NET); 1685 mem_crb = netxen_get_ioaddr(adapter,
1686 NETXEN_CRB_DDR_NET+MIU_TEST_AGT_BASE);
1784 goto correct; 1687 goto correct;
1785 } 1688 }
1786 1689
1690 if (ADDR_IN_RANGE(off, NETXEN_ADDR_OCM0, NETXEN_ADDR_OCM0_MAX)) {
1691 return netxen_nic_pci_mem_access_direct(adapter,
1692 off, data, 0);
1693 }
1694
1787 return -EIO; 1695 return -EIO;
1788 1696
1789correct: 1697correct:
1790 off8 = off & 0xfffffff8; 1698 stride = NX_IS_REVISION_P3P(adapter->ahw.revision_id) ? 16 : 8;
1791 off0[0] = off & 0x7;
1792 off0[1] = 0;
1793 sz[0] = (size < (8 - off0[0])) ? size : (8 - off0[0]);
1794 sz[1] = size - sz[0];
1795 loop = ((off0[0] + size - 1) >> 3) + 1;
1796 1699
1797 /* 1700 off8 = off & ~(stride-1);
1798 * don't lock here - write_wx gets the lock if each time
1799 * write_lock_irqsave(&adapter->adapter_lock, flags);
1800 * netxen_nic_pci_change_crbwindow_128M(adapter, 0);
1801 */
1802 1701
1803 for (i = 0; i < loop; i++) { 1702 spin_lock(&adapter->ahw.mem_lock);
1804 writel(off8 + (i << 3), mem_crb + MIU_TEST_AGT_ADDR_LO);
1805 writel(0, mem_crb + MIU_TEST_AGT_ADDR_HI);
1806 writel(MIU_TA_CTL_ENABLE, mem_crb + MIU_TEST_AGT_CTRL);
1807 writel(MIU_TA_CTL_START | MIU_TA_CTL_ENABLE,
1808 mem_crb + MIU_TEST_AGT_CTRL);
1809 1703
1810 for (j = 0; j < MAX_CTL_CHECK; j++) { 1704 writel(off8, (mem_crb + MIU_TEST_AGT_ADDR_LO));
1811 temp = readl(mem_crb + MIU_TEST_AGT_CTRL); 1705 writel(0, (mem_crb + MIU_TEST_AGT_ADDR_HI));
1812 if ((temp & MIU_TA_CTL_BUSY) == 0) 1706 writel(TA_CTL_ENABLE, (mem_crb + TEST_AGT_CTRL));
1813 break; 1707 writel((TA_CTL_START | TA_CTL_ENABLE), (mem_crb + TEST_AGT_CTRL));
1814 }
1815 1708
1816 if (j >= MAX_CTL_CHECK) { 1709 for (j = 0; j < MAX_CTL_CHECK; j++) {
1817 if (printk_ratelimit()) 1710 temp = readl(mem_crb + TEST_AGT_CTRL);
1818 dev_err(&adapter->pdev->dev, 1711 if ((temp & TA_CTL_BUSY) == 0)
1819 "failed to read through agent\n");
1820 break; 1712 break;
1821 }
1822
1823 start = off0[i] >> 2;
1824 end = (off0[i] + sz[i] - 1) >> 2;
1825 for (k = start; k <= end; k++) {
1826 temp = readl(mem_crb + MIU_TEST_AGT_RDDATA(k));
1827 word[i] |= ((uint64_t)temp << (32 * k));
1828 }
1829 } 1713 }
1830 1714
1831 /* 1715 if (j >= MAX_CTL_CHECK) {
1832 * netxen_nic_pci_change_crbwindow_128M(adapter, 1); 1716 if (printk_ratelimit())
1833 * write_unlock_irqrestore(&adapter->adapter_lock, flags); 1717 dev_err(&adapter->pdev->dev,
1834 */ 1718 "failed to read through agent\n");
1835 1719 ret = -EIO;
1836 if (j >= MAX_CTL_CHECK)
1837 return -1;
1838
1839 if (sz[0] == 8) {
1840 val = word[0];
1841 } else { 1720 } else {
1842 val = ((word[0] >> (off0[0] * 8)) & (~(~0ULL << (sz[0] * 8)))) | 1721 off8 = MIU_TEST_AGT_RDDATA_LO;
1843 ((word[1] & (~(~0ULL << (sz[1] * 8)))) << (sz[0] * 8)); 1722 if ((stride == 16) && (off & 0xf))
1844 } 1723 off8 = MIU_TEST_AGT_RDDATA_UPPER_LO;
1845 1724
1846 switch (size) { 1725 temp = readl(mem_crb + off8 + 4);
1847 case 1: 1726 val = (u64)temp << 32;
1848 *(uint8_t *)data = val; 1727 val |= readl(mem_crb + off8);
1849 break; 1728 *data = val;
1850 case 2: 1729 ret = 0;
1851 *(uint16_t *)data = val;
1852 break;
1853 case 4:
1854 *(uint32_t *)data = val;
1855 break;
1856 case 8:
1857 *(uint64_t *)data = val;
1858 break;
1859 } 1730 }
1860 return 0; 1731
1732 spin_unlock(&adapter->ahw.mem_lock);
1733
1734 return ret;
1861} 1735}
1862 1736
1863void 1737void
diff --git a/drivers/net/netxen/netxen_nic_init.c b/drivers/net/netxen/netxen_nic_init.c
index e40b914d6faf..6ee27a630d89 100644
--- a/drivers/net/netxen/netxen_nic_init.c
+++ b/drivers/net/netxen/netxen_nic_init.c
@@ -46,6 +46,7 @@ static unsigned int crb_addr_xform[NETXEN_MAX_CRB_XFORM];
46static void 46static void
47netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter, 47netxen_post_rx_buffers_nodb(struct netxen_adapter *adapter,
48 struct nx_host_rds_ring *rds_ring); 48 struct nx_host_rds_ring *rds_ring);
49static int netxen_p3_has_mn(struct netxen_adapter *adapter);
49 50
50static void crb_addr_transform_setup(void) 51static void crb_addr_transform_setup(void)
51{ 52{
@@ -437,7 +438,7 @@ int netxen_rom_fast_read(struct netxen_adapter *adapter, int addr, int *valp)
437#define NETXEN_BOARDNUM 0x400c 438#define NETXEN_BOARDNUM 0x400c
438#define NETXEN_CHIPNUM 0x4010 439#define NETXEN_CHIPNUM 0x4010
439 440
440int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose) 441int netxen_pinit_from_rom(struct netxen_adapter *adapter)
441{ 442{
442 int addr, val; 443 int addr, val;
443 int i, n, init_delay = 0; 444 int i, n, init_delay = 0;
@@ -450,21 +451,6 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
450 NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0xffffffff); 451 NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0xffffffff);
451 netxen_rom_unlock(adapter); 452 netxen_rom_unlock(adapter);
452 453
453 if (verbose) {
454 if (netxen_rom_fast_read(adapter, NETXEN_BOARDTYPE, &val) == 0)
455 printk("P2 ROM board type: 0x%08x\n", val);
456 else
457 printk("Could not read board type\n");
458 if (netxen_rom_fast_read(adapter, NETXEN_BOARDNUM, &val) == 0)
459 printk("P2 ROM board num: 0x%08x\n", val);
460 else
461 printk("Could not read board number\n");
462 if (netxen_rom_fast_read(adapter, NETXEN_CHIPNUM, &val) == 0)
463 printk("P2 ROM chip num: 0x%08x\n", val);
464 else
465 printk("Could not read chip number\n");
466 }
467
468 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) { 454 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
469 if (netxen_rom_fast_read(adapter, 0, &n) != 0 || 455 if (netxen_rom_fast_read(adapter, 0, &n) != 0 ||
470 (n != 0xcafecafe) || 456 (n != 0xcafecafe) ||
@@ -486,11 +472,7 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
486 n &= ~0x80000000; 472 n &= ~0x80000000;
487 } 473 }
488 474
489 if (n < 1024) { 475 if (n >= 1024) {
490 if (verbose)
491 printk(KERN_DEBUG "%s: %d CRB init values found"
492 " in ROM.\n", netxen_nic_driver_name, n);
493 } else {
494 printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not" 476 printk(KERN_ERR "%s:n=0x%x Error! NetXen card flash not"
495 " initialized.\n", __func__, n); 477 " initialized.\n", __func__, n);
496 return -EIO; 478 return -EIO;
@@ -502,6 +484,7 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
502 netxen_nic_driver_name); 484 netxen_nic_driver_name);
503 return -ENOMEM; 485 return -ENOMEM;
504 } 486 }
487
505 for (i = 0; i < n; i++) { 488 for (i = 0; i < n; i++) {
506 if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 || 489 if (netxen_rom_fast_read(adapter, 8*i + 4*offset, &val) != 0 ||
507 netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) { 490 netxen_rom_fast_read(adapter, 8*i + 4*offset + 4, &addr) != 0) {
@@ -512,11 +495,8 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
512 buf[i].addr = addr; 495 buf[i].addr = addr;
513 buf[i].data = val; 496 buf[i].data = val;
514 497
515 if (verbose)
516 printk(KERN_DEBUG "%s: PCI: 0x%08x == 0x%08x\n",
517 netxen_nic_driver_name,
518 (u32)netxen_decode_crb_addr(addr), val);
519 } 498 }
499
520 for (i = 0; i < n; i++) { 500 for (i = 0; i < n; i++) {
521 501
522 off = netxen_decode_crb_addr(buf[i].addr); 502 off = netxen_decode_crb_addr(buf[i].addr);
@@ -526,6 +506,10 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
526 continue; 506 continue;
527 } 507 }
528 off += NETXEN_PCI_CRBSPACE; 508 off += NETXEN_PCI_CRBSPACE;
509
510 if (off & 1)
511 continue;
512
529 /* skipping cold reboot MAGIC */ 513 /* skipping cold reboot MAGIC */
530 if (off == NETXEN_CAM_RAM(0x1fc)) 514 if (off == NETXEN_CAM_RAM(0x1fc))
531 continue; 515 continue;
@@ -544,7 +528,8 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
544 continue; 528 continue;
545 if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */ 529 if (off == (ROMUSB_GLB + 0x1c)) /* MS clock */
546 continue; 530 continue;
547 if (off == (NETXEN_CRB_PEG_NET_1 + 0x18)) 531 if (off == (NETXEN_CRB_PEG_NET_1 + 0x18) &&
532 !NX_IS_REVISION_P3P(adapter->ahw.revision_id))
548 buf[i].data = 0x1020; 533 buf[i].data = 0x1020;
549 /* skip the function enable register */ 534 /* skip the function enable register */
550 if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION)) 535 if (off == NETXEN_PCIE_REG(PCIE_SETUP_FUNCTION))
@@ -605,6 +590,172 @@ int netxen_pinit_from_rom(struct netxen_adapter *adapter, int verbose)
605 return 0; 590 return 0;
606} 591}
607 592
593static struct uni_table_desc *nx_get_table_desc(const u8 *unirom, int section)
594{
595 uint32_t i;
596 struct uni_table_desc *directory = (struct uni_table_desc *) &unirom[0];
597 __le32 entries = cpu_to_le32(directory->num_entries);
598
599 for (i = 0; i < entries; i++) {
600
601 __le32 offs = cpu_to_le32(directory->findex) +
602 (i * cpu_to_le32(directory->entry_size));
603 __le32 tab_type = cpu_to_le32(*((u32 *)&unirom[offs] + 8));
604
605 if (tab_type == section)
606 return (struct uni_table_desc *) &unirom[offs];
607 }
608
609 return NULL;
610}
611
612static int
613nx_set_product_offs(struct netxen_adapter *adapter)
614{
615 struct uni_table_desc *ptab_descr;
616 const u8 *unirom = adapter->fw->data;
617 uint32_t i;
618 __le32 entries;
619
620 ptab_descr = nx_get_table_desc(unirom, NX_UNI_DIR_SECT_PRODUCT_TBL);
621 if (ptab_descr == NULL)
622 return -1;
623
624 entries = cpu_to_le32(ptab_descr->num_entries);
625
626 for (i = 0; i < entries; i++) {
627
628 __le32 flags, file_chiprev, offs;
629 u8 chiprev = adapter->ahw.revision_id;
630 int mn_present = netxen_p3_has_mn(adapter);
631 uint32_t flagbit;
632
633 offs = cpu_to_le32(ptab_descr->findex) +
634 (i * cpu_to_le32(ptab_descr->entry_size));
635 flags = cpu_to_le32(*((int *)&unirom[offs] + NX_UNI_FLAGS_OFF));
636 file_chiprev = cpu_to_le32(*((int *)&unirom[offs] +
637 NX_UNI_CHIP_REV_OFF));
638
639 flagbit = mn_present ? 1 : 2;
640
641 if ((chiprev == file_chiprev) &&
642 ((1ULL << flagbit) & flags)) {
643 adapter->file_prd_off = offs;
644 return 0;
645 }
646 }
647
648 return -1;
649}
650
651
652static struct uni_data_desc *nx_get_data_desc(struct netxen_adapter *adapter,
653 u32 section, u32 idx_offset)
654{
655 const u8 *unirom = adapter->fw->data;
656 int idx = cpu_to_le32(*((int *)&unirom[adapter->file_prd_off] +
657 idx_offset));
658 struct uni_table_desc *tab_desc;
659 __le32 offs;
660
661 tab_desc = nx_get_table_desc(unirom, section);
662
663 if (tab_desc == NULL)
664 return NULL;
665
666 offs = cpu_to_le32(tab_desc->findex) +
667 (cpu_to_le32(tab_desc->entry_size) * idx);
668
669 return (struct uni_data_desc *)&unirom[offs];
670}
671
672static u8 *
673nx_get_bootld_offs(struct netxen_adapter *adapter)
674{
675 u32 offs = NETXEN_BOOTLD_START;
676
677 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
678 offs = cpu_to_le32((nx_get_data_desc(adapter,
679 NX_UNI_DIR_SECT_BOOTLD,
680 NX_UNI_BOOTLD_IDX_OFF))->findex);
681
682 return (u8 *)&adapter->fw->data[offs];
683}
684
685static u8 *
686nx_get_fw_offs(struct netxen_adapter *adapter)
687{
688 u32 offs = NETXEN_IMAGE_START;
689
690 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
691 offs = cpu_to_le32((nx_get_data_desc(adapter,
692 NX_UNI_DIR_SECT_FW,
693 NX_UNI_FIRMWARE_IDX_OFF))->findex);
694
695 return (u8 *)&adapter->fw->data[offs];
696}
697
698static __le32
699nx_get_fw_size(struct netxen_adapter *adapter)
700{
701 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE)
702 return cpu_to_le32((nx_get_data_desc(adapter,
703 NX_UNI_DIR_SECT_FW,
704 NX_UNI_FIRMWARE_IDX_OFF))->size);
705 else
706 return cpu_to_le32(
707 *(u32 *)&adapter->fw->data[NX_FW_SIZE_OFFSET]);
708}
709
710static __le32
711nx_get_fw_version(struct netxen_adapter *adapter)
712{
713 struct uni_data_desc *fw_data_desc;
714 const struct firmware *fw = adapter->fw;
715 __le32 major, minor, sub;
716 const u8 *ver_str;
717 int i, ret = 0;
718
719 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
720
721 fw_data_desc = nx_get_data_desc(adapter,
722 NX_UNI_DIR_SECT_FW, NX_UNI_FIRMWARE_IDX_OFF);
723 ver_str = fw->data + cpu_to_le32(fw_data_desc->findex) +
724 cpu_to_le32(fw_data_desc->size) - 17;
725
726 for (i = 0; i < 12; i++) {
727 if (!strncmp(&ver_str[i], "REV=", 4)) {
728 ret = sscanf(&ver_str[i+4], "%u.%u.%u ",
729 &major, &minor, &sub);
730 break;
731 }
732 }
733
734 if (ret != 3)
735 return 0;
736
737 return major + (minor << 8) + (sub << 16);
738
739 } else
740 return cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
741}
742
743static __le32
744nx_get_bios_version(struct netxen_adapter *adapter)
745{
746 const struct firmware *fw = adapter->fw;
747 __le32 bios_ver, prd_off = adapter->file_prd_off;
748
749 if (adapter->fw_type == NX_UNIFIED_ROMIMAGE) {
750 bios_ver = cpu_to_le32(*((u32 *) (&fw->data[prd_off])
751 + NX_UNI_BIOS_VERSION_OFF));
752 return (bios_ver << 24) + ((bios_ver >> 8) & 0xff00) +
753 (bios_ver >> 24);
754 } else
755 return cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]);
756
757}
758
608int 759int
609netxen_need_fw_reset(struct netxen_adapter *adapter) 760netxen_need_fw_reset(struct netxen_adapter *adapter)
610{ 761{
@@ -644,9 +795,8 @@ netxen_need_fw_reset(struct netxen_adapter *adapter)
644 /* check if we have got newer or different file firmware */ 795 /* check if we have got newer or different file firmware */
645 if (adapter->fw) { 796 if (adapter->fw) {
646 797
647 const struct firmware *fw = adapter->fw; 798 val = nx_get_fw_version(adapter);
648 799
649 val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]);
650 version = NETXEN_DECODE_VERSION(val); 800 version = NETXEN_DECODE_VERSION(val);
651 801
652 major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR); 802 major = NXRD32(adapter, NETXEN_FW_VERSION_MAJOR);
@@ -656,7 +806,8 @@ netxen_need_fw_reset(struct netxen_adapter *adapter)
656 if (version > NETXEN_VERSION_CODE(major, minor, build)) 806 if (version > NETXEN_VERSION_CODE(major, minor, build))
657 return 1; 807 return 1;
658 808
659 if (version == NETXEN_VERSION_CODE(major, minor, build)) { 809 if (version == NETXEN_VERSION_CODE(major, minor, build) &&
810 adapter->fw_type != NX_UNIFIED_ROMIMAGE) {
660 811
661 val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL); 812 val = NXRD32(adapter, NETXEN_MIU_MN_CONTROL);
662 fw_type = (val & 0x4) ? 813 fw_type = (val & 0x4) ?
@@ -671,7 +822,11 @@ netxen_need_fw_reset(struct netxen_adapter *adapter)
671} 822}
672 823
673static char *fw_name[] = { 824static char *fw_name[] = {
674 "nxromimg.bin", "nx3fwct.bin", "nx3fwmn.bin", "flash", 825 NX_P2_MN_ROMIMAGE_NAME,
826 NX_P3_CT_ROMIMAGE_NAME,
827 NX_P3_MN_ROMIMAGE_NAME,
828 NX_UNIFIED_ROMIMAGE_NAME,
829 NX_FLASH_ROMIMAGE_NAME,
675}; 830};
676 831
677int 832int
@@ -693,26 +848,28 @@ netxen_load_firmware(struct netxen_adapter *adapter)
693 848
694 size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8; 849 size = (NETXEN_IMAGE_START - NETXEN_BOOTLD_START) / 8;
695 850
696 ptr64 = (u64 *)&fw->data[NETXEN_BOOTLD_START]; 851 ptr64 = (u64 *)nx_get_bootld_offs(adapter);
697 flashaddr = NETXEN_BOOTLD_START; 852 flashaddr = NETXEN_BOOTLD_START;
698 853
699 for (i = 0; i < size; i++) { 854 for (i = 0; i < size; i++) {
700 data = cpu_to_le64(ptr64[i]); 855 data = cpu_to_le64(ptr64[i]);
701 adapter->pci_mem_write(adapter, flashaddr, &data, 8); 856
857 if (adapter->pci_mem_write(adapter, flashaddr, data))
858 return -EIO;
859
702 flashaddr += 8; 860 flashaddr += 8;
703 } 861 }
704 862
705 size = *(u32 *)&fw->data[NX_FW_SIZE_OFFSET]; 863 size = (__force u32)nx_get_fw_size(adapter) / 8;
706 size = (__force u32)cpu_to_le32(size) / 8;
707 864
708 ptr64 = (u64 *)&fw->data[NETXEN_IMAGE_START]; 865 ptr64 = (u64 *)nx_get_fw_offs(adapter);
709 flashaddr = NETXEN_IMAGE_START; 866 flashaddr = NETXEN_IMAGE_START;
710 867
711 for (i = 0; i < size; i++) { 868 for (i = 0; i < size; i++) {
712 data = cpu_to_le64(ptr64[i]); 869 data = cpu_to_le64(ptr64[i]);
713 870
714 if (adapter->pci_mem_write(adapter, 871 if (adapter->pci_mem_write(adapter,
715 flashaddr, &data, 8)) 872 flashaddr, data))
716 return -EIO; 873 return -EIO;
717 874
718 flashaddr += 8; 875 flashaddr += 8;
@@ -726,17 +883,17 @@ netxen_load_firmware(struct netxen_adapter *adapter)
726 883
727 for (i = 0; i < size; i++) { 884 for (i = 0; i < size; i++) {
728 if (netxen_rom_fast_read(adapter, 885 if (netxen_rom_fast_read(adapter,
729 flashaddr, &lo) != 0) 886 flashaddr, (int *)&lo) != 0)
730 return -EIO; 887 return -EIO;
731 if (netxen_rom_fast_read(adapter, 888 if (netxen_rom_fast_read(adapter,
732 flashaddr + 4, &hi) != 0) 889 flashaddr + 4, (int *)&hi) != 0)
733 return -EIO; 890 return -EIO;
734 891
735 /* hi, lo are already in host endian byteorder */ 892 /* hi, lo are already in host endian byteorder */
736 data = (((u64)hi << 32) | lo); 893 data = (((u64)hi << 32) | lo);
737 894
738 if (adapter->pci_mem_write(adapter, 895 if (adapter->pci_mem_write(adapter,
739 flashaddr, &data, 8)) 896 flashaddr, data))
740 return -EIO; 897 return -EIO;
741 898
742 flashaddr += 8; 899 flashaddr += 8;
@@ -744,7 +901,10 @@ netxen_load_firmware(struct netxen_adapter *adapter)
744 } 901 }
745 msleep(1); 902 msleep(1);
746 903
747 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 904 if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
905 NXWR32(adapter, NETXEN_CRB_PEG_NET_0 + 0x18, 0x1020);
906 NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001e);
907 } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
748 NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d); 908 NXWR32(adapter, NETXEN_ROMUSB_GLB_SW_RESET, 0x80001d);
749 else { 909 else {
750 NXWR32(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff); 910 NXWR32(adapter, NETXEN_ROMUSB_GLB_CHIP_CLK_CTRL, 0x3fff);
@@ -755,21 +915,31 @@ netxen_load_firmware(struct netxen_adapter *adapter)
755} 915}
756 916
757static int 917static int
758netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname) 918netxen_validate_firmware(struct netxen_adapter *adapter)
759{ 919{
760 __le32 val; 920 __le32 val;
761 u32 ver, min_ver, bios; 921 u32 ver, min_ver, bios, min_size;
762 struct pci_dev *pdev = adapter->pdev; 922 struct pci_dev *pdev = adapter->pdev;
763 const struct firmware *fw = adapter->fw; 923 const struct firmware *fw = adapter->fw;
924 u8 fw_type = adapter->fw_type;
764 925
765 if (fw->size < NX_FW_MIN_SIZE) 926 if (fw_type == NX_UNIFIED_ROMIMAGE) {
766 return -EINVAL; 927 if (nx_set_product_offs(adapter))
928 return -EINVAL;
929
930 min_size = NX_UNI_FW_MIN_SIZE;
931 } else {
932 val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]);
933 if ((__force u32)val != NETXEN_BDINFO_MAGIC)
934 return -EINVAL;
935
936 min_size = NX_FW_MIN_SIZE;
937 }
767 938
768 val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_MAGIC_OFFSET]); 939 if (fw->size < min_size)
769 if ((__force u32)val != NETXEN_BDINFO_MAGIC)
770 return -EINVAL; 940 return -EINVAL;
771 941
772 val = cpu_to_le32(*(u32 *)&fw->data[NX_FW_VERSION_OFFSET]); 942 val = nx_get_fw_version(adapter);
773 943
774 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 944 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
775 min_ver = NETXEN_VERSION_CODE(4, 0, 216); 945 min_ver = NETXEN_VERSION_CODE(4, 0, 216);
@@ -781,15 +951,15 @@ netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
781 if ((_major(ver) > _NETXEN_NIC_LINUX_MAJOR) || (ver < min_ver)) { 951 if ((_major(ver) > _NETXEN_NIC_LINUX_MAJOR) || (ver < min_ver)) {
782 dev_err(&pdev->dev, 952 dev_err(&pdev->dev,
783 "%s: firmware version %d.%d.%d unsupported\n", 953 "%s: firmware version %d.%d.%d unsupported\n",
784 fwname, _major(ver), _minor(ver), _build(ver)); 954 fw_name[fw_type], _major(ver), _minor(ver), _build(ver));
785 return -EINVAL; 955 return -EINVAL;
786 } 956 }
787 957
788 val = cpu_to_le32(*(u32 *)&fw->data[NX_BIOS_VERSION_OFFSET]); 958 val = nx_get_bios_version(adapter);
789 netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios); 959 netxen_rom_fast_read(adapter, NX_BIOS_VERSION_OFFSET, (int *)&bios);
790 if ((__force u32)val != bios) { 960 if ((__force u32)val != bios) {
791 dev_err(&pdev->dev, "%s: firmware bios is incompatible\n", 961 dev_err(&pdev->dev, "%s: firmware bios is incompatible\n",
792 fwname); 962 fw_name[fw_type]);
793 return -EINVAL; 963 return -EINVAL;
794 } 964 }
795 965
@@ -800,7 +970,7 @@ netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
800 val = NETXEN_DECODE_VERSION(val); 970 val = NETXEN_DECODE_VERSION(val);
801 if (val > ver) { 971 if (val > ver) {
802 dev_info(&pdev->dev, "%s: firmware is older than flash\n", 972 dev_info(&pdev->dev, "%s: firmware is older than flash\n",
803 fwname); 973 fw_name[fw_type]);
804 return -EINVAL; 974 return -EINVAL;
805 } 975 }
806 976
@@ -808,6 +978,41 @@ netxen_validate_firmware(struct netxen_adapter *adapter, const char *fwname)
808 return 0; 978 return 0;
809} 979}
810 980
981static void
982nx_get_next_fwtype(struct netxen_adapter *adapter)
983{
984 u8 fw_type;
985
986 switch (adapter->fw_type) {
987 case NX_UNKNOWN_ROMIMAGE:
988 fw_type = NX_UNIFIED_ROMIMAGE;
989 break;
990
991 case NX_UNIFIED_ROMIMAGE:
992 if (NX_IS_REVISION_P3P(adapter->ahw.revision_id))
993 fw_type = NX_FLASH_ROMIMAGE;
994 else if (NX_IS_REVISION_P2(adapter->ahw.revision_id))
995 fw_type = NX_P2_MN_ROMIMAGE;
996 else if (netxen_p3_has_mn(adapter))
997 fw_type = NX_P3_MN_ROMIMAGE;
998 else
999 fw_type = NX_P3_CT_ROMIMAGE;
1000 break;
1001
1002 case NX_P3_MN_ROMIMAGE:
1003 fw_type = NX_P3_CT_ROMIMAGE;
1004 break;
1005
1006 case NX_P2_MN_ROMIMAGE:
1007 case NX_P3_CT_ROMIMAGE:
1008 default:
1009 fw_type = NX_FLASH_ROMIMAGE;
1010 break;
1011 }
1012
1013 adapter->fw_type = fw_type;
1014}
1015
811static int 1016static int
812netxen_p3_has_mn(struct netxen_adapter *adapter) 1017netxen_p3_has_mn(struct netxen_adapter *adapter)
813{ 1018{
@@ -829,49 +1034,29 @@ netxen_p3_has_mn(struct netxen_adapter *adapter)
829 1034
830void netxen_request_firmware(struct netxen_adapter *adapter) 1035void netxen_request_firmware(struct netxen_adapter *adapter)
831{ 1036{
832 u8 fw_type;
833 struct pci_dev *pdev = adapter->pdev; 1037 struct pci_dev *pdev = adapter->pdev;
834 int rc = 0; 1038 int rc = 0;
835 1039
836 if (NX_IS_REVISION_P2(adapter->ahw.revision_id)) { 1040 adapter->fw_type = NX_UNKNOWN_ROMIMAGE;
837 fw_type = NX_P2_MN_ROMIMAGE;
838 goto request_fw;
839 }
840 1041
841 fw_type = netxen_p3_has_mn(adapter) ? 1042next:
842 NX_P3_MN_ROMIMAGE : NX_P3_CT_ROMIMAGE; 1043 nx_get_next_fwtype(adapter);
843 1044
844request_fw: 1045 if (adapter->fw_type == NX_FLASH_ROMIMAGE) {
845 rc = request_firmware(&adapter->fw, fw_name[fw_type], &pdev->dev);
846 if (rc != 0) {
847 if (fw_type == NX_P3_MN_ROMIMAGE) {
848 msleep(1);
849 fw_type = NX_P3_CT_ROMIMAGE;
850 goto request_fw;
851 }
852
853 fw_type = NX_FLASH_ROMIMAGE;
854 adapter->fw = NULL; 1046 adapter->fw = NULL;
855 goto done; 1047 } else {
856 } 1048 rc = request_firmware(&adapter->fw,
857 1049 fw_name[adapter->fw_type], &pdev->dev);
858 rc = netxen_validate_firmware(adapter, fw_name[fw_type]); 1050 if (rc != 0)
859 if (rc != 0) { 1051 goto next;
860 release_firmware(adapter->fw); 1052
861 1053 rc = netxen_validate_firmware(adapter);
862 if (fw_type == NX_P3_MN_ROMIMAGE) { 1054 if (rc != 0) {
1055 release_firmware(adapter->fw);
863 msleep(1); 1056 msleep(1);
864 fw_type = NX_P3_CT_ROMIMAGE; 1057 goto next;
865 goto request_fw;
866 } 1058 }
867
868 fw_type = NX_FLASH_ROMIMAGE;
869 adapter->fw = NULL;
870 goto done;
871 } 1059 }
872
873done:
874 adapter->fw_type = fw_type;
875} 1060}
876 1061
877 1062
@@ -1506,10 +1691,8 @@ netxen_post_rx_buffers(struct netxen_adapter *adapter, u32 ringid,
1506 (rds_ring->num_desc - 1))); 1691 (rds_ring->num_desc - 1)));
1507 netxen_set_msg_ctxid(msg, adapter->portnum); 1692 netxen_set_msg_ctxid(msg, adapter->portnum);
1508 netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid)); 1693 netxen_set_msg_opcode(msg, NETXEN_RCV_PRODUCER(ringid));
1509 read_lock(&adapter->adapter_lock); 1694 NXWRIO(adapter, DB_NORMALIZE(adapter,
1510 writel(msg, DB_NORMALIZE(adapter, 1695 NETXEN_RCV_PRODUCER_OFFSET), msg);
1511 NETXEN_RCV_PRODUCER_OFFSET));
1512 read_unlock(&adapter->adapter_lock);
1513 } 1696 }
1514 } 1697 }
1515} 1698}
diff --git a/drivers/net/netxen/netxen_nic_main.c b/drivers/net/netxen/netxen_nic_main.c
index 0b4a56a8c8d5..12d1037cd81b 100644
--- a/drivers/net/netxen/netxen_nic_main.c
+++ b/drivers/net/netxen/netxen_nic_main.c
@@ -34,13 +34,18 @@
34#include <net/ip.h> 34#include <net/ip.h>
35#include <linux/ipv6.h> 35#include <linux/ipv6.h>
36#include <linux/inetdevice.h> 36#include <linux/inetdevice.h>
37#include <linux/sysfs.h>
37 38
38MODULE_DESCRIPTION("NetXen Multi port (1/10) Gigabit Network Driver"); 39MODULE_DESCRIPTION("QLogic/NetXen (1/10) GbE Converged Ethernet Driver");
39MODULE_LICENSE("GPL"); 40MODULE_LICENSE("GPL");
40MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID); 41MODULE_VERSION(NETXEN_NIC_LINUX_VERSIONID);
42MODULE_FIRMWARE(NX_P2_MN_ROMIMAGE_NAME);
43MODULE_FIRMWARE(NX_P3_CT_ROMIMAGE_NAME);
44MODULE_FIRMWARE(NX_P3_MN_ROMIMAGE_NAME);
45MODULE_FIRMWARE(NX_UNIFIED_ROMIMAGE_NAME);
41 46
42char netxen_nic_driver_name[] = "netxen_nic"; 47char netxen_nic_driver_name[] = "netxen_nic";
43static char netxen_nic_driver_string[] = "NetXen Network Driver version " 48static char netxen_nic_driver_string[] = "QLogic/NetXen Network Driver v"
44 NETXEN_NIC_LINUX_VERSIONID; 49 NETXEN_NIC_LINUX_VERSIONID;
45 50
46static int port_mode = NETXEN_PORT_MODE_AUTO_NEG; 51static int port_mode = NETXEN_PORT_MODE_AUTO_NEG;
@@ -52,7 +57,8 @@ static int use_msi = 1;
52 57
53static int use_msi_x = 1; 58static int use_msi_x = 1;
54 59
55/* Local functions to NetXen NIC driver */ 60static unsigned long auto_fw_reset = AUTO_FW_RESET_ENABLED;
61
56static int __devinit netxen_nic_probe(struct pci_dev *pdev, 62static int __devinit netxen_nic_probe(struct pci_dev *pdev,
57 const struct pci_device_id *ent); 63 const struct pci_device_id *ent);
58static void __devexit netxen_nic_remove(struct pci_dev *pdev); 64static void __devexit netxen_nic_remove(struct pci_dev *pdev);
@@ -73,6 +79,8 @@ static void netxen_nic_poll_controller(struct net_device *netdev);
73 79
74static void netxen_create_sysfs_entries(struct netxen_adapter *adapter); 80static void netxen_create_sysfs_entries(struct netxen_adapter *adapter);
75static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter); 81static void netxen_remove_sysfs_entries(struct netxen_adapter *adapter);
82static void netxen_create_diag_entries(struct netxen_adapter *adapter);
83static void netxen_remove_diag_entries(struct netxen_adapter *adapter);
76 84
77static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter); 85static int nx_decr_dev_ref_cnt(struct netxen_adapter *adapter);
78static int netxen_can_start_firmware(struct netxen_adapter *adapter); 86static int netxen_can_start_firmware(struct netxen_adapter *adapter);
@@ -87,6 +95,11 @@ static void netxen_config_indev_addr(struct net_device *dev, unsigned long);
87#define ENTRY(device) \ 95#define ENTRY(device) \
88 {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \ 96 {PCI_DEVICE(PCI_VENDOR_ID_NETXEN, (device)), \
89 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0} 97 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
98#define ENTRY2(device) \
99 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, (device)), \
100 .class = PCI_CLASS_NETWORK_ETHERNET << 8, .class_mask = ~0}
101
102#define PCI_DEVICE_ID_QLOGIC_QLE824X 0x8020
90 103
91static struct pci_device_id netxen_pci_tbl[] __devinitdata = { 104static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
92 ENTRY(PCI_DEVICE_ID_NX2031_10GXSR), 105 ENTRY(PCI_DEVICE_ID_NX2031_10GXSR),
@@ -97,6 +110,7 @@ static struct pci_device_id netxen_pci_tbl[] __devinitdata = {
97 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT), 110 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT),
98 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2), 111 ENTRY(PCI_DEVICE_ID_NX2031_XG_MGMT2),
99 ENTRY(PCI_DEVICE_ID_NX3031), 112 ENTRY(PCI_DEVICE_ID_NX3031),
113 ENTRY2(PCI_DEVICE_ID_QLOGIC_QLE824X),
100 {0,} 114 {0,}
101}; 115};
102 116
@@ -607,14 +621,12 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
607 * Set the CRB window to invalid. If any register in window 0 is 621 * Set the CRB window to invalid. If any register in window 0 is
608 * accessed it should set the window to 0 and then reset it to 1. 622 * accessed it should set the window to 0 and then reset it to 1.
609 */ 623 */
610 adapter->curr_window = 255; 624 adapter->ahw.crb_win = -1;
611 adapter->ahw.qdr_sn_window = -1; 625 adapter->ahw.ocm_win = -1;
612 adapter->ahw.ddr_mn_window = -1;
613 626
614 /* remap phys address */ 627 /* remap phys address */
615 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */ 628 mem_base = pci_resource_start(pdev, 0); /* 0 is for BAR 0 */
616 mem_len = pci_resource_len(pdev, 0); 629 mem_len = pci_resource_len(pdev, 0);
617 pci_len0 = 0;
618 630
619 /* 128 Meg of memory */ 631 /* 128 Meg of memory */
620 if (mem_len == NETXEN_PCI_128MB_SIZE) { 632 if (mem_len == NETXEN_PCI_128MB_SIZE) {
@@ -623,6 +635,7 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
623 SECOND_PAGE_GROUP_SIZE); 635 SECOND_PAGE_GROUP_SIZE);
624 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START, 636 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START,
625 THIRD_PAGE_GROUP_SIZE); 637 THIRD_PAGE_GROUP_SIZE);
638 pci_len0 = FIRST_PAGE_GROUP_SIZE;
626 } else if (mem_len == NETXEN_PCI_32MB_SIZE) { 639 } else if (mem_len == NETXEN_PCI_32MB_SIZE) {
627 mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE); 640 mem_ptr1 = ioremap(mem_base, SECOND_PAGE_GROUP_SIZE);
628 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START - 641 mem_ptr2 = ioremap(mem_base + THIRD_PAGE_GROUP_START -
@@ -635,19 +648,6 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
635 return -EIO; 648 return -EIO;
636 } 649 }
637 pci_len0 = mem_len; 650 pci_len0 = mem_len;
638
639 adapter->ahw.ddr_mn_window = 0;
640 adapter->ahw.qdr_sn_window = 0;
641
642 adapter->ahw.mn_win_crb = NETXEN_PCI_CRBSPACE +
643 0x100000 + PCIX_MN_WINDOW + (pci_func * 0x20);
644 adapter->ahw.ms_win_crb = NETXEN_PCI_CRBSPACE +
645 0x100000 + PCIX_SN_WINDOW;
646 if (pci_func < 4)
647 adapter->ahw.ms_win_crb += (pci_func * 0x20);
648 else
649 adapter->ahw.ms_win_crb +=
650 0xA0 + ((pci_func - 4) * 0x10);
651 } else { 651 } else {
652 return -EIO; 652 return -EIO;
653 } 653 }
@@ -661,6 +661,15 @@ netxen_setup_pci_map(struct netxen_adapter *adapter)
661 adapter->ahw.pci_base1 = mem_ptr1; 661 adapter->ahw.pci_base1 = mem_ptr1;
662 adapter->ahw.pci_base2 = mem_ptr2; 662 adapter->ahw.pci_base2 = mem_ptr2;
663 663
664 if (NX_IS_REVISION_P3P(adapter->ahw.revision_id)) {
665 adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
666 NETXEN_PCIX_PS_REG(PCIX_OCM_WINDOW_REG(pci_func)));
667
668 } else if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) {
669 adapter->ahw.ocm_win_crb = netxen_get_ioaddr(adapter,
670 NETXEN_PCIX_PS_REG(PCIE_MN_WINDOW_REG(pci_func)));
671 }
672
664 if (NX_IS_REVISION_P3(adapter->ahw.revision_id)) 673 if (NX_IS_REVISION_P3(adapter->ahw.revision_id))
665 goto skip_doorbell; 674 goto skip_doorbell;
666 675
@@ -725,7 +734,8 @@ netxen_check_options(struct netxen_adapter *adapter)
725 if (adapter->portnum == 0) { 734 if (adapter->portnum == 0) {
726 get_brd_name_by_type(adapter->ahw.board_type, brd_name); 735 get_brd_name_by_type(adapter->ahw.board_type, brd_name);
727 736
728 printk(KERN_INFO "NetXen %s Board S/N %s Chip rev 0x%x\n", 737 pr_info("%s: %s Board S/N %s Chip rev 0x%x\n",
738 module_name(THIS_MODULE),
729 brd_name, serial_num, adapter->ahw.revision_id); 739 brd_name, serial_num, adapter->ahw.revision_id);
730 } 740 }
731 741
@@ -817,7 +827,7 @@ netxen_start_firmware(struct netxen_adapter *adapter)
817 827
818 if (first_boot != 0x55555555) { 828 if (first_boot != 0x55555555) {
819 NXWR32(adapter, CRB_CMDPEG_STATE, 0); 829 NXWR32(adapter, CRB_CMDPEG_STATE, 0);
820 netxen_pinit_from_rom(adapter, 0); 830 netxen_pinit_from_rom(adapter);
821 msleep(1); 831 msleep(1);
822 } 832 }
823 833
@@ -1207,16 +1217,10 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1207 int pci_func_id = PCI_FUNC(pdev->devfn); 1217 int pci_func_id = PCI_FUNC(pdev->devfn);
1208 uint8_t revision_id; 1218 uint8_t revision_id;
1209 1219
1210 if (pdev->class != 0x020000) {
1211 printk(KERN_DEBUG "NetXen function %d, class %x will not "
1212 "be enabled.\n",pci_func_id, pdev->class);
1213 return -ENODEV;
1214 }
1215
1216 if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) { 1220 if (pdev->revision >= NX_P3_A0 && pdev->revision < NX_P3_B1) {
1217 printk(KERN_WARNING "NetXen chip revisions between 0x%x-0x%x" 1221 pr_warning("%s: chip revisions between 0x%x-0x%x"
1218 "will not be enabled.\n", 1222 "will not be enabled.\n",
1219 NX_P3_A0, NX_P3_B1); 1223 module_name(THIS_MODULE), NX_P3_A0, NX_P3_B1);
1220 return -ENODEV; 1224 return -ENODEV;
1221 } 1225 }
1222 1226
@@ -1250,7 +1254,9 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1250 revision_id = pdev->revision; 1254 revision_id = pdev->revision;
1251 adapter->ahw.revision_id = revision_id; 1255 adapter->ahw.revision_id = revision_id;
1252 1256
1253 rwlock_init(&adapter->adapter_lock); 1257 rwlock_init(&adapter->ahw.crb_lock);
1258 spin_lock_init(&adapter->ahw.mem_lock);
1259
1254 spin_lock_init(&adapter->tx_clean_lock); 1260 spin_lock_init(&adapter->tx_clean_lock);
1255 INIT_LIST_HEAD(&adapter->mac_list); 1261 INIT_LIST_HEAD(&adapter->mac_list);
1256 1262
@@ -1315,6 +1321,8 @@ netxen_nic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1315 break; 1321 break;
1316 } 1322 }
1317 1323
1324 netxen_create_diag_entries(adapter);
1325
1318 return 0; 1326 return 0;
1319 1327
1320err_out_disable_msi: 1328err_out_disable_msi:
@@ -1367,6 +1375,8 @@ static void __devexit netxen_nic_remove(struct pci_dev *pdev)
1367 1375
1368 netxen_teardown_intr(adapter); 1376 netxen_teardown_intr(adapter);
1369 1377
1378 netxen_remove_diag_entries(adapter);
1379
1370 netxen_cleanup_pci_map(adapter); 1380 netxen_cleanup_pci_map(adapter);
1371 1381
1372 netxen_release_firmware(adapter); 1382 netxen_release_firmware(adapter);
@@ -1447,7 +1457,8 @@ netxen_nic_resume(struct pci_dev *pdev)
1447 if (err) 1457 if (err)
1448 return err; 1458 return err;
1449 1459
1450 adapter->curr_window = 255; 1460 adapter->ahw.crb_win = -1;
1461 adapter->ahw.ocm_win = -1;
1451 1462
1452 err = netxen_start_firmware(adapter); 1463 err = netxen_start_firmware(adapter);
1453 if (err) { 1464 if (err) {
@@ -1925,7 +1936,7 @@ request_reset:
1925struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev) 1936struct net_device_stats *netxen_nic_get_stats(struct net_device *netdev)
1926{ 1937{
1927 struct netxen_adapter *adapter = netdev_priv(netdev); 1938 struct netxen_adapter *adapter = netdev_priv(netdev);
1928 struct net_device_stats *stats = &adapter->net_stats; 1939 struct net_device_stats *stats = &netdev->stats;
1929 1940
1930 memset(stats, 0, sizeof(*stats)); 1941 memset(stats, 0, sizeof(*stats));
1931 1942
@@ -2261,7 +2272,8 @@ netxen_check_health(struct netxen_adapter *adapter)
2261 dev_info(&netdev->dev, "firmware hang detected\n"); 2272 dev_info(&netdev->dev, "firmware hang detected\n");
2262 2273
2263detach: 2274detach:
2264 if (!test_and_set_bit(__NX_RESETTING, &adapter->state)) 2275 if ((auto_fw_reset == AUTO_FW_RESET_ENABLED) &&
2276 !test_and_set_bit(__NX_RESETTING, &adapter->state))
2265 netxen_schedule_work(adapter, netxen_detach_work, 0); 2277 netxen_schedule_work(adapter, netxen_detach_work, 0);
2266 return 1; 2278 return 1;
2267} 2279}
@@ -2339,6 +2351,197 @@ static struct device_attribute dev_attr_bridged_mode = {
2339 .store = netxen_store_bridged_mode, 2351 .store = netxen_store_bridged_mode,
2340}; 2352};
2341 2353
2354static ssize_t
2355netxen_store_diag_mode(struct device *dev,
2356 struct device_attribute *attr, const char *buf, size_t len)
2357{
2358 struct netxen_adapter *adapter = dev_get_drvdata(dev);
2359 unsigned long new;
2360
2361 if (strict_strtoul(buf, 2, &new))
2362 return -EINVAL;
2363
2364 if (!!new != !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
2365 adapter->flags ^= NETXEN_NIC_DIAG_ENABLED;
2366
2367 return len;
2368}
2369
2370static ssize_t
2371netxen_show_diag_mode(struct device *dev,
2372 struct device_attribute *attr, char *buf)
2373{
2374 struct netxen_adapter *adapter = dev_get_drvdata(dev);
2375
2376 return sprintf(buf, "%d\n",
2377 !!(adapter->flags & NETXEN_NIC_DIAG_ENABLED));
2378}
2379
2380static struct device_attribute dev_attr_diag_mode = {
2381 .attr = {.name = "diag_mode", .mode = (S_IRUGO | S_IWUSR)},
2382 .show = netxen_show_diag_mode,
2383 .store = netxen_store_diag_mode,
2384};
2385
2386static int
2387netxen_sysfs_validate_crb(struct netxen_adapter *adapter,
2388 loff_t offset, size_t size)
2389{
2390 if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
2391 return -EIO;
2392
2393 if ((size != 4) || (offset & 0x3))
2394 return -EINVAL;
2395
2396 if (offset < NETXEN_PCI_CRBSPACE)
2397 return -EINVAL;
2398
2399 return 0;
2400}
2401
2402static ssize_t
2403netxen_sysfs_read_crb(struct kobject *kobj, struct bin_attribute *attr,
2404 char *buf, loff_t offset, size_t size)
2405{
2406 struct device *dev = container_of(kobj, struct device, kobj);
2407 struct netxen_adapter *adapter = dev_get_drvdata(dev);
2408 u32 data;
2409 int ret;
2410
2411 ret = netxen_sysfs_validate_crb(adapter, offset, size);
2412 if (ret != 0)
2413 return ret;
2414
2415 data = NXRD32(adapter, offset);
2416 memcpy(buf, &data, size);
2417 return size;
2418}
2419
2420static ssize_t
2421netxen_sysfs_write_crb(struct kobject *kobj, struct bin_attribute *attr,
2422 char *buf, loff_t offset, size_t size)
2423{
2424 struct device *dev = container_of(kobj, struct device, kobj);
2425 struct netxen_adapter *adapter = dev_get_drvdata(dev);
2426 u32 data;
2427 int ret;
2428
2429 ret = netxen_sysfs_validate_crb(adapter, offset, size);
2430 if (ret != 0)
2431 return ret;
2432
2433 memcpy(&data, buf, size);
2434 NXWR32(adapter, offset, data);
2435 return size;
2436}
2437
2438static int
2439netxen_sysfs_validate_mem(struct netxen_adapter *adapter,
2440 loff_t offset, size_t size)
2441{
2442 if (!(adapter->flags & NETXEN_NIC_DIAG_ENABLED))
2443 return -EIO;
2444
2445 if ((size != 8) || (offset & 0x7))
2446 return -EIO;
2447
2448 return 0;
2449}
2450
2451static ssize_t
2452netxen_sysfs_read_mem(struct kobject *kobj, struct bin_attribute *attr,
2453 char *buf, loff_t offset, size_t size)
2454{
2455 struct device *dev = container_of(kobj, struct device, kobj);
2456 struct netxen_adapter *adapter = dev_get_drvdata(dev);
2457 u64 data;
2458 int ret;
2459
2460 ret = netxen_sysfs_validate_mem(adapter, offset, size);
2461 if (ret != 0)
2462 return ret;
2463
2464 if (adapter->pci_mem_read(adapter, offset, &data))
2465 return -EIO;
2466
2467 memcpy(buf, &data, size);
2468
2469 return size;
2470}
2471
2472ssize_t netxen_sysfs_write_mem(struct kobject *kobj,
2473 struct bin_attribute *attr, char *buf,
2474 loff_t offset, size_t size)
2475{
2476 struct device *dev = container_of(kobj, struct device, kobj);
2477 struct netxen_adapter *adapter = dev_get_drvdata(dev);
2478 u64 data;
2479 int ret;
2480
2481 ret = netxen_sysfs_validate_mem(adapter, offset, size);
2482 if (ret != 0)
2483 return ret;
2484
2485 memcpy(&data, buf, size);
2486
2487 if (adapter->pci_mem_write(adapter, offset, data))
2488 return -EIO;
2489
2490 return size;
2491}
2492
2493
2494static struct bin_attribute bin_attr_crb = {
2495 .attr = {.name = "crb", .mode = (S_IRUGO | S_IWUSR)},
2496 .size = 0,
2497 .read = netxen_sysfs_read_crb,
2498 .write = netxen_sysfs_write_crb,
2499};
2500
2501static struct bin_attribute bin_attr_mem = {
2502 .attr = {.name = "mem", .mode = (S_IRUGO | S_IWUSR)},
2503 .size = 0,
2504 .read = netxen_sysfs_read_mem,
2505 .write = netxen_sysfs_write_mem,
2506};
2507
2508#ifdef CONFIG_MODULES
2509static ssize_t
2510netxen_store_auto_fw_reset(struct module_attribute *mattr,
2511 struct module *mod, const char *buf, size_t count)
2512
2513{
2514 unsigned long new;
2515
2516 if (strict_strtoul(buf, 16, &new))
2517 return -EINVAL;
2518
2519 if ((new == AUTO_FW_RESET_ENABLED) || (new == AUTO_FW_RESET_DISABLED)) {
2520 auto_fw_reset = new;
2521 return count;
2522 }
2523
2524 return -EINVAL;
2525}
2526
2527static ssize_t
2528netxen_show_auto_fw_reset(struct module_attribute *mattr,
2529 struct module *mod, char *buf)
2530
2531{
2532 if (auto_fw_reset == AUTO_FW_RESET_ENABLED)
2533 return sprintf(buf, "enabled\n");
2534 else
2535 return sprintf(buf, "disabled\n");
2536}
2537
2538static struct module_attribute mod_attr_fw_reset = {
2539 .attr = {.name = "auto_fw_reset", .mode = (S_IRUGO | S_IWUSR)},
2540 .show = netxen_show_auto_fw_reset,
2541 .store = netxen_store_auto_fw_reset,
2542};
2543#endif
2544
2342static void 2545static void
2343netxen_create_sysfs_entries(struct netxen_adapter *adapter) 2546netxen_create_sysfs_entries(struct netxen_adapter *adapter)
2344{ 2547{
@@ -2364,6 +2567,33 @@ netxen_remove_sysfs_entries(struct netxen_adapter *adapter)
2364 device_remove_file(dev, &dev_attr_bridged_mode); 2567 device_remove_file(dev, &dev_attr_bridged_mode);
2365} 2568}
2366 2569
2570static void
2571netxen_create_diag_entries(struct netxen_adapter *adapter)
2572{
2573 struct pci_dev *pdev = adapter->pdev;
2574 struct device *dev;
2575
2576 dev = &pdev->dev;
2577 if (device_create_file(dev, &dev_attr_diag_mode))
2578 dev_info(dev, "failed to create diag_mode sysfs entry\n");
2579 if (device_create_bin_file(dev, &bin_attr_crb))
2580 dev_info(dev, "failed to create crb sysfs entry\n");
2581 if (device_create_bin_file(dev, &bin_attr_mem))
2582 dev_info(dev, "failed to create mem sysfs entry\n");
2583}
2584
2585
2586static void
2587netxen_remove_diag_entries(struct netxen_adapter *adapter)
2588{
2589 struct pci_dev *pdev = adapter->pdev;
2590 struct device *dev = &pdev->dev;
2591
2592 device_remove_file(dev, &dev_attr_diag_mode);
2593 device_remove_bin_file(dev, &bin_attr_crb);
2594 device_remove_bin_file(dev, &bin_attr_mem);
2595}
2596
2367#ifdef CONFIG_INET 2597#ifdef CONFIG_INET
2368 2598
2369#define is_netxen_netdev(dev) (dev->netdev_ops == &netxen_netdev_ops) 2599#define is_netxen_netdev(dev) (dev->netdev_ops == &netxen_netdev_ops)
@@ -2516,6 +2746,10 @@ static struct pci_driver netxen_driver = {
2516 2746
2517static int __init netxen_init_module(void) 2747static int __init netxen_init_module(void)
2518{ 2748{
2749#ifdef CONFIG_MODULES
2750 struct module *mod = THIS_MODULE;
2751#endif
2752
2519 printk(KERN_INFO "%s\n", netxen_nic_driver_string); 2753 printk(KERN_INFO "%s\n", netxen_nic_driver_string);
2520 2754
2521#ifdef CONFIG_INET 2755#ifdef CONFIG_INET
@@ -2523,6 +2757,12 @@ static int __init netxen_init_module(void)
2523 register_inetaddr_notifier(&netxen_inetaddr_cb); 2757 register_inetaddr_notifier(&netxen_inetaddr_cb);
2524#endif 2758#endif
2525 2759
2760#ifdef CONFIG_MODULES
2761 if (sysfs_create_file(&mod->mkobj.kobj, &mod_attr_fw_reset.attr))
2762 printk(KERN_ERR "%s: Failed to create auto_fw_reset "
2763 "sysfs entry.", netxen_nic_driver_name);
2764#endif
2765
2526 return pci_register_driver(&netxen_driver); 2766 return pci_register_driver(&netxen_driver);
2527} 2767}
2528 2768
@@ -2530,6 +2770,12 @@ module_init(netxen_init_module);
2530 2770
2531static void __exit netxen_exit_module(void) 2771static void __exit netxen_exit_module(void)
2532{ 2772{
2773#ifdef CONFIG_MODULES
2774 struct module *mod = THIS_MODULE;
2775
2776 sysfs_remove_file(&mod->mkobj.kobj, &mod_attr_fw_reset.attr);
2777#endif
2778
2533 pci_unregister_driver(&netxen_driver); 2779 pci_unregister_driver(&netxen_driver);
2534 2780
2535#ifdef CONFIG_INET 2781#ifdef CONFIG_INET
diff --git a/drivers/net/niu.c b/drivers/net/niu.c
index d6c7ac68f6ea..8ce58c4c7dd3 100644
--- a/drivers/net/niu.c
+++ b/drivers/net/niu.c
@@ -45,10 +45,6 @@ MODULE_DESCRIPTION("NIU ethernet driver");
45MODULE_LICENSE("GPL"); 45MODULE_LICENSE("GPL");
46MODULE_VERSION(DRV_MODULE_VERSION); 46MODULE_VERSION(DRV_MODULE_VERSION);
47 47
48#ifndef DMA_44BIT_MASK
49#define DMA_44BIT_MASK 0x00000fffffffffffULL
50#endif
51
52#ifndef readq 48#ifndef readq
53static u64 readq(void __iomem *reg) 49static u64 readq(void __iomem *reg)
54{ 50{
@@ -7855,10 +7851,13 @@ static void niu_get_strings(struct net_device *dev, u32 stringset, u8 *data)
7855 } 7851 }
7856} 7852}
7857 7853
7858static int niu_get_stats_count(struct net_device *dev) 7854static int niu_get_sset_count(struct net_device *dev, int stringset)
7859{ 7855{
7860 struct niu *np = netdev_priv(dev); 7856 struct niu *np = netdev_priv(dev);
7861 7857
7858 if (stringset != ETH_SS_STATS)
7859 return -EINVAL;
7860
7862 return ((np->flags & NIU_FLAGS_XMAC ? 7861 return ((np->flags & NIU_FLAGS_XMAC ?
7863 NUM_XMAC_STAT_KEYS : 7862 NUM_XMAC_STAT_KEYS :
7864 NUM_BMAC_STAT_KEYS) + 7863 NUM_BMAC_STAT_KEYS) +
@@ -7978,7 +7977,7 @@ static const struct ethtool_ops niu_ethtool_ops = {
7978 .get_settings = niu_get_settings, 7977 .get_settings = niu_get_settings,
7979 .set_settings = niu_set_settings, 7978 .set_settings = niu_set_settings,
7980 .get_strings = niu_get_strings, 7979 .get_strings = niu_get_strings,
7981 .get_stats_count = niu_get_stats_count, 7980 .get_sset_count = niu_get_sset_count,
7982 .get_ethtool_stats = niu_get_ethtool_stats, 7981 .get_ethtool_stats = niu_get_ethtool_stats,
7983 .phys_id = niu_phys_id, 7982 .phys_id = niu_phys_id,
7984 .get_rxnfc = niu_get_nfc, 7983 .get_rxnfc = niu_get_nfc,
@@ -8144,7 +8143,7 @@ static void __devinit niu_vpd_parse_version(struct niu *np)
8144 int i; 8143 int i;
8145 8144
8146 for (i = 0; i < len - 5; i++) { 8145 for (i = 0; i < len - 5; i++) {
8147 if (!strncmp(s + i, "FCode ", 5)) 8146 if (!strncmp(s + i, "FCode ", 6))
8148 break; 8147 break;
8149 } 8148 }
8150 if (i >= len - 5) 8149 if (i >= len - 5)
@@ -9915,7 +9914,7 @@ static int __devinit niu_pci_init_one(struct pci_dev *pdev,
9915 PCI_EXP_DEVCTL_RELAX_EN); 9914 PCI_EXP_DEVCTL_RELAX_EN);
9916 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16); 9915 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
9917 9916
9918 dma_mask = DMA_44BIT_MASK; 9917 dma_mask = DMA_BIT_MASK(44);
9919 err = pci_set_dma_mask(pdev, dma_mask); 9918 err = pci_set_dma_mask(pdev, dma_mask);
9920 if (!err) { 9919 if (!err) {
9921 dev->features |= NETIF_F_HIGHDMA; 9920 dev->features |= NETIF_F_HIGHDMA;
diff --git a/drivers/net/pasemi_mac_ethtool.c b/drivers/net/pasemi_mac_ethtool.c
index 28a86224879d..fefa79e34b95 100644
--- a/drivers/net/pasemi_mac_ethtool.c
+++ b/drivers/net/pasemi_mac_ethtool.c
@@ -77,6 +77,19 @@ pasemi_mac_ethtool_get_settings(struct net_device *netdev,
77 return phy_ethtool_gset(phydev, cmd); 77 return phy_ethtool_gset(phydev, cmd);
78} 78}
79 79
80static int
81pasemi_mac_ethtool_set_settings(struct net_device *netdev,
82 struct ethtool_cmd *cmd)
83{
84 struct pasemi_mac *mac = netdev_priv(netdev);
85 struct phy_device *phydev = mac->phydev;
86
87 if (!phydev)
88 return -EOPNOTSUPP;
89
90 return phy_ethtool_sset(phydev, cmd);
91}
92
80static void 93static void
81pasemi_mac_ethtool_get_drvinfo(struct net_device *netdev, 94pasemi_mac_ethtool_get_drvinfo(struct net_device *netdev,
82 struct ethtool_drvinfo *drvinfo) 95 struct ethtool_drvinfo *drvinfo)
@@ -150,6 +163,7 @@ static void pasemi_mac_get_strings(struct net_device *netdev, u32 stringset,
150 163
151const struct ethtool_ops pasemi_mac_ethtool_ops = { 164const struct ethtool_ops pasemi_mac_ethtool_ops = {
152 .get_settings = pasemi_mac_ethtool_get_settings, 165 .get_settings = pasemi_mac_ethtool_get_settings,
166 .set_settings = pasemi_mac_ethtool_set_settings,
153 .get_drvinfo = pasemi_mac_ethtool_get_drvinfo, 167 .get_drvinfo = pasemi_mac_ethtool_get_drvinfo,
154 .get_msglevel = pasemi_mac_ethtool_get_msglevel, 168 .get_msglevel = pasemi_mac_ethtool_get_msglevel,
155 .set_msglevel = pasemi_mac_ethtool_set_msglevel, 169 .set_msglevel = pasemi_mac_ethtool_set_msglevel,
diff --git a/drivers/net/pcmcia/pcnet_cs.c b/drivers/net/pcmcia/pcnet_cs.c
index 94c9ad2746bc..469684474b72 100644
--- a/drivers/net/pcmcia/pcnet_cs.c
+++ b/drivers/net/pcmcia/pcnet_cs.c
@@ -1768,6 +1768,13 @@ static struct pcmcia_device_id pcnet_ids[] = {
1768 PCMCIA_DEVICE_NULL 1768 PCMCIA_DEVICE_NULL
1769}; 1769};
1770MODULE_DEVICE_TABLE(pcmcia, pcnet_ids); 1770MODULE_DEVICE_TABLE(pcmcia, pcnet_ids);
1771MODULE_FIRMWARE("cis/PCMLM28.cis");
1772MODULE_FIRMWARE("cis/DP83903.cis");
1773MODULE_FIRMWARE("cis/LA-PCM.cis");
1774MODULE_FIRMWARE("PE520.cis");
1775MODULE_FIRMWARE("cis/NE2K.cis");
1776MODULE_FIRMWARE("cis/PE-200.cis");
1777MODULE_FIRMWARE("cis/tamarack.cis");
1771 1778
1772static struct pcmcia_driver pcnet_driver = { 1779static struct pcmcia_driver pcnet_driver = {
1773 .drv = { 1780 .drv = {
diff --git a/drivers/net/phy/broadcom.c b/drivers/net/phy/broadcom.c
index f81e53222230..f63c96a4ecb4 100644
--- a/drivers/net/phy/broadcom.c
+++ b/drivers/net/phy/broadcom.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/module.h> 17#include <linux/module.h>
18#include <linux/phy.h> 18#include <linux/phy.h>
19#include <linux/brcmphy.h>
19 20
20#define PHY_ID_BCM50610 0x0143bd60 21#define PHY_ID_BCM50610 0x0143bd60
21#define PHY_ID_BCM50610M 0x0143bd70 22#define PHY_ID_BCM50610M 0x0143bd70
@@ -24,6 +25,9 @@
24#define BRCM_PHY_MODEL(phydev) \ 25#define BRCM_PHY_MODEL(phydev) \
25 ((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask) 26 ((phydev)->drv->phy_id & (phydev)->drv->phy_id_mask)
26 27
28#define BRCM_PHY_REV(phydev) \
29 ((phydev)->drv->phy_id & ~((phydev)->drv->phy_id_mask))
30
27 31
28#define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */ 32#define MII_BCM54XX_ECR 0x10 /* BCM54xx extended control register */
29#define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */ 33#define MII_BCM54XX_ECR_IM 0x1000 /* Interrupt mask */
@@ -94,22 +98,35 @@
94#define BCM_LED_SRC_OFF 0xe /* Tied high */ 98#define BCM_LED_SRC_OFF 0xe /* Tied high */
95#define BCM_LED_SRC_ON 0xf /* Tied low */ 99#define BCM_LED_SRC_ON 0xf /* Tied low */
96 100
101
97/* 102/*
98 * BCM5482: Shadow registers 103 * BCM5482: Shadow registers
99 * Shadow values go into bits [14:10] of register 0x1c to select a shadow 104 * Shadow values go into bits [14:10] of register 0x1c to select a shadow
100 * register to access. 105 * register to access.
101 */ 106 */
107/* 00101: Spare Control Register 3 */
108#define BCM54XX_SHD_SCR3 0x05
109#define BCM54XX_SHD_SCR3_DEF_CLK125 0x0001
110#define BCM54XX_SHD_SCR3_DLLAPD_DIS 0x0002
111#define BCM54XX_SHD_SCR3_TRDDAPD 0x0004
112
113/* 01010: Auto Power-Down */
114#define BCM54XX_SHD_APD 0x0a
115#define BCM54XX_SHD_APD_EN 0x0020
116
102#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */ 117#define BCM5482_SHD_LEDS1 0x0d /* 01101: LED Selector 1 */
103 /* LED3 / ~LINKSPD[2] selector */ 118 /* LED3 / ~LINKSPD[2] selector */
104#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4) 119#define BCM5482_SHD_LEDS1_LED3(src) ((src & 0xf) << 4)
105 /* LED1 / ~LINKSPD[1] selector */ 120 /* LED1 / ~LINKSPD[1] selector */
106#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0) 121#define BCM5482_SHD_LEDS1_LED1(src) ((src & 0xf) << 0)
122#define BCM54XX_SHD_RGMII_MODE 0x0b /* 01011: RGMII Mode Selector */
107#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */ 123#define BCM5482_SHD_SSD 0x14 /* 10100: Secondary SerDes control */
108#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */ 124#define BCM5482_SHD_SSD_LEDM 0x0008 /* SSD LED Mode enable */
109#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */ 125#define BCM5482_SHD_SSD_EN 0x0001 /* SSD enable */
110#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */ 126#define BCM5482_SHD_MODE 0x1f /* 11111: Mode Control Register */
111#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */ 127#define BCM5482_SHD_MODE_1000BX 0x0001 /* Enable 1000BASE-X registers */
112 128
129
113/* 130/*
114 * EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17) 131 * EXPANSION SHADOW ACCESS REGISTERS. (PHY REG 0x15, 0x16, and 0x17)
115 */ 132 */
@@ -138,16 +155,6 @@
138#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */ 155#define BCM5482_SSD_SGMII_SLAVE_EN 0x0002 /* Slave mode enable */
139#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */ 156#define BCM5482_SSD_SGMII_SLAVE_AD 0x0001 /* Slave auto-detection */
140 157
141/*
142 * Device flags for PHYs that can be configured for different operating
143 * modes.
144 */
145#define PHY_BCM_FLAGS_VALID 0x80000000
146#define PHY_BCM_FLAGS_INTF_XAUI 0x00000020
147#define PHY_BCM_FLAGS_INTF_SGMII 0x00000010
148#define PHY_BCM_FLAGS_MODE_1000BX 0x00000002
149#define PHY_BCM_FLAGS_MODE_COPPER 0x00000001
150
151 158
152/*****************************************************************************/ 159/*****************************************************************************/
153/* Fast Ethernet Transceiver definitions. */ 160/* Fast Ethernet Transceiver definitions. */
@@ -237,53 +244,145 @@ static int bcm54xx_auxctl_write(struct phy_device *phydev, u16 regnum, u16 val)
237 return phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum | val); 244 return phy_write(phydev, MII_BCM54XX_AUX_CTL, regnum | val);
238} 245}
239 246
247/* Needs SMDSP clock enabled via bcm54xx_phydsp_config() */
240static int bcm50610_a0_workaround(struct phy_device *phydev) 248static int bcm50610_a0_workaround(struct phy_device *phydev)
241{ 249{
242 int err; 250 int err;
243 251
244 err = bcm54xx_auxctl_write(phydev,
245 MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
246 MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA |
247 MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
248 if (err < 0)
249 return err;
250
251 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP08,
252 MII_BCM54XX_EXP_EXP08_RJCT_2MHZ |
253 MII_BCM54XX_EXP_EXP08_EARLY_DAC_WAKE);
254 if (err < 0)
255 goto error;
256
257 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH0, 252 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH0,
258 MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN | 253 MII_BCM54XX_EXP_AADJ1CH0_SWP_ABCD_OEN |
259 MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF); 254 MII_BCM54XX_EXP_AADJ1CH0_SWSEL_THPF);
260 if (err < 0) 255 if (err < 0)
261 goto error; 256 return err;
262 257
263 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH3, 258 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_AADJ1CH3,
264 MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ); 259 MII_BCM54XX_EXP_AADJ1CH3_ADCCKADJ);
265 if (err < 0) 260 if (err < 0)
266 goto error; 261 return err;
267 262
268 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75, 263 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75,
269 MII_BCM54XX_EXP_EXP75_VDACCTRL); 264 MII_BCM54XX_EXP_EXP75_VDACCTRL);
270 if (err < 0) 265 if (err < 0)
271 goto error; 266 return err;
272 267
273 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP96, 268 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP96,
274 MII_BCM54XX_EXP_EXP96_MYST); 269 MII_BCM54XX_EXP_EXP96_MYST);
275 if (err < 0) 270 if (err < 0)
276 goto error; 271 return err;
277 272
278 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP97, 273 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP97,
279 MII_BCM54XX_EXP_EXP97_MYST); 274 MII_BCM54XX_EXP_EXP97_MYST);
280 275
276 return err;
277}
278
279static int bcm54xx_phydsp_config(struct phy_device *phydev)
280{
281 int err, err2;
282
283 /* Enable the SMDSP clock */
284 err = bcm54xx_auxctl_write(phydev,
285 MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
286 MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA |
287 MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
288 if (err < 0)
289 return err;
290
291 if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
292 BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) {
293 /* Clear bit 9 to fix a phy interop issue. */
294 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP08,
295 MII_BCM54XX_EXP_EXP08_RJCT_2MHZ);
296 if (err < 0)
297 goto error;
298
299 if (phydev->drv->phy_id == PHY_ID_BCM50610) {
300 err = bcm50610_a0_workaround(phydev);
301 if (err < 0)
302 goto error;
303 }
304 }
305
306 if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM57780) {
307 int val;
308
309 val = bcm54xx_exp_read(phydev, MII_BCM54XX_EXP_EXP75);
310 if (val < 0)
311 goto error;
312
313 val |= MII_BCM54XX_EXP_EXP75_CM_OSC;
314 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75, val);
315 }
316
281error: 317error:
282 bcm54xx_auxctl_write(phydev, 318 /* Disable the SMDSP clock */
283 MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL, 319 err2 = bcm54xx_auxctl_write(phydev,
284 MII_BCM54XX_AUXCTL_ACTL_TX_6DB); 320 MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
321 MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
285 322
286 return err; 323 /* Return the first error reported. */
324 return err ? err : err2;
325}
326
327static void bcm54xx_adjust_rxrefclk(struct phy_device *phydev)
328{
329 u32 val, orig;
330 bool clk125en = true;
331
332 /* Abort if we are using an untested phy. */
333 if (BRCM_PHY_MODEL(phydev) != PHY_ID_BCM57780 ||
334 BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610 ||
335 BRCM_PHY_MODEL(phydev) != PHY_ID_BCM50610M)
336 return;
337
338 val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_SCR3);
339 if (val < 0)
340 return;
341
342 orig = val;
343
344 if ((BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
345 BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) &&
346 BRCM_PHY_REV(phydev) >= 0x3) {
347 /*
348 * Here, bit 0 _disables_ CLK125 when set.
349 * This bit is set by default.
350 */
351 clk125en = false;
352 } else {
353 if (phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED) {
354 /* Here, bit 0 _enables_ CLK125 when set */
355 val &= ~BCM54XX_SHD_SCR3_DEF_CLK125;
356 clk125en = false;
357 }
358 }
359
360 if (clk125en == false ||
361 (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
362 val &= ~BCM54XX_SHD_SCR3_DLLAPD_DIS;
363 else
364 val |= BCM54XX_SHD_SCR3_DLLAPD_DIS;
365
366 if (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY)
367 val |= BCM54XX_SHD_SCR3_TRDDAPD;
368
369 if (orig != val)
370 bcm54xx_shadow_write(phydev, BCM54XX_SHD_SCR3, val);
371
372 val = bcm54xx_shadow_read(phydev, BCM54XX_SHD_APD);
373 if (val < 0)
374 return;
375
376 orig = val;
377
378 if (clk125en == false ||
379 (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
380 val |= BCM54XX_SHD_APD_EN;
381 else
382 val &= ~BCM54XX_SHD_APD_EN;
383
384 if (orig != val)
385 bcm54xx_shadow_write(phydev, BCM54XX_SHD_APD, val);
287} 386}
288 387
289static int bcm54xx_config_init(struct phy_device *phydev) 388static int bcm54xx_config_init(struct phy_device *phydev)
@@ -308,38 +407,17 @@ static int bcm54xx_config_init(struct phy_device *phydev)
308 if (err < 0) 407 if (err < 0)
309 return err; 408 return err;
310 409
311 if (phydev->drv->phy_id == PHY_ID_BCM50610) { 410 if ((BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610 ||
312 err = bcm50610_a0_workaround(phydev); 411 BRCM_PHY_MODEL(phydev) == PHY_ID_BCM50610M) &&
313 if (err < 0) 412 (phydev->dev_flags & PHY_BRCM_CLEAR_RGMII_MODE))
314 return err; 413 bcm54xx_shadow_write(phydev, BCM54XX_SHD_RGMII_MODE, 0);
315 }
316
317 if (BRCM_PHY_MODEL(phydev) == PHY_ID_BCM57780) {
318 int err2;
319
320 err = bcm54xx_auxctl_write(phydev,
321 MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
322 MII_BCM54XX_AUXCTL_ACTL_SMDSP_ENA |
323 MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
324 if (err < 0)
325 return err;
326
327 reg = bcm54xx_exp_read(phydev, MII_BCM54XX_EXP_EXP75);
328 if (reg < 0)
329 goto error;
330 414
331 reg |= MII_BCM54XX_EXP_EXP75_CM_OSC; 415 if ((phydev->dev_flags & PHY_BRCM_RX_REFCLK_UNUSED) ||
332 err = bcm54xx_exp_write(phydev, MII_BCM54XX_EXP_EXP75, reg); 416 (phydev->dev_flags & PHY_BRCM_DIS_TXCRXC_NOENRGY) ||
417 (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE))
418 bcm54xx_adjust_rxrefclk(phydev);
333 419
334error: 420 bcm54xx_phydsp_config(phydev);
335 err2 = bcm54xx_auxctl_write(phydev,
336 MII_BCM54XX_AUXCTL_SHDWSEL_AUXCTL,
337 MII_BCM54XX_AUXCTL_ACTL_TX_6DB);
338 if (err)
339 return err;
340 if (err2)
341 return err2;
342 }
343 421
344 return 0; 422 return 0;
345} 423}
@@ -564,9 +642,11 @@ static int brcm_fet_config_init(struct phy_device *phydev)
564 if (err < 0) 642 if (err < 0)
565 goto done; 643 goto done;
566 644
567 /* Enable auto power down */ 645 if (phydev->dev_flags & PHY_BRCM_AUTO_PWRDWN_ENABLE) {
568 err = brcm_phy_setbits(phydev, MII_BRCM_FET_SHDW_AUXSTAT2, 646 /* Enable auto power down */
569 MII_BRCM_FET_SHDW_AS2_APDE); 647 err = brcm_phy_setbits(phydev, MII_BRCM_FET_SHDW_AUXSTAT2,
648 MII_BRCM_FET_SHDW_AS2_APDE);
649 }
570 650
571done: 651done:
572 /* Disable shadow register access */ 652 /* Disable shadow register access */
diff --git a/drivers/net/ppp_async.c b/drivers/net/ppp_async.c
index 6de8399d6dd9..30b1b3326765 100644
--- a/drivers/net/ppp_async.c
+++ b/drivers/net/ppp_async.c
@@ -337,10 +337,7 @@ ppp_asynctty_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
337 return 0; 337 return 0;
338} 338}
339 339
340/* 340/* May sleep, don't call from interrupt level or with interrupts disabled */
341 * This can now be called from hard interrupt level as well
342 * as soft interrupt level or mainline.
343 */
344static void 341static void
345ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf, 342ppp_asynctty_receive(struct tty_struct *tty, const unsigned char *buf,
346 char *cflags, int count) 343 char *cflags, int count)
diff --git a/drivers/net/ppp_synctty.c b/drivers/net/ppp_synctty.c
index d2fa2db13586..c908b08dc981 100644
--- a/drivers/net/ppp_synctty.c
+++ b/drivers/net/ppp_synctty.c
@@ -378,10 +378,7 @@ ppp_sync_poll(struct tty_struct *tty, struct file *file, poll_table *wait)
378 return 0; 378 return 0;
379} 379}
380 380
381/* 381/* May sleep, don't call from interrupt level or with interrupts disabled */
382 * This can now be called from hard interrupt level as well
383 * as soft interrupt level or mainline.
384 */
385static void 382static void
386ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf, 383ppp_sync_receive(struct tty_struct *tty, const unsigned char *buf,
387 char *cflags, int count) 384 char *cflags, int count)
diff --git a/drivers/net/pppoe.c b/drivers/net/pppoe.c
index 2559991eea6a..60c8d233209f 100644
--- a/drivers/net/pppoe.c
+++ b/drivers/net/pppoe.c
@@ -250,20 +250,19 @@ static inline struct pppox_sock *get_item_by_addr(struct net *net,
250{ 250{
251 struct net_device *dev; 251 struct net_device *dev;
252 struct pppoe_net *pn; 252 struct pppoe_net *pn;
253 struct pppox_sock *pppox_sock; 253 struct pppox_sock *pppox_sock = NULL;
254 254
255 int ifindex; 255 int ifindex;
256 256
257 dev = dev_get_by_name(net, sp->sa_addr.pppoe.dev); 257 rcu_read_lock();
258 if (!dev) 258 dev = dev_get_by_name_rcu(net, sp->sa_addr.pppoe.dev);
259 return NULL; 259 if (dev) {
260 260 ifindex = dev->ifindex;
261 ifindex = dev->ifindex; 261 pn = net_generic(net, pppoe_net_id);
262 pn = net_generic(net, pppoe_net_id); 262 pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
263 pppox_sock = get_item(pn, sp->sa_addr.pppoe.sid,
264 sp->sa_addr.pppoe.remote, ifindex); 263 sp->sa_addr.pppoe.remote, ifindex);
265 dev_put(dev); 264 }
266 265 rcu_read_unlock();
267 return pppox_sock; 266 return pppox_sock;
268} 267}
269 268
diff --git a/drivers/net/pppol2tp.c b/drivers/net/pppol2tp.c
index 5910df60c93e..849cc9c62c2a 100644
--- a/drivers/net/pppol2tp.c
+++ b/drivers/net/pppol2tp.c
@@ -516,7 +516,7 @@ static inline int pppol2tp_verify_udp_checksum(struct sock *sk,
516 return 0; 516 return 0;
517 517
518 inet = inet_sk(sk); 518 inet = inet_sk(sk);
519 psum = csum_tcpudp_nofold(inet->saddr, inet->daddr, ulen, 519 psum = csum_tcpudp_nofold(inet->inet_saddr, inet->inet_daddr, ulen,
520 IPPROTO_UDP, 0); 520 IPPROTO_UDP, 0);
521 521
522 if ((skb->ip_summed == CHECKSUM_COMPLETE) && 522 if ((skb->ip_summed == CHECKSUM_COMPLETE) &&
@@ -949,8 +949,8 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
949 inet = inet_sk(sk_tun); 949 inet = inet_sk(sk_tun);
950 udp_len = hdr_len + sizeof(ppph) + total_len; 950 udp_len = hdr_len + sizeof(ppph) + total_len;
951 uh = (struct udphdr *) skb->data; 951 uh = (struct udphdr *) skb->data;
952 uh->source = inet->sport; 952 uh->source = inet->inet_sport;
953 uh->dest = inet->dport; 953 uh->dest = inet->inet_dport;
954 uh->len = htons(udp_len); 954 uh->len = htons(udp_len);
955 uh->check = 0; 955 uh->check = 0;
956 skb_put(skb, sizeof(struct udphdr)); 956 skb_put(skb, sizeof(struct udphdr));
@@ -978,7 +978,8 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
978 else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) { 978 else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
979 skb->ip_summed = CHECKSUM_COMPLETE; 979 skb->ip_summed = CHECKSUM_COMPLETE;
980 csum = skb_checksum(skb, 0, udp_len, 0); 980 csum = skb_checksum(skb, 0, udp_len, 0);
981 uh->check = csum_tcpudp_magic(inet->saddr, inet->daddr, 981 uh->check = csum_tcpudp_magic(inet->inet_saddr,
982 inet->inet_daddr,
982 udp_len, IPPROTO_UDP, csum); 983 udp_len, IPPROTO_UDP, csum);
983 if (uh->check == 0) 984 if (uh->check == 0)
984 uh->check = CSUM_MANGLED_0; 985 uh->check = CSUM_MANGLED_0;
@@ -986,7 +987,8 @@ static int pppol2tp_sendmsg(struct kiocb *iocb, struct socket *sock, struct msgh
986 skb->ip_summed = CHECKSUM_PARTIAL; 987 skb->ip_summed = CHECKSUM_PARTIAL;
987 skb->csum_start = skb_transport_header(skb) - skb->head; 988 skb->csum_start = skb_transport_header(skb) - skb->head;
988 skb->csum_offset = offsetof(struct udphdr, check); 989 skb->csum_offset = offsetof(struct udphdr, check);
989 uh->check = ~csum_tcpudp_magic(inet->saddr, inet->daddr, 990 uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
991 inet->inet_daddr,
990 udp_len, IPPROTO_UDP, 0); 992 udp_len, IPPROTO_UDP, 0);
991 } 993 }
992 994
@@ -1136,8 +1138,8 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1136 __skb_push(skb, sizeof(*uh)); 1138 __skb_push(skb, sizeof(*uh));
1137 skb_reset_transport_header(skb); 1139 skb_reset_transport_header(skb);
1138 uh = udp_hdr(skb); 1140 uh = udp_hdr(skb);
1139 uh->source = inet->sport; 1141 uh->source = inet->inet_sport;
1140 uh->dest = inet->dport; 1142 uh->dest = inet->inet_dport;
1141 uh->len = htons(udp_len); 1143 uh->len = htons(udp_len);
1142 uh->check = 0; 1144 uh->check = 0;
1143 1145
@@ -1181,7 +1183,8 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1181 else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) { 1183 else if (!(skb_dst(skb)->dev->features & NETIF_F_V4_CSUM)) {
1182 skb->ip_summed = CHECKSUM_COMPLETE; 1184 skb->ip_summed = CHECKSUM_COMPLETE;
1183 csum = skb_checksum(skb, 0, udp_len, 0); 1185 csum = skb_checksum(skb, 0, udp_len, 0);
1184 uh->check = csum_tcpudp_magic(inet->saddr, inet->daddr, 1186 uh->check = csum_tcpudp_magic(inet->inet_saddr,
1187 inet->inet_daddr,
1185 udp_len, IPPROTO_UDP, csum); 1188 udp_len, IPPROTO_UDP, csum);
1186 if (uh->check == 0) 1189 if (uh->check == 0)
1187 uh->check = CSUM_MANGLED_0; 1190 uh->check = CSUM_MANGLED_0;
@@ -1189,7 +1192,8 @@ static int pppol2tp_xmit(struct ppp_channel *chan, struct sk_buff *skb)
1189 skb->ip_summed = CHECKSUM_PARTIAL; 1192 skb->ip_summed = CHECKSUM_PARTIAL;
1190 skb->csum_start = skb_transport_header(skb) - skb->head; 1193 skb->csum_start = skb_transport_header(skb) - skb->head;
1191 skb->csum_offset = offsetof(struct udphdr, check); 1194 skb->csum_offset = offsetof(struct udphdr, check);
1192 uh->check = ~csum_tcpudp_magic(inet->saddr, inet->daddr, 1195 uh->check = ~csum_tcpudp_magic(inet->inet_saddr,
1196 inet->inet_daddr,
1193 udp_len, IPPROTO_UDP, 0); 1197 udp_len, IPPROTO_UDP, 0);
1194 } 1198 }
1195 1199
diff --git a/drivers/net/pppox.c b/drivers/net/pppox.c
index 4f6d33fbc673..ac806b27c658 100644
--- a/drivers/net/pppox.c
+++ b/drivers/net/pppox.c
@@ -104,7 +104,8 @@ int pppox_ioctl(struct socket *sock, unsigned int cmd, unsigned long arg)
104 104
105EXPORT_SYMBOL(pppox_ioctl); 105EXPORT_SYMBOL(pppox_ioctl);
106 106
107static int pppox_create(struct net *net, struct socket *sock, int protocol) 107static int pppox_create(struct net *net, struct socket *sock, int protocol,
108 int kern)
108{ 109{
109 int rc = -EPROTOTYPE; 110 int rc = -EPROTOTYPE;
110 111
@@ -125,7 +126,7 @@ out:
125 return rc; 126 return rc;
126} 127}
127 128
128static struct net_proto_family pppox_proto_family = { 129static const struct net_proto_family pppox_proto_family = {
129 .family = PF_PPPOX, 130 .family = PF_PPPOX,
130 .create = pppox_create, 131 .create = pppox_create,
131 .owner = THIS_MODULE, 132 .owner = THIS_MODULE,
diff --git a/drivers/net/qlge/qlge.h b/drivers/net/qlge/qlge.h
index c2383adcd527..862c1aaf3860 100644
--- a/drivers/net/qlge/qlge.h
+++ b/drivers/net/qlge/qlge.h
@@ -16,7 +16,7 @@
16 */ 16 */
17#define DRV_NAME "qlge" 17#define DRV_NAME "qlge"
18#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver " 18#define DRV_STRING "QLogic 10 Gigabit PCI-E Ethernet Driver "
19#define DRV_VERSION "v1.00.00-b3" 19#define DRV_VERSION "v1.00.00.23.00.00-01"
20 20
21#define PFX "qlge: " 21#define PFX "qlge: "
22#define QPRINTK(qdev, nlevel, klevel, fmt, args...) \ 22#define QPRINTK(qdev, nlevel, klevel, fmt, args...) \
@@ -54,8 +54,10 @@
54#define RX_RING_SHADOW_SPACE (sizeof(u64) + \ 54#define RX_RING_SHADOW_SPACE (sizeof(u64) + \
55 MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \ 55 MAX_DB_PAGES_PER_BQ(NUM_SMALL_BUFFERS) * sizeof(u64) + \
56 MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64)) 56 MAX_DB_PAGES_PER_BQ(NUM_LARGE_BUFFERS) * sizeof(u64))
57#define SMALL_BUFFER_SIZE 256 57#define SMALL_BUFFER_SIZE 512
58#define LARGE_BUFFER_SIZE PAGE_SIZE 58#define SMALL_BUF_MAP_SIZE (SMALL_BUFFER_SIZE / 2)
59#define LARGE_BUFFER_MAX_SIZE 8192
60#define LARGE_BUFFER_MIN_SIZE 2048
59#define MAX_SPLIT_SIZE 1023 61#define MAX_SPLIT_SIZE 1023
60#define QLGE_SB_PAD 32 62#define QLGE_SB_PAD 32
61 63
@@ -795,6 +797,7 @@ enum {
795 MB_WOL_BCAST = (1 << 5), 797 MB_WOL_BCAST = (1 << 5),
796 MB_WOL_LINK_UP = (1 << 6), 798 MB_WOL_LINK_UP = (1 << 6),
797 MB_WOL_LINK_DOWN = (1 << 7), 799 MB_WOL_LINK_DOWN = (1 << 7),
800 MB_WOL_MODE_ON = (1 << 16), /* Wake on Lan Mode on */
798 MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */ 801 MB_CMD_SET_WOL_FLTR = 0x00000111, /* Wake On Lan Filter */
799 MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */ 802 MB_CMD_CLEAR_WOL_FLTR = 0x00000112, /* Wake On Lan Filter */
800 MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */ 803 MB_CMD_SET_WOL_MAGIC = 0x00000113, /* Wake On Lan Magic Packet */
@@ -804,12 +807,27 @@ enum {
804 MB_CMD_SET_PORT_CFG = 0x00000122, 807 MB_CMD_SET_PORT_CFG = 0x00000122,
805 MB_CMD_GET_PORT_CFG = 0x00000123, 808 MB_CMD_GET_PORT_CFG = 0x00000123,
806 MB_CMD_GET_LINK_STS = 0x00000124, 809 MB_CMD_GET_LINK_STS = 0x00000124,
810 MB_CMD_SET_LED_CFG = 0x00000125, /* Set LED Configuration Register */
811 QL_LED_BLINK = 0x03e803e8,
812 MB_CMD_GET_LED_CFG = 0x00000126, /* Get LED Configuration Register */
807 MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */ 813 MB_CMD_SET_MGMNT_TFK_CTL = 0x00000160, /* Set Mgmnt Traffic Control */
808 MB_SET_MPI_TFK_STOP = (1 << 0), 814 MB_SET_MPI_TFK_STOP = (1 << 0),
809 MB_SET_MPI_TFK_RESUME = (1 << 1), 815 MB_SET_MPI_TFK_RESUME = (1 << 1),
810 MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */ 816 MB_CMD_GET_MGMNT_TFK_CTL = 0x00000161, /* Get Mgmnt Traffic Control */
811 MB_GET_MPI_TFK_STOPPED = (1 << 0), 817 MB_GET_MPI_TFK_STOPPED = (1 << 0),
812 MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1), 818 MB_GET_MPI_TFK_FIFO_EMPTY = (1 << 1),
819 /* Sub-commands for IDC request.
820 * This describes the reason for the
821 * IDC request.
822 */
823 MB_CMD_IOP_NONE = 0x0000,
824 MB_CMD_IOP_PREP_UPDATE_MPI = 0x0001,
825 MB_CMD_IOP_COMP_UPDATE_MPI = 0x0002,
826 MB_CMD_IOP_PREP_LINK_DOWN = 0x0010,
827 MB_CMD_IOP_DVR_START = 0x0100,
828 MB_CMD_IOP_FLASH_ACC = 0x0101,
829 MB_CMD_IOP_RESTART_MPI = 0x0102,
830 MB_CMD_IOP_CORE_DUMP_MPI = 0x0103,
813 831
814 /* Mailbox Command Status. */ 832 /* Mailbox Command Status. */
815 MB_CMD_STS_GOOD = 0x00004000, /* Success. */ 833 MB_CMD_STS_GOOD = 0x00004000, /* Success. */
@@ -1201,9 +1219,17 @@ struct tx_ring_desc {
1201 struct tx_ring_desc *next; 1219 struct tx_ring_desc *next;
1202}; 1220};
1203 1221
1222struct page_chunk {
1223 struct page *page; /* master page */
1224 char *va; /* virt addr for this chunk */
1225 u64 map; /* mapping for master */
1226 unsigned int offset; /* offset for this chunk */
1227 unsigned int last_flag; /* flag set for last chunk in page */
1228};
1229
1204struct bq_desc { 1230struct bq_desc {
1205 union { 1231 union {
1206 struct page *lbq_page; 1232 struct page_chunk pg_chunk;
1207 struct sk_buff *skb; 1233 struct sk_buff *skb;
1208 } p; 1234 } p;
1209 __le64 *addr; 1235 __le64 *addr;
@@ -1237,6 +1263,9 @@ struct tx_ring {
1237 atomic_t queue_stopped; /* Turns queue off when full. */ 1263 atomic_t queue_stopped; /* Turns queue off when full. */
1238 struct delayed_work tx_work; 1264 struct delayed_work tx_work;
1239 struct ql_adapter *qdev; 1265 struct ql_adapter *qdev;
1266 u64 tx_packets;
1267 u64 tx_bytes;
1268 u64 tx_errors;
1240}; 1269};
1241 1270
1242/* 1271/*
@@ -1272,6 +1301,7 @@ struct rx_ring {
1272 dma_addr_t lbq_base_dma; 1301 dma_addr_t lbq_base_dma;
1273 void *lbq_base_indirect; 1302 void *lbq_base_indirect;
1274 dma_addr_t lbq_base_indirect_dma; 1303 dma_addr_t lbq_base_indirect_dma;
1304 struct page_chunk pg_chunk; /* current page for chunks */
1275 struct bq_desc *lbq; /* array of control blocks */ 1305 struct bq_desc *lbq; /* array of control blocks */
1276 void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */ 1306 void __iomem *lbq_prod_idx_db_reg; /* PCI doorbell mem area + 0x18 */
1277 u32 lbq_prod_idx; /* current sw prod idx */ 1307 u32 lbq_prod_idx; /* current sw prod idx */
@@ -1302,6 +1332,11 @@ struct rx_ring {
1302 struct napi_struct napi; 1332 struct napi_struct napi;
1303 u8 reserved; 1333 u8 reserved;
1304 struct ql_adapter *qdev; 1334 struct ql_adapter *qdev;
1335 u64 rx_packets;
1336 u64 rx_multicast;
1337 u64 rx_bytes;
1338 u64 rx_dropped;
1339 u64 rx_errors;
1305}; 1340};
1306 1341
1307/* 1342/*
@@ -1363,6 +1398,174 @@ struct nic_stats {
1363 u64 rx_1024_to_1518_pkts; 1398 u64 rx_1024_to_1518_pkts;
1364 u64 rx_1519_to_max_pkts; 1399 u64 rx_1519_to_max_pkts;
1365 u64 rx_len_err_pkts; 1400 u64 rx_len_err_pkts;
1401 /*
1402 * These stats come from offset 500h to 5C8h
1403 * in the XGMAC register.
1404 */
1405 u64 tx_cbfc_pause_frames0;
1406 u64 tx_cbfc_pause_frames1;
1407 u64 tx_cbfc_pause_frames2;
1408 u64 tx_cbfc_pause_frames3;
1409 u64 tx_cbfc_pause_frames4;
1410 u64 tx_cbfc_pause_frames5;
1411 u64 tx_cbfc_pause_frames6;
1412 u64 tx_cbfc_pause_frames7;
1413 u64 rx_cbfc_pause_frames0;
1414 u64 rx_cbfc_pause_frames1;
1415 u64 rx_cbfc_pause_frames2;
1416 u64 rx_cbfc_pause_frames3;
1417 u64 rx_cbfc_pause_frames4;
1418 u64 rx_cbfc_pause_frames5;
1419 u64 rx_cbfc_pause_frames6;
1420 u64 rx_cbfc_pause_frames7;
1421 u64 rx_nic_fifo_drop;
1422};
1423
1424/* Address/Length pairs for the coredump. */
1425enum {
1426 MPI_CORE_REGS_ADDR = 0x00030000,
1427 MPI_CORE_REGS_CNT = 127,
1428 MPI_CORE_SH_REGS_CNT = 16,
1429 TEST_REGS_ADDR = 0x00001000,
1430 TEST_REGS_CNT = 23,
1431 RMII_REGS_ADDR = 0x00001040,
1432 RMII_REGS_CNT = 64,
1433 FCMAC1_REGS_ADDR = 0x00001080,
1434 FCMAC2_REGS_ADDR = 0x000010c0,
1435 FCMAC_REGS_CNT = 64,
1436 FC1_MBX_REGS_ADDR = 0x00001100,
1437 FC2_MBX_REGS_ADDR = 0x00001240,
1438 FC_MBX_REGS_CNT = 64,
1439 IDE_REGS_ADDR = 0x00001140,
1440 IDE_REGS_CNT = 64,
1441 NIC1_MBX_REGS_ADDR = 0x00001180,
1442 NIC2_MBX_REGS_ADDR = 0x00001280,
1443 NIC_MBX_REGS_CNT = 64,
1444 SMBUS_REGS_ADDR = 0x00001200,
1445 SMBUS_REGS_CNT = 64,
1446 I2C_REGS_ADDR = 0x00001fc0,
1447 I2C_REGS_CNT = 64,
1448 MEMC_REGS_ADDR = 0x00003000,
1449 MEMC_REGS_CNT = 256,
1450 PBUS_REGS_ADDR = 0x00007c00,
1451 PBUS_REGS_CNT = 256,
1452 MDE_REGS_ADDR = 0x00010000,
1453 MDE_REGS_CNT = 6,
1454 CODE_RAM_ADDR = 0x00020000,
1455 CODE_RAM_CNT = 0x2000,
1456 MEMC_RAM_ADDR = 0x00100000,
1457 MEMC_RAM_CNT = 0x2000,
1458};
1459
1460#define MPI_COREDUMP_COOKIE 0x5555aaaa
1461struct mpi_coredump_global_header {
1462 u32 cookie;
1463 u8 idString[16];
1464 u32 timeLo;
1465 u32 timeHi;
1466 u32 imageSize;
1467 u32 headerSize;
1468 u8 info[220];
1469};
1470
1471struct mpi_coredump_segment_header {
1472 u32 cookie;
1473 u32 segNum;
1474 u32 segSize;
1475 u32 extra;
1476 u8 description[16];
1477};
1478
1479/* Reg dump segment numbers. */
1480enum {
1481 CORE_SEG_NUM = 1,
1482 TEST_LOGIC_SEG_NUM = 2,
1483 RMII_SEG_NUM = 3,
1484 FCMAC1_SEG_NUM = 4,
1485 FCMAC2_SEG_NUM = 5,
1486 FC1_MBOX_SEG_NUM = 6,
1487 IDE_SEG_NUM = 7,
1488 NIC1_MBOX_SEG_NUM = 8,
1489 SMBUS_SEG_NUM = 9,
1490 FC2_MBOX_SEG_NUM = 10,
1491 NIC2_MBOX_SEG_NUM = 11,
1492 I2C_SEG_NUM = 12,
1493 MEMC_SEG_NUM = 13,
1494 PBUS_SEG_NUM = 14,
1495 MDE_SEG_NUM = 15,
1496 NIC1_CONTROL_SEG_NUM = 16,
1497 NIC2_CONTROL_SEG_NUM = 17,
1498 NIC1_XGMAC_SEG_NUM = 18,
1499 NIC2_XGMAC_SEG_NUM = 19,
1500 WCS_RAM_SEG_NUM = 20,
1501 MEMC_RAM_SEG_NUM = 21,
1502 XAUI_AN_SEG_NUM = 22,
1503 XAUI_HSS_PCS_SEG_NUM = 23,
1504 XFI_AN_SEG_NUM = 24,
1505 XFI_TRAIN_SEG_NUM = 25,
1506 XFI_HSS_PCS_SEG_NUM = 26,
1507 XFI_HSS_TX_SEG_NUM = 27,
1508 XFI_HSS_RX_SEG_NUM = 28,
1509 XFI_HSS_PLL_SEG_NUM = 29,
1510 MISC_NIC_INFO_SEG_NUM = 30,
1511 INTR_STATES_SEG_NUM = 31,
1512 CAM_ENTRIES_SEG_NUM = 32,
1513 ROUTING_WORDS_SEG_NUM = 33,
1514 ETS_SEG_NUM = 34,
1515 PROBE_DUMP_SEG_NUM = 35,
1516 ROUTING_INDEX_SEG_NUM = 36,
1517 MAC_PROTOCOL_SEG_NUM = 37,
1518 XAUI2_AN_SEG_NUM = 38,
1519 XAUI2_HSS_PCS_SEG_NUM = 39,
1520 XFI2_AN_SEG_NUM = 40,
1521 XFI2_TRAIN_SEG_NUM = 41,
1522 XFI2_HSS_PCS_SEG_NUM = 42,
1523 XFI2_HSS_TX_SEG_NUM = 43,
1524 XFI2_HSS_RX_SEG_NUM = 44,
1525 XFI2_HSS_PLL_SEG_NUM = 45,
1526 SEM_REGS_SEG_NUM = 50
1527
1528};
1529
1530struct ql_nic_misc {
1531 u32 rx_ring_count;
1532 u32 tx_ring_count;
1533 u32 intr_count;
1534 u32 function;
1535};
1536
1537struct ql_reg_dump {
1538
1539 /* segment 0 */
1540 struct mpi_coredump_global_header mpi_global_header;
1541
1542 /* segment 16 */
1543 struct mpi_coredump_segment_header nic_regs_seg_hdr;
1544 u32 nic_regs[64];
1545
1546 /* segment 30 */
1547 struct mpi_coredump_segment_header misc_nic_seg_hdr;
1548 struct ql_nic_misc misc_nic_info;
1549
1550 /* segment 31 */
1551 /* one interrupt state for each CQ */
1552 struct mpi_coredump_segment_header intr_states_seg_hdr;
1553 u32 intr_states[MAX_CPUS];
1554
1555 /* segment 32 */
1556 /* 3 cam words each for 16 unicast,
1557 * 2 cam words for each of 32 multicast.
1558 */
1559 struct mpi_coredump_segment_header cam_entries_seg_hdr;
1560 u32 cam_entries[(16 * 3) + (32 * 3)];
1561
1562 /* segment 33 */
1563 struct mpi_coredump_segment_header nic_routing_words_seg_hdr;
1564 u32 nic_routing_words[16];
1565
1566 /* segment 34 */
1567 struct mpi_coredump_segment_header ets_seg_hdr;
1568 u32 ets[8+2];
1366}; 1569};
1367 1570
1368/* 1571/*
@@ -1398,6 +1601,8 @@ enum {
1398 QL_ALLMULTI = 6, 1601 QL_ALLMULTI = 6,
1399 QL_PORT_CFG = 7, 1602 QL_PORT_CFG = 7,
1400 QL_CAM_RT_SET = 8, 1603 QL_CAM_RT_SET = 8,
1604 QL_SELFTEST = 9,
1605 QL_LB_LINK_UP = 10,
1401}; 1606};
1402 1607
1403/* link_status bit definitions */ 1608/* link_status bit definitions */
@@ -1505,6 +1710,7 @@ struct ql_adapter {
1505 1710
1506 struct rx_ring rx_ring[MAX_RX_RINGS]; 1711 struct rx_ring rx_ring[MAX_RX_RINGS];
1507 struct tx_ring tx_ring[MAX_TX_RINGS]; 1712 struct tx_ring tx_ring[MAX_TX_RINGS];
1713 unsigned int lbq_buf_order;
1508 1714
1509 int rx_csum; 1715 int rx_csum;
1510 u32 default_rx_queue; 1716 u32 default_rx_queue;
@@ -1519,11 +1725,11 @@ struct ql_adapter {
1519 u32 port_init; 1725 u32 port_init;
1520 u32 link_status; 1726 u32 link_status;
1521 u32 link_config; 1727 u32 link_config;
1728 u32 led_config;
1522 u32 max_frame_size; 1729 u32 max_frame_size;
1523 1730
1524 union flash_params flash; 1731 union flash_params flash;
1525 1732
1526 struct net_device_stats stats;
1527 struct workqueue_struct *workqueue; 1733 struct workqueue_struct *workqueue;
1528 struct delayed_work asic_reset_work; 1734 struct delayed_work asic_reset_work;
1529 struct delayed_work mpi_reset_work; 1735 struct delayed_work mpi_reset_work;
@@ -1533,6 +1739,7 @@ struct ql_adapter {
1533 struct completion ide_completion; 1739 struct completion ide_completion;
1534 struct nic_operations *nic_ops; 1740 struct nic_operations *nic_ops;
1535 u16 device_id; 1741 u16 device_id;
1742 atomic_t lb_count;
1536}; 1743};
1537 1744
1538/* 1745/*
@@ -1611,10 +1818,22 @@ int ql_mb_get_fw_state(struct ql_adapter *qdev);
1611int ql_cam_route_initialize(struct ql_adapter *qdev); 1818int ql_cam_route_initialize(struct ql_adapter *qdev);
1612int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data); 1819int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data);
1613int ql_mb_about_fw(struct ql_adapter *qdev); 1820int ql_mb_about_fw(struct ql_adapter *qdev);
1821int ql_wol(struct ql_adapter *qdev);
1822int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol);
1823int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol);
1824int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config);
1825int ql_mb_get_led_cfg(struct ql_adapter *qdev);
1614void ql_link_on(struct ql_adapter *qdev); 1826void ql_link_on(struct ql_adapter *qdev);
1615void ql_link_off(struct ql_adapter *qdev); 1827void ql_link_off(struct ql_adapter *qdev);
1616int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control); 1828int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control);
1829int ql_mb_get_port_cfg(struct ql_adapter *qdev);
1830int ql_mb_set_port_cfg(struct ql_adapter *qdev);
1617int ql_wait_fifo_empty(struct ql_adapter *qdev); 1831int ql_wait_fifo_empty(struct ql_adapter *qdev);
1832void ql_gen_reg_dump(struct ql_adapter *qdev,
1833 struct ql_reg_dump *mpi_coredump);
1834netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev);
1835void ql_check_lb_frame(struct ql_adapter *, struct sk_buff *);
1836int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget);
1618 1837
1619#if 1 1838#if 1
1620#define QL_ALL_DUMP 1839#define QL_ALL_DUMP
diff --git a/drivers/net/qlge/qlge_dbg.c b/drivers/net/qlge/qlge_dbg.c
index aa88cb3f41c7..9f58c4710761 100644
--- a/drivers/net/qlge/qlge_dbg.c
+++ b/drivers/net/qlge/qlge_dbg.c
@@ -1,5 +1,185 @@
1#include "qlge.h" 1#include "qlge.h"
2 2
3
4static int ql_get_ets_regs(struct ql_adapter *qdev, u32 * buf)
5{
6 int status = 0;
7 int i;
8
9 for (i = 0; i < 8; i++, buf++) {
10 ql_write32(qdev, NIC_ETS, i << 29 | 0x08000000);
11 *buf = ql_read32(qdev, NIC_ETS);
12 }
13
14 for (i = 0; i < 2; i++, buf++) {
15 ql_write32(qdev, CNA_ETS, i << 29 | 0x08000000);
16 *buf = ql_read32(qdev, CNA_ETS);
17 }
18
19 return status;
20}
21
22static void ql_get_intr_states(struct ql_adapter *qdev, u32 * buf)
23{
24 int i;
25
26 for (i = 0; i < qdev->rx_ring_count; i++, buf++) {
27 ql_write32(qdev, INTR_EN,
28 qdev->intr_context[i].intr_read_mask);
29 *buf = ql_read32(qdev, INTR_EN);
30 }
31}
32
33static int ql_get_cam_entries(struct ql_adapter *qdev, u32 * buf)
34{
35 int i, status;
36 u32 value[3];
37
38 status = ql_sem_spinlock(qdev, SEM_MAC_ADDR_MASK);
39 if (status)
40 return status;
41
42 for (i = 0; i < 16; i++) {
43 status = ql_get_mac_addr_reg(qdev,
44 MAC_ADDR_TYPE_CAM_MAC, i, value);
45 if (status) {
46 QPRINTK(qdev, DRV, ERR,
47 "Failed read of mac index register.\n");
48 goto err;
49 }
50 *buf++ = value[0]; /* lower MAC address */
51 *buf++ = value[1]; /* upper MAC address */
52 *buf++ = value[2]; /* output */
53 }
54 for (i = 0; i < 32; i++) {
55 status = ql_get_mac_addr_reg(qdev,
56 MAC_ADDR_TYPE_MULTI_MAC, i, value);
57 if (status) {
58 QPRINTK(qdev, DRV, ERR,
59 "Failed read of mac index register.\n");
60 goto err;
61 }
62 *buf++ = value[0]; /* lower Mcast address */
63 *buf++ = value[1]; /* upper Mcast address */
64 }
65err:
66 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
67 return status;
68}
69
70static int ql_get_routing_entries(struct ql_adapter *qdev, u32 * buf)
71{
72 int status;
73 u32 value, i;
74
75 status = ql_sem_spinlock(qdev, SEM_RT_IDX_MASK);
76 if (status)
77 return status;
78
79 for (i = 0; i < 16; i++) {
80 status = ql_get_routing_reg(qdev, i, &value);
81 if (status) {
82 QPRINTK(qdev, DRV, ERR,
83 "Failed read of routing index register.\n");
84 goto err;
85 } else {
86 *buf++ = value;
87 }
88 }
89err:
90 ql_sem_unlock(qdev, SEM_RT_IDX_MASK);
91 return status;
92}
93
94/* Create a coredump segment header */
95static void ql_build_coredump_seg_header(
96 struct mpi_coredump_segment_header *seg_hdr,
97 u32 seg_number, u32 seg_size, u8 *desc)
98{
99 memset(seg_hdr, 0, sizeof(struct mpi_coredump_segment_header));
100 seg_hdr->cookie = MPI_COREDUMP_COOKIE;
101 seg_hdr->segNum = seg_number;
102 seg_hdr->segSize = seg_size;
103 memcpy(seg_hdr->description, desc, (sizeof(seg_hdr->description)) - 1);
104}
105
106void ql_gen_reg_dump(struct ql_adapter *qdev,
107 struct ql_reg_dump *mpi_coredump)
108{
109 int i, status;
110
111
112 memset(&(mpi_coredump->mpi_global_header), 0,
113 sizeof(struct mpi_coredump_global_header));
114 mpi_coredump->mpi_global_header.cookie = MPI_COREDUMP_COOKIE;
115 mpi_coredump->mpi_global_header.headerSize =
116 sizeof(struct mpi_coredump_global_header);
117 mpi_coredump->mpi_global_header.imageSize =
118 sizeof(struct ql_reg_dump);
119 memcpy(mpi_coredump->mpi_global_header.idString, "MPI Coredump",
120 sizeof(mpi_coredump->mpi_global_header.idString));
121
122
123 /* segment 16 */
124 ql_build_coredump_seg_header(&mpi_coredump->misc_nic_seg_hdr,
125 MISC_NIC_INFO_SEG_NUM,
126 sizeof(struct mpi_coredump_segment_header)
127 + sizeof(mpi_coredump->misc_nic_info),
128 "MISC NIC INFO");
129 mpi_coredump->misc_nic_info.rx_ring_count = qdev->rx_ring_count;
130 mpi_coredump->misc_nic_info.tx_ring_count = qdev->tx_ring_count;
131 mpi_coredump->misc_nic_info.intr_count = qdev->intr_count;
132 mpi_coredump->misc_nic_info.function = qdev->func;
133
134 /* Segment 16, Rev C. Step 18 */
135 ql_build_coredump_seg_header(&mpi_coredump->nic_regs_seg_hdr,
136 NIC1_CONTROL_SEG_NUM,
137 sizeof(struct mpi_coredump_segment_header)
138 + sizeof(mpi_coredump->nic_regs),
139 "NIC Registers");
140 /* Get generic reg dump */
141 for (i = 0; i < 64; i++)
142 mpi_coredump->nic_regs[i] = ql_read32(qdev, i * sizeof(u32));
143
144 /* Segment 31 */
145 /* Get indexed register values. */
146 ql_build_coredump_seg_header(&mpi_coredump->intr_states_seg_hdr,
147 INTR_STATES_SEG_NUM,
148 sizeof(struct mpi_coredump_segment_header)
149 + sizeof(mpi_coredump->intr_states),
150 "INTR States");
151 ql_get_intr_states(qdev, &mpi_coredump->intr_states[0]);
152
153 ql_build_coredump_seg_header(&mpi_coredump->cam_entries_seg_hdr,
154 CAM_ENTRIES_SEG_NUM,
155 sizeof(struct mpi_coredump_segment_header)
156 + sizeof(mpi_coredump->cam_entries),
157 "CAM Entries");
158 status = ql_get_cam_entries(qdev, &mpi_coredump->cam_entries[0]);
159 if (status)
160 return;
161
162 ql_build_coredump_seg_header(&mpi_coredump->nic_routing_words_seg_hdr,
163 ROUTING_WORDS_SEG_NUM,
164 sizeof(struct mpi_coredump_segment_header)
165 + sizeof(mpi_coredump->nic_routing_words),
166 "Routing Words");
167 status = ql_get_routing_entries(qdev,
168 &mpi_coredump->nic_routing_words[0]);
169 if (status)
170 return;
171
172 /* Segment 34 (Rev C. step 23) */
173 ql_build_coredump_seg_header(&mpi_coredump->ets_seg_hdr,
174 ETS_SEG_NUM,
175 sizeof(struct mpi_coredump_segment_header)
176 + sizeof(mpi_coredump->ets),
177 "ETS Registers");
178 status = ql_get_ets_regs(qdev, &mpi_coredump->ets[0]);
179 if (status)
180 return;
181}
182
3#ifdef QL_REG_DUMP 183#ifdef QL_REG_DUMP
4static void ql_dump_intr_states(struct ql_adapter *qdev) 184static void ql_dump_intr_states(struct ql_adapter *qdev)
5{ 185{
diff --git a/drivers/net/qlge/qlge_ethtool.c b/drivers/net/qlge/qlge_ethtool.c
index 52073946bce3..058fa0a48c6f 100644
--- a/drivers/net/qlge/qlge_ethtool.c
+++ b/drivers/net/qlge/qlge_ethtool.c
@@ -36,6 +36,11 @@
36 36
37#include "qlge.h" 37#include "qlge.h"
38 38
39static const char ql_gstrings_test[][ETH_GSTRING_LEN] = {
40 "Loopback test (offline)"
41};
42#define QLGE_TEST_LEN (sizeof(ql_gstrings_test) / ETH_GSTRING_LEN)
43
39static int ql_update_ring_coalescing(struct ql_adapter *qdev) 44static int ql_update_ring_coalescing(struct ql_adapter *qdev)
40{ 45{
41 int i, status = 0; 46 int i, status = 0;
@@ -132,6 +137,41 @@ static void ql_update_stats(struct ql_adapter *qdev)
132 iter++; 137 iter++;
133 } 138 }
134 139
140 /*
141 * Get Per-priority TX pause frame counter statistics.
142 */
143 for (i = 0x500; i < 0x540; i += 8) {
144 if (ql_read_xgmac_reg64(qdev, i, &data)) {
145 QPRINTK(qdev, DRV, ERR,
146 "Error reading status register 0x%.04x.\n", i);
147 goto end;
148 } else
149 *iter = data;
150 iter++;
151 }
152
153 /*
154 * Get Per-priority RX pause frame counter statistics.
155 */
156 for (i = 0x568; i < 0x5a8; i += 8) {
157 if (ql_read_xgmac_reg64(qdev, i, &data)) {
158 QPRINTK(qdev, DRV, ERR,
159 "Error reading status register 0x%.04x.\n", i);
160 goto end;
161 } else
162 *iter = data;
163 iter++;
164 }
165
166 /*
167 * Get RX NIC FIFO DROP statistics.
168 */
169 if (ql_read_xgmac_reg64(qdev, 0x5b8, &data)) {
170 QPRINTK(qdev, DRV, ERR,
171 "Error reading status register 0x%.04x.\n", i);
172 goto end;
173 } else
174 *iter = data;
135end: 175end:
136 ql_sem_unlock(qdev, qdev->xg_sem_mask); 176 ql_sem_unlock(qdev, qdev->xg_sem_mask);
137quit: 177quit:
@@ -185,6 +225,23 @@ static char ql_stats_str_arr[][ETH_GSTRING_LEN] = {
185 {"rx_1024_to_1518_pkts"}, 225 {"rx_1024_to_1518_pkts"},
186 {"rx_1519_to_max_pkts"}, 226 {"rx_1519_to_max_pkts"},
187 {"rx_len_err_pkts"}, 227 {"rx_len_err_pkts"},
228 {"tx_cbfc_pause_frames0"},
229 {"tx_cbfc_pause_frames1"},
230 {"tx_cbfc_pause_frames2"},
231 {"tx_cbfc_pause_frames3"},
232 {"tx_cbfc_pause_frames4"},
233 {"tx_cbfc_pause_frames5"},
234 {"tx_cbfc_pause_frames6"},
235 {"tx_cbfc_pause_frames7"},
236 {"rx_cbfc_pause_frames0"},
237 {"rx_cbfc_pause_frames1"},
238 {"rx_cbfc_pause_frames2"},
239 {"rx_cbfc_pause_frames3"},
240 {"rx_cbfc_pause_frames4"},
241 {"rx_cbfc_pause_frames5"},
242 {"rx_cbfc_pause_frames6"},
243 {"rx_cbfc_pause_frames7"},
244 {"rx_nic_fifo_drop"},
188}; 245};
189 246
190static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf) 247static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
@@ -199,6 +256,8 @@ static void ql_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
199static int ql_get_sset_count(struct net_device *dev, int sset) 256static int ql_get_sset_count(struct net_device *dev, int sset)
200{ 257{
201 switch (sset) { 258 switch (sset) {
259 case ETH_SS_TEST:
260 return QLGE_TEST_LEN;
202 case ETH_SS_STATS: 261 case ETH_SS_STATS:
203 return ARRAY_SIZE(ql_stats_str_arr); 262 return ARRAY_SIZE(ql_stats_str_arr);
204 default: 263 default:
@@ -257,6 +316,23 @@ ql_get_ethtool_stats(struct net_device *ndev,
257 *data++ = s->rx_1024_to_1518_pkts; 316 *data++ = s->rx_1024_to_1518_pkts;
258 *data++ = s->rx_1519_to_max_pkts; 317 *data++ = s->rx_1519_to_max_pkts;
259 *data++ = s->rx_len_err_pkts; 318 *data++ = s->rx_len_err_pkts;
319 *data++ = s->tx_cbfc_pause_frames0;
320 *data++ = s->tx_cbfc_pause_frames1;
321 *data++ = s->tx_cbfc_pause_frames2;
322 *data++ = s->tx_cbfc_pause_frames3;
323 *data++ = s->tx_cbfc_pause_frames4;
324 *data++ = s->tx_cbfc_pause_frames5;
325 *data++ = s->tx_cbfc_pause_frames6;
326 *data++ = s->tx_cbfc_pause_frames7;
327 *data++ = s->rx_cbfc_pause_frames0;
328 *data++ = s->rx_cbfc_pause_frames1;
329 *data++ = s->rx_cbfc_pause_frames2;
330 *data++ = s->rx_cbfc_pause_frames3;
331 *data++ = s->rx_cbfc_pause_frames4;
332 *data++ = s->rx_cbfc_pause_frames5;
333 *data++ = s->rx_cbfc_pause_frames6;
334 *data++ = s->rx_cbfc_pause_frames7;
335 *data++ = s->rx_nic_fifo_drop;
260} 336}
261 337
262static int ql_get_settings(struct net_device *ndev, 338static int ql_get_settings(struct net_device *ndev,
@@ -302,6 +378,181 @@ static void ql_get_drvinfo(struct net_device *ndev,
302 drvinfo->eedump_len = 0; 378 drvinfo->eedump_len = 0;
303} 379}
304 380
381static void ql_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
382{
383 struct ql_adapter *qdev = netdev_priv(ndev);
384 /* What we support. */
385 wol->supported = WAKE_MAGIC;
386 /* What we've currently got set. */
387 wol->wolopts = qdev->wol;
388}
389
390static int ql_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
391{
392 struct ql_adapter *qdev = netdev_priv(ndev);
393 int status;
394
395 if (wol->wolopts & ~WAKE_MAGIC)
396 return -EINVAL;
397 qdev->wol = wol->wolopts;
398
399 QPRINTK(qdev, DRV, INFO, "Set wol option 0x%x on %s\n",
400 qdev->wol, ndev->name);
401 if (!qdev->wol) {
402 u32 wol = 0;
403 status = ql_mb_wol_mode(qdev, wol);
404 QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
405 (status == 0) ? "cleared sucessfully" : "clear failed",
406 wol, qdev->ndev->name);
407 }
408
409 return 0;
410}
411
412static int ql_phys_id(struct net_device *ndev, u32 data)
413{
414 struct ql_adapter *qdev = netdev_priv(ndev);
415 u32 led_reg, i;
416 int status;
417
418 /* Save the current LED settings */
419 status = ql_mb_get_led_cfg(qdev);
420 if (status)
421 return status;
422 led_reg = qdev->led_config;
423
424 /* Start blinking the led */
425 if (!data || data > 300)
426 data = 300;
427
428 for (i = 0; i < (data * 10); i++)
429 ql_mb_set_led_cfg(qdev, QL_LED_BLINK);
430
431 /* Restore LED settings */
432 status = ql_mb_set_led_cfg(qdev, led_reg);
433 if (status)
434 return status;
435
436 return 0;
437}
438
439static int ql_start_loopback(struct ql_adapter *qdev)
440{
441 if (netif_carrier_ok(qdev->ndev)) {
442 set_bit(QL_LB_LINK_UP, &qdev->flags);
443 netif_carrier_off(qdev->ndev);
444 } else
445 clear_bit(QL_LB_LINK_UP, &qdev->flags);
446 qdev->link_config |= CFG_LOOPBACK_PCS;
447 return ql_mb_set_port_cfg(qdev);
448}
449
450static void ql_stop_loopback(struct ql_adapter *qdev)
451{
452 qdev->link_config &= ~CFG_LOOPBACK_PCS;
453 ql_mb_set_port_cfg(qdev);
454 if (test_bit(QL_LB_LINK_UP, &qdev->flags)) {
455 netif_carrier_on(qdev->ndev);
456 clear_bit(QL_LB_LINK_UP, &qdev->flags);
457 }
458}
459
460static void ql_create_lb_frame(struct sk_buff *skb,
461 unsigned int frame_size)
462{
463 memset(skb->data, 0xFF, frame_size);
464 frame_size &= ~1;
465 memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
466 memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
467 memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
468}
469
470void ql_check_lb_frame(struct ql_adapter *qdev,
471 struct sk_buff *skb)
472{
473 unsigned int frame_size = skb->len;
474
475 if ((*(skb->data + 3) == 0xFF) &&
476 (*(skb->data + frame_size / 2 + 10) == 0xBE) &&
477 (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
478 atomic_dec(&qdev->lb_count);
479 return;
480 }
481}
482
483static int ql_run_loopback_test(struct ql_adapter *qdev)
484{
485 int i;
486 netdev_tx_t rc;
487 struct sk_buff *skb;
488 unsigned int size = SMALL_BUF_MAP_SIZE;
489
490 for (i = 0; i < 64; i++) {
491 skb = netdev_alloc_skb(qdev->ndev, size);
492 if (!skb)
493 return -ENOMEM;
494
495 skb->queue_mapping = 0;
496 skb_put(skb, size);
497 ql_create_lb_frame(skb, size);
498 rc = ql_lb_send(skb, qdev->ndev);
499 if (rc != NETDEV_TX_OK)
500 return -EPIPE;
501 atomic_inc(&qdev->lb_count);
502 }
503
504 ql_clean_lb_rx_ring(&qdev->rx_ring[0], 128);
505 return atomic_read(&qdev->lb_count) ? -EIO : 0;
506}
507
508static int ql_loopback_test(struct ql_adapter *qdev, u64 *data)
509{
510 *data = ql_start_loopback(qdev);
511 if (*data)
512 goto out;
513 *data = ql_run_loopback_test(qdev);
514out:
515 ql_stop_loopback(qdev);
516 return *data;
517}
518
519static void ql_self_test(struct net_device *ndev,
520 struct ethtool_test *eth_test, u64 *data)
521{
522 struct ql_adapter *qdev = netdev_priv(ndev);
523
524 if (netif_running(ndev)) {
525 set_bit(QL_SELFTEST, &qdev->flags);
526 if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
527 /* Offline tests */
528 if (ql_loopback_test(qdev, &data[0]))
529 eth_test->flags |= ETH_TEST_FL_FAILED;
530
531 } else {
532 /* Online tests */
533 data[0] = 0;
534 }
535 clear_bit(QL_SELFTEST, &qdev->flags);
536 } else {
537 QPRINTK(qdev, DRV, ERR,
538 "%s: is down, Loopback test will fail.\n", ndev->name);
539 eth_test->flags |= ETH_TEST_FL_FAILED;
540 }
541}
542
543static int ql_get_regs_len(struct net_device *ndev)
544{
545 return sizeof(struct ql_reg_dump);
546}
547
548static void ql_get_regs(struct net_device *ndev,
549 struct ethtool_regs *regs, void *p)
550{
551 struct ql_adapter *qdev = netdev_priv(ndev);
552
553 ql_gen_reg_dump(qdev, p);
554}
555
305static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c) 556static int ql_get_coalesce(struct net_device *dev, struct ethtool_coalesce *c)
306{ 557{
307 struct ql_adapter *qdev = netdev_priv(dev); 558 struct ql_adapter *qdev = netdev_priv(dev);
@@ -355,6 +606,37 @@ static int ql_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *c)
355 return ql_update_ring_coalescing(qdev); 606 return ql_update_ring_coalescing(qdev);
356} 607}
357 608
609static void ql_get_pauseparam(struct net_device *netdev,
610 struct ethtool_pauseparam *pause)
611{
612 struct ql_adapter *qdev = netdev_priv(netdev);
613
614 ql_mb_get_port_cfg(qdev);
615 if (qdev->link_config & CFG_PAUSE_STD) {
616 pause->rx_pause = 1;
617 pause->tx_pause = 1;
618 }
619}
620
621static int ql_set_pauseparam(struct net_device *netdev,
622 struct ethtool_pauseparam *pause)
623{
624 struct ql_adapter *qdev = netdev_priv(netdev);
625 int status = 0;
626
627 if ((pause->rx_pause) && (pause->tx_pause))
628 qdev->link_config |= CFG_PAUSE_STD;
629 else if (!pause->rx_pause && !pause->tx_pause)
630 qdev->link_config &= ~CFG_PAUSE_STD;
631 else
632 return -EINVAL;
633
634 status = ql_mb_set_port_cfg(qdev);
635 if (status)
636 return status;
637 return status;
638}
639
358static u32 ql_get_rx_csum(struct net_device *netdev) 640static u32 ql_get_rx_csum(struct net_device *netdev)
359{ 641{
360 struct ql_adapter *qdev = netdev_priv(netdev); 642 struct ql_adapter *qdev = netdev_priv(netdev);
@@ -396,9 +678,17 @@ static void ql_set_msglevel(struct net_device *ndev, u32 value)
396const struct ethtool_ops qlge_ethtool_ops = { 678const struct ethtool_ops qlge_ethtool_ops = {
397 .get_settings = ql_get_settings, 679 .get_settings = ql_get_settings,
398 .get_drvinfo = ql_get_drvinfo, 680 .get_drvinfo = ql_get_drvinfo,
681 .get_wol = ql_get_wol,
682 .set_wol = ql_set_wol,
683 .get_regs_len = ql_get_regs_len,
684 .get_regs = ql_get_regs,
399 .get_msglevel = ql_get_msglevel, 685 .get_msglevel = ql_get_msglevel,
400 .set_msglevel = ql_set_msglevel, 686 .set_msglevel = ql_set_msglevel,
401 .get_link = ethtool_op_get_link, 687 .get_link = ethtool_op_get_link,
688 .phys_id = ql_phys_id,
689 .self_test = ql_self_test,
690 .get_pauseparam = ql_get_pauseparam,
691 .set_pauseparam = ql_set_pauseparam,
402 .get_rx_csum = ql_get_rx_csum, 692 .get_rx_csum = ql_get_rx_csum,
403 .set_rx_csum = ql_set_rx_csum, 693 .set_rx_csum = ql_set_rx_csum,
404 .get_tx_csum = ethtool_op_get_tx_csum, 694 .get_tx_csum = ethtool_op_get_tx_csum,
diff --git a/drivers/net/qlge/qlge_main.c b/drivers/net/qlge/qlge_main.c
index a2fc70a0d0cc..e2ee47d9bca5 100644
--- a/drivers/net/qlge/qlge_main.c
+++ b/drivers/net/qlge/qlge_main.c
@@ -69,9 +69,9 @@ MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
69#define MSIX_IRQ 0 69#define MSIX_IRQ 0
70#define MSI_IRQ 1 70#define MSI_IRQ 1
71#define LEG_IRQ 2 71#define LEG_IRQ 2
72static int irq_type = MSIX_IRQ; 72static int qlge_irq_type = MSIX_IRQ;
73module_param(irq_type, int, MSIX_IRQ); 73module_param(qlge_irq_type, int, MSIX_IRQ);
74MODULE_PARM_DESC(irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy."); 74MODULE_PARM_DESC(qlge_irq_type, "0 = MSI-X, 1 = MSI, 2 = Legacy.");
75 75
76static struct pci_device_id qlge_pci_tbl[] __devinitdata = { 76static struct pci_device_id qlge_pci_tbl[] __devinitdata = {
77 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)}, 77 {PCI_DEVICE(PCI_VENDOR_ID_QLOGIC, QLGE_DEVICE_ID_8012)},
@@ -1025,6 +1025,11 @@ end:
1025 return status; 1025 return status;
1026} 1026}
1027 1027
1028static inline unsigned int ql_lbq_block_size(struct ql_adapter *qdev)
1029{
1030 return PAGE_SIZE << qdev->lbq_buf_order;
1031}
1032
1028/* Get the next large buffer. */ 1033/* Get the next large buffer. */
1029static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring) 1034static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1030{ 1035{
@@ -1036,6 +1041,28 @@ static struct bq_desc *ql_get_curr_lbuf(struct rx_ring *rx_ring)
1036 return lbq_desc; 1041 return lbq_desc;
1037} 1042}
1038 1043
1044static struct bq_desc *ql_get_curr_lchunk(struct ql_adapter *qdev,
1045 struct rx_ring *rx_ring)
1046{
1047 struct bq_desc *lbq_desc = ql_get_curr_lbuf(rx_ring);
1048
1049 pci_dma_sync_single_for_cpu(qdev->pdev,
1050 pci_unmap_addr(lbq_desc, mapaddr),
1051 rx_ring->lbq_buf_size,
1052 PCI_DMA_FROMDEVICE);
1053
1054 /* If it's the last chunk of our master page then
1055 * we unmap it.
1056 */
1057 if ((lbq_desc->p.pg_chunk.offset + rx_ring->lbq_buf_size)
1058 == ql_lbq_block_size(qdev))
1059 pci_unmap_page(qdev->pdev,
1060 lbq_desc->p.pg_chunk.map,
1061 ql_lbq_block_size(qdev),
1062 PCI_DMA_FROMDEVICE);
1063 return lbq_desc;
1064}
1065
1039/* Get the next small buffer. */ 1066/* Get the next small buffer. */
1040static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring) 1067static struct bq_desc *ql_get_curr_sbuf(struct rx_ring *rx_ring)
1041{ 1068{
@@ -1063,6 +1090,53 @@ static void ql_write_cq_idx(struct rx_ring *rx_ring)
1063 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg); 1090 ql_write_db_reg(rx_ring->cnsmr_idx, rx_ring->cnsmr_idx_db_reg);
1064} 1091}
1065 1092
1093static int ql_get_next_chunk(struct ql_adapter *qdev, struct rx_ring *rx_ring,
1094 struct bq_desc *lbq_desc)
1095{
1096 if (!rx_ring->pg_chunk.page) {
1097 u64 map;
1098 rx_ring->pg_chunk.page = alloc_pages(__GFP_COLD | __GFP_COMP |
1099 GFP_ATOMIC,
1100 qdev->lbq_buf_order);
1101 if (unlikely(!rx_ring->pg_chunk.page)) {
1102 QPRINTK(qdev, DRV, ERR,
1103 "page allocation failed.\n");
1104 return -ENOMEM;
1105 }
1106 rx_ring->pg_chunk.offset = 0;
1107 map = pci_map_page(qdev->pdev, rx_ring->pg_chunk.page,
1108 0, ql_lbq_block_size(qdev),
1109 PCI_DMA_FROMDEVICE);
1110 if (pci_dma_mapping_error(qdev->pdev, map)) {
1111 __free_pages(rx_ring->pg_chunk.page,
1112 qdev->lbq_buf_order);
1113 QPRINTK(qdev, DRV, ERR,
1114 "PCI mapping failed.\n");
1115 return -ENOMEM;
1116 }
1117 rx_ring->pg_chunk.map = map;
1118 rx_ring->pg_chunk.va = page_address(rx_ring->pg_chunk.page);
1119 }
1120
1121 /* Copy the current master pg_chunk info
1122 * to the current descriptor.
1123 */
1124 lbq_desc->p.pg_chunk = rx_ring->pg_chunk;
1125
1126 /* Adjust the master page chunk for next
1127 * buffer get.
1128 */
1129 rx_ring->pg_chunk.offset += rx_ring->lbq_buf_size;
1130 if (rx_ring->pg_chunk.offset == ql_lbq_block_size(qdev)) {
1131 rx_ring->pg_chunk.page = NULL;
1132 lbq_desc->p.pg_chunk.last_flag = 1;
1133 } else {
1134 rx_ring->pg_chunk.va += rx_ring->lbq_buf_size;
1135 get_page(rx_ring->pg_chunk.page);
1136 lbq_desc->p.pg_chunk.last_flag = 0;
1137 }
1138 return 0;
1139}
1066/* Process (refill) a large buffer queue. */ 1140/* Process (refill) a large buffer queue. */
1067static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring) 1141static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1068{ 1142{
@@ -1072,39 +1146,28 @@ static void ql_update_lbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1072 u64 map; 1146 u64 map;
1073 int i; 1147 int i;
1074 1148
1075 while (rx_ring->lbq_free_cnt > 16) { 1149 while (rx_ring->lbq_free_cnt > 32) {
1076 for (i = 0; i < 16; i++) { 1150 for (i = 0; i < 16; i++) {
1077 QPRINTK(qdev, RX_STATUS, DEBUG, 1151 QPRINTK(qdev, RX_STATUS, DEBUG,
1078 "lbq: try cleaning clean_idx = %d.\n", 1152 "lbq: try cleaning clean_idx = %d.\n",
1079 clean_idx); 1153 clean_idx);
1080 lbq_desc = &rx_ring->lbq[clean_idx]; 1154 lbq_desc = &rx_ring->lbq[clean_idx];
1081 if (lbq_desc->p.lbq_page == NULL) { 1155 if (ql_get_next_chunk(qdev, rx_ring, lbq_desc)) {
1082 QPRINTK(qdev, RX_STATUS, DEBUG, 1156 QPRINTK(qdev, IFUP, ERR,
1083 "lbq: getting new page for index %d.\n", 1157 "Could not get a page chunk.\n");
1084 lbq_desc->index);
1085 lbq_desc->p.lbq_page = alloc_page(GFP_ATOMIC);
1086 if (lbq_desc->p.lbq_page == NULL) {
1087 rx_ring->lbq_clean_idx = clean_idx;
1088 QPRINTK(qdev, RX_STATUS, ERR,
1089 "Couldn't get a page.\n");
1090 return;
1091 }
1092 map = pci_map_page(qdev->pdev,
1093 lbq_desc->p.lbq_page,
1094 0, PAGE_SIZE,
1095 PCI_DMA_FROMDEVICE);
1096 if (pci_dma_mapping_error(qdev->pdev, map)) {
1097 rx_ring->lbq_clean_idx = clean_idx;
1098 put_page(lbq_desc->p.lbq_page);
1099 lbq_desc->p.lbq_page = NULL;
1100 QPRINTK(qdev, RX_STATUS, ERR,
1101 "PCI mapping failed.\n");
1102 return; 1158 return;
1103 } 1159 }
1160
1161 map = lbq_desc->p.pg_chunk.map +
1162 lbq_desc->p.pg_chunk.offset;
1104 pci_unmap_addr_set(lbq_desc, mapaddr, map); 1163 pci_unmap_addr_set(lbq_desc, mapaddr, map);
1105 pci_unmap_len_set(lbq_desc, maplen, PAGE_SIZE); 1164 pci_unmap_len_set(lbq_desc, maplen,
1165 rx_ring->lbq_buf_size);
1106 *lbq_desc->addr = cpu_to_le64(map); 1166 *lbq_desc->addr = cpu_to_le64(map);
1107 } 1167
1168 pci_dma_sync_single_for_device(qdev->pdev, map,
1169 rx_ring->lbq_buf_size,
1170 PCI_DMA_FROMDEVICE);
1108 clean_idx++; 1171 clean_idx++;
1109 if (clean_idx == rx_ring->lbq_len) 1172 if (clean_idx == rx_ring->lbq_len)
1110 clean_idx = 0; 1173 clean_idx = 0;
@@ -1147,7 +1210,7 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1147 sbq_desc->index); 1210 sbq_desc->index);
1148 sbq_desc->p.skb = 1211 sbq_desc->p.skb =
1149 netdev_alloc_skb(qdev->ndev, 1212 netdev_alloc_skb(qdev->ndev,
1150 rx_ring->sbq_buf_size); 1213 SMALL_BUFFER_SIZE);
1151 if (sbq_desc->p.skb == NULL) { 1214 if (sbq_desc->p.skb == NULL) {
1152 QPRINTK(qdev, PROBE, ERR, 1215 QPRINTK(qdev, PROBE, ERR,
1153 "Couldn't get an skb.\n"); 1216 "Couldn't get an skb.\n");
@@ -1157,8 +1220,8 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1157 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD); 1220 skb_reserve(sbq_desc->p.skb, QLGE_SB_PAD);
1158 map = pci_map_single(qdev->pdev, 1221 map = pci_map_single(qdev->pdev,
1159 sbq_desc->p.skb->data, 1222 sbq_desc->p.skb->data,
1160 rx_ring->sbq_buf_size / 1223 rx_ring->sbq_buf_size,
1161 2, PCI_DMA_FROMDEVICE); 1224 PCI_DMA_FROMDEVICE);
1162 if (pci_dma_mapping_error(qdev->pdev, map)) { 1225 if (pci_dma_mapping_error(qdev->pdev, map)) {
1163 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n"); 1226 QPRINTK(qdev, IFUP, ERR, "PCI mapping failed.\n");
1164 rx_ring->sbq_clean_idx = clean_idx; 1227 rx_ring->sbq_clean_idx = clean_idx;
@@ -1168,7 +1231,7 @@ static void ql_update_sbq(struct ql_adapter *qdev, struct rx_ring *rx_ring)
1168 } 1231 }
1169 pci_unmap_addr_set(sbq_desc, mapaddr, map); 1232 pci_unmap_addr_set(sbq_desc, mapaddr, map);
1170 pci_unmap_len_set(sbq_desc, maplen, 1233 pci_unmap_len_set(sbq_desc, maplen,
1171 rx_ring->sbq_buf_size / 2); 1234 rx_ring->sbq_buf_size);
1172 *sbq_desc->addr = cpu_to_le64(map); 1235 *sbq_desc->addr = cpu_to_le64(map);
1173 } 1236 }
1174 1237
@@ -1480,27 +1543,24 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1480 * chain it to the header buffer's skb and let 1543 * chain it to the header buffer's skb and let
1481 * it rip. 1544 * it rip.
1482 */ 1545 */
1483 lbq_desc = ql_get_curr_lbuf(rx_ring); 1546 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1484 pci_unmap_page(qdev->pdev,
1485 pci_unmap_addr(lbq_desc,
1486 mapaddr),
1487 pci_unmap_len(lbq_desc, maplen),
1488 PCI_DMA_FROMDEVICE);
1489 QPRINTK(qdev, RX_STATUS, DEBUG, 1547 QPRINTK(qdev, RX_STATUS, DEBUG,
1490 "Chaining page to skb.\n"); 1548 "Chaining page at offset = %d,"
1491 skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page, 1549 "for %d bytes to skb.\n",
1492 0, length); 1550 lbq_desc->p.pg_chunk.offset, length);
1551 skb_fill_page_desc(skb, 0, lbq_desc->p.pg_chunk.page,
1552 lbq_desc->p.pg_chunk.offset,
1553 length);
1493 skb->len += length; 1554 skb->len += length;
1494 skb->data_len += length; 1555 skb->data_len += length;
1495 skb->truesize += length; 1556 skb->truesize += length;
1496 lbq_desc->p.lbq_page = NULL;
1497 } else { 1557 } else {
1498 /* 1558 /*
1499 * The headers and data are in a single large buffer. We 1559 * The headers and data are in a single large buffer. We
1500 * copy it to a new skb and let it go. This can happen with 1560 * copy it to a new skb and let it go. This can happen with
1501 * jumbo mtu on a non-TCP/UDP frame. 1561 * jumbo mtu on a non-TCP/UDP frame.
1502 */ 1562 */
1503 lbq_desc = ql_get_curr_lbuf(rx_ring); 1563 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1504 skb = netdev_alloc_skb(qdev->ndev, length); 1564 skb = netdev_alloc_skb(qdev->ndev, length);
1505 if (skb == NULL) { 1565 if (skb == NULL) {
1506 QPRINTK(qdev, PROBE, DEBUG, 1566 QPRINTK(qdev, PROBE, DEBUG,
@@ -1515,13 +1575,14 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1515 skb_reserve(skb, NET_IP_ALIGN); 1575 skb_reserve(skb, NET_IP_ALIGN);
1516 QPRINTK(qdev, RX_STATUS, DEBUG, 1576 QPRINTK(qdev, RX_STATUS, DEBUG,
1517 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length); 1577 "%d bytes of headers and data in large. Chain page to new skb and pull tail.\n", length);
1518 skb_fill_page_desc(skb, 0, lbq_desc->p.lbq_page, 1578 skb_fill_page_desc(skb, 0,
1519 0, length); 1579 lbq_desc->p.pg_chunk.page,
1580 lbq_desc->p.pg_chunk.offset,
1581 length);
1520 skb->len += length; 1582 skb->len += length;
1521 skb->data_len += length; 1583 skb->data_len += length;
1522 skb->truesize += length; 1584 skb->truesize += length;
1523 length -= length; 1585 length -= length;
1524 lbq_desc->p.lbq_page = NULL;
1525 __pskb_pull_tail(skb, 1586 __pskb_pull_tail(skb,
1526 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? 1587 (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
1527 VLAN_ETH_HLEN : ETH_HLEN); 1588 VLAN_ETH_HLEN : ETH_HLEN);
@@ -1538,8 +1599,7 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1538 * frames. If the MTU goes up we could 1599 * frames. If the MTU goes up we could
1539 * eventually be in trouble. 1600 * eventually be in trouble.
1540 */ 1601 */
1541 int size, offset, i = 0; 1602 int size, i = 0;
1542 __le64 *bq, bq_array[8];
1543 sbq_desc = ql_get_curr_sbuf(rx_ring); 1603 sbq_desc = ql_get_curr_sbuf(rx_ring);
1544 pci_unmap_single(qdev->pdev, 1604 pci_unmap_single(qdev->pdev,
1545 pci_unmap_addr(sbq_desc, mapaddr), 1605 pci_unmap_addr(sbq_desc, mapaddr),
@@ -1558,37 +1618,25 @@ static struct sk_buff *ql_build_rx_skb(struct ql_adapter *qdev,
1558 QPRINTK(qdev, RX_STATUS, DEBUG, 1618 QPRINTK(qdev, RX_STATUS, DEBUG,
1559 "%d bytes of headers & data in chain of large.\n", length); 1619 "%d bytes of headers & data in chain of large.\n", length);
1560 skb = sbq_desc->p.skb; 1620 skb = sbq_desc->p.skb;
1561 bq = &bq_array[0];
1562 memcpy(bq, skb->data, sizeof(bq_array));
1563 sbq_desc->p.skb = NULL; 1621 sbq_desc->p.skb = NULL;
1564 skb_reserve(skb, NET_IP_ALIGN); 1622 skb_reserve(skb, NET_IP_ALIGN);
1565 } else {
1566 QPRINTK(qdev, RX_STATUS, DEBUG,
1567 "Headers in small, %d bytes of data in chain of large.\n", length);
1568 bq = (__le64 *)sbq_desc->p.skb->data;
1569 } 1623 }
1570 while (length > 0) { 1624 while (length > 0) {
1571 lbq_desc = ql_get_curr_lbuf(rx_ring); 1625 lbq_desc = ql_get_curr_lchunk(qdev, rx_ring);
1572 pci_unmap_page(qdev->pdev, 1626 size = (length < rx_ring->lbq_buf_size) ? length :
1573 pci_unmap_addr(lbq_desc, 1627 rx_ring->lbq_buf_size;
1574 mapaddr),
1575 pci_unmap_len(lbq_desc,
1576 maplen),
1577 PCI_DMA_FROMDEVICE);
1578 size = (length < PAGE_SIZE) ? length : PAGE_SIZE;
1579 offset = 0;
1580 1628
1581 QPRINTK(qdev, RX_STATUS, DEBUG, 1629 QPRINTK(qdev, RX_STATUS, DEBUG,
1582 "Adding page %d to skb for %d bytes.\n", 1630 "Adding page %d to skb for %d bytes.\n",
1583 i, size); 1631 i, size);
1584 skb_fill_page_desc(skb, i, lbq_desc->p.lbq_page, 1632 skb_fill_page_desc(skb, i,
1585 offset, size); 1633 lbq_desc->p.pg_chunk.page,
1634 lbq_desc->p.pg_chunk.offset,
1635 size);
1586 skb->len += size; 1636 skb->len += size;
1587 skb->data_len += size; 1637 skb->data_len += size;
1588 skb->truesize += size; 1638 skb->truesize += size;
1589 length -= size; 1639 length -= size;
1590 lbq_desc->p.lbq_page = NULL;
1591 bq++;
1592 i++; 1640 i++;
1593 } 1641 }
1594 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ? 1642 __pskb_pull_tail(skb, (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_V) ?
@@ -1613,6 +1661,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1613 if (unlikely(!skb)) { 1661 if (unlikely(!skb)) {
1614 QPRINTK(qdev, RX_STATUS, DEBUG, 1662 QPRINTK(qdev, RX_STATUS, DEBUG,
1615 "No skb available, drop packet.\n"); 1663 "No skb available, drop packet.\n");
1664 rx_ring->rx_dropped++;
1616 return; 1665 return;
1617 } 1666 }
1618 1667
@@ -1621,6 +1670,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1621 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n", 1670 QPRINTK(qdev, DRV, ERR, "Receive error, flags2 = 0x%x\n",
1622 ib_mac_rsp->flags2); 1671 ib_mac_rsp->flags2);
1623 dev_kfree_skb_any(skb); 1672 dev_kfree_skb_any(skb);
1673 rx_ring->rx_errors++;
1624 return; 1674 return;
1625 } 1675 }
1626 1676
@@ -1629,6 +1679,14 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1629 */ 1679 */
1630 if (skb->len > ndev->mtu + ETH_HLEN) { 1680 if (skb->len > ndev->mtu + ETH_HLEN) {
1631 dev_kfree_skb_any(skb); 1681 dev_kfree_skb_any(skb);
1682 rx_ring->rx_dropped++;
1683 return;
1684 }
1685
1686 /* loopback self test for ethtool */
1687 if (test_bit(QL_SELFTEST, &qdev->flags)) {
1688 ql_check_lb_frame(qdev, skb);
1689 dev_kfree_skb_any(skb);
1632 return; 1690 return;
1633 } 1691 }
1634 1692
@@ -1642,6 +1700,7 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1642 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "", 1700 IB_MAC_IOCB_RSP_M_REG ? "Registered" : "",
1643 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) == 1701 (ib_mac_rsp->flags1 & IB_MAC_IOCB_RSP_M_MASK) ==
1644 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : ""); 1702 IB_MAC_IOCB_RSP_M_PROM ? "Promiscuous" : "");
1703 rx_ring->rx_multicast++;
1645 } 1704 }
1646 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) { 1705 if (ib_mac_rsp->flags2 & IB_MAC_IOCB_RSP_P) {
1647 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n"); 1706 QPRINTK(qdev, RX_STATUS, DEBUG, "Promiscuous Packet.\n");
@@ -1673,8 +1732,8 @@ static void ql_process_mac_rx_intr(struct ql_adapter *qdev,
1673 } 1732 }
1674 } 1733 }
1675 1734
1676 qdev->stats.rx_packets++; 1735 rx_ring->rx_packets++;
1677 qdev->stats.rx_bytes += skb->len; 1736 rx_ring->rx_bytes += skb->len;
1678 skb_record_rx_queue(skb, rx_ring->cq_id); 1737 skb_record_rx_queue(skb, rx_ring->cq_id);
1679 if (skb->ip_summed == CHECKSUM_UNNECESSARY) { 1738 if (skb->ip_summed == CHECKSUM_UNNECESSARY) {
1680 if (qdev->vlgrp && 1739 if (qdev->vlgrp &&
@@ -1705,8 +1764,8 @@ static void ql_process_mac_tx_intr(struct ql_adapter *qdev,
1705 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx]; 1764 tx_ring = &qdev->tx_ring[mac_rsp->txq_idx];
1706 tx_ring_desc = &tx_ring->q[mac_rsp->tid]; 1765 tx_ring_desc = &tx_ring->q[mac_rsp->tid];
1707 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt); 1766 ql_unmap_send(qdev, tx_ring_desc, tx_ring_desc->map_cnt);
1708 qdev->stats.tx_bytes += (tx_ring_desc->skb)->len; 1767 tx_ring->tx_bytes += (tx_ring_desc->skb)->len;
1709 qdev->stats.tx_packets++; 1768 tx_ring->tx_packets++;
1710 dev_kfree_skb(tx_ring_desc->skb); 1769 dev_kfree_skb(tx_ring_desc->skb);
1711 tx_ring_desc->skb = NULL; 1770 tx_ring_desc->skb = NULL;
1712 1771
@@ -1929,7 +1988,7 @@ static int ql_napi_poll_msix(struct napi_struct *napi, int budget)
1929 return work_done; 1988 return work_done;
1930} 1989}
1931 1990
1932static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp) 1991static void qlge_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
1933{ 1992{
1934 struct ql_adapter *qdev = netdev_priv(ndev); 1993 struct ql_adapter *qdev = netdev_priv(ndev);
1935 1994
@@ -1945,7 +2004,7 @@ static void ql_vlan_rx_register(struct net_device *ndev, struct vlan_group *grp)
1945 } 2004 }
1946} 2005}
1947 2006
1948static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid) 2007static void qlge_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1949{ 2008{
1950 struct ql_adapter *qdev = netdev_priv(ndev); 2009 struct ql_adapter *qdev = netdev_priv(ndev);
1951 u32 enable_bit = MAC_ADDR_E; 2010 u32 enable_bit = MAC_ADDR_E;
@@ -1961,7 +2020,7 @@ static void ql_vlan_rx_add_vid(struct net_device *ndev, u16 vid)
1961 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK); 2020 ql_sem_unlock(qdev, SEM_MAC_ADDR_MASK);
1962} 2021}
1963 2022
1964static void ql_vlan_rx_kill_vid(struct net_device *ndev, u16 vid) 2023static void qlge_vlan_rx_kill_vid(struct net_device *ndev, u16 vid)
1965{ 2024{
1966 struct ql_adapter *qdev = netdev_priv(ndev); 2025 struct ql_adapter *qdev = netdev_priv(ndev);
1967 u32 enable_bit = 0; 2026 u32 enable_bit = 0;
@@ -2046,12 +2105,12 @@ static irqreturn_t qlge_isr(int irq, void *dev_id)
2046 */ 2105 */
2047 var = ql_read32(qdev, ISR1); 2106 var = ql_read32(qdev, ISR1);
2048 if (var & intr_context->irq_mask) { 2107 if (var & intr_context->irq_mask) {
2049 QPRINTK(qdev, INTR, INFO, 2108 QPRINTK(qdev, INTR, INFO,
2050 "Waking handler for rx_ring[0].\n"); 2109 "Waking handler for rx_ring[0].\n");
2051 ql_disable_completion_interrupt(qdev, intr_context->intr); 2110 ql_disable_completion_interrupt(qdev, intr_context->intr);
2052 napi_schedule(&rx_ring->napi); 2111 napi_schedule(&rx_ring->napi);
2053 work_done++; 2112 work_done++;
2054 } 2113 }
2055 ql_enable_completion_interrupt(qdev, intr_context->intr); 2114 ql_enable_completion_interrupt(qdev, intr_context->intr);
2056 return work_done ? IRQ_HANDLED : IRQ_NONE; 2115 return work_done ? IRQ_HANDLED : IRQ_NONE;
2057} 2116}
@@ -2149,6 +2208,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2149 __func__, tx_ring_idx); 2208 __func__, tx_ring_idx);
2150 netif_stop_subqueue(ndev, tx_ring->wq_id); 2209 netif_stop_subqueue(ndev, tx_ring->wq_id);
2151 atomic_inc(&tx_ring->queue_stopped); 2210 atomic_inc(&tx_ring->queue_stopped);
2211 tx_ring->tx_errors++;
2152 return NETDEV_TX_BUSY; 2212 return NETDEV_TX_BUSY;
2153 } 2213 }
2154 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx]; 2214 tx_ring_desc = &tx_ring->q[tx_ring->prod_idx];
@@ -2183,6 +2243,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2183 NETDEV_TX_OK) { 2243 NETDEV_TX_OK) {
2184 QPRINTK(qdev, TX_QUEUED, ERR, 2244 QPRINTK(qdev, TX_QUEUED, ERR,
2185 "Could not map the segments.\n"); 2245 "Could not map the segments.\n");
2246 tx_ring->tx_errors++;
2186 return NETDEV_TX_BUSY; 2247 return NETDEV_TX_BUSY;
2187 } 2248 }
2188 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr); 2249 QL_DUMP_OB_MAC_IOCB(mac_iocb_ptr);
@@ -2199,6 +2260,7 @@ static netdev_tx_t qlge_send(struct sk_buff *skb, struct net_device *ndev)
2199 return NETDEV_TX_OK; 2260 return NETDEV_TX_OK;
2200} 2261}
2201 2262
2263
2202static void ql_free_shadow_space(struct ql_adapter *qdev) 2264static void ql_free_shadow_space(struct ql_adapter *qdev)
2203{ 2265{
2204 if (qdev->rx_ring_shadow_reg_area) { 2266 if (qdev->rx_ring_shadow_reg_area) {
@@ -2304,20 +2366,29 @@ err:
2304 2366
2305static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring) 2367static void ql_free_lbq_buffers(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2306{ 2368{
2307 int i;
2308 struct bq_desc *lbq_desc; 2369 struct bq_desc *lbq_desc;
2309 2370
2310 for (i = 0; i < rx_ring->lbq_len; i++) { 2371 uint32_t curr_idx, clean_idx;
2311 lbq_desc = &rx_ring->lbq[i]; 2372
2312 if (lbq_desc->p.lbq_page) { 2373 curr_idx = rx_ring->lbq_curr_idx;
2374 clean_idx = rx_ring->lbq_clean_idx;
2375 while (curr_idx != clean_idx) {
2376 lbq_desc = &rx_ring->lbq[curr_idx];
2377
2378 if (lbq_desc->p.pg_chunk.last_flag) {
2313 pci_unmap_page(qdev->pdev, 2379 pci_unmap_page(qdev->pdev,
2314 pci_unmap_addr(lbq_desc, mapaddr), 2380 lbq_desc->p.pg_chunk.map,
2315 pci_unmap_len(lbq_desc, maplen), 2381 ql_lbq_block_size(qdev),
2316 PCI_DMA_FROMDEVICE); 2382 PCI_DMA_FROMDEVICE);
2317 2383 lbq_desc->p.pg_chunk.last_flag = 0;
2318 put_page(lbq_desc->p.lbq_page);
2319 lbq_desc->p.lbq_page = NULL;
2320 } 2384 }
2385
2386 put_page(lbq_desc->p.pg_chunk.page);
2387 lbq_desc->p.pg_chunk.page = NULL;
2388
2389 if (++curr_idx == rx_ring->lbq_len)
2390 curr_idx = 0;
2391
2321 } 2392 }
2322} 2393}
2323 2394
@@ -2615,6 +2686,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2615 /* Set up the shadow registers for this ring. */ 2686 /* Set up the shadow registers for this ring. */
2616 rx_ring->prod_idx_sh_reg = shadow_reg; 2687 rx_ring->prod_idx_sh_reg = shadow_reg;
2617 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma; 2688 rx_ring->prod_idx_sh_reg_dma = shadow_reg_dma;
2689 *rx_ring->prod_idx_sh_reg = 0;
2618 shadow_reg += sizeof(u64); 2690 shadow_reg += sizeof(u64);
2619 shadow_reg_dma += sizeof(u64); 2691 shadow_reg_dma += sizeof(u64);
2620 rx_ring->lbq_base_indirect = shadow_reg; 2692 rx_ring->lbq_base_indirect = shadow_reg;
@@ -2692,7 +2764,7 @@ static int ql_start_rx_ring(struct ql_adapter *qdev, struct rx_ring *rx_ring)
2692 cqicb->sbq_addr = 2764 cqicb->sbq_addr =
2693 cpu_to_le64(rx_ring->sbq_base_indirect_dma); 2765 cpu_to_le64(rx_ring->sbq_base_indirect_dma);
2694 cqicb->sbq_buf_size = 2766 cqicb->sbq_buf_size =
2695 cpu_to_le16((u16)(rx_ring->sbq_buf_size/2)); 2767 cpu_to_le16((u16)(rx_ring->sbq_buf_size));
2696 bq_len = (rx_ring->sbq_len == 65536) ? 0 : 2768 bq_len = (rx_ring->sbq_len == 65536) ? 0 :
2697 (u16) rx_ring->sbq_len; 2769 (u16) rx_ring->sbq_len;
2698 cqicb->sbq_len = cpu_to_le16(bq_len); 2770 cqicb->sbq_len = cpu_to_le16(bq_len);
@@ -2798,7 +2870,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
2798 int i, err; 2870 int i, err;
2799 2871
2800 /* Get the MSIX vectors. */ 2872 /* Get the MSIX vectors. */
2801 if (irq_type == MSIX_IRQ) { 2873 if (qlge_irq_type == MSIX_IRQ) {
2802 /* Try to alloc space for the msix struct, 2874 /* Try to alloc space for the msix struct,
2803 * if it fails then go to MSI/legacy. 2875 * if it fails then go to MSI/legacy.
2804 */ 2876 */
@@ -2806,7 +2878,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
2806 sizeof(struct msix_entry), 2878 sizeof(struct msix_entry),
2807 GFP_KERNEL); 2879 GFP_KERNEL);
2808 if (!qdev->msi_x_entry) { 2880 if (!qdev->msi_x_entry) {
2809 irq_type = MSI_IRQ; 2881 qlge_irq_type = MSI_IRQ;
2810 goto msi; 2882 goto msi;
2811 } 2883 }
2812 2884
@@ -2829,7 +2901,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
2829 QPRINTK(qdev, IFUP, WARNING, 2901 QPRINTK(qdev, IFUP, WARNING,
2830 "MSI-X Enable failed, trying MSI.\n"); 2902 "MSI-X Enable failed, trying MSI.\n");
2831 qdev->intr_count = 1; 2903 qdev->intr_count = 1;
2832 irq_type = MSI_IRQ; 2904 qlge_irq_type = MSI_IRQ;
2833 } else if (err == 0) { 2905 } else if (err == 0) {
2834 set_bit(QL_MSIX_ENABLED, &qdev->flags); 2906 set_bit(QL_MSIX_ENABLED, &qdev->flags);
2835 QPRINTK(qdev, IFUP, INFO, 2907 QPRINTK(qdev, IFUP, INFO,
@@ -2840,7 +2912,7 @@ static void ql_enable_msix(struct ql_adapter *qdev)
2840 } 2912 }
2841msi: 2913msi:
2842 qdev->intr_count = 1; 2914 qdev->intr_count = 1;
2843 if (irq_type == MSI_IRQ) { 2915 if (qlge_irq_type == MSI_IRQ) {
2844 if (!pci_enable_msi(qdev->pdev)) { 2916 if (!pci_enable_msi(qdev->pdev)) {
2845 set_bit(QL_MSI_ENABLED, &qdev->flags); 2917 set_bit(QL_MSI_ENABLED, &qdev->flags);
2846 QPRINTK(qdev, IFUP, INFO, 2918 QPRINTK(qdev, IFUP, INFO,
@@ -2848,7 +2920,7 @@ msi:
2848 return; 2920 return;
2849 } 2921 }
2850 } 2922 }
2851 irq_type = LEG_IRQ; 2923 qlge_irq_type = LEG_IRQ;
2852 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n"); 2924 QPRINTK(qdev, IFUP, DEBUG, "Running with legacy interrupts.\n");
2853} 2925}
2854 2926
@@ -3268,7 +3340,7 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
3268 ql_write32(qdev, FSC, mask | value); 3340 ql_write32(qdev, FSC, mask | value);
3269 3341
3270 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP | 3342 ql_write32(qdev, SPLT_HDR, SPLT_HDR_EP |
3271 min(SMALL_BUFFER_SIZE, MAX_SPLIT_SIZE)); 3343 min(SMALL_BUF_MAP_SIZE, MAX_SPLIT_SIZE));
3272 3344
3273 /* Set RX packet routing to use port/pci function on which the 3345 /* Set RX packet routing to use port/pci function on which the
3274 * packet arrived on in addition to usual frame routing. 3346 * packet arrived on in addition to usual frame routing.
@@ -3276,6 +3348,22 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
3276 * the same MAC address. 3348 * the same MAC address.
3277 */ 3349 */
3278 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ); 3350 ql_write32(qdev, RST_FO, RST_FO_RR_MASK | RST_FO_RR_RCV_FUNC_CQ);
3351 /* Reroute all packets to our Interface.
3352 * They may have been routed to MPI firmware
3353 * due to WOL.
3354 */
3355 value = ql_read32(qdev, MGMT_RCV_CFG);
3356 value &= ~MGMT_RCV_CFG_RM;
3357 mask = 0xffff0000;
3358
3359 /* Sticky reg needs clearing due to WOL. */
3360 ql_write32(qdev, MGMT_RCV_CFG, mask);
3361 ql_write32(qdev, MGMT_RCV_CFG, mask | value);
3362
3363 /* Default WOL is enable on Mezz cards */
3364 if (qdev->pdev->subsystem_device == 0x0068 ||
3365 qdev->pdev->subsystem_device == 0x0180)
3366 qdev->wol = WAKE_MAGIC;
3279 3367
3280 /* Start up the rx queues. */ 3368 /* Start up the rx queues. */
3281 for (i = 0; i < qdev->rx_ring_count; i++) { 3369 for (i = 0; i < qdev->rx_ring_count; i++) {
@@ -3310,10 +3398,8 @@ static int ql_adapter_initialize(struct ql_adapter *qdev)
3310 3398
3311 /* Initialize the port and set the max framesize. */ 3399 /* Initialize the port and set the max framesize. */
3312 status = qdev->nic_ops->port_initialize(qdev); 3400 status = qdev->nic_ops->port_initialize(qdev);
3313 if (status) { 3401 if (status)
3314 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n"); 3402 QPRINTK(qdev, IFUP, ERR, "Failed to start port.\n");
3315 return status;
3316 }
3317 3403
3318 /* Set up the MAC address and frame routing filter. */ 3404 /* Set up the MAC address and frame routing filter. */
3319 status = ql_cam_route_initialize(qdev); 3405 status = ql_cam_route_initialize(qdev);
@@ -3392,6 +3478,52 @@ static void ql_display_dev_info(struct net_device *ndev)
3392 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr); 3478 QPRINTK(qdev, PROBE, INFO, "MAC address %pM\n", ndev->dev_addr);
3393} 3479}
3394 3480
3481int ql_wol(struct ql_adapter *qdev)
3482{
3483 int status = 0;
3484 u32 wol = MB_WOL_DISABLE;
3485
3486 /* The CAM is still intact after a reset, but if we
3487 * are doing WOL, then we may need to program the
3488 * routing regs. We would also need to issue the mailbox
3489 * commands to instruct the MPI what to do per the ethtool
3490 * settings.
3491 */
3492
3493 if (qdev->wol & (WAKE_ARP | WAKE_MAGICSECURE | WAKE_PHY | WAKE_UCAST |
3494 WAKE_MCAST | WAKE_BCAST)) {
3495 QPRINTK(qdev, IFDOWN, ERR,
3496 "Unsupported WOL paramter. qdev->wol = 0x%x.\n",
3497 qdev->wol);
3498 return -EINVAL;
3499 }
3500
3501 if (qdev->wol & WAKE_MAGIC) {
3502 status = ql_mb_wol_set_magic(qdev, 1);
3503 if (status) {
3504 QPRINTK(qdev, IFDOWN, ERR,
3505 "Failed to set magic packet on %s.\n",
3506 qdev->ndev->name);
3507 return status;
3508 } else
3509 QPRINTK(qdev, DRV, INFO,
3510 "Enabled magic packet successfully on %s.\n",
3511 qdev->ndev->name);
3512
3513 wol |= MB_WOL_MAGIC_PKT;
3514 }
3515
3516 if (qdev->wol) {
3517 wol |= MB_WOL_MODE_ON;
3518 status = ql_mb_wol_mode(qdev, wol);
3519 QPRINTK(qdev, DRV, ERR, "WOL %s (wol code 0x%x) on %s\n",
3520 (status == 0) ? "Sucessfully set" : "Failed", wol,
3521 qdev->ndev->name);
3522 }
3523
3524 return status;
3525}
3526
3395static int ql_adapter_down(struct ql_adapter *qdev) 3527static int ql_adapter_down(struct ql_adapter *qdev)
3396{ 3528{
3397 int i, status = 0; 3529 int i, status = 0;
@@ -3497,6 +3629,10 @@ static int ql_configure_rings(struct ql_adapter *qdev)
3497 struct rx_ring *rx_ring; 3629 struct rx_ring *rx_ring;
3498 struct tx_ring *tx_ring; 3630 struct tx_ring *tx_ring;
3499 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus()); 3631 int cpu_cnt = min(MAX_CPUS, (int)num_online_cpus());
3632 unsigned int lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3633 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3634
3635 qdev->lbq_buf_order = get_order(lbq_buf_len);
3500 3636
3501 /* In a perfect world we have one RSS ring for each CPU 3637 /* In a perfect world we have one RSS ring for each CPU
3502 * and each has it's own vector. To do that we ask for 3638 * and each has it's own vector. To do that we ask for
@@ -3544,11 +3680,14 @@ static int ql_configure_rings(struct ql_adapter *qdev)
3544 rx_ring->lbq_len = NUM_LARGE_BUFFERS; 3680 rx_ring->lbq_len = NUM_LARGE_BUFFERS;
3545 rx_ring->lbq_size = 3681 rx_ring->lbq_size =
3546 rx_ring->lbq_len * sizeof(__le64); 3682 rx_ring->lbq_len * sizeof(__le64);
3547 rx_ring->lbq_buf_size = LARGE_BUFFER_SIZE; 3683 rx_ring->lbq_buf_size = (u16)lbq_buf_len;
3684 QPRINTK(qdev, IFUP, DEBUG,
3685 "lbq_buf_size %d, order = %d\n",
3686 rx_ring->lbq_buf_size, qdev->lbq_buf_order);
3548 rx_ring->sbq_len = NUM_SMALL_BUFFERS; 3687 rx_ring->sbq_len = NUM_SMALL_BUFFERS;
3549 rx_ring->sbq_size = 3688 rx_ring->sbq_size =
3550 rx_ring->sbq_len * sizeof(__le64); 3689 rx_ring->sbq_len * sizeof(__le64);
3551 rx_ring->sbq_buf_size = SMALL_BUFFER_SIZE * 2; 3690 rx_ring->sbq_buf_size = SMALL_BUF_MAP_SIZE;
3552 rx_ring->type = RX_Q; 3691 rx_ring->type = RX_Q;
3553 } else { 3692 } else {
3554 /* 3693 /*
@@ -3575,6 +3714,10 @@ static int qlge_open(struct net_device *ndev)
3575 int err = 0; 3714 int err = 0;
3576 struct ql_adapter *qdev = netdev_priv(ndev); 3715 struct ql_adapter *qdev = netdev_priv(ndev);
3577 3716
3717 err = ql_adapter_reset(qdev);
3718 if (err)
3719 return err;
3720
3578 err = ql_configure_rings(qdev); 3721 err = ql_configure_rings(qdev);
3579 if (err) 3722 if (err)
3580 return err; 3723 return err;
@@ -3594,14 +3737,63 @@ error_up:
3594 return err; 3737 return err;
3595} 3738}
3596 3739
3740static int ql_change_rx_buffers(struct ql_adapter *qdev)
3741{
3742 struct rx_ring *rx_ring;
3743 int i, status;
3744 u32 lbq_buf_len;
3745
3746 /* Wait for an oustanding reset to complete. */
3747 if (!test_bit(QL_ADAPTER_UP, &qdev->flags)) {
3748 int i = 3;
3749 while (i-- && !test_bit(QL_ADAPTER_UP, &qdev->flags)) {
3750 QPRINTK(qdev, IFUP, ERR,
3751 "Waiting for adapter UP...\n");
3752 ssleep(1);
3753 }
3754
3755 if (!i) {
3756 QPRINTK(qdev, IFUP, ERR,
3757 "Timed out waiting for adapter UP\n");
3758 return -ETIMEDOUT;
3759 }
3760 }
3761
3762 status = ql_adapter_down(qdev);
3763 if (status)
3764 goto error;
3765
3766 /* Get the new rx buffer size. */
3767 lbq_buf_len = (qdev->ndev->mtu > 1500) ?
3768 LARGE_BUFFER_MAX_SIZE : LARGE_BUFFER_MIN_SIZE;
3769 qdev->lbq_buf_order = get_order(lbq_buf_len);
3770
3771 for (i = 0; i < qdev->rss_ring_count; i++) {
3772 rx_ring = &qdev->rx_ring[i];
3773 /* Set the new size. */
3774 rx_ring->lbq_buf_size = lbq_buf_len;
3775 }
3776
3777 status = ql_adapter_up(qdev);
3778 if (status)
3779 goto error;
3780
3781 return status;
3782error:
3783 QPRINTK(qdev, IFUP, ALERT,
3784 "Driver up/down cycle failed, closing device.\n");
3785 set_bit(QL_ADAPTER_UP, &qdev->flags);
3786 dev_close(qdev->ndev);
3787 return status;
3788}
3789
3597static int qlge_change_mtu(struct net_device *ndev, int new_mtu) 3790static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3598{ 3791{
3599 struct ql_adapter *qdev = netdev_priv(ndev); 3792 struct ql_adapter *qdev = netdev_priv(ndev);
3793 int status;
3600 3794
3601 if (ndev->mtu == 1500 && new_mtu == 9000) { 3795 if (ndev->mtu == 1500 && new_mtu == 9000) {
3602 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n"); 3796 QPRINTK(qdev, IFUP, ERR, "Changing to jumbo MTU.\n");
3603 queue_delayed_work(qdev->workqueue,
3604 &qdev->mpi_port_cfg_work, 0);
3605 } else if (ndev->mtu == 9000 && new_mtu == 1500) { 3797 } else if (ndev->mtu == 9000 && new_mtu == 1500) {
3606 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n"); 3798 QPRINTK(qdev, IFUP, ERR, "Changing to normal MTU.\n");
3607 } else if ((ndev->mtu == 1500 && new_mtu == 1500) || 3799 } else if ((ndev->mtu == 1500 && new_mtu == 1500) ||
@@ -3609,15 +3801,60 @@ static int qlge_change_mtu(struct net_device *ndev, int new_mtu)
3609 return 0; 3801 return 0;
3610 } else 3802 } else
3611 return -EINVAL; 3803 return -EINVAL;
3804
3805 queue_delayed_work(qdev->workqueue,
3806 &qdev->mpi_port_cfg_work, 3*HZ);
3807
3808 if (!netif_running(qdev->ndev)) {
3809 ndev->mtu = new_mtu;
3810 return 0;
3811 }
3812
3612 ndev->mtu = new_mtu; 3813 ndev->mtu = new_mtu;
3613 return 0; 3814 status = ql_change_rx_buffers(qdev);
3815 if (status) {
3816 QPRINTK(qdev, IFUP, ERR,
3817 "Changing MTU failed.\n");
3818 }
3819
3820 return status;
3614} 3821}
3615 3822
3616static struct net_device_stats *qlge_get_stats(struct net_device 3823static struct net_device_stats *qlge_get_stats(struct net_device
3617 *ndev) 3824 *ndev)
3618{ 3825{
3619 struct ql_adapter *qdev = netdev_priv(ndev); 3826 struct ql_adapter *qdev = netdev_priv(ndev);
3620 return &qdev->stats; 3827 struct rx_ring *rx_ring = &qdev->rx_ring[0];
3828 struct tx_ring *tx_ring = &qdev->tx_ring[0];
3829 unsigned long pkts, mcast, dropped, errors, bytes;
3830 int i;
3831
3832 /* Get RX stats. */
3833 pkts = mcast = dropped = errors = bytes = 0;
3834 for (i = 0; i < qdev->rss_ring_count; i++, rx_ring++) {
3835 pkts += rx_ring->rx_packets;
3836 bytes += rx_ring->rx_bytes;
3837 dropped += rx_ring->rx_dropped;
3838 errors += rx_ring->rx_errors;
3839 mcast += rx_ring->rx_multicast;
3840 }
3841 ndev->stats.rx_packets = pkts;
3842 ndev->stats.rx_bytes = bytes;
3843 ndev->stats.rx_dropped = dropped;
3844 ndev->stats.rx_errors = errors;
3845 ndev->stats.multicast = mcast;
3846
3847 /* Get TX stats. */
3848 pkts = errors = bytes = 0;
3849 for (i = 0; i < qdev->tx_ring_count; i++, tx_ring++) {
3850 pkts += tx_ring->tx_packets;
3851 bytes += tx_ring->tx_bytes;
3852 errors += tx_ring->tx_errors;
3853 }
3854 ndev->stats.tx_packets = pkts;
3855 ndev->stats.tx_bytes = bytes;
3856 ndev->stats.tx_errors = errors;
3857 return &ndev->stats;
3621} 3858}
3622 3859
3623static void qlge_set_multicast_list(struct net_device *ndev) 3860static void qlge_set_multicast_list(struct net_device *ndev)
@@ -3868,8 +4105,7 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3868 struct net_device *ndev, int cards_found) 4105 struct net_device *ndev, int cards_found)
3869{ 4106{
3870 struct ql_adapter *qdev = netdev_priv(ndev); 4107 struct ql_adapter *qdev = netdev_priv(ndev);
3871 int pos, err = 0; 4108 int err = 0;
3872 u16 val16;
3873 4109
3874 memset((void *)qdev, 0, sizeof(*qdev)); 4110 memset((void *)qdev, 0, sizeof(*qdev));
3875 err = pci_enable_device(pdev); 4111 err = pci_enable_device(pdev);
@@ -3881,18 +4117,12 @@ static int __devinit ql_init_device(struct pci_dev *pdev,
3881 qdev->ndev = ndev; 4117 qdev->ndev = ndev;
3882 qdev->pdev = pdev; 4118 qdev->pdev = pdev;
3883 pci_set_drvdata(pdev, ndev); 4119 pci_set_drvdata(pdev, ndev);
3884 pos = pci_find_capability(pdev, PCI_CAP_ID_EXP); 4120
3885 if (pos <= 0) { 4121 /* Set PCIe read request size */
3886 dev_err(&pdev->dev, PFX "Cannot find PCI Express capability, " 4122 err = pcie_set_readrq(pdev, 4096);
3887 "aborting.\n"); 4123 if (err) {
3888 return pos; 4124 dev_err(&pdev->dev, "Set readrq failed.\n");
3889 } else { 4125 goto err_out;
3890 pci_read_config_word(pdev, pos + PCI_EXP_DEVCTL, &val16);
3891 val16 &= ~PCI_EXP_DEVCTL_NOSNOOP_EN;
3892 val16 |= (PCI_EXP_DEVCTL_CERE |
3893 PCI_EXP_DEVCTL_NFERE |
3894 PCI_EXP_DEVCTL_FERE | PCI_EXP_DEVCTL_URRE);
3895 pci_write_config_word(pdev, pos + PCI_EXP_DEVCTL, val16);
3896 } 4126 }
3897 4127
3898 err = pci_request_regions(pdev, DRV_NAME); 4128 err = pci_request_regions(pdev, DRV_NAME);
@@ -3991,7 +4221,6 @@ err_out:
3991 return err; 4221 return err;
3992} 4222}
3993 4223
3994
3995static const struct net_device_ops qlge_netdev_ops = { 4224static const struct net_device_ops qlge_netdev_ops = {
3996 .ndo_open = qlge_open, 4225 .ndo_open = qlge_open,
3997 .ndo_stop = qlge_close, 4226 .ndo_stop = qlge_close,
@@ -4002,9 +4231,9 @@ static const struct net_device_ops qlge_netdev_ops = {
4002 .ndo_set_mac_address = qlge_set_mac_address, 4231 .ndo_set_mac_address = qlge_set_mac_address,
4003 .ndo_validate_addr = eth_validate_addr, 4232 .ndo_validate_addr = eth_validate_addr,
4004 .ndo_tx_timeout = qlge_tx_timeout, 4233 .ndo_tx_timeout = qlge_tx_timeout,
4005 .ndo_vlan_rx_register = ql_vlan_rx_register, 4234 .ndo_vlan_rx_register = qlge_vlan_rx_register,
4006 .ndo_vlan_rx_add_vid = ql_vlan_rx_add_vid, 4235 .ndo_vlan_rx_add_vid = qlge_vlan_rx_add_vid,
4007 .ndo_vlan_rx_kill_vid = ql_vlan_rx_kill_vid, 4236 .ndo_vlan_rx_kill_vid = qlge_vlan_rx_kill_vid,
4008}; 4237};
4009 4238
4010static int __devinit qlge_probe(struct pci_dev *pdev, 4239static int __devinit qlge_probe(struct pci_dev *pdev,
@@ -4060,10 +4289,21 @@ static int __devinit qlge_probe(struct pci_dev *pdev,
4060 } 4289 }
4061 ql_link_off(qdev); 4290 ql_link_off(qdev);
4062 ql_display_dev_info(ndev); 4291 ql_display_dev_info(ndev);
4292 atomic_set(&qdev->lb_count, 0);
4063 cards_found++; 4293 cards_found++;
4064 return 0; 4294 return 0;
4065} 4295}
4066 4296
4297netdev_tx_t ql_lb_send(struct sk_buff *skb, struct net_device *ndev)
4298{
4299 return qlge_send(skb, ndev);
4300}
4301
4302int ql_clean_lb_rx_ring(struct rx_ring *rx_ring, int budget)
4303{
4304 return ql_clean_inbound_rx_ring(rx_ring, budget);
4305}
4306
4067static void __devexit qlge_remove(struct pci_dev *pdev) 4307static void __devexit qlge_remove(struct pci_dev *pdev)
4068{ 4308{
4069 struct net_device *ndev = pci_get_drvdata(pdev); 4309 struct net_device *ndev = pci_get_drvdata(pdev);
@@ -4193,6 +4433,7 @@ static int qlge_suspend(struct pci_dev *pdev, pm_message_t state)
4193 return err; 4433 return err;
4194 } 4434 }
4195 4435
4436 ql_wol(qdev);
4196 err = pci_save_state(pdev); 4437 err = pci_save_state(pdev);
4197 if (err) 4438 if (err)
4198 return err; 4439 return err;
diff --git a/drivers/net/qlge/qlge_mpi.c b/drivers/net/qlge/qlge_mpi.c
index aec05f266107..e2b2286102d4 100644
--- a/drivers/net/qlge/qlge_mpi.c
+++ b/drivers/net/qlge/qlge_mpi.c
@@ -1,25 +1,5 @@
1#include "qlge.h" 1#include "qlge.h"
2 2
3static void ql_display_mb_sts(struct ql_adapter *qdev,
4 struct mbox_params *mbcp)
5{
6 int i;
7 static char *err_sts[] = {
8 "Command Complete",
9 "Command Not Supported",
10 "Host Interface Error",
11 "Checksum Error",
12 "Unused Completion Status",
13 "Test Failed",
14 "Command Parameter Error"};
15
16 QPRINTK(qdev, DRV, DEBUG, "%s.\n",
17 err_sts[mbcp->mbox_out[0] & 0x0000000f]);
18 for (i = 0; i < mbcp->out_count; i++)
19 QPRINTK(qdev, DRV, DEBUG, "mbox_out[%d] = 0x%.08x.\n",
20 i, mbcp->mbox_out[i]);
21}
22
23int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data) 3int ql_read_mpi_reg(struct ql_adapter *qdev, u32 reg, u32 *data)
24{ 4{
25 int status; 5 int status;
@@ -317,6 +297,7 @@ static void ql_init_fw_done(struct ql_adapter *qdev, struct mbox_params *mbcp)
317 } else { 297 } else {
318 QPRINTK(qdev, DRV, ERR, "Firmware Revision = 0x%.08x.\n", 298 QPRINTK(qdev, DRV, ERR, "Firmware Revision = 0x%.08x.\n",
319 mbcp->mbox_out[1]); 299 mbcp->mbox_out[1]);
300 qdev->fw_rev_id = mbcp->mbox_out[1];
320 status = ql_cam_route_initialize(qdev); 301 status = ql_cam_route_initialize(qdev);
321 if (status) 302 if (status)
322 QPRINTK(qdev, IFUP, ERR, 303 QPRINTK(qdev, IFUP, ERR,
@@ -446,6 +427,9 @@ static int ql_mpi_handler(struct ql_adapter *qdev, struct mbox_params *mbcp)
446 ql_aen_lost(qdev, mbcp); 427 ql_aen_lost(qdev, mbcp);
447 break; 428 break;
448 429
430 case AEN_DCBX_CHG:
431 /* Need to support AEN 8110 */
432 break;
449 default: 433 default:
450 QPRINTK(qdev, DRV, ERR, 434 QPRINTK(qdev, DRV, ERR,
451 "Unsupported AE %.08x.\n", mbcp->mbox_out[0]); 435 "Unsupported AE %.08x.\n", mbcp->mbox_out[0]);
@@ -537,7 +521,6 @@ done:
537 MB_CMD_STS_GOOD) && 521 MB_CMD_STS_GOOD) &&
538 ((mbcp->mbox_out[0] & 0x0000f000) != 522 ((mbcp->mbox_out[0] & 0x0000f000) !=
539 MB_CMD_STS_INTRMDT)) { 523 MB_CMD_STS_INTRMDT)) {
540 ql_display_mb_sts(qdev, mbcp);
541 status = -EIO; 524 status = -EIO;
542 } 525 }
543end: 526end:
@@ -655,7 +638,7 @@ int ql_mb_idc_ack(struct ql_adapter *qdev)
655 * for the current port. 638 * for the current port.
656 * Most likely will block. 639 * Most likely will block.
657 */ 640 */
658static int ql_mb_set_port_cfg(struct ql_adapter *qdev) 641int ql_mb_set_port_cfg(struct ql_adapter *qdev)
659{ 642{
660 struct mbox_params mbc; 643 struct mbox_params mbc;
661 struct mbox_params *mbcp = &mbc; 644 struct mbox_params *mbcp = &mbc;
@@ -690,7 +673,7 @@ static int ql_mb_set_port_cfg(struct ql_adapter *qdev)
690 * for the current port. 673 * for the current port.
691 * Most likely will block. 674 * Most likely will block.
692 */ 675 */
693static int ql_mb_get_port_cfg(struct ql_adapter *qdev) 676int ql_mb_get_port_cfg(struct ql_adapter *qdev)
694{ 677{
695 struct mbox_params mbc; 678 struct mbox_params mbc;
696 struct mbox_params *mbcp = &mbc; 679 struct mbox_params *mbcp = &mbc;
@@ -720,6 +703,76 @@ static int ql_mb_get_port_cfg(struct ql_adapter *qdev)
720 return status; 703 return status;
721} 704}
722 705
706int ql_mb_wol_mode(struct ql_adapter *qdev, u32 wol)
707{
708 struct mbox_params mbc;
709 struct mbox_params *mbcp = &mbc;
710 int status;
711
712 memset(mbcp, 0, sizeof(struct mbox_params));
713
714 mbcp->in_count = 2;
715 mbcp->out_count = 1;
716
717 mbcp->mbox_in[0] = MB_CMD_SET_WOL_MODE;
718 mbcp->mbox_in[1] = wol;
719
720
721 status = ql_mailbox_command(qdev, mbcp);
722 if (status)
723 return status;
724
725 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
726 QPRINTK(qdev, DRV, ERR,
727 "Failed to set WOL mode.\n");
728 status = -EIO;
729 }
730 return status;
731}
732
733int ql_mb_wol_set_magic(struct ql_adapter *qdev, u32 enable_wol)
734{
735 struct mbox_params mbc;
736 struct mbox_params *mbcp = &mbc;
737 int status;
738 u8 *addr = qdev->ndev->dev_addr;
739
740 memset(mbcp, 0, sizeof(struct mbox_params));
741
742 mbcp->in_count = 8;
743 mbcp->out_count = 1;
744
745 mbcp->mbox_in[0] = MB_CMD_SET_WOL_MAGIC;
746 if (enable_wol) {
747 mbcp->mbox_in[1] = (u32)addr[0];
748 mbcp->mbox_in[2] = (u32)addr[1];
749 mbcp->mbox_in[3] = (u32)addr[2];
750 mbcp->mbox_in[4] = (u32)addr[3];
751 mbcp->mbox_in[5] = (u32)addr[4];
752 mbcp->mbox_in[6] = (u32)addr[5];
753 mbcp->mbox_in[7] = 0;
754 } else {
755 mbcp->mbox_in[1] = 0;
756 mbcp->mbox_in[2] = 1;
757 mbcp->mbox_in[3] = 1;
758 mbcp->mbox_in[4] = 1;
759 mbcp->mbox_in[5] = 1;
760 mbcp->mbox_in[6] = 1;
761 mbcp->mbox_in[7] = 0;
762 }
763
764 status = ql_mailbox_command(qdev, mbcp);
765 if (status)
766 return status;
767
768 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
769 QPRINTK(qdev, DRV, ERR,
770 "Failed to set WOL mode.\n");
771 status = -EIO;
772 }
773 return status;
774}
775
723/* IDC - Inter Device Communication... 776/* IDC - Inter Device Communication...
724 * Some firmware commands require consent of adjacent FCOE 777 * Some firmware commands require consent of adjacent FCOE
725 * function. This function waits for the OK, or a 778 * function. This function waits for the OK, or a
@@ -769,6 +822,61 @@ static int ql_idc_wait(struct ql_adapter *qdev)
769 return status; 822 return status;
770} 823}
771 824
825int ql_mb_set_led_cfg(struct ql_adapter *qdev, u32 led_config)
826{
827 struct mbox_params mbc;
828 struct mbox_params *mbcp = &mbc;
829 int status;
830
831 memset(mbcp, 0, sizeof(struct mbox_params));
832
833 mbcp->in_count = 2;
834 mbcp->out_count = 1;
835
836 mbcp->mbox_in[0] = MB_CMD_SET_LED_CFG;
837 mbcp->mbox_in[1] = led_config;
838
839
840 status = ql_mailbox_command(qdev, mbcp);
841 if (status)
842 return status;
843
844 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
845 QPRINTK(qdev, DRV, ERR,
846 "Failed to set LED Configuration.\n");
847 status = -EIO;
848 }
849
850 return status;
851}
852
853int ql_mb_get_led_cfg(struct ql_adapter *qdev)
854{
855 struct mbox_params mbc;
856 struct mbox_params *mbcp = &mbc;
857 int status;
858
859 memset(mbcp, 0, sizeof(struct mbox_params));
860
861 mbcp->in_count = 1;
862 mbcp->out_count = 2;
863
864 mbcp->mbox_in[0] = MB_CMD_GET_LED_CFG;
865
866 status = ql_mailbox_command(qdev, mbcp);
867 if (status)
868 return status;
869
870 if (mbcp->mbox_out[0] != MB_CMD_STS_GOOD) {
871 QPRINTK(qdev, DRV, ERR,
872 "Failed to get LED Configuration.\n");
873 status = -EIO;
874 } else
875 qdev->led_config = mbcp->mbox_out[1];
876
877 return status;
878}
879
772int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control) 880int ql_mb_set_mgmnt_traffic_ctl(struct ql_adapter *qdev, u32 control)
773{ 881{
774 struct mbox_params mbc; 882 struct mbox_params mbc;
@@ -930,8 +1038,11 @@ void ql_mpi_idc_work(struct work_struct *work)
930 int status; 1038 int status;
931 struct mbox_params *mbcp = &qdev->idc_mbc; 1039 struct mbox_params *mbcp = &qdev->idc_mbc;
932 u32 aen; 1040 u32 aen;
1041 int timeout;
933 1042
1043 rtnl_lock();
934 aen = mbcp->mbox_out[1] >> 16; 1044 aen = mbcp->mbox_out[1] >> 16;
1045 timeout = (mbcp->mbox_out[1] >> 8) & 0xf;
935 1046
936 switch (aen) { 1047 switch (aen) {
937 default: 1048 default:
@@ -939,22 +1050,61 @@ void ql_mpi_idc_work(struct work_struct *work)
939 "Bug: Unhandled IDC action.\n"); 1050 "Bug: Unhandled IDC action.\n");
940 break; 1051 break;
941 case MB_CMD_PORT_RESET: 1052 case MB_CMD_PORT_RESET:
942 case MB_CMD_SET_PORT_CFG:
943 case MB_CMD_STOP_FW: 1053 case MB_CMD_STOP_FW:
944 ql_link_off(qdev); 1054 ql_link_off(qdev);
1055 case MB_CMD_SET_PORT_CFG:
945 /* Signal the resulting link up AEN 1056 /* Signal the resulting link up AEN
946 * that the frame routing and mac addr 1057 * that the frame routing and mac addr
947 * needs to be set. 1058 * needs to be set.
948 * */ 1059 * */
949 set_bit(QL_CAM_RT_SET, &qdev->flags); 1060 set_bit(QL_CAM_RT_SET, &qdev->flags);
950 rtnl_lock(); 1061 /* Do ACK if required */
951 status = ql_mb_idc_ack(qdev); 1062 if (timeout) {
952 rtnl_unlock(); 1063 status = ql_mb_idc_ack(qdev);
953 if (status) { 1064 if (status)
954 QPRINTK(qdev, DRV, ERR, 1065 QPRINTK(qdev, DRV, ERR,
955 "Bug: No pending IDC!\n"); 1066 "Bug: No pending IDC!\n");
1067 } else {
1068 QPRINTK(qdev, DRV, DEBUG,
1069 "IDC ACK not required\n");
1070 status = 0; /* success */
956 } 1071 }
1072 break;
1073
1074 /* These sub-commands issued by another (FCoE)
1075 * function are requesting to do an operation
1076 * on the shared resource (MPI environment).
1077 * We currently don't issue these so we just
1078 * ACK the request.
1079 */
1080 case MB_CMD_IOP_RESTART_MPI:
1081 case MB_CMD_IOP_PREP_LINK_DOWN:
1082 /* Drop the link, reload the routing
1083 * table when link comes up.
1084 */
1085 ql_link_off(qdev);
1086 set_bit(QL_CAM_RT_SET, &qdev->flags);
1087 /* Fall through. */
1088 case MB_CMD_IOP_DVR_START:
1089 case MB_CMD_IOP_FLASH_ACC:
1090 case MB_CMD_IOP_CORE_DUMP_MPI:
1091 case MB_CMD_IOP_PREP_UPDATE_MPI:
1092 case MB_CMD_IOP_COMP_UPDATE_MPI:
1093 case MB_CMD_IOP_NONE: /* an IDC without params */
1094 /* Do ACK if required */
1095 if (timeout) {
1096 status = ql_mb_idc_ack(qdev);
1097 if (status)
1098 QPRINTK(qdev, DRV, ERR,
1099 "Bug: No pending IDC!\n");
1100 } else {
1101 QPRINTK(qdev, DRV, DEBUG,
1102 "IDC ACK not required\n");
1103 status = 0; /* success */
1104 }
1105 break;
957 } 1106 }
1107 rtnl_unlock();
958} 1108}
959 1109
960void ql_mpi_work(struct work_struct *work) 1110void ql_mpi_work(struct work_struct *work)
diff --git a/drivers/net/r8169.c b/drivers/net/r8169.c
index b9221bdc7184..98f6c51b7608 100644
--- a/drivers/net/r8169.c
+++ b/drivers/net/r8169.c
@@ -4443,13 +4443,12 @@ static inline bool rtl8169_try_rx_copy(struct sk_buff **sk_buff,
4443 if (pkt_size >= rx_copybreak) 4443 if (pkt_size >= rx_copybreak)
4444 goto out; 4444 goto out;
4445 4445
4446 skb = netdev_alloc_skb(tp->dev, pkt_size + NET_IP_ALIGN); 4446 skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
4447 if (!skb) 4447 if (!skb)
4448 goto out; 4448 goto out;
4449 4449
4450 pci_dma_sync_single_for_cpu(tp->pci_dev, addr, pkt_size, 4450 pci_dma_sync_single_for_cpu(tp->pci_dev, addr, pkt_size,
4451 PCI_DMA_FROMDEVICE); 4451 PCI_DMA_FROMDEVICE);
4452 skb_reserve(skb, NET_IP_ALIGN);
4453 skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size); 4452 skb_copy_from_linear_data(*sk_buff, skb->data, pkt_size);
4454 *sk_buff = skb; 4453 *sk_buff = skb;
4455 done = true; 4454 done = true;
diff --git a/drivers/net/sb1250-mac.c b/drivers/net/sb1250-mac.c
index 508551f1b3fc..7269a875326e 100644
--- a/drivers/net/sb1250-mac.c
+++ b/drivers/net/sb1250-mac.c
@@ -1476,7 +1476,6 @@ static void sbmac_channel_start(struct sbmac_softc *s)
1476 V_MAC_TX_RL_THRSH(4) | 1476 V_MAC_TX_RL_THRSH(4) |
1477 V_MAC_RX_PL_THRSH(4) | 1477 V_MAC_RX_PL_THRSH(4) |
1478 V_MAC_RX_RD_THRSH(4) | /* Must be '4' */ 1478 V_MAC_RX_RD_THRSH(4) | /* Must be '4' */
1479 V_MAC_RX_PL_THRSH(4) |
1480 V_MAC_RX_RL_THRSH(8) | 1479 V_MAC_RX_RL_THRSH(8) |
1481 0; 1480 0;
1482 1481
diff --git a/drivers/net/sc92031.c b/drivers/net/sc92031.c
index 8d6030022d14..b7e0eb40a8bd 100644
--- a/drivers/net/sc92031.c
+++ b/drivers/net/sc92031.c
@@ -793,7 +793,7 @@ static void _sc92031_rx_tasklet(struct net_device *dev)
793 793
794 rx_len -= rx_size_align + 4; 794 rx_len -= rx_size_align + 4;
795 795
796 skb = netdev_alloc_skb(dev, pkt_size + NET_IP_ALIGN); 796 skb = netdev_alloc_skb_ip_align(dev, pkt_size);
797 if (unlikely(!skb)) { 797 if (unlikely(!skb)) {
798 if (printk_ratelimit()) 798 if (printk_ratelimit())
799 printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n", 799 printk(KERN_ERR "%s: Couldn't allocate a skb_buff for a packet of size %u\n",
@@ -801,8 +801,6 @@ static void _sc92031_rx_tasklet(struct net_device *dev)
801 goto next; 801 goto next;
802 } 802 }
803 803
804 skb_reserve(skb, NET_IP_ALIGN);
805
806 if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) { 804 if ((rx_ring_offset + pkt_size) > RX_BUF_LEN) {
807 memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset), 805 memcpy(skb_put(skb, RX_BUF_LEN - rx_ring_offset),
808 rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset); 806 rx_ring + rx_ring_offset, RX_BUF_LEN - rx_ring_offset);
diff --git a/drivers/net/sfc/Makefile b/drivers/net/sfc/Makefile
index b89f9be3cb13..7b52fe10d38f 100644
--- a/drivers/net/sfc/Makefile
+++ b/drivers/net/sfc/Makefile
@@ -1,6 +1,6 @@
1sfc-y += efx.o falcon.o tx.o rx.o falcon_gmac.o \ 1sfc-y += efx.o falcon.o tx.o rx.o falcon_gmac.o \
2 falcon_xmac.o selftest.o ethtool.o xfp_phy.o \ 2 falcon_xmac.o selftest.o ethtool.o qt202x_phy.o \
3 mdio_10g.o tenxpress.o boards.o sfe4001.o 3 mdio_10g.o tenxpress.o falcon_boards.o
4sfc-$(CONFIG_SFC_MTD) += mtd.o 4sfc-$(CONFIG_SFC_MTD) += mtd.o
5 5
6obj-$(CONFIG_SFC) += sfc.o 6obj-$(CONFIG_SFC) += sfc.o
diff --git a/drivers/net/sfc/bitfield.h b/drivers/net/sfc/bitfield.h
index d54d84c267b9..6ad909bba957 100644
--- a/drivers/net/sfc/bitfield.h
+++ b/drivers/net/sfc/bitfield.h
@@ -520,19 +520,6 @@ typedef union efx_oword {
520#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32 520#define EFX_SET_QWORD_FIELD EFX_SET_QWORD_FIELD32
521#endif 521#endif
522 522
523#define EFX_SET_OWORD_FIELD_VER(efx, oword, field, value) do { \
524 if (falcon_rev(efx) >= FALCON_REV_B0) { \
525 EFX_SET_OWORD_FIELD((oword), field##_B0, (value)); \
526 } else { \
527 EFX_SET_OWORD_FIELD((oword), field##_A1, (value)); \
528 } \
529} while (0)
530
531#define EFX_QWORD_FIELD_VER(efx, qword, field) \
532 (falcon_rev(efx) >= FALCON_REV_B0 ? \
533 EFX_QWORD_FIELD((qword), field##_B0) : \
534 EFX_QWORD_FIELD((qword), field##_A1))
535
536/* Used to avoid compiler warnings about shift range exceeding width 523/* Used to avoid compiler warnings about shift range exceeding width
537 * of the data types when dma_addr_t is only 32 bits wide. 524 * of the data types when dma_addr_t is only 32 bits wide.
538 */ 525 */
diff --git a/drivers/net/sfc/boards.c b/drivers/net/sfc/boards.c
deleted file mode 100644
index 4a4c74c891b7..000000000000
--- a/drivers/net/sfc/boards.c
+++ /dev/null
@@ -1,328 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2008 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#include "net_driver.h"
11#include "phy.h"
12#include "boards.h"
13#include "efx.h"
14#include "workarounds.h"
15
16/* Macros for unpacking the board revision */
17/* The revision info is in host byte order. */
18#define BOARD_TYPE(_rev) (_rev >> 8)
19#define BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
20#define BOARD_MINOR(_rev) (_rev & 0xf)
21
22/* Blink support. If the PHY has no auto-blink mode so we hang it off a timer */
23#define BLINK_INTERVAL (HZ/2)
24
25static void blink_led_timer(unsigned long context)
26{
27 struct efx_nic *efx = (struct efx_nic *)context;
28 struct efx_blinker *bl = &efx->board_info.blinker;
29 efx->board_info.set_id_led(efx, bl->state);
30 bl->state = !bl->state;
31 if (bl->resubmit)
32 mod_timer(&bl->timer, jiffies + BLINK_INTERVAL);
33}
34
35static void board_blink(struct efx_nic *efx, bool blink)
36{
37 struct efx_blinker *blinker = &efx->board_info.blinker;
38
39 /* The rtnl mutex serialises all ethtool ioctls, so
40 * nothing special needs doing here. */
41 if (blink) {
42 blinker->resubmit = true;
43 blinker->state = false;
44 setup_timer(&blinker->timer, blink_led_timer,
45 (unsigned long)efx);
46 mod_timer(&blinker->timer, jiffies + BLINK_INTERVAL);
47 } else {
48 blinker->resubmit = false;
49 if (blinker->timer.function)
50 del_timer_sync(&blinker->timer);
51 efx->board_info.init_leds(efx);
52 }
53}
54
55/*****************************************************************************
56 * Support for LM87 sensor chip used on several boards
57 */
58#define LM87_REG_ALARMS1 0x41
59#define LM87_REG_ALARMS2 0x42
60#define LM87_IN_LIMITS(nr, _min, _max) \
61 0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min
62#define LM87_AIN_LIMITS(nr, _min, _max) \
63 0x3B + (nr), _max, 0x1A + (nr), _min
64#define LM87_TEMP_INT_LIMITS(_min, _max) \
65 0x39, _max, 0x3A, _min
66#define LM87_TEMP_EXT1_LIMITS(_min, _max) \
67 0x37, _max, 0x38, _min
68
69#define LM87_ALARM_TEMP_INT 0x10
70#define LM87_ALARM_TEMP_EXT1 0x20
71
72#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
73
74static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
75 const u8 *reg_values)
76{
77 struct i2c_client *client = i2c_new_device(&efx->i2c_adap, info);
78 int rc;
79
80 if (!client)
81 return -EIO;
82
83 while (*reg_values) {
84 u8 reg = *reg_values++;
85 u8 value = *reg_values++;
86 rc = i2c_smbus_write_byte_data(client, reg, value);
87 if (rc)
88 goto err;
89 }
90
91 efx->board_info.hwmon_client = client;
92 return 0;
93
94err:
95 i2c_unregister_device(client);
96 return rc;
97}
98
99static void efx_fini_lm87(struct efx_nic *efx)
100{
101 i2c_unregister_device(efx->board_info.hwmon_client);
102}
103
104static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
105{
106 struct i2c_client *client = efx->board_info.hwmon_client;
107 s32 alarms1, alarms2;
108
109 /* If link is up then do not monitor temperature */
110 if (EFX_WORKAROUND_7884(efx) && efx->link_up)
111 return 0;
112
113 alarms1 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
114 alarms2 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
115 if (alarms1 < 0)
116 return alarms1;
117 if (alarms2 < 0)
118 return alarms2;
119 alarms1 &= mask;
120 alarms2 &= mask >> 8;
121 if (alarms1 || alarms2) {
122 EFX_ERR(efx,
123 "LM87 detected a hardware failure (status %02x:%02x)"
124 "%s%s\n",
125 alarms1, alarms2,
126 (alarms1 & LM87_ALARM_TEMP_INT) ? " INTERNAL" : "",
127 (alarms1 & LM87_ALARM_TEMP_EXT1) ? " EXTERNAL" : "");
128 return -ERANGE;
129 }
130
131 return 0;
132}
133
134#else /* !CONFIG_SENSORS_LM87 */
135
136static inline int
137efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
138 const u8 *reg_values)
139{
140 return 0;
141}
142static inline void efx_fini_lm87(struct efx_nic *efx)
143{
144}
145static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
146{
147 return 0;
148}
149
150#endif /* CONFIG_SENSORS_LM87 */
151
152/*****************************************************************************
153 * Support for the SFE4002
154 *
155 */
156static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */
157
158static const u8 sfe4002_lm87_regs[] = {
159 LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */
160 LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */
161 LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */
162 LM87_IN_LIMITS(3, 0xb0, 0xc9), /* 5V: 4.6-5.2V */
163 LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */
164 LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */
165 LM87_AIN_LIMITS(0, 0xa0, 0xb2), /* AIN1: 1.66V +/- 5% */
166 LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */
167 LM87_TEMP_INT_LIMITS(10, 60), /* board */
168 LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */
169 0
170};
171
172static struct i2c_board_info sfe4002_hwmon_info = {
173 I2C_BOARD_INFO("lm87", 0x2e),
174 .platform_data = &sfe4002_lm87_channel,
175};
176
177/****************************************************************************/
178/* LED allocations. Note that on rev A0 boards the schematic and the reality
179 * differ: red and green are swapped. Below is the fixed (A1) layout (there
180 * are only 3 A0 boards in existence, so no real reason to make this
181 * conditional).
182 */
183#define SFE4002_FAULT_LED (2) /* Red */
184#define SFE4002_RX_LED (0) /* Green */
185#define SFE4002_TX_LED (1) /* Amber */
186
187static void sfe4002_init_leds(struct efx_nic *efx)
188{
189 /* Set the TX and RX LEDs to reflect status and activity, and the
190 * fault LED off */
191 xfp_set_led(efx, SFE4002_TX_LED,
192 QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
193 xfp_set_led(efx, SFE4002_RX_LED,
194 QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
195 xfp_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
196}
197
198static void sfe4002_set_id_led(struct efx_nic *efx, bool state)
199{
200 xfp_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON :
201 QUAKE_LED_OFF);
202}
203
204static int sfe4002_check_hw(struct efx_nic *efx)
205{
206 /* A0 board rev. 4002s report a temperature fault the whole time
207 * (bad sensor) so we mask it out. */
208 unsigned alarm_mask =
209 (efx->board_info.major == 0 && efx->board_info.minor == 0) ?
210 ~LM87_ALARM_TEMP_EXT1 : ~0;
211
212 return efx_check_lm87(efx, alarm_mask);
213}
214
215static int sfe4002_init(struct efx_nic *efx)
216{
217 int rc = efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs);
218 if (rc)
219 return rc;
220 efx->board_info.monitor = sfe4002_check_hw;
221 efx->board_info.init_leds = sfe4002_init_leds;
222 efx->board_info.set_id_led = sfe4002_set_id_led;
223 efx->board_info.blink = board_blink;
224 efx->board_info.fini = efx_fini_lm87;
225 return 0;
226}
227
228/*****************************************************************************
229 * Support for the SFN4112F
230 *
231 */
232static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */
233
234static const u8 sfn4112f_lm87_regs[] = {
235 LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */
236 LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */
237 LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */
238 LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */
239 LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */
240 LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */
241 LM87_TEMP_INT_LIMITS(10, 60), /* board */
242 LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */
243 0
244};
245
246static struct i2c_board_info sfn4112f_hwmon_info = {
247 I2C_BOARD_INFO("lm87", 0x2e),
248 .platform_data = &sfn4112f_lm87_channel,
249};
250
251#define SFN4112F_ACT_LED 0
252#define SFN4112F_LINK_LED 1
253
254static void sfn4112f_init_leds(struct efx_nic *efx)
255{
256 xfp_set_led(efx, SFN4112F_ACT_LED,
257 QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT);
258 xfp_set_led(efx, SFN4112F_LINK_LED,
259 QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT);
260}
261
262static void sfn4112f_set_id_led(struct efx_nic *efx, bool state)
263{
264 xfp_set_led(efx, SFN4112F_LINK_LED,
265 state ? QUAKE_LED_ON : QUAKE_LED_OFF);
266}
267
268static int sfn4112f_check_hw(struct efx_nic *efx)
269{
270 /* Mask out unused sensors */
271 return efx_check_lm87(efx, ~0x48);
272}
273
274static int sfn4112f_init(struct efx_nic *efx)
275{
276 int rc = efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
277 if (rc)
278 return rc;
279 efx->board_info.monitor = sfn4112f_check_hw;
280 efx->board_info.init_leds = sfn4112f_init_leds;
281 efx->board_info.set_id_led = sfn4112f_set_id_led;
282 efx->board_info.blink = board_blink;
283 efx->board_info.fini = efx_fini_lm87;
284 return 0;
285}
286
287/* This will get expanded as board-specific details get moved out of the
288 * PHY drivers. */
289struct efx_board_data {
290 enum efx_board_type type;
291 const char *ref_model;
292 const char *gen_type;
293 int (*init) (struct efx_nic *nic);
294};
295
296
297static struct efx_board_data board_data[] = {
298 { EFX_BOARD_SFE4001, "SFE4001", "10GBASE-T adapter", sfe4001_init },
299 { EFX_BOARD_SFE4002, "SFE4002", "XFP adapter", sfe4002_init },
300 { EFX_BOARD_SFN4111T, "SFN4111T", "100/1000/10GBASE-T adapter",
301 sfn4111t_init },
302 { EFX_BOARD_SFN4112F, "SFN4112F", "SFP+ adapter",
303 sfn4112f_init },
304};
305
306void efx_set_board_info(struct efx_nic *efx, u16 revision_info)
307{
308 struct efx_board_data *data = NULL;
309 int i;
310
311 efx->board_info.type = BOARD_TYPE(revision_info);
312 efx->board_info.major = BOARD_MAJOR(revision_info);
313 efx->board_info.minor = BOARD_MINOR(revision_info);
314
315 for (i = 0; i < ARRAY_SIZE(board_data); i++)
316 if (board_data[i].type == efx->board_info.type)
317 data = &board_data[i];
318
319 if (data) {
320 EFX_INFO(efx, "board is %s rev %c%d\n",
321 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
322 ? data->ref_model : data->gen_type,
323 'A' + efx->board_info.major, efx->board_info.minor);
324 efx->board_info.init = data->init;
325 } else {
326 EFX_ERR(efx, "unknown board type %d\n", efx->board_info.type);
327 }
328}
diff --git a/drivers/net/sfc/boards.h b/drivers/net/sfc/boards.h
deleted file mode 100644
index 44942de0e080..000000000000
--- a/drivers/net/sfc/boards.h
+++ /dev/null
@@ -1,28 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2007-2008 Solarflare Communications Inc.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License version 2 as published
7 * by the Free Software Foundation, incorporated herein by reference.
8 */
9
10#ifndef EFX_BOARDS_H
11#define EFX_BOARDS_H
12
13/* Board IDs (must fit in 8 bits) */
14enum efx_board_type {
15 EFX_BOARD_SFE4001 = 1,
16 EFX_BOARD_SFE4002 = 2,
17 EFX_BOARD_SFN4111T = 0x51,
18 EFX_BOARD_SFN4112F = 0x52,
19};
20
21extern void efx_set_board_info(struct efx_nic *efx, u16 revision_info);
22
23/* SFE4001 (10GBASE-T) */
24extern int sfe4001_init(struct efx_nic *efx);
25/* SFN4111T (100/1000/10GBASE-T) */
26extern int sfn4111t_init(struct efx_nic *efx);
27
28#endif
diff --git a/drivers/net/sfc/efx.c b/drivers/net/sfc/efx.c
index cc4b2f99989d..0d0243b7ac34 100644
--- a/drivers/net/sfc/efx.c
+++ b/drivers/net/sfc/efx.c
@@ -228,26 +228,20 @@ static int efx_poll(struct napi_struct *napi, int budget)
228 if (channel->used_flags & EFX_USED_BY_RX && 228 if (channel->used_flags & EFX_USED_BY_RX &&
229 efx->irq_rx_adaptive && 229 efx->irq_rx_adaptive &&
230 unlikely(++channel->irq_count == 1000)) { 230 unlikely(++channel->irq_count == 1000)) {
231 unsigned old_irq_moderation = channel->irq_moderation;
232
233 if (unlikely(channel->irq_mod_score < 231 if (unlikely(channel->irq_mod_score <
234 irq_adapt_low_thresh)) { 232 irq_adapt_low_thresh)) {
235 channel->irq_moderation = 233 if (channel->irq_moderation > 1) {
236 max_t(int, 234 channel->irq_moderation -= 1;
237 channel->irq_moderation - 235 falcon_set_int_moderation(channel);
238 FALCON_IRQ_MOD_RESOLUTION, 236 }
239 FALCON_IRQ_MOD_RESOLUTION);
240 } else if (unlikely(channel->irq_mod_score > 237 } else if (unlikely(channel->irq_mod_score >
241 irq_adapt_high_thresh)) { 238 irq_adapt_high_thresh)) {
242 channel->irq_moderation = 239 if (channel->irq_moderation <
243 min(channel->irq_moderation + 240 efx->irq_rx_moderation) {
244 FALCON_IRQ_MOD_RESOLUTION, 241 channel->irq_moderation += 1;
245 efx->irq_rx_moderation); 242 falcon_set_int_moderation(channel);
243 }
246 } 244 }
247
248 if (channel->irq_moderation != old_irq_moderation)
249 falcon_set_int_moderation(channel);
250
251 channel->irq_count = 0; 245 channel->irq_count = 0;
252 channel->irq_mod_score = 0; 246 channel->irq_mod_score = 0;
253 } 247 }
@@ -290,7 +284,7 @@ void efx_process_channel_now(struct efx_channel *channel)
290 napi_disable(&channel->napi_str); 284 napi_disable(&channel->napi_str);
291 285
292 /* Poll the channel */ 286 /* Poll the channel */
293 efx_process_channel(channel, efx->type->evq_size); 287 efx_process_channel(channel, EFX_EVQ_SIZE);
294 288
295 /* Ack the eventq. This may cause an interrupt to be generated 289 /* Ack the eventq. This may cause an interrupt to be generated
296 * when they are reenabled */ 290 * when they are reenabled */
@@ -824,9 +818,8 @@ static int efx_init_io(struct efx_nic *efx)
824 goto fail2; 818 goto fail2;
825 } 819 }
826 820
827 efx->membase_phys = pci_resource_start(efx->pci_dev, 821 efx->membase_phys = pci_resource_start(efx->pci_dev, EFX_MEM_BAR);
828 efx->type->mem_bar); 822 rc = pci_request_region(pci_dev, EFX_MEM_BAR, "sfc");
829 rc = pci_request_region(pci_dev, efx->type->mem_bar, "sfc");
830 if (rc) { 823 if (rc) {
831 EFX_ERR(efx, "request for memory BAR failed\n"); 824 EFX_ERR(efx, "request for memory BAR failed\n");
832 rc = -EIO; 825 rc = -EIO;
@@ -835,21 +828,20 @@ static int efx_init_io(struct efx_nic *efx)
835 efx->membase = ioremap_nocache(efx->membase_phys, 828 efx->membase = ioremap_nocache(efx->membase_phys,
836 efx->type->mem_map_size); 829 efx->type->mem_map_size);
837 if (!efx->membase) { 830 if (!efx->membase) {
838 EFX_ERR(efx, "could not map memory BAR %d at %llx+%x\n", 831 EFX_ERR(efx, "could not map memory BAR at %llx+%x\n",
839 efx->type->mem_bar,
840 (unsigned long long)efx->membase_phys, 832 (unsigned long long)efx->membase_phys,
841 efx->type->mem_map_size); 833 efx->type->mem_map_size);
842 rc = -ENOMEM; 834 rc = -ENOMEM;
843 goto fail4; 835 goto fail4;
844 } 836 }
845 EFX_LOG(efx, "memory BAR %u at %llx+%x (virtual %p)\n", 837 EFX_LOG(efx, "memory BAR at %llx+%x (virtual %p)\n",
846 efx->type->mem_bar, (unsigned long long)efx->membase_phys, 838 (unsigned long long)efx->membase_phys,
847 efx->type->mem_map_size, efx->membase); 839 efx->type->mem_map_size, efx->membase);
848 840
849 return 0; 841 return 0;
850 842
851 fail4: 843 fail4:
852 pci_release_region(efx->pci_dev, efx->type->mem_bar); 844 pci_release_region(efx->pci_dev, EFX_MEM_BAR);
853 fail3: 845 fail3:
854 efx->membase_phys = 0; 846 efx->membase_phys = 0;
855 fail2: 847 fail2:
@@ -868,7 +860,7 @@ static void efx_fini_io(struct efx_nic *efx)
868 } 860 }
869 861
870 if (efx->membase_phys) { 862 if (efx->membase_phys) {
871 pci_release_region(efx->pci_dev, efx->type->mem_bar); 863 pci_release_region(efx->pci_dev, EFX_MEM_BAR);
872 efx->membase_phys = 0; 864 efx->membase_phys = 0;
873 } 865 }
874 866
@@ -1220,22 +1212,33 @@ void efx_flush_queues(struct efx_nic *efx)
1220 * 1212 *
1221 **************************************************************************/ 1213 **************************************************************************/
1222 1214
1215static unsigned irq_mod_ticks(int usecs, int resolution)
1216{
1217 if (usecs <= 0)
1218 return 0; /* cannot receive interrupts ahead of time :-) */
1219 if (usecs < resolution)
1220 return 1; /* never round down to 0 */
1221 return usecs / resolution;
1222}
1223
1223/* Set interrupt moderation parameters */ 1224/* Set interrupt moderation parameters */
1224void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs, 1225void efx_init_irq_moderation(struct efx_nic *efx, int tx_usecs, int rx_usecs,
1225 bool rx_adaptive) 1226 bool rx_adaptive)
1226{ 1227{
1227 struct efx_tx_queue *tx_queue; 1228 struct efx_tx_queue *tx_queue;
1228 struct efx_rx_queue *rx_queue; 1229 struct efx_rx_queue *rx_queue;
1230 unsigned tx_ticks = irq_mod_ticks(tx_usecs, FALCON_IRQ_MOD_RESOLUTION);
1231 unsigned rx_ticks = irq_mod_ticks(rx_usecs, FALCON_IRQ_MOD_RESOLUTION);
1229 1232
1230 EFX_ASSERT_RESET_SERIALISED(efx); 1233 EFX_ASSERT_RESET_SERIALISED(efx);
1231 1234
1232 efx_for_each_tx_queue(tx_queue, efx) 1235 efx_for_each_tx_queue(tx_queue, efx)
1233 tx_queue->channel->irq_moderation = tx_usecs; 1236 tx_queue->channel->irq_moderation = tx_ticks;
1234 1237
1235 efx->irq_rx_adaptive = rx_adaptive; 1238 efx->irq_rx_adaptive = rx_adaptive;
1236 efx->irq_rx_moderation = rx_usecs; 1239 efx->irq_rx_moderation = rx_ticks;
1237 efx_for_each_rx_queue(rx_queue, efx) 1240 efx_for_each_rx_queue(rx_queue, efx)
1238 rx_queue->channel->irq_moderation = rx_usecs; 1241 rx_queue->channel->irq_moderation = rx_ticks;
1239} 1242}
1240 1243
1241/************************************************************************** 1244/**************************************************************************
@@ -1981,17 +1984,9 @@ static int efx_init_struct(struct efx_nic *efx, struct efx_nic_type *type,
1981 1984
1982 efx->type = type; 1985 efx->type = type;
1983 1986
1984 /* Sanity-check NIC type */
1985 EFX_BUG_ON_PARANOID(efx->type->txd_ring_mask &
1986 (efx->type->txd_ring_mask + 1));
1987 EFX_BUG_ON_PARANOID(efx->type->rxd_ring_mask &
1988 (efx->type->rxd_ring_mask + 1));
1989 EFX_BUG_ON_PARANOID(efx->type->evq_size &
1990 (efx->type->evq_size - 1));
1991 /* As close as we can get to guaranteeing that we don't overflow */ 1987 /* As close as we can get to guaranteeing that we don't overflow */
1992 EFX_BUG_ON_PARANOID(efx->type->evq_size < 1988 BUILD_BUG_ON(EFX_EVQ_SIZE < EFX_TXQ_SIZE + EFX_RXQ_SIZE);
1993 (efx->type->txd_ring_mask + 1 + 1989
1994 efx->type->rxd_ring_mask + 1));
1995 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS); 1990 EFX_BUG_ON_PARANOID(efx->type->phys_addr_channels > EFX_MAX_CHANNELS);
1996 1991
1997 /* Higher numbered interrupt modes are less capable! */ 1992 /* Higher numbered interrupt modes are less capable! */
@@ -2027,18 +2022,12 @@ static void efx_fini_struct(struct efx_nic *efx)
2027 */ 2022 */
2028static void efx_pci_remove_main(struct efx_nic *efx) 2023static void efx_pci_remove_main(struct efx_nic *efx)
2029{ 2024{
2030 EFX_ASSERT_RESET_SERIALISED(efx); 2025 falcon_fini_interrupt(efx);
2031
2032 /* Skip everything if we never obtained a valid membase */
2033 if (!efx->membase)
2034 return;
2035
2036 efx_fini_channels(efx); 2026 efx_fini_channels(efx);
2037 efx_fini_port(efx); 2027 efx_fini_port(efx);
2038 2028
2039 /* Shutdown the board, then the NIC and board state */ 2029 /* Shutdown the board, then the NIC and board state */
2040 efx->board_info.fini(efx); 2030 efx->board_info.fini(efx);
2041 falcon_fini_interrupt(efx);
2042 2031
2043 efx_fini_napi(efx); 2032 efx_fini_napi(efx);
2044 efx_remove_all(efx); 2033 efx_remove_all(efx);
@@ -2063,9 +2052,6 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
2063 /* Allow any queued efx_resets() to complete */ 2052 /* Allow any queued efx_resets() to complete */
2064 rtnl_unlock(); 2053 rtnl_unlock();
2065 2054
2066 if (efx->membase == NULL)
2067 goto out;
2068
2069 efx_unregister_netdev(efx); 2055 efx_unregister_netdev(efx);
2070 2056
2071 efx_mtd_remove(efx); 2057 efx_mtd_remove(efx);
@@ -2078,7 +2064,6 @@ static void efx_pci_remove(struct pci_dev *pci_dev)
2078 2064
2079 efx_pci_remove_main(efx); 2065 efx_pci_remove_main(efx);
2080 2066
2081out:
2082 efx_fini_io(efx); 2067 efx_fini_io(efx);
2083 EFX_LOG(efx, "shutdown successful\n"); 2068 EFX_LOG(efx, "shutdown successful\n");
2084 2069
@@ -2224,13 +2209,15 @@ static int __devinit efx_pci_probe(struct pci_dev *pci_dev,
2224 * MAC stats succeeds. */ 2209 * MAC stats succeeds. */
2225 efx->state = STATE_RUNNING; 2210 efx->state = STATE_RUNNING;
2226 2211
2227 efx_mtd_probe(efx); /* allowed to fail */
2228
2229 rc = efx_register_netdev(efx); 2212 rc = efx_register_netdev(efx);
2230 if (rc) 2213 if (rc)
2231 goto fail5; 2214 goto fail5;
2232 2215
2233 EFX_LOG(efx, "initialisation successful\n"); 2216 EFX_LOG(efx, "initialisation successful\n");
2217
2218 rtnl_lock();
2219 efx_mtd_probe(efx); /* allowed to fail */
2220 rtnl_unlock();
2234 return 0; 2221 return 0;
2235 2222
2236 fail5: 2223 fail5:
diff --git a/drivers/net/sfc/efx.h b/drivers/net/sfc/efx.h
index aecaf62f4929..179e0e3b0ec6 100644
--- a/drivers/net/sfc/efx.h
+++ b/drivers/net/sfc/efx.h
@@ -19,22 +19,31 @@
19#define FALCON_A_S_DEVID 0x6703 19#define FALCON_A_S_DEVID 0x6703
20#define FALCON_B_P_DEVID 0x0710 20#define FALCON_B_P_DEVID 0x0710
21 21
22/* Solarstorm controllers use BAR 0 for I/O space and BAR 2(&3) for memory */
23#define EFX_MEM_BAR 2
24
22/* TX */ 25/* TX */
23extern netdev_tx_t efx_xmit(struct efx_nic *efx, 26extern netdev_tx_t efx_xmit(struct efx_nic *efx,
24 struct efx_tx_queue *tx_queue, 27 struct efx_tx_queue *tx_queue,
25 struct sk_buff *skb); 28 struct sk_buff *skb);
29extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
26extern void efx_stop_queue(struct efx_nic *efx); 30extern void efx_stop_queue(struct efx_nic *efx);
27extern void efx_wake_queue(struct efx_nic *efx); 31extern void efx_wake_queue(struct efx_nic *efx);
32#define EFX_TXQ_SIZE 1024
33#define EFX_TXQ_MASK (EFX_TXQ_SIZE - 1)
28 34
29/* RX */ 35/* RX */
30extern void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index);
31extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 36extern void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
32 unsigned int len, bool checksummed, bool discard); 37 unsigned int len, bool checksummed, bool discard);
33extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay); 38extern void efx_schedule_slow_fill(struct efx_rx_queue *rx_queue, int delay);
39#define EFX_RXQ_SIZE 1024
40#define EFX_RXQ_MASK (EFX_RXQ_SIZE - 1)
34 41
35/* Channels */ 42/* Channels */
36extern void efx_process_channel_now(struct efx_channel *channel); 43extern void efx_process_channel_now(struct efx_channel *channel);
37extern void efx_flush_queues(struct efx_nic *efx); 44extern void efx_flush_queues(struct efx_nic *efx);
45#define EFX_EVQ_SIZE 4096
46#define EFX_EVQ_MASK (EFX_EVQ_SIZE - 1)
38 47
39/* Ports */ 48/* Ports */
40extern void efx_stats_disable(struct efx_nic *efx); 49extern void efx_stats_disable(struct efx_nic *efx);
diff --git a/drivers/net/sfc/ethtool.c b/drivers/net/sfc/ethtool.c
index 45018f283ffa..a313b61c8ff4 100644
--- a/drivers/net/sfc/ethtool.c
+++ b/drivers/net/sfc/ethtool.c
@@ -618,6 +618,9 @@ static int efx_ethtool_get_coalesce(struct net_device *net_dev,
618 coalesce->use_adaptive_rx_coalesce = efx->irq_rx_adaptive; 618 coalesce->use_adaptive_rx_coalesce = efx->irq_rx_adaptive;
619 coalesce->rx_coalesce_usecs_irq = efx->irq_rx_moderation; 619 coalesce->rx_coalesce_usecs_irq = efx->irq_rx_moderation;
620 620
621 coalesce->tx_coalesce_usecs_irq *= FALCON_IRQ_MOD_RESOLUTION;
622 coalesce->rx_coalesce_usecs_irq *= FALCON_IRQ_MOD_RESOLUTION;
623
621 return 0; 624 return 0;
622} 625}
623 626
@@ -656,11 +659,6 @@ static int efx_ethtool_set_coalesce(struct net_device *net_dev,
656 } 659 }
657 660
658 efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive); 661 efx_init_irq_moderation(efx, tx_usecs, rx_usecs, adaptive);
659
660 /* Reset channel to pick up new moderation value. Note that
661 * this may change the value of the irq_moderation field
662 * (e.g. to allow for hardware timer granularity).
663 */
664 efx_for_each_channel(channel, efx) 662 efx_for_each_channel(channel, efx)
665 falcon_set_int_moderation(channel); 663 falcon_set_int_moderation(channel);
666 664
diff --git a/drivers/net/sfc/falcon.c b/drivers/net/sfc/falcon.c
index c049364aec46..865638b035bf 100644
--- a/drivers/net/sfc/falcon.c
+++ b/drivers/net/sfc/falcon.c
@@ -22,11 +22,10 @@
22#include "mac.h" 22#include "mac.h"
23#include "spi.h" 23#include "spi.h"
24#include "falcon.h" 24#include "falcon.h"
25#include "falcon_hwdefs.h" 25#include "regs.h"
26#include "falcon_io.h" 26#include "io.h"
27#include "mdio_10g.h" 27#include "mdio_10g.h"
28#include "phy.h" 28#include "phy.h"
29#include "boards.h"
30#include "workarounds.h" 29#include "workarounds.h"
31 30
32/* Falcon hardware control. 31/* Falcon hardware control.
@@ -36,19 +35,12 @@
36 35
37/** 36/**
38 * struct falcon_nic_data - Falcon NIC state 37 * struct falcon_nic_data - Falcon NIC state
39 * @next_buffer_table: First available buffer table id
40 * @pci_dev2: The secondary PCI device if present 38 * @pci_dev2: The secondary PCI device if present
41 * @i2c_data: Operations and state for I2C bit-bashing algorithm 39 * @i2c_data: Operations and state for I2C bit-bashing algorithm
42 * @int_error_count: Number of internal errors seen recently
43 * @int_error_expire: Time at which error count will be expired
44 */ 40 */
45struct falcon_nic_data { 41struct falcon_nic_data {
46 unsigned next_buffer_table;
47 struct pci_dev *pci_dev2; 42 struct pci_dev *pci_dev2;
48 struct i2c_algo_bit_data i2c_data; 43 struct i2c_algo_bit_data i2c_data;
49
50 unsigned int_error_count;
51 unsigned long int_error_expire;
52}; 44};
53 45
54/************************************************************************** 46/**************************************************************************
@@ -109,21 +101,6 @@ static int rx_xon_thresh_bytes = -1;
109module_param(rx_xon_thresh_bytes, int, 0644); 101module_param(rx_xon_thresh_bytes, int, 0644);
110MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold"); 102MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
111 103
112/* TX descriptor ring size - min 512 max 4k */
113#define FALCON_TXD_RING_ORDER TX_DESCQ_SIZE_1K
114#define FALCON_TXD_RING_SIZE 1024
115#define FALCON_TXD_RING_MASK (FALCON_TXD_RING_SIZE - 1)
116
117/* RX descriptor ring size - min 512 max 4k */
118#define FALCON_RXD_RING_ORDER RX_DESCQ_SIZE_1K
119#define FALCON_RXD_RING_SIZE 1024
120#define FALCON_RXD_RING_MASK (FALCON_RXD_RING_SIZE - 1)
121
122/* Event queue size - max 32k */
123#define FALCON_EVQ_ORDER EVQ_SIZE_4K
124#define FALCON_EVQ_SIZE 4096
125#define FALCON_EVQ_MASK (FALCON_EVQ_SIZE - 1)
126
127/* If FALCON_MAX_INT_ERRORS internal errors occur within 104/* If FALCON_MAX_INT_ERRORS internal errors occur within
128 * FALCON_INT_ERROR_EXPIRE seconds, we consider the NIC broken and 105 * FALCON_INT_ERROR_EXPIRE seconds, we consider the NIC broken and
129 * disable it. 106 * disable it.
@@ -143,12 +120,6 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
143 ************************************************************************** 120 **************************************************************************
144 */ 121 */
145 122
146/* DMA address mask */
147#define FALCON_DMA_MASK DMA_BIT_MASK(46)
148
149/* TX DMA length mask (13-bit) */
150#define FALCON_TX_DMA_MASK (4096 - 1)
151
152/* Size and alignment of special buffers (4KB) */ 123/* Size and alignment of special buffers (4KB) */
153#define FALCON_BUF_SIZE 4096 124#define FALCON_BUF_SIZE 4096
154 125
@@ -164,6 +135,13 @@ MODULE_PARM_DESC(rx_xon_thresh_bytes, "RX fifo XON threshold");
164 * 135 *
165 **************************************************************************/ 136 **************************************************************************/
166 137
138static inline void falcon_write_buf_tbl(struct efx_nic *efx, efx_qword_t *value,
139 unsigned int index)
140{
141 efx_sram_writeq(efx, efx->membase + efx->type->buf_tbl_base,
142 value, index);
143}
144
167/* Read the current event from the event queue */ 145/* Read the current event from the event queue */
168static inline efx_qword_t *falcon_event(struct efx_channel *channel, 146static inline efx_qword_t *falcon_event(struct efx_channel *channel,
169 unsigned int index) 147 unsigned int index)
@@ -200,9 +178,9 @@ static void falcon_setsda(void *data, int state)
200 struct efx_nic *efx = (struct efx_nic *)data; 178 struct efx_nic *efx = (struct efx_nic *)data;
201 efx_oword_t reg; 179 efx_oword_t reg;
202 180
203 falcon_read(efx, &reg, GPIO_CTL_REG_KER); 181 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
204 EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, !state); 182 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN, !state);
205 falcon_write(efx, &reg, GPIO_CTL_REG_KER); 183 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
206} 184}
207 185
208static void falcon_setscl(void *data, int state) 186static void falcon_setscl(void *data, int state)
@@ -210,9 +188,9 @@ static void falcon_setscl(void *data, int state)
210 struct efx_nic *efx = (struct efx_nic *)data; 188 struct efx_nic *efx = (struct efx_nic *)data;
211 efx_oword_t reg; 189 efx_oword_t reg;
212 190
213 falcon_read(efx, &reg, GPIO_CTL_REG_KER); 191 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
214 EFX_SET_OWORD_FIELD(reg, GPIO0_OEN, !state); 192 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO0_OEN, !state);
215 falcon_write(efx, &reg, GPIO_CTL_REG_KER); 193 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
216} 194}
217 195
218static int falcon_getsda(void *data) 196static int falcon_getsda(void *data)
@@ -220,8 +198,8 @@ static int falcon_getsda(void *data)
220 struct efx_nic *efx = (struct efx_nic *)data; 198 struct efx_nic *efx = (struct efx_nic *)data;
221 efx_oword_t reg; 199 efx_oword_t reg;
222 200
223 falcon_read(efx, &reg, GPIO_CTL_REG_KER); 201 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
224 return EFX_OWORD_FIELD(reg, GPIO3_IN); 202 return EFX_OWORD_FIELD(reg, FRF_AB_GPIO3_IN);
225} 203}
226 204
227static int falcon_getscl(void *data) 205static int falcon_getscl(void *data)
@@ -229,8 +207,8 @@ static int falcon_getscl(void *data)
229 struct efx_nic *efx = (struct efx_nic *)data; 207 struct efx_nic *efx = (struct efx_nic *)data;
230 efx_oword_t reg; 208 efx_oword_t reg;
231 209
232 falcon_read(efx, &reg, GPIO_CTL_REG_KER); 210 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
233 return EFX_OWORD_FIELD(reg, GPIO0_IN); 211 return EFX_OWORD_FIELD(reg, FRF_AB_GPIO0_IN);
234} 212}
235 213
236static struct i2c_algo_bit_data falcon_i2c_bit_operations = { 214static struct i2c_algo_bit_data falcon_i2c_bit_operations = {
@@ -275,12 +253,11 @@ falcon_init_special_buffer(struct efx_nic *efx,
275 dma_addr = buffer->dma_addr + (i * 4096); 253 dma_addr = buffer->dma_addr + (i * 4096);
276 EFX_LOG(efx, "mapping special buffer %d at %llx\n", 254 EFX_LOG(efx, "mapping special buffer %d at %llx\n",
277 index, (unsigned long long)dma_addr); 255 index, (unsigned long long)dma_addr);
278 EFX_POPULATE_QWORD_4(buf_desc, 256 EFX_POPULATE_QWORD_3(buf_desc,
279 IP_DAT_BUF_SIZE, IP_DAT_BUF_SIZE_4K, 257 FRF_AZ_BUF_ADR_REGION, 0,
280 BUF_ADR_REGION, 0, 258 FRF_AZ_BUF_ADR_FBUF, dma_addr >> 12,
281 BUF_ADR_FBUF, (dma_addr >> 12), 259 FRF_AZ_BUF_OWNER_ID_FBUF, 0);
282 BUF_OWNER_ID_FBUF, 0); 260 falcon_write_buf_tbl(efx, &buf_desc, index);
283 falcon_write_sram(efx, &buf_desc, index);
284 } 261 }
285} 262}
286 263
@@ -300,11 +277,11 @@ falcon_fini_special_buffer(struct efx_nic *efx,
300 buffer->index, buffer->index + buffer->entries - 1); 277 buffer->index, buffer->index + buffer->entries - 1);
301 278
302 EFX_POPULATE_OWORD_4(buf_tbl_upd, 279 EFX_POPULATE_OWORD_4(buf_tbl_upd,
303 BUF_UPD_CMD, 0, 280 FRF_AZ_BUF_UPD_CMD, 0,
304 BUF_CLR_CMD, 1, 281 FRF_AZ_BUF_CLR_CMD, 1,
305 BUF_CLR_END_ID, end, 282 FRF_AZ_BUF_CLR_END_ID, end,
306 BUF_CLR_START_ID, start); 283 FRF_AZ_BUF_CLR_START_ID, start);
307 falcon_write(efx, &buf_tbl_upd, BUF_TBL_UPD_REG_KER); 284 efx_writeo(efx, &buf_tbl_upd, FR_AZ_BUF_TBL_UPD);
308} 285}
309 286
310/* 287/*
@@ -320,8 +297,6 @@ static int falcon_alloc_special_buffer(struct efx_nic *efx,
320 struct efx_special_buffer *buffer, 297 struct efx_special_buffer *buffer,
321 unsigned int len) 298 unsigned int len)
322{ 299{
323 struct falcon_nic_data *nic_data = efx->nic_data;
324
325 len = ALIGN(len, FALCON_BUF_SIZE); 300 len = ALIGN(len, FALCON_BUF_SIZE);
326 301
327 buffer->addr = pci_alloc_consistent(efx->pci_dev, len, 302 buffer->addr = pci_alloc_consistent(efx->pci_dev, len,
@@ -336,8 +311,8 @@ static int falcon_alloc_special_buffer(struct efx_nic *efx,
336 memset(buffer->addr, 0xff, len); 311 memset(buffer->addr, 0xff, len);
337 312
338 /* Select new buffer ID */ 313 /* Select new buffer ID */
339 buffer->index = nic_data->next_buffer_table; 314 buffer->index = efx->next_buffer_table;
340 nic_data->next_buffer_table += buffer->entries; 315 efx->next_buffer_table += buffer->entries;
341 316
342 EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x " 317 EFX_LOG(efx, "allocating special buffers %d-%d at %llx+%x "
343 "(virt %p phys %llx)\n", buffer->index, 318 "(virt %p phys %llx)\n", buffer->index,
@@ -415,10 +390,10 @@ static inline void falcon_notify_tx_desc(struct efx_tx_queue *tx_queue)
415 unsigned write_ptr; 390 unsigned write_ptr;
416 efx_dword_t reg; 391 efx_dword_t reg;
417 392
418 write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK; 393 write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
419 EFX_POPULATE_DWORD_1(reg, TX_DESC_WPTR_DWORD, write_ptr); 394 EFX_POPULATE_DWORD_1(reg, FRF_AZ_TX_DESC_WPTR_DWORD, write_ptr);
420 falcon_writel_page(tx_queue->efx, &reg, 395 efx_writed_page(tx_queue->efx, &reg,
421 TX_DESC_UPD_REG_KER_DWORD, tx_queue->queue); 396 FR_AZ_TX_DESC_UPD_DWORD_P0, tx_queue->queue);
422} 397}
423 398
424 399
@@ -436,18 +411,17 @@ void falcon_push_buffers(struct efx_tx_queue *tx_queue)
436 BUG_ON(tx_queue->write_count == tx_queue->insert_count); 411 BUG_ON(tx_queue->write_count == tx_queue->insert_count);
437 412
438 do { 413 do {
439 write_ptr = tx_queue->write_count & FALCON_TXD_RING_MASK; 414 write_ptr = tx_queue->write_count & EFX_TXQ_MASK;
440 buffer = &tx_queue->buffer[write_ptr]; 415 buffer = &tx_queue->buffer[write_ptr];
441 txd = falcon_tx_desc(tx_queue, write_ptr); 416 txd = falcon_tx_desc(tx_queue, write_ptr);
442 ++tx_queue->write_count; 417 ++tx_queue->write_count;
443 418
444 /* Create TX descriptor ring entry */ 419 /* Create TX descriptor ring entry */
445 EFX_POPULATE_QWORD_5(*txd, 420 EFX_POPULATE_QWORD_4(*txd,
446 TX_KER_PORT, 0, 421 FSF_AZ_TX_KER_CONT, buffer->continuation,
447 TX_KER_CONT, buffer->continuation, 422 FSF_AZ_TX_KER_BYTE_COUNT, buffer->len,
448 TX_KER_BYTE_CNT, buffer->len, 423 FSF_AZ_TX_KER_BUF_REGION, 0,
449 TX_KER_BUF_REGION, 0, 424 FSF_AZ_TX_KER_BUF_ADDR, buffer->dma_addr);
450 TX_KER_BUF_ADR, buffer->dma_addr);
451 } while (tx_queue->write_count != tx_queue->insert_count); 425 } while (tx_queue->write_count != tx_queue->insert_count);
452 426
453 wmb(); /* Ensure descriptors are written before they are fetched */ 427 wmb(); /* Ensure descriptors are written before they are fetched */
@@ -458,9 +432,10 @@ void falcon_push_buffers(struct efx_tx_queue *tx_queue)
458int falcon_probe_tx(struct efx_tx_queue *tx_queue) 432int falcon_probe_tx(struct efx_tx_queue *tx_queue)
459{ 433{
460 struct efx_nic *efx = tx_queue->efx; 434 struct efx_nic *efx = tx_queue->efx;
435 BUILD_BUG_ON(EFX_TXQ_SIZE < 512 || EFX_TXQ_SIZE > 4096 ||
436 EFX_TXQ_SIZE & EFX_TXQ_MASK);
461 return falcon_alloc_special_buffer(efx, &tx_queue->txd, 437 return falcon_alloc_special_buffer(efx, &tx_queue->txd,
462 FALCON_TXD_RING_SIZE * 438 EFX_TXQ_SIZE * sizeof(efx_qword_t));
463 sizeof(efx_qword_t));
464} 439}
465 440
466void falcon_init_tx(struct efx_tx_queue *tx_queue) 441void falcon_init_tx(struct efx_tx_queue *tx_queue)
@@ -475,25 +450,28 @@ void falcon_init_tx(struct efx_tx_queue *tx_queue)
475 450
476 /* Push TX descriptor ring to card */ 451 /* Push TX descriptor ring to card */
477 EFX_POPULATE_OWORD_10(tx_desc_ptr, 452 EFX_POPULATE_OWORD_10(tx_desc_ptr,
478 TX_DESCQ_EN, 1, 453 FRF_AZ_TX_DESCQ_EN, 1,
479 TX_ISCSI_DDIG_EN, 0, 454 FRF_AZ_TX_ISCSI_DDIG_EN, 0,
480 TX_ISCSI_HDIG_EN, 0, 455 FRF_AZ_TX_ISCSI_HDIG_EN, 0,
481 TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index, 456 FRF_AZ_TX_DESCQ_BUF_BASE_ID, tx_queue->txd.index,
482 TX_DESCQ_EVQ_ID, tx_queue->channel->channel, 457 FRF_AZ_TX_DESCQ_EVQ_ID,
483 TX_DESCQ_OWNER_ID, 0, 458 tx_queue->channel->channel,
484 TX_DESCQ_LABEL, tx_queue->queue, 459 FRF_AZ_TX_DESCQ_OWNER_ID, 0,
485 TX_DESCQ_SIZE, FALCON_TXD_RING_ORDER, 460 FRF_AZ_TX_DESCQ_LABEL, tx_queue->queue,
486 TX_DESCQ_TYPE, 0, 461 FRF_AZ_TX_DESCQ_SIZE,
487 TX_NON_IP_DROP_DIS_B0, 1); 462 __ffs(tx_queue->txd.entries),
463 FRF_AZ_TX_DESCQ_TYPE, 0,
464 FRF_BZ_TX_NON_IP_DROP_DIS, 1);
488 465
489 if (falcon_rev(efx) >= FALCON_REV_B0) { 466 if (falcon_rev(efx) >= FALCON_REV_B0) {
490 int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM; 467 int csum = tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM;
491 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_IP_CHKSM_DIS_B0, !csum); 468 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_IP_CHKSM_DIS, !csum);
492 EFX_SET_OWORD_FIELD(tx_desc_ptr, TX_TCP_CHKSM_DIS_B0, !csum); 469 EFX_SET_OWORD_FIELD(tx_desc_ptr, FRF_BZ_TX_TCP_CHKSM_DIS,
470 !csum);
493 } 471 }
494 472
495 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 473 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
496 tx_queue->queue); 474 tx_queue->queue);
497 475
498 if (falcon_rev(efx) < FALCON_REV_B0) { 476 if (falcon_rev(efx) < FALCON_REV_B0) {
499 efx_oword_t reg; 477 efx_oword_t reg;
@@ -501,12 +479,12 @@ void falcon_init_tx(struct efx_tx_queue *tx_queue)
501 /* Only 128 bits in this register */ 479 /* Only 128 bits in this register */
502 BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128); 480 BUILD_BUG_ON(EFX_TX_QUEUE_COUNT >= 128);
503 481
504 falcon_read(efx, &reg, TX_CHKSM_CFG_REG_KER_A1); 482 efx_reado(efx, &reg, FR_AA_TX_CHKSM_CFG);
505 if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM) 483 if (tx_queue->queue == EFX_TX_QUEUE_OFFLOAD_CSUM)
506 clear_bit_le(tx_queue->queue, (void *)&reg); 484 clear_bit_le(tx_queue->queue, (void *)&reg);
507 else 485 else
508 set_bit_le(tx_queue->queue, (void *)&reg); 486 set_bit_le(tx_queue->queue, (void *)&reg);
509 falcon_write(efx, &reg, TX_CHKSM_CFG_REG_KER_A1); 487 efx_writeo(efx, &reg, FR_AA_TX_CHKSM_CFG);
510 } 488 }
511} 489}
512 490
@@ -517,9 +495,9 @@ static void falcon_flush_tx_queue(struct efx_tx_queue *tx_queue)
517 495
518 /* Post a flush command */ 496 /* Post a flush command */
519 EFX_POPULATE_OWORD_2(tx_flush_descq, 497 EFX_POPULATE_OWORD_2(tx_flush_descq,
520 TX_FLUSH_DESCQ_CMD, 1, 498 FRF_AZ_TX_FLUSH_DESCQ_CMD, 1,
521 TX_FLUSH_DESCQ, tx_queue->queue); 499 FRF_AZ_TX_FLUSH_DESCQ, tx_queue->queue);
522 falcon_write(efx, &tx_flush_descq, TX_FLUSH_DESCQ_REG_KER); 500 efx_writeo(efx, &tx_flush_descq, FR_AZ_TX_FLUSH_DESCQ);
523} 501}
524 502
525void falcon_fini_tx(struct efx_tx_queue *tx_queue) 503void falcon_fini_tx(struct efx_tx_queue *tx_queue)
@@ -532,8 +510,8 @@ void falcon_fini_tx(struct efx_tx_queue *tx_queue)
532 510
533 /* Remove TX descriptor ring from card */ 511 /* Remove TX descriptor ring from card */
534 EFX_ZERO_OWORD(tx_desc_ptr); 512 EFX_ZERO_OWORD(tx_desc_ptr);
535 falcon_write_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base, 513 efx_writeo_table(efx, &tx_desc_ptr, efx->type->txd_ptr_tbl_base,
536 tx_queue->queue); 514 tx_queue->queue);
537 515
538 /* Unpin TX descriptor ring */ 516 /* Unpin TX descriptor ring */
539 falcon_fini_special_buffer(efx, &tx_queue->txd); 517 falcon_fini_special_buffer(efx, &tx_queue->txd);
@@ -568,11 +546,11 @@ static inline void falcon_build_rx_desc(struct efx_rx_queue *rx_queue,
568 rxd = falcon_rx_desc(rx_queue, index); 546 rxd = falcon_rx_desc(rx_queue, index);
569 rx_buf = efx_rx_buffer(rx_queue, index); 547 rx_buf = efx_rx_buffer(rx_queue, index);
570 EFX_POPULATE_QWORD_3(*rxd, 548 EFX_POPULATE_QWORD_3(*rxd,
571 RX_KER_BUF_SIZE, 549 FSF_AZ_RX_KER_BUF_SIZE,
572 rx_buf->len - 550 rx_buf->len -
573 rx_queue->efx->type->rx_buffer_padding, 551 rx_queue->efx->type->rx_buffer_padding,
574 RX_KER_BUF_REGION, 0, 552 FSF_AZ_RX_KER_BUF_REGION, 0,
575 RX_KER_BUF_ADR, rx_buf->dma_addr); 553 FSF_AZ_RX_KER_BUF_ADDR, rx_buf->dma_addr);
576} 554}
577 555
578/* This writes to the RX_DESC_WPTR register for the specified receive 556/* This writes to the RX_DESC_WPTR register for the specified receive
@@ -586,23 +564,24 @@ void falcon_notify_rx_desc(struct efx_rx_queue *rx_queue)
586 while (rx_queue->notified_count != rx_queue->added_count) { 564 while (rx_queue->notified_count != rx_queue->added_count) {
587 falcon_build_rx_desc(rx_queue, 565 falcon_build_rx_desc(rx_queue,
588 rx_queue->notified_count & 566 rx_queue->notified_count &
589 FALCON_RXD_RING_MASK); 567 EFX_RXQ_MASK);
590 ++rx_queue->notified_count; 568 ++rx_queue->notified_count;
591 } 569 }
592 570
593 wmb(); 571 wmb();
594 write_ptr = rx_queue->added_count & FALCON_RXD_RING_MASK; 572 write_ptr = rx_queue->added_count & EFX_RXQ_MASK;
595 EFX_POPULATE_DWORD_1(reg, RX_DESC_WPTR_DWORD, write_ptr); 573 EFX_POPULATE_DWORD_1(reg, FRF_AZ_RX_DESC_WPTR_DWORD, write_ptr);
596 falcon_writel_page(rx_queue->efx, &reg, 574 efx_writed_page(rx_queue->efx, &reg,
597 RX_DESC_UPD_REG_KER_DWORD, rx_queue->queue); 575 FR_AZ_RX_DESC_UPD_DWORD_P0, rx_queue->queue);
598} 576}
599 577
600int falcon_probe_rx(struct efx_rx_queue *rx_queue) 578int falcon_probe_rx(struct efx_rx_queue *rx_queue)
601{ 579{
602 struct efx_nic *efx = rx_queue->efx; 580 struct efx_nic *efx = rx_queue->efx;
581 BUILD_BUG_ON(EFX_RXQ_SIZE < 512 || EFX_RXQ_SIZE > 4096 ||
582 EFX_RXQ_SIZE & EFX_RXQ_MASK);
603 return falcon_alloc_special_buffer(efx, &rx_queue->rxd, 583 return falcon_alloc_special_buffer(efx, &rx_queue->rxd,
604 FALCON_RXD_RING_SIZE * 584 EFX_RXQ_SIZE * sizeof(efx_qword_t));
605 sizeof(efx_qword_t));
606} 585}
607 586
608void falcon_init_rx(struct efx_rx_queue *rx_queue) 587void falcon_init_rx(struct efx_rx_queue *rx_queue)
@@ -623,19 +602,21 @@ void falcon_init_rx(struct efx_rx_queue *rx_queue)
623 602
624 /* Push RX descriptor ring to card */ 603 /* Push RX descriptor ring to card */
625 EFX_POPULATE_OWORD_10(rx_desc_ptr, 604 EFX_POPULATE_OWORD_10(rx_desc_ptr,
626 RX_ISCSI_DDIG_EN, iscsi_digest_en, 605 FRF_AZ_RX_ISCSI_DDIG_EN, iscsi_digest_en,
627 RX_ISCSI_HDIG_EN, iscsi_digest_en, 606 FRF_AZ_RX_ISCSI_HDIG_EN, iscsi_digest_en,
628 RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index, 607 FRF_AZ_RX_DESCQ_BUF_BASE_ID, rx_queue->rxd.index,
629 RX_DESCQ_EVQ_ID, rx_queue->channel->channel, 608 FRF_AZ_RX_DESCQ_EVQ_ID,
630 RX_DESCQ_OWNER_ID, 0, 609 rx_queue->channel->channel,
631 RX_DESCQ_LABEL, rx_queue->queue, 610 FRF_AZ_RX_DESCQ_OWNER_ID, 0,
632 RX_DESCQ_SIZE, FALCON_RXD_RING_ORDER, 611 FRF_AZ_RX_DESCQ_LABEL, rx_queue->queue,
633 RX_DESCQ_TYPE, 0 /* kernel queue */ , 612 FRF_AZ_RX_DESCQ_SIZE,
613 __ffs(rx_queue->rxd.entries),
614 FRF_AZ_RX_DESCQ_TYPE, 0 /* kernel queue */ ,
634 /* For >=B0 this is scatter so disable */ 615 /* For >=B0 this is scatter so disable */
635 RX_DESCQ_JUMBO, !is_b0, 616 FRF_AZ_RX_DESCQ_JUMBO, !is_b0,
636 RX_DESCQ_EN, 1); 617 FRF_AZ_RX_DESCQ_EN, 1);
637 falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 618 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
638 rx_queue->queue); 619 rx_queue->queue);
639} 620}
640 621
641static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue) 622static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
@@ -645,9 +626,9 @@ static void falcon_flush_rx_queue(struct efx_rx_queue *rx_queue)
645 626
646 /* Post a flush command */ 627 /* Post a flush command */
647 EFX_POPULATE_OWORD_2(rx_flush_descq, 628 EFX_POPULATE_OWORD_2(rx_flush_descq,
648 RX_FLUSH_DESCQ_CMD, 1, 629 FRF_AZ_RX_FLUSH_DESCQ_CMD, 1,
649 RX_FLUSH_DESCQ, rx_queue->queue); 630 FRF_AZ_RX_FLUSH_DESCQ, rx_queue->queue);
650 falcon_write(efx, &rx_flush_descq, RX_FLUSH_DESCQ_REG_KER); 631 efx_writeo(efx, &rx_flush_descq, FR_AZ_RX_FLUSH_DESCQ);
651} 632}
652 633
653void falcon_fini_rx(struct efx_rx_queue *rx_queue) 634void falcon_fini_rx(struct efx_rx_queue *rx_queue)
@@ -660,8 +641,8 @@ void falcon_fini_rx(struct efx_rx_queue *rx_queue)
660 641
661 /* Remove RX descriptor ring from card */ 642 /* Remove RX descriptor ring from card */
662 EFX_ZERO_OWORD(rx_desc_ptr); 643 EFX_ZERO_OWORD(rx_desc_ptr);
663 falcon_write_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base, 644 efx_writeo_table(efx, &rx_desc_ptr, efx->type->rxd_ptr_tbl_base,
664 rx_queue->queue); 645 rx_queue->queue);
665 646
666 /* Unpin RX descriptor ring */ 647 /* Unpin RX descriptor ring */
667 falcon_fini_special_buffer(efx, &rx_queue->rxd); 648 falcon_fini_special_buffer(efx, &rx_queue->rxd);
@@ -694,8 +675,8 @@ void falcon_eventq_read_ack(struct efx_channel *channel)
694 efx_dword_t reg; 675 efx_dword_t reg;
695 struct efx_nic *efx = channel->efx; 676 struct efx_nic *efx = channel->efx;
696 677
697 EFX_POPULATE_DWORD_1(reg, EVQ_RPTR_DWORD, channel->eventq_read_ptr); 678 EFX_POPULATE_DWORD_1(reg, FRF_AZ_EVQ_RPTR, channel->eventq_read_ptr);
698 falcon_writel_table(efx, &reg, efx->type->evq_rptr_tbl_base, 679 efx_writed_table(efx, &reg, efx->type->evq_rptr_tbl_base,
699 channel->channel); 680 channel->channel);
700} 681}
701 682
@@ -704,11 +685,14 @@ void falcon_generate_event(struct efx_channel *channel, efx_qword_t *event)
704{ 685{
705 efx_oword_t drv_ev_reg; 686 efx_oword_t drv_ev_reg;
706 687
707 EFX_POPULATE_OWORD_2(drv_ev_reg, 688 BUILD_BUG_ON(FRF_AZ_DRV_EV_DATA_LBN != 0 ||
708 DRV_EV_QID, channel->channel, 689 FRF_AZ_DRV_EV_DATA_WIDTH != 64);
709 DRV_EV_DATA, 690 drv_ev_reg.u32[0] = event->u32[0];
710 EFX_QWORD_FIELD64(*event, WHOLE_EVENT)); 691 drv_ev_reg.u32[1] = event->u32[1];
711 falcon_write(channel->efx, &drv_ev_reg, DRV_EV_REG_KER); 692 drv_ev_reg.u32[2] = 0;
693 drv_ev_reg.u32[3] = 0;
694 EFX_SET_OWORD_FIELD(drv_ev_reg, FRF_AZ_DRV_EV_QID, channel->channel);
695 efx_writeo(channel->efx, &drv_ev_reg, FR_AZ_DRV_EV);
712} 696}
713 697
714/* Handle a transmit completion event 698/* Handle a transmit completion event
@@ -724,18 +708,18 @@ static void falcon_handle_tx_event(struct efx_channel *channel,
724 struct efx_tx_queue *tx_queue; 708 struct efx_tx_queue *tx_queue;
725 struct efx_nic *efx = channel->efx; 709 struct efx_nic *efx = channel->efx;
726 710
727 if (likely(EFX_QWORD_FIELD(*event, TX_EV_COMP))) { 711 if (likely(EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_COMP))) {
728 /* Transmit completion */ 712 /* Transmit completion */
729 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, TX_EV_DESC_PTR); 713 tx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_DESC_PTR);
730 tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); 714 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
731 tx_queue = &efx->tx_queue[tx_ev_q_label]; 715 tx_queue = &efx->tx_queue[tx_ev_q_label];
732 channel->irq_mod_score += 716 channel->irq_mod_score +=
733 (tx_ev_desc_ptr - tx_queue->read_count) & 717 (tx_ev_desc_ptr - tx_queue->read_count) &
734 efx->type->txd_ring_mask; 718 EFX_TXQ_MASK;
735 efx_xmit_done(tx_queue, tx_ev_desc_ptr); 719 efx_xmit_done(tx_queue, tx_ev_desc_ptr);
736 } else if (EFX_QWORD_FIELD(*event, TX_EV_WQ_FF_FULL)) { 720 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_WQ_FF_FULL)) {
737 /* Rewrite the FIFO write pointer */ 721 /* Rewrite the FIFO write pointer */
738 tx_ev_q_label = EFX_QWORD_FIELD(*event, TX_EV_Q_LABEL); 722 tx_ev_q_label = EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_Q_LABEL);
739 tx_queue = &efx->tx_queue[tx_ev_q_label]; 723 tx_queue = &efx->tx_queue[tx_ev_q_label];
740 724
741 if (efx_dev_registered(efx)) 725 if (efx_dev_registered(efx))
@@ -743,7 +727,7 @@ static void falcon_handle_tx_event(struct efx_channel *channel,
743 falcon_notify_tx_desc(tx_queue); 727 falcon_notify_tx_desc(tx_queue);
744 if (efx_dev_registered(efx)) 728 if (efx_dev_registered(efx))
745 netif_tx_unlock(efx->net_dev); 729 netif_tx_unlock(efx->net_dev);
746 } else if (EFX_QWORD_FIELD(*event, TX_EV_PKT_ERR) && 730 } else if (EFX_QWORD_FIELD(*event, FSF_AZ_TX_EV_PKT_ERR) &&
747 EFX_WORKAROUND_10727(efx)) { 731 EFX_WORKAROUND_10727(efx)) {
748 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 732 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
749 } else { 733 } else {
@@ -767,22 +751,22 @@ static void falcon_handle_rx_not_ok(struct efx_rx_queue *rx_queue,
767 bool rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt; 751 bool rx_ev_ip_frag_err, rx_ev_hdr_type, rx_ev_mcast_pkt;
768 unsigned rx_ev_pkt_type; 752 unsigned rx_ev_pkt_type;
769 753
770 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE); 754 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
771 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT); 755 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
772 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, RX_EV_TOBE_DISC); 756 rx_ev_tobe_disc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_TOBE_DISC);
773 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, RX_EV_PKT_TYPE); 757 rx_ev_pkt_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_TYPE);
774 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event, 758 rx_ev_buf_owner_id_err = EFX_QWORD_FIELD(*event,
775 RX_EV_BUF_OWNER_ID_ERR); 759 FSF_AZ_RX_EV_BUF_OWNER_ID_ERR);
776 rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, RX_EV_IF_FRAG_ERR); 760 rx_ev_ip_frag_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_IP_FRAG_ERR);
777 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event, 761 rx_ev_ip_hdr_chksum_err = EFX_QWORD_FIELD(*event,
778 RX_EV_IP_HDR_CHKSUM_ERR); 762 FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR);
779 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event, 763 rx_ev_tcp_udp_chksum_err = EFX_QWORD_FIELD(*event,
780 RX_EV_TCP_UDP_CHKSUM_ERR); 764 FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR);
781 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, RX_EV_ETH_CRC_ERR); 765 rx_ev_eth_crc_err = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_ETH_CRC_ERR);
782 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, RX_EV_FRM_TRUNC); 766 rx_ev_frm_trunc = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_FRM_TRUNC);
783 rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ? 767 rx_ev_drib_nib = ((falcon_rev(efx) >= FALCON_REV_B0) ?
784 0 : EFX_QWORD_FIELD(*event, RX_EV_DRIB_NIB)); 768 0 : EFX_QWORD_FIELD(*event, FSF_AA_RX_EV_DRIB_NIB));
785 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, RX_EV_PAUSE_FRM_ERR); 769 rx_ev_pause_frm = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PAUSE_FRM_ERR);
786 770
787 /* Every error apart from tobe_disc and pause_frm */ 771 /* Every error apart from tobe_disc and pause_frm */
788 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err | 772 rx_ev_other_err = (rx_ev_drib_nib | rx_ev_tcp_udp_chksum_err |
@@ -838,9 +822,8 @@ static void falcon_handle_rx_bad_index(struct efx_rx_queue *rx_queue,
838 struct efx_nic *efx = rx_queue->efx; 822 struct efx_nic *efx = rx_queue->efx;
839 unsigned expected, dropped; 823 unsigned expected, dropped;
840 824
841 expected = rx_queue->removed_count & FALCON_RXD_RING_MASK; 825 expected = rx_queue->removed_count & EFX_RXQ_MASK;
842 dropped = ((index + FALCON_RXD_RING_SIZE - expected) & 826 dropped = (index - expected) & EFX_RXQ_MASK;
843 FALCON_RXD_RING_MASK);
844 EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n", 827 EFX_INFO(efx, "dropped %d events (index=%d expected=%d)\n",
845 dropped, index, expected); 828 dropped, index, expected);
846 829
@@ -866,17 +849,18 @@ static void falcon_handle_rx_event(struct efx_channel *channel,
866 struct efx_nic *efx = channel->efx; 849 struct efx_nic *efx = channel->efx;
867 850
868 /* Basic packet information */ 851 /* Basic packet information */
869 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, RX_EV_BYTE_CNT); 852 rx_ev_byte_cnt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_BYTE_CNT);
870 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, RX_EV_PKT_OK); 853 rx_ev_pkt_ok = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_PKT_OK);
871 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, RX_EV_HDR_TYPE); 854 rx_ev_hdr_type = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_HDR_TYPE);
872 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_JUMBO_CONT)); 855 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_JUMBO_CONT));
873 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_SOP) != 1); 856 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_SOP) != 1);
874 WARN_ON(EFX_QWORD_FIELD(*event, RX_EV_Q_LABEL) != channel->channel); 857 WARN_ON(EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_Q_LABEL) !=
858 channel->channel);
875 859
876 rx_queue = &efx->rx_queue[channel->channel]; 860 rx_queue = &efx->rx_queue[channel->channel];
877 861
878 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, RX_EV_DESC_PTR); 862 rx_ev_desc_ptr = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_DESC_PTR);
879 expected_ptr = rx_queue->removed_count & FALCON_RXD_RING_MASK; 863 expected_ptr = rx_queue->removed_count & EFX_RXQ_MASK;
880 if (unlikely(rx_ev_desc_ptr != expected_ptr)) 864 if (unlikely(rx_ev_desc_ptr != expected_ptr))
881 falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr); 865 falcon_handle_rx_bad_index(rx_queue, rx_ev_desc_ptr);
882 866
@@ -884,7 +868,10 @@ static void falcon_handle_rx_event(struct efx_channel *channel,
884 /* If packet is marked as OK and packet type is TCP/IPv4 or 868 /* If packet is marked as OK and packet type is TCP/IPv4 or
885 * UDP/IPv4, then we can rely on the hardware checksum. 869 * UDP/IPv4, then we can rely on the hardware checksum.
886 */ 870 */
887 checksummed = RX_EV_HDR_TYPE_HAS_CHECKSUMS(rx_ev_hdr_type); 871 checksummed =
872 efx->rx_checksum_enabled &&
873 (rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP ||
874 rx_ev_hdr_type == FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP);
888 } else { 875 } else {
889 falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok, 876 falcon_handle_rx_not_ok(rx_queue, event, &rx_ev_pkt_ok,
890 &discard); 877 &discard);
@@ -892,10 +879,10 @@ static void falcon_handle_rx_event(struct efx_channel *channel,
892 } 879 }
893 880
894 /* Detect multicast packets that didn't match the filter */ 881 /* Detect multicast packets that didn't match the filter */
895 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, RX_EV_MCAST_PKT); 882 rx_ev_mcast_pkt = EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_PKT);
896 if (rx_ev_mcast_pkt) { 883 if (rx_ev_mcast_pkt) {
897 unsigned int rx_ev_mcast_hash_match = 884 unsigned int rx_ev_mcast_hash_match =
898 EFX_QWORD_FIELD(*event, RX_EV_MCAST_HASH_MATCH); 885 EFX_QWORD_FIELD(*event, FSF_AZ_RX_EV_MCAST_HASH_MATCH);
899 886
900 if (unlikely(!rx_ev_mcast_hash_match)) 887 if (unlikely(!rx_ev_mcast_hash_match))
901 discard = true; 888 discard = true;
@@ -915,22 +902,23 @@ static void falcon_handle_global_event(struct efx_channel *channel,
915 struct efx_nic *efx = channel->efx; 902 struct efx_nic *efx = channel->efx;
916 bool handled = false; 903 bool handled = false;
917 904
918 if (EFX_QWORD_FIELD(*event, G_PHY0_INTR) || 905 if (EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_G_PHY0_INTR) ||
919 EFX_QWORD_FIELD(*event, G_PHY1_INTR) || 906 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XG_PHY0_INTR) ||
920 EFX_QWORD_FIELD(*event, XG_PHY_INTR) || 907 EFX_QWORD_FIELD(*event, FSF_AB_GLB_EV_XFP_PHY0_INTR)) {
921 EFX_QWORD_FIELD(*event, XFP_PHY_INTR)) {
922 efx->phy_op->clear_interrupt(efx); 908 efx->phy_op->clear_interrupt(efx);
923 queue_work(efx->workqueue, &efx->phy_work); 909 queue_work(efx->workqueue, &efx->phy_work);
924 handled = true; 910 handled = true;
925 } 911 }
926 912
927 if ((falcon_rev(efx) >= FALCON_REV_B0) && 913 if ((falcon_rev(efx) >= FALCON_REV_B0) &&
928 EFX_QWORD_FIELD(*event, XG_MNT_INTR_B0)) { 914 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_XG_MGT_INTR)) {
929 queue_work(efx->workqueue, &efx->mac_work); 915 queue_work(efx->workqueue, &efx->mac_work);
930 handled = true; 916 handled = true;
931 } 917 }
932 918
933 if (EFX_QWORD_FIELD_VER(efx, *event, RX_RECOVERY)) { 919 if (falcon_rev(efx) <= FALCON_REV_A1 ?
920 EFX_QWORD_FIELD(*event, FSF_AA_GLB_EV_RX_RECOVERY) :
921 EFX_QWORD_FIELD(*event, FSF_BB_GLB_EV_RX_RECOVERY)) {
934 EFX_ERR(efx, "channel %d seen global RX_RESET " 922 EFX_ERR(efx, "channel %d seen global RX_RESET "
935 "event. Resetting.\n", channel->channel); 923 "event. Resetting.\n", channel->channel);
936 924
@@ -953,35 +941,35 @@ static void falcon_handle_driver_event(struct efx_channel *channel,
953 unsigned int ev_sub_code; 941 unsigned int ev_sub_code;
954 unsigned int ev_sub_data; 942 unsigned int ev_sub_data;
955 943
956 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE); 944 ev_sub_code = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBCODE);
957 ev_sub_data = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_DATA); 945 ev_sub_data = EFX_QWORD_FIELD(*event, FSF_AZ_DRIVER_EV_SUBDATA);
958 946
959 switch (ev_sub_code) { 947 switch (ev_sub_code) {
960 case TX_DESCQ_FLS_DONE_EV_DECODE: 948 case FSE_AZ_TX_DESCQ_FLS_DONE_EV:
961 EFX_TRACE(efx, "channel %d TXQ %d flushed\n", 949 EFX_TRACE(efx, "channel %d TXQ %d flushed\n",
962 channel->channel, ev_sub_data); 950 channel->channel, ev_sub_data);
963 break; 951 break;
964 case RX_DESCQ_FLS_DONE_EV_DECODE: 952 case FSE_AZ_RX_DESCQ_FLS_DONE_EV:
965 EFX_TRACE(efx, "channel %d RXQ %d flushed\n", 953 EFX_TRACE(efx, "channel %d RXQ %d flushed\n",
966 channel->channel, ev_sub_data); 954 channel->channel, ev_sub_data);
967 break; 955 break;
968 case EVQ_INIT_DONE_EV_DECODE: 956 case FSE_AZ_EVQ_INIT_DONE_EV:
969 EFX_LOG(efx, "channel %d EVQ %d initialised\n", 957 EFX_LOG(efx, "channel %d EVQ %d initialised\n",
970 channel->channel, ev_sub_data); 958 channel->channel, ev_sub_data);
971 break; 959 break;
972 case SRM_UPD_DONE_EV_DECODE: 960 case FSE_AZ_SRM_UPD_DONE_EV:
973 EFX_TRACE(efx, "channel %d SRAM update done\n", 961 EFX_TRACE(efx, "channel %d SRAM update done\n",
974 channel->channel); 962 channel->channel);
975 break; 963 break;
976 case WAKE_UP_EV_DECODE: 964 case FSE_AZ_WAKE_UP_EV:
977 EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n", 965 EFX_TRACE(efx, "channel %d RXQ %d wakeup event\n",
978 channel->channel, ev_sub_data); 966 channel->channel, ev_sub_data);
979 break; 967 break;
980 case TIMER_EV_DECODE: 968 case FSE_AZ_TIMER_EV:
981 EFX_TRACE(efx, "channel %d RX queue %d timer expired\n", 969 EFX_TRACE(efx, "channel %d RX queue %d timer expired\n",
982 channel->channel, ev_sub_data); 970 channel->channel, ev_sub_data);
983 break; 971 break;
984 case RX_RECOVERY_EV_DECODE: 972 case FSE_AA_RX_RECOVER_EV:
985 EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. " 973 EFX_ERR(efx, "channel %d seen DRIVER RX_RESET event. "
986 "Resetting.\n", channel->channel); 974 "Resetting.\n", channel->channel);
987 atomic_inc(&efx->rx_reset); 975 atomic_inc(&efx->rx_reset);
@@ -990,12 +978,12 @@ static void falcon_handle_driver_event(struct efx_channel *channel,
990 RESET_TYPE_RX_RECOVERY : 978 RESET_TYPE_RX_RECOVERY :
991 RESET_TYPE_DISABLE); 979 RESET_TYPE_DISABLE);
992 break; 980 break;
993 case RX_DSC_ERROR_EV_DECODE: 981 case FSE_BZ_RX_DSC_ERROR_EV:
994 EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error." 982 EFX_ERR(efx, "RX DMA Q %d reports descriptor fetch error."
995 " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data); 983 " RX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
996 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH); 984 efx_schedule_reset(efx, RESET_TYPE_RX_DESC_FETCH);
997 break; 985 break;
998 case TX_DSC_ERROR_EV_DECODE: 986 case FSE_BZ_TX_DSC_ERROR_EV:
999 EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error." 987 EFX_ERR(efx, "TX DMA Q %d reports descriptor fetch error."
1000 " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data); 988 " TX Q %d is disabled.\n", ev_sub_data, ev_sub_data);
1001 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH); 989 efx_schedule_reset(efx, RESET_TYPE_TX_DESC_FETCH);
@@ -1031,27 +1019,27 @@ int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
1031 /* Clear this event by marking it all ones */ 1019 /* Clear this event by marking it all ones */
1032 EFX_SET_QWORD(*p_event); 1020 EFX_SET_QWORD(*p_event);
1033 1021
1034 ev_code = EFX_QWORD_FIELD(event, EV_CODE); 1022 ev_code = EFX_QWORD_FIELD(event, FSF_AZ_EV_CODE);
1035 1023
1036 switch (ev_code) { 1024 switch (ev_code) {
1037 case RX_IP_EV_DECODE: 1025 case FSE_AZ_EV_CODE_RX_EV:
1038 falcon_handle_rx_event(channel, &event); 1026 falcon_handle_rx_event(channel, &event);
1039 ++rx_packets; 1027 ++rx_packets;
1040 break; 1028 break;
1041 case TX_IP_EV_DECODE: 1029 case FSE_AZ_EV_CODE_TX_EV:
1042 falcon_handle_tx_event(channel, &event); 1030 falcon_handle_tx_event(channel, &event);
1043 break; 1031 break;
1044 case DRV_GEN_EV_DECODE: 1032 case FSE_AZ_EV_CODE_DRV_GEN_EV:
1045 channel->eventq_magic 1033 channel->eventq_magic = EFX_QWORD_FIELD(
1046 = EFX_QWORD_FIELD(event, EVQ_MAGIC); 1034 event, FSF_AZ_DRV_GEN_EV_MAGIC);
1047 EFX_LOG(channel->efx, "channel %d received generated " 1035 EFX_LOG(channel->efx, "channel %d received generated "
1048 "event "EFX_QWORD_FMT"\n", channel->channel, 1036 "event "EFX_QWORD_FMT"\n", channel->channel,
1049 EFX_QWORD_VAL(event)); 1037 EFX_QWORD_VAL(event));
1050 break; 1038 break;
1051 case GLOBAL_EV_DECODE: 1039 case FSE_AZ_EV_CODE_GLOBAL_EV:
1052 falcon_handle_global_event(channel, &event); 1040 falcon_handle_global_event(channel, &event);
1053 break; 1041 break;
1054 case DRIVER_EV_DECODE: 1042 case FSE_AZ_EV_CODE_DRIVER_EV:
1055 falcon_handle_driver_event(channel, &event); 1043 falcon_handle_driver_event(channel, &event);
1056 break; 1044 break;
1057 default: 1045 default:
@@ -1061,7 +1049,7 @@ int falcon_process_eventq(struct efx_channel *channel, int rx_quota)
1061 } 1049 }
1062 1050
1063 /* Increment read pointer */ 1051 /* Increment read pointer */
1064 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK; 1052 read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
1065 1053
1066 } while (rx_packets < rx_quota); 1054 } while (rx_packets < rx_quota);
1067 1055
@@ -1076,26 +1064,20 @@ void falcon_set_int_moderation(struct efx_channel *channel)
1076 1064
1077 /* Set timer register */ 1065 /* Set timer register */
1078 if (channel->irq_moderation) { 1066 if (channel->irq_moderation) {
1079 /* Round to resolution supported by hardware. The value we
1080 * program is based at 0. So actual interrupt moderation
1081 * achieved is ((x + 1) * res).
1082 */
1083 channel->irq_moderation -= (channel->irq_moderation %
1084 FALCON_IRQ_MOD_RESOLUTION);
1085 if (channel->irq_moderation < FALCON_IRQ_MOD_RESOLUTION)
1086 channel->irq_moderation = FALCON_IRQ_MOD_RESOLUTION;
1087 EFX_POPULATE_DWORD_2(timer_cmd, 1067 EFX_POPULATE_DWORD_2(timer_cmd,
1088 TIMER_MODE, TIMER_MODE_INT_HLDOFF, 1068 FRF_AB_TC_TIMER_MODE,
1089 TIMER_VAL, 1069 FFE_BB_TIMER_MODE_INT_HLDOFF,
1090 channel->irq_moderation / 1070 FRF_AB_TC_TIMER_VAL,
1091 FALCON_IRQ_MOD_RESOLUTION - 1); 1071 channel->irq_moderation - 1);
1092 } else { 1072 } else {
1093 EFX_POPULATE_DWORD_2(timer_cmd, 1073 EFX_POPULATE_DWORD_2(timer_cmd,
1094 TIMER_MODE, TIMER_MODE_DIS, 1074 FRF_AB_TC_TIMER_MODE,
1095 TIMER_VAL, 0); 1075 FFE_BB_TIMER_MODE_DIS,
1076 FRF_AB_TC_TIMER_VAL, 0);
1096 } 1077 }
1097 falcon_writel_page_locked(efx, &timer_cmd, TIMER_CMD_REG_KER, 1078 BUILD_BUG_ON(FR_AA_TIMER_COMMAND_KER != FR_BZ_TIMER_COMMAND_P0);
1098 channel->channel); 1079 efx_writed_page_locked(efx, &timer_cmd, FR_BZ_TIMER_COMMAND_P0,
1080 channel->channel);
1099 1081
1100} 1082}
1101 1083
@@ -1103,10 +1085,10 @@ void falcon_set_int_moderation(struct efx_channel *channel)
1103int falcon_probe_eventq(struct efx_channel *channel) 1085int falcon_probe_eventq(struct efx_channel *channel)
1104{ 1086{
1105 struct efx_nic *efx = channel->efx; 1087 struct efx_nic *efx = channel->efx;
1106 unsigned int evq_size; 1088 BUILD_BUG_ON(EFX_EVQ_SIZE < 512 || EFX_EVQ_SIZE > 32768 ||
1107 1089 EFX_EVQ_SIZE & EFX_EVQ_MASK);
1108 evq_size = FALCON_EVQ_SIZE * sizeof(efx_qword_t); 1090 return falcon_alloc_special_buffer(efx, &channel->eventq,
1109 return falcon_alloc_special_buffer(efx, &channel->eventq, evq_size); 1091 EFX_EVQ_SIZE * sizeof(efx_qword_t));
1110} 1092}
1111 1093
1112void falcon_init_eventq(struct efx_channel *channel) 1094void falcon_init_eventq(struct efx_channel *channel)
@@ -1126,11 +1108,11 @@ void falcon_init_eventq(struct efx_channel *channel)
1126 1108
1127 /* Push event queue to card */ 1109 /* Push event queue to card */
1128 EFX_POPULATE_OWORD_3(evq_ptr, 1110 EFX_POPULATE_OWORD_3(evq_ptr,
1129 EVQ_EN, 1, 1111 FRF_AZ_EVQ_EN, 1,
1130 EVQ_SIZE, FALCON_EVQ_ORDER, 1112 FRF_AZ_EVQ_SIZE, __ffs(channel->eventq.entries),
1131 EVQ_BUF_BASE_ID, channel->eventq.index); 1113 FRF_AZ_EVQ_BUF_BASE_ID, channel->eventq.index);
1132 falcon_write_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base, 1114 efx_writeo_table(efx, &evq_ptr, efx->type->evq_ptr_tbl_base,
1133 channel->channel); 1115 channel->channel);
1134 1116
1135 falcon_set_int_moderation(channel); 1117 falcon_set_int_moderation(channel);
1136} 1118}
@@ -1142,8 +1124,8 @@ void falcon_fini_eventq(struct efx_channel *channel)
1142 1124
1143 /* Remove event queue from card */ 1125 /* Remove event queue from card */
1144 EFX_ZERO_OWORD(eventq_ptr); 1126 EFX_ZERO_OWORD(eventq_ptr);
1145 falcon_write_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base, 1127 efx_writeo_table(efx, &eventq_ptr, efx->type->evq_ptr_tbl_base,
1146 channel->channel); 1128 channel->channel);
1147 1129
1148 /* Unpin event queue */ 1130 /* Unpin event queue */
1149 falcon_fini_special_buffer(efx, &channel->eventq); 1131 falcon_fini_special_buffer(efx, &channel->eventq);
@@ -1164,9 +1146,9 @@ void falcon_generate_test_event(struct efx_channel *channel, unsigned int magic)
1164{ 1146{
1165 efx_qword_t test_event; 1147 efx_qword_t test_event;
1166 1148
1167 EFX_POPULATE_QWORD_2(test_event, 1149 EFX_POPULATE_QWORD_2(test_event, FSF_AZ_EV_CODE,
1168 EV_CODE, DRV_GEN_EV_DECODE, 1150 FSE_AZ_EV_CODE_DRV_GEN_EV,
1169 EVQ_MAGIC, magic); 1151 FSF_AZ_DRV_GEN_EV_MAGIC, magic);
1170 falcon_generate_event(channel, &test_event); 1152 falcon_generate_event(channel, &test_event);
1171} 1153}
1172 1154
@@ -1174,11 +1156,12 @@ void falcon_sim_phy_event(struct efx_nic *efx)
1174{ 1156{
1175 efx_qword_t phy_event; 1157 efx_qword_t phy_event;
1176 1158
1177 EFX_POPULATE_QWORD_1(phy_event, EV_CODE, GLOBAL_EV_DECODE); 1159 EFX_POPULATE_QWORD_1(phy_event, FSF_AZ_EV_CODE,
1160 FSE_AZ_EV_CODE_GLOBAL_EV);
1178 if (EFX_IS10G(efx)) 1161 if (EFX_IS10G(efx))
1179 EFX_SET_QWORD_FIELD(phy_event, XG_PHY_INTR, 1); 1162 EFX_SET_QWORD_FIELD(phy_event, FSF_AB_GLB_EV_XG_PHY0_INTR, 1);
1180 else 1163 else
1181 EFX_SET_QWORD_FIELD(phy_event, G_PHY0_INTR, 1); 1164 EFX_SET_QWORD_FIELD(phy_event, FSF_AB_GLB_EV_G_PHY0_INTR, 1);
1182 1165
1183 falcon_generate_event(&efx->channel[0], &phy_event); 1166 falcon_generate_event(&efx->channel[0], &phy_event);
1184} 1167}
@@ -1196,7 +1179,7 @@ static void falcon_poll_flush_events(struct efx_nic *efx)
1196 struct efx_tx_queue *tx_queue; 1179 struct efx_tx_queue *tx_queue;
1197 struct efx_rx_queue *rx_queue; 1180 struct efx_rx_queue *rx_queue;
1198 unsigned int read_ptr = channel->eventq_read_ptr; 1181 unsigned int read_ptr = channel->eventq_read_ptr;
1199 unsigned int end_ptr = (read_ptr - 1) & FALCON_EVQ_MASK; 1182 unsigned int end_ptr = (read_ptr - 1) & EFX_EVQ_MASK;
1200 1183
1201 do { 1184 do {
1202 efx_qword_t *event = falcon_event(channel, read_ptr); 1185 efx_qword_t *event = falcon_event(channel, read_ptr);
@@ -1206,22 +1189,23 @@ static void falcon_poll_flush_events(struct efx_nic *efx)
1206 if (!falcon_event_present(event)) 1189 if (!falcon_event_present(event))
1207 break; 1190 break;
1208 1191
1209 ev_code = EFX_QWORD_FIELD(*event, EV_CODE); 1192 ev_code = EFX_QWORD_FIELD(*event, FSF_AZ_EV_CODE);
1210 ev_sub_code = EFX_QWORD_FIELD(*event, DRIVER_EV_SUB_CODE); 1193 ev_sub_code = EFX_QWORD_FIELD(*event,
1211 if (ev_code == DRIVER_EV_DECODE && 1194 FSF_AZ_DRIVER_EV_SUBCODE);
1212 ev_sub_code == TX_DESCQ_FLS_DONE_EV_DECODE) { 1195 if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
1196 ev_sub_code == FSE_AZ_TX_DESCQ_FLS_DONE_EV) {
1213 ev_queue = EFX_QWORD_FIELD(*event, 1197 ev_queue = EFX_QWORD_FIELD(*event,
1214 DRIVER_EV_TX_DESCQ_ID); 1198 FSF_AZ_DRIVER_EV_SUBDATA);
1215 if (ev_queue < EFX_TX_QUEUE_COUNT) { 1199 if (ev_queue < EFX_TX_QUEUE_COUNT) {
1216 tx_queue = efx->tx_queue + ev_queue; 1200 tx_queue = efx->tx_queue + ev_queue;
1217 tx_queue->flushed = true; 1201 tx_queue->flushed = true;
1218 } 1202 }
1219 } else if (ev_code == DRIVER_EV_DECODE && 1203 } else if (ev_code == FSE_AZ_EV_CODE_DRIVER_EV &&
1220 ev_sub_code == RX_DESCQ_FLS_DONE_EV_DECODE) { 1204 ev_sub_code == FSE_AZ_RX_DESCQ_FLS_DONE_EV) {
1221 ev_queue = EFX_QWORD_FIELD(*event, 1205 ev_queue = EFX_QWORD_FIELD(
1222 DRIVER_EV_RX_DESCQ_ID); 1206 *event, FSF_AZ_DRIVER_EV_RX_DESCQ_ID);
1223 ev_failed = EFX_QWORD_FIELD(*event, 1207 ev_failed = EFX_QWORD_FIELD(
1224 DRIVER_EV_RX_FLUSH_FAIL); 1208 *event, FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL);
1225 if (ev_queue < efx->n_rx_queues) { 1209 if (ev_queue < efx->n_rx_queues) {
1226 rx_queue = efx->rx_queue + ev_queue; 1210 rx_queue = efx->rx_queue + ev_queue;
1227 1211
@@ -1233,7 +1217,7 @@ static void falcon_poll_flush_events(struct efx_nic *efx)
1233 } 1217 }
1234 } 1218 }
1235 1219
1236 read_ptr = (read_ptr + 1) & FALCON_EVQ_MASK; 1220 read_ptr = (read_ptr + 1) & EFX_EVQ_MASK;
1237 } while (read_ptr != end_ptr); 1221 } while (read_ptr != end_ptr);
1238} 1222}
1239 1223
@@ -1311,9 +1295,9 @@ static inline void falcon_interrupts(struct efx_nic *efx, int enabled,
1311 efx_oword_t int_en_reg_ker; 1295 efx_oword_t int_en_reg_ker;
1312 1296
1313 EFX_POPULATE_OWORD_2(int_en_reg_ker, 1297 EFX_POPULATE_OWORD_2(int_en_reg_ker,
1314 KER_INT_KER, force, 1298 FRF_AZ_KER_INT_KER, force,
1315 DRV_INT_EN_KER, enabled); 1299 FRF_AZ_DRV_INT_EN_KER, enabled);
1316 falcon_write(efx, &int_en_reg_ker, INT_EN_REG_KER); 1300 efx_writeo(efx, &int_en_reg_ker, FR_AZ_INT_EN_KER);
1317} 1301}
1318 1302
1319void falcon_enable_interrupts(struct efx_nic *efx) 1303void falcon_enable_interrupts(struct efx_nic *efx)
@@ -1326,9 +1310,10 @@ void falcon_enable_interrupts(struct efx_nic *efx)
1326 1310
1327 /* Program address */ 1311 /* Program address */
1328 EFX_POPULATE_OWORD_2(int_adr_reg_ker, 1312 EFX_POPULATE_OWORD_2(int_adr_reg_ker,
1329 NORM_INT_VEC_DIS_KER, EFX_INT_MODE_USE_MSI(efx), 1313 FRF_AZ_NORM_INT_VEC_DIS_KER,
1330 INT_ADR_KER, efx->irq_status.dma_addr); 1314 EFX_INT_MODE_USE_MSI(efx),
1331 falcon_write(efx, &int_adr_reg_ker, INT_ADR_REG_KER); 1315 FRF_AZ_INT_ADR_KER, efx->irq_status.dma_addr);
1316 efx_writeo(efx, &int_adr_reg_ker, FR_AZ_INT_ADR_KER);
1332 1317
1333 /* Enable interrupts */ 1318 /* Enable interrupts */
1334 falcon_interrupts(efx, 1, 0); 1319 falcon_interrupts(efx, 1, 0);
@@ -1368,9 +1353,9 @@ static inline void falcon_irq_ack_a1(struct efx_nic *efx)
1368{ 1353{
1369 efx_dword_t reg; 1354 efx_dword_t reg;
1370 1355
1371 EFX_POPULATE_DWORD_1(reg, INT_ACK_DUMMY_DATA, 0xb7eb7e); 1356 EFX_POPULATE_DWORD_1(reg, FRF_AA_INT_ACK_KER_FIELD, 0xb7eb7e);
1372 falcon_writel(efx, &reg, INT_ACK_REG_KER_A1); 1357 efx_writed(efx, &reg, FR_AA_INT_ACK_KER);
1373 falcon_readl(efx, &reg, WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1); 1358 efx_readd(efx, &reg, FR_AA_WORK_AROUND_BROKEN_PCI_READS);
1374} 1359}
1375 1360
1376/* Process a fatal interrupt 1361/* Process a fatal interrupt
@@ -1383,8 +1368,8 @@ static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
1383 efx_oword_t fatal_intr; 1368 efx_oword_t fatal_intr;
1384 int error, mem_perr; 1369 int error, mem_perr;
1385 1370
1386 falcon_read(efx, &fatal_intr, FATAL_INTR_REG_KER); 1371 efx_reado(efx, &fatal_intr, FR_AZ_FATAL_INTR_KER);
1387 error = EFX_OWORD_FIELD(fatal_intr, INT_KER_ERROR); 1372 error = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_FATAL_INTR);
1388 1373
1389 EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status " 1374 EFX_ERR(efx, "SYSTEM ERROR " EFX_OWORD_FMT " status "
1390 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker), 1375 EFX_OWORD_FMT ": %s\n", EFX_OWORD_VAL(*int_ker),
@@ -1394,10 +1379,10 @@ static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
1394 goto out; 1379 goto out;
1395 1380
1396 /* If this is a memory parity error dump which blocks are offending */ 1381 /* If this is a memory parity error dump which blocks are offending */
1397 mem_perr = EFX_OWORD_FIELD(fatal_intr, MEM_PERR_INT_KER); 1382 mem_perr = EFX_OWORD_FIELD(fatal_intr, FRF_AZ_MEM_PERR_INT_KER);
1398 if (mem_perr) { 1383 if (mem_perr) {
1399 efx_oword_t reg; 1384 efx_oword_t reg;
1400 falcon_read(efx, &reg, MEM_STAT_REG_KER); 1385 efx_reado(efx, &reg, FR_AZ_MEM_STAT);
1401 EFX_ERR(efx, "SYSTEM ERROR: memory parity error " 1386 EFX_ERR(efx, "SYSTEM ERROR: memory parity error "
1402 EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg)); 1387 EFX_OWORD_FMT "\n", EFX_OWORD_VAL(reg));
1403 } 1388 }
@@ -1409,13 +1394,13 @@ static irqreturn_t falcon_fatal_interrupt(struct efx_nic *efx)
1409 falcon_disable_interrupts(efx); 1394 falcon_disable_interrupts(efx);
1410 1395
1411 /* Count errors and reset or disable the NIC accordingly */ 1396 /* Count errors and reset or disable the NIC accordingly */
1412 if (nic_data->int_error_count == 0 || 1397 if (efx->int_error_count == 0 ||
1413 time_after(jiffies, nic_data->int_error_expire)) { 1398 time_after(jiffies, efx->int_error_expire)) {
1414 nic_data->int_error_count = 0; 1399 efx->int_error_count = 0;
1415 nic_data->int_error_expire = 1400 efx->int_error_expire =
1416 jiffies + FALCON_INT_ERROR_EXPIRE * HZ; 1401 jiffies + FALCON_INT_ERROR_EXPIRE * HZ;
1417 } 1402 }
1418 if (++nic_data->int_error_count < FALCON_MAX_INT_ERRORS) { 1403 if (++efx->int_error_count < FALCON_MAX_INT_ERRORS) {
1419 EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n"); 1404 EFX_ERR(efx, "SYSTEM ERROR - reset scheduled\n");
1420 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR); 1405 efx_schedule_reset(efx, RESET_TYPE_INT_ERROR);
1421 } else { 1406 } else {
@@ -1441,11 +1426,11 @@ static irqreturn_t falcon_legacy_interrupt_b0(int irq, void *dev_id)
1441 int syserr; 1426 int syserr;
1442 1427
1443 /* Read the ISR which also ACKs the interrupts */ 1428 /* Read the ISR which also ACKs the interrupts */
1444 falcon_readl(efx, &reg, INT_ISR0_B0); 1429 efx_readd(efx, &reg, FR_BZ_INT_ISR0);
1445 queues = EFX_EXTRACT_DWORD(reg, 0, 31); 1430 queues = EFX_EXTRACT_DWORD(reg, 0, 31);
1446 1431
1447 /* Check to see if we have a serious error condition */ 1432 /* Check to see if we have a serious error condition */
1448 syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT); 1433 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1449 if (unlikely(syserr)) 1434 if (unlikely(syserr))
1450 return falcon_fatal_interrupt(efx); 1435 return falcon_fatal_interrupt(efx);
1451 1436
@@ -1491,7 +1476,7 @@ static irqreturn_t falcon_legacy_interrupt_a1(int irq, void *dev_id)
1491 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker)); 1476 irq, raw_smp_processor_id(), EFX_OWORD_VAL(*int_ker));
1492 1477
1493 /* Check to see if we have a serious error condition */ 1478 /* Check to see if we have a serious error condition */
1494 syserr = EFX_OWORD_FIELD(*int_ker, FATAL_INT); 1479 syserr = EFX_OWORD_FIELD(*int_ker, FSF_AZ_NET_IVEC_FATAL_INT);
1495 if (unlikely(syserr)) 1480 if (unlikely(syserr))
1496 return falcon_fatal_interrupt(efx); 1481 return falcon_fatal_interrupt(efx);
1497 1482
@@ -1558,12 +1543,12 @@ static void falcon_setup_rss_indir_table(struct efx_nic *efx)
1558 if (falcon_rev(efx) < FALCON_REV_B0) 1543 if (falcon_rev(efx) < FALCON_REV_B0)
1559 return; 1544 return;
1560 1545
1561 for (offset = RX_RSS_INDIR_TBL_B0; 1546 for (offset = FR_BZ_RX_INDIRECTION_TBL;
1562 offset < RX_RSS_INDIR_TBL_B0 + 0x800; 1547 offset < FR_BZ_RX_INDIRECTION_TBL + 0x800;
1563 offset += 0x10) { 1548 offset += 0x10) {
1564 EFX_POPULATE_DWORD_1(dword, RX_RSS_INDIR_ENT_B0, 1549 EFX_POPULATE_DWORD_1(dword, FRF_BZ_IT_QUEUE,
1565 i % efx->n_rx_queues); 1550 i % efx->n_rx_queues);
1566 falcon_writel(efx, &dword, offset); 1551 efx_writed(efx, &dword, offset);
1567 i++; 1552 i++;
1568 } 1553 }
1569} 1554}
@@ -1626,7 +1611,7 @@ void falcon_fini_interrupt(struct efx_nic *efx)
1626 1611
1627 /* ACK legacy interrupt */ 1612 /* ACK legacy interrupt */
1628 if (falcon_rev(efx) >= FALCON_REV_B0) 1613 if (falcon_rev(efx) >= FALCON_REV_B0)
1629 falcon_read(efx, &reg, INT_ISR0_B0); 1614 efx_reado(efx, &reg, FR_BZ_INT_ISR0);
1630 else 1615 else
1631 falcon_irq_ack_a1(efx); 1616 falcon_irq_ack_a1(efx);
1632 1617
@@ -1647,8 +1632,8 @@ void falcon_fini_interrupt(struct efx_nic *efx)
1647static int falcon_spi_poll(struct efx_nic *efx) 1632static int falcon_spi_poll(struct efx_nic *efx)
1648{ 1633{
1649 efx_oword_t reg; 1634 efx_oword_t reg;
1650 falcon_read(efx, &reg, EE_SPI_HCMD_REG_KER); 1635 efx_reado(efx, &reg, FR_AB_EE_SPI_HCMD);
1651 return EFX_OWORD_FIELD(reg, EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0; 1636 return EFX_OWORD_FIELD(reg, FRF_AB_EE_SPI_HCMD_CMD_EN) ? -EBUSY : 0;
1652} 1637}
1653 1638
1654/* Wait for SPI command completion */ 1639/* Wait for SPI command completion */
@@ -1700,27 +1685,27 @@ int falcon_spi_cmd(const struct efx_spi_device *spi,
1700 1685
1701 /* Program address register, if we have an address */ 1686 /* Program address register, if we have an address */
1702 if (addressed) { 1687 if (addressed) {
1703 EFX_POPULATE_OWORD_1(reg, EE_SPI_HADR_ADR, address); 1688 EFX_POPULATE_OWORD_1(reg, FRF_AB_EE_SPI_HADR_ADR, address);
1704 falcon_write(efx, &reg, EE_SPI_HADR_REG_KER); 1689 efx_writeo(efx, &reg, FR_AB_EE_SPI_HADR);
1705 } 1690 }
1706 1691
1707 /* Program data register, if we have data */ 1692 /* Program data register, if we have data */
1708 if (in != NULL) { 1693 if (in != NULL) {
1709 memcpy(&reg, in, len); 1694 memcpy(&reg, in, len);
1710 falcon_write(efx, &reg, EE_SPI_HDATA_REG_KER); 1695 efx_writeo(efx, &reg, FR_AB_EE_SPI_HDATA);
1711 } 1696 }
1712 1697
1713 /* Issue read/write command */ 1698 /* Issue read/write command */
1714 EFX_POPULATE_OWORD_7(reg, 1699 EFX_POPULATE_OWORD_7(reg,
1715 EE_SPI_HCMD_CMD_EN, 1, 1700 FRF_AB_EE_SPI_HCMD_CMD_EN, 1,
1716 EE_SPI_HCMD_SF_SEL, spi->device_id, 1701 FRF_AB_EE_SPI_HCMD_SF_SEL, spi->device_id,
1717 EE_SPI_HCMD_DABCNT, len, 1702 FRF_AB_EE_SPI_HCMD_DABCNT, len,
1718 EE_SPI_HCMD_READ, reading, 1703 FRF_AB_EE_SPI_HCMD_READ, reading,
1719 EE_SPI_HCMD_DUBCNT, 0, 1704 FRF_AB_EE_SPI_HCMD_DUBCNT, 0,
1720 EE_SPI_HCMD_ADBCNT, 1705 FRF_AB_EE_SPI_HCMD_ADBCNT,
1721 (addressed ? spi->addr_len : 0), 1706 (addressed ? spi->addr_len : 0),
1722 EE_SPI_HCMD_ENC, command); 1707 FRF_AB_EE_SPI_HCMD_ENC, command);
1723 falcon_write(efx, &reg, EE_SPI_HCMD_REG_KER); 1708 efx_writeo(efx, &reg, FR_AB_EE_SPI_HCMD);
1724 1709
1725 /* Wait for read/write to complete */ 1710 /* Wait for read/write to complete */
1726 rc = falcon_spi_wait(efx); 1711 rc = falcon_spi_wait(efx);
@@ -1729,7 +1714,7 @@ int falcon_spi_cmd(const struct efx_spi_device *spi,
1729 1714
1730 /* Read data */ 1715 /* Read data */
1731 if (out != NULL) { 1716 if (out != NULL) {
1732 falcon_read(efx, &reg, EE_SPI_HDATA_REG_KER); 1717 efx_reado(efx, &reg, FR_AB_EE_SPI_HDATA);
1733 memcpy(out, &reg, len); 1718 memcpy(out, &reg, len);
1734 } 1719 }
1735 1720
@@ -1870,21 +1855,22 @@ static int falcon_reset_macs(struct efx_nic *efx)
1870 * macs, so instead use the internal MAC resets 1855 * macs, so instead use the internal MAC resets
1871 */ 1856 */
1872 if (!EFX_IS10G(efx)) { 1857 if (!EFX_IS10G(efx)) {
1873 EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 1); 1858 EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 1);
1874 falcon_write(efx, &reg, GM_CFG1_REG); 1859 efx_writeo(efx, &reg, FR_AB_GM_CFG1);
1875 udelay(1000); 1860 udelay(1000);
1876 1861
1877 EFX_POPULATE_OWORD_1(reg, GM_SW_RST, 0); 1862 EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_SW_RST, 0);
1878 falcon_write(efx, &reg, GM_CFG1_REG); 1863 efx_writeo(efx, &reg, FR_AB_GM_CFG1);
1879 udelay(1000); 1864 udelay(1000);
1880 return 0; 1865 return 0;
1881 } else { 1866 } else {
1882 EFX_POPULATE_OWORD_1(reg, XM_CORE_RST, 1); 1867 EFX_POPULATE_OWORD_1(reg, FRF_AB_XM_CORE_RST, 1);
1883 falcon_write(efx, &reg, XM_GLB_CFG_REG); 1868 efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
1884 1869
1885 for (count = 0; count < 10000; count++) { 1870 for (count = 0; count < 10000; count++) {
1886 falcon_read(efx, &reg, XM_GLB_CFG_REG); 1871 efx_reado(efx, &reg, FR_AB_XM_GLB_CFG);
1887 if (EFX_OWORD_FIELD(reg, XM_CORE_RST) == 0) 1872 if (EFX_OWORD_FIELD(reg, FRF_AB_XM_CORE_RST) ==
1873 0)
1888 return 0; 1874 return 0;
1889 udelay(10); 1875 udelay(10);
1890 } 1876 }
@@ -1898,22 +1884,22 @@ static int falcon_reset_macs(struct efx_nic *efx)
1898 * the drain sequence with the statistics fetch */ 1884 * the drain sequence with the statistics fetch */
1899 efx_stats_disable(efx); 1885 efx_stats_disable(efx);
1900 1886
1901 falcon_read(efx, &reg, MAC0_CTRL_REG_KER); 1887 efx_reado(efx, &reg, FR_AB_MAC_CTRL);
1902 EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1); 1888 EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN, 1);
1903 falcon_write(efx, &reg, MAC0_CTRL_REG_KER); 1889 efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
1904 1890
1905 falcon_read(efx, &reg, GLB_CTL_REG_KER); 1891 efx_reado(efx, &reg, FR_AB_GLB_CTL);
1906 EFX_SET_OWORD_FIELD(reg, RST_XGTX, 1); 1892 EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGTX, 1);
1907 EFX_SET_OWORD_FIELD(reg, RST_XGRX, 1); 1893 EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_XGRX, 1);
1908 EFX_SET_OWORD_FIELD(reg, RST_EM, 1); 1894 EFX_SET_OWORD_FIELD(reg, FRF_AB_RST_EM, 1);
1909 falcon_write(efx, &reg, GLB_CTL_REG_KER); 1895 efx_writeo(efx, &reg, FR_AB_GLB_CTL);
1910 1896
1911 count = 0; 1897 count = 0;
1912 while (1) { 1898 while (1) {
1913 falcon_read(efx, &reg, GLB_CTL_REG_KER); 1899 efx_reado(efx, &reg, FR_AB_GLB_CTL);
1914 if (!EFX_OWORD_FIELD(reg, RST_XGTX) && 1900 if (!EFX_OWORD_FIELD(reg, FRF_AB_RST_XGTX) &&
1915 !EFX_OWORD_FIELD(reg, RST_XGRX) && 1901 !EFX_OWORD_FIELD(reg, FRF_AB_RST_XGRX) &&
1916 !EFX_OWORD_FIELD(reg, RST_EM)) { 1902 !EFX_OWORD_FIELD(reg, FRF_AB_RST_EM)) {
1917 EFX_LOG(efx, "Completed MAC reset after %d loops\n", 1903 EFX_LOG(efx, "Completed MAC reset after %d loops\n",
1918 count); 1904 count);
1919 break; 1905 break;
@@ -1944,9 +1930,9 @@ void falcon_drain_tx_fifo(struct efx_nic *efx)
1944 (efx->loopback_mode != LOOPBACK_NONE)) 1930 (efx->loopback_mode != LOOPBACK_NONE))
1945 return; 1931 return;
1946 1932
1947 falcon_read(efx, &reg, MAC0_CTRL_REG_KER); 1933 efx_reado(efx, &reg, FR_AB_MAC_CTRL);
1948 /* There is no point in draining more than once */ 1934 /* There is no point in draining more than once */
1949 if (EFX_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0)) 1935 if (EFX_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN))
1950 return; 1936 return;
1951 1937
1952 falcon_reset_macs(efx); 1938 falcon_reset_macs(efx);
@@ -1960,9 +1946,9 @@ void falcon_deconfigure_mac_wrapper(struct efx_nic *efx)
1960 return; 1946 return;
1961 1947
1962 /* Isolate the MAC -> RX */ 1948 /* Isolate the MAC -> RX */
1963 falcon_read(efx, &reg, RX_CFG_REG_KER); 1949 efx_reado(efx, &reg, FR_AZ_RX_CFG);
1964 EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 0); 1950 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 0);
1965 falcon_write(efx, &reg, RX_CFG_REG_KER); 1951 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
1966 1952
1967 if (!efx->link_up) 1953 if (!efx->link_up)
1968 falcon_drain_tx_fifo(efx); 1954 falcon_drain_tx_fifo(efx);
@@ -1985,19 +1971,19 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
1985 * indefinitely held and TX queue can be flushed at any point 1971 * indefinitely held and TX queue can be flushed at any point
1986 * while the link is down. */ 1972 * while the link is down. */
1987 EFX_POPULATE_OWORD_5(reg, 1973 EFX_POPULATE_OWORD_5(reg,
1988 MAC_XOFF_VAL, 0xffff /* max pause time */, 1974 FRF_AB_MAC_XOFF_VAL, 0xffff /* max pause time */,
1989 MAC_BCAD_ACPT, 1, 1975 FRF_AB_MAC_BCAD_ACPT, 1,
1990 MAC_UC_PROM, efx->promiscuous, 1976 FRF_AB_MAC_UC_PROM, efx->promiscuous,
1991 MAC_LINK_STATUS, 1, /* always set */ 1977 FRF_AB_MAC_LINK_STATUS, 1, /* always set */
1992 MAC_SPEED, link_speed); 1978 FRF_AB_MAC_SPEED, link_speed);
1993 /* On B0, MAC backpressure can be disabled and packets get 1979 /* On B0, MAC backpressure can be disabled and packets get
1994 * discarded. */ 1980 * discarded. */
1995 if (falcon_rev(efx) >= FALCON_REV_B0) { 1981 if (falcon_rev(efx) >= FALCON_REV_B0) {
1996 EFX_SET_OWORD_FIELD(reg, TXFIFO_DRAIN_EN_B0, 1982 EFX_SET_OWORD_FIELD(reg, FRF_BB_TXFIFO_DRAIN_EN,
1997 !efx->link_up); 1983 !efx->link_up);
1998 } 1984 }
1999 1985
2000 falcon_write(efx, &reg, MAC0_CTRL_REG_KER); 1986 efx_writeo(efx, &reg, FR_AB_MAC_CTRL);
2001 1987
2002 /* Restore the multicast hash registers. */ 1988 /* Restore the multicast hash registers. */
2003 falcon_set_multicast_hash(efx); 1989 falcon_set_multicast_hash(efx);
@@ -2006,13 +1992,13 @@ void falcon_reconfigure_mac_wrapper(struct efx_nic *efx)
2006 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL. 1992 * covered by RX_XOFF_MAC_EN and XM_TX_CFG_REG:XM_FCNTL.
2007 * Action on receipt of pause frames is controller by XM_DIS_FCNTL */ 1993 * Action on receipt of pause frames is controller by XM_DIS_FCNTL */
2008 tx_fc = !!(efx->link_fc & EFX_FC_TX); 1994 tx_fc = !!(efx->link_fc & EFX_FC_TX);
2009 falcon_read(efx, &reg, RX_CFG_REG_KER); 1995 efx_reado(efx, &reg, FR_AZ_RX_CFG);
2010 EFX_SET_OWORD_FIELD_VER(efx, reg, RX_XOFF_MAC_EN, tx_fc); 1996 EFX_SET_OWORD_FIELD(reg, FRF_AZ_RX_XOFF_MAC_EN, tx_fc);
2011 1997
2012 /* Unisolate the MAC -> RX */ 1998 /* Unisolate the MAC -> RX */
2013 if (falcon_rev(efx) >= FALCON_REV_B0) 1999 if (falcon_rev(efx) >= FALCON_REV_B0)
2014 EFX_SET_OWORD_FIELD(reg, RX_INGR_EN_B0, 1); 2000 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
2015 falcon_write(efx, &reg, RX_CFG_REG_KER); 2001 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
2016} 2002}
2017 2003
2018int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset) 2004int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
@@ -2027,8 +2013,8 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
2027 /* Statistics fetch will fail if the MAC is in TX drain */ 2013 /* Statistics fetch will fail if the MAC is in TX drain */
2028 if (falcon_rev(efx) >= FALCON_REV_B0) { 2014 if (falcon_rev(efx) >= FALCON_REV_B0) {
2029 efx_oword_t temp; 2015 efx_oword_t temp;
2030 falcon_read(efx, &temp, MAC0_CTRL_REG_KER); 2016 efx_reado(efx, &temp, FR_AB_MAC_CTRL);
2031 if (EFX_OWORD_FIELD(temp, TXFIFO_DRAIN_EN_B0)) 2017 if (EFX_OWORD_FIELD(temp, FRF_BB_TXFIFO_DRAIN_EN))
2032 return 0; 2018 return 0;
2033 } 2019 }
2034 2020
@@ -2038,10 +2024,10 @@ int falcon_dma_stats(struct efx_nic *efx, unsigned int done_offset)
2038 2024
2039 /* Initiate DMA transfer of stats */ 2025 /* Initiate DMA transfer of stats */
2040 EFX_POPULATE_OWORD_2(reg, 2026 EFX_POPULATE_OWORD_2(reg,
2041 MAC_STAT_DMA_CMD, 1, 2027 FRF_AB_MAC_STAT_DMA_CMD, 1,
2042 MAC_STAT_DMA_ADR, 2028 FRF_AB_MAC_STAT_DMA_ADR,
2043 efx->stats_buffer.dma_addr); 2029 efx->stats_buffer.dma_addr);
2044 falcon_write(efx, &reg, MAC0_STAT_DMA_REG_KER); 2030 efx_writeo(efx, &reg, FR_AB_MAC_STAT_DMA);
2045 2031
2046 /* Wait for transfer to complete */ 2032 /* Wait for transfer to complete */
2047 for (i = 0; i < 400; i++) { 2033 for (i = 0; i < 400; i++) {
@@ -2071,10 +2057,10 @@ static int falcon_gmii_wait(struct efx_nic *efx)
2071 2057
2072 /* wait upto 50ms - taken max from datasheet */ 2058 /* wait upto 50ms - taken max from datasheet */
2073 for (count = 0; count < 5000; count++) { 2059 for (count = 0; count < 5000; count++) {
2074 falcon_readl(efx, &md_stat, MD_STAT_REG_KER); 2060 efx_readd(efx, &md_stat, FR_AB_MD_STAT);
2075 if (EFX_DWORD_FIELD(md_stat, MD_BSY) == 0) { 2061 if (EFX_DWORD_FIELD(md_stat, FRF_AB_MD_BSY) == 0) {
2076 if (EFX_DWORD_FIELD(md_stat, MD_LNFL) != 0 || 2062 if (EFX_DWORD_FIELD(md_stat, FRF_AB_MD_LNFL) != 0 ||
2077 EFX_DWORD_FIELD(md_stat, MD_BSERR) != 0) { 2063 EFX_DWORD_FIELD(md_stat, FRF_AB_MD_BSERR) != 0) {
2078 EFX_ERR(efx, "error from GMII access " 2064 EFX_ERR(efx, "error from GMII access "
2079 EFX_DWORD_FMT"\n", 2065 EFX_DWORD_FMT"\n",
2080 EFX_DWORD_VAL(md_stat)); 2066 EFX_DWORD_VAL(md_stat));
@@ -2107,29 +2093,30 @@ static int falcon_mdio_write(struct net_device *net_dev,
2107 goto out; 2093 goto out;
2108 2094
2109 /* Write the address/ID register */ 2095 /* Write the address/ID register */
2110 EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr); 2096 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
2111 falcon_write(efx, &reg, MD_PHY_ADR_REG_KER); 2097 efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
2112 2098
2113 EFX_POPULATE_OWORD_2(reg, MD_PRT_ADR, prtad, MD_DEV_ADR, devad); 2099 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
2114 falcon_write(efx, &reg, MD_ID_REG_KER); 2100 FRF_AB_MD_DEV_ADR, devad);
2101 efx_writeo(efx, &reg, FR_AB_MD_ID);
2115 2102
2116 /* Write data */ 2103 /* Write data */
2117 EFX_POPULATE_OWORD_1(reg, MD_TXD, value); 2104 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_TXD, value);
2118 falcon_write(efx, &reg, MD_TXD_REG_KER); 2105 efx_writeo(efx, &reg, FR_AB_MD_TXD);
2119 2106
2120 EFX_POPULATE_OWORD_2(reg, 2107 EFX_POPULATE_OWORD_2(reg,
2121 MD_WRC, 1, 2108 FRF_AB_MD_WRC, 1,
2122 MD_GC, 0); 2109 FRF_AB_MD_GC, 0);
2123 falcon_write(efx, &reg, MD_CS_REG_KER); 2110 efx_writeo(efx, &reg, FR_AB_MD_CS);
2124 2111
2125 /* Wait for data to be written */ 2112 /* Wait for data to be written */
2126 rc = falcon_gmii_wait(efx); 2113 rc = falcon_gmii_wait(efx);
2127 if (rc) { 2114 if (rc) {
2128 /* Abort the write operation */ 2115 /* Abort the write operation */
2129 EFX_POPULATE_OWORD_2(reg, 2116 EFX_POPULATE_OWORD_2(reg,
2130 MD_WRC, 0, 2117 FRF_AB_MD_WRC, 0,
2131 MD_GC, 1); 2118 FRF_AB_MD_GC, 1);
2132 falcon_write(efx, &reg, MD_CS_REG_KER); 2119 efx_writeo(efx, &reg, FR_AB_MD_CS);
2133 udelay(10); 2120 udelay(10);
2134 } 2121 }
2135 2122
@@ -2153,29 +2140,30 @@ static int falcon_mdio_read(struct net_device *net_dev,
2153 if (rc) 2140 if (rc)
2154 goto out; 2141 goto out;
2155 2142
2156 EFX_POPULATE_OWORD_1(reg, MD_PHY_ADR, addr); 2143 EFX_POPULATE_OWORD_1(reg, FRF_AB_MD_PHY_ADR, addr);
2157 falcon_write(efx, &reg, MD_PHY_ADR_REG_KER); 2144 efx_writeo(efx, &reg, FR_AB_MD_PHY_ADR);
2158 2145
2159 EFX_POPULATE_OWORD_2(reg, MD_PRT_ADR, prtad, MD_DEV_ADR, devad); 2146 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_PRT_ADR, prtad,
2160 falcon_write(efx, &reg, MD_ID_REG_KER); 2147 FRF_AB_MD_DEV_ADR, devad);
2148 efx_writeo(efx, &reg, FR_AB_MD_ID);
2161 2149
2162 /* Request data to be read */ 2150 /* Request data to be read */
2163 EFX_POPULATE_OWORD_2(reg, MD_RDC, 1, MD_GC, 0); 2151 EFX_POPULATE_OWORD_2(reg, FRF_AB_MD_RDC, 1, FRF_AB_MD_GC, 0);
2164 falcon_write(efx, &reg, MD_CS_REG_KER); 2152 efx_writeo(efx, &reg, FR_AB_MD_CS);
2165 2153
2166 /* Wait for data to become available */ 2154 /* Wait for data to become available */
2167 rc = falcon_gmii_wait(efx); 2155 rc = falcon_gmii_wait(efx);
2168 if (rc == 0) { 2156 if (rc == 0) {
2169 falcon_read(efx, &reg, MD_RXD_REG_KER); 2157 efx_reado(efx, &reg, FR_AB_MD_RXD);
2170 rc = EFX_OWORD_FIELD(reg, MD_RXD); 2158 rc = EFX_OWORD_FIELD(reg, FRF_AB_MD_RXD);
2171 EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n", 2159 EFX_REGDUMP(efx, "read from MDIO %d register %d.%d, got %04x\n",
2172 prtad, devad, addr, rc); 2160 prtad, devad, addr, rc);
2173 } else { 2161 } else {
2174 /* Abort the read operation */ 2162 /* Abort the read operation */
2175 EFX_POPULATE_OWORD_2(reg, 2163 EFX_POPULATE_OWORD_2(reg,
2176 MD_RIC, 0, 2164 FRF_AB_MD_RIC, 0,
2177 MD_GC, 1); 2165 FRF_AB_MD_GC, 1);
2178 falcon_write(efx, &reg, MD_CS_REG_KER); 2166 efx_writeo(efx, &reg, FR_AB_MD_CS);
2179 2167
2180 EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n", 2168 EFX_LOG(efx, "read from MDIO %d register %d.%d, got error %d\n",
2181 prtad, devad, addr, rc); 2169 prtad, devad, addr, rc);
@@ -2186,37 +2174,6 @@ static int falcon_mdio_read(struct net_device *net_dev,
2186 return rc; 2174 return rc;
2187} 2175}
2188 2176
2189static int falcon_probe_phy(struct efx_nic *efx)
2190{
2191 switch (efx->phy_type) {
2192 case PHY_TYPE_SFX7101:
2193 efx->phy_op = &falcon_sfx7101_phy_ops;
2194 break;
2195 case PHY_TYPE_SFT9001A:
2196 case PHY_TYPE_SFT9001B:
2197 efx->phy_op = &falcon_sft9001_phy_ops;
2198 break;
2199 case PHY_TYPE_QT2022C2:
2200 case PHY_TYPE_QT2025C:
2201 efx->phy_op = &falcon_xfp_phy_ops;
2202 break;
2203 default:
2204 EFX_ERR(efx, "Unknown PHY type %d\n",
2205 efx->phy_type);
2206 return -1;
2207 }
2208
2209 if (efx->phy_op->macs & EFX_XMAC)
2210 efx->loopback_modes |= ((1 << LOOPBACK_XGMII) |
2211 (1 << LOOPBACK_XGXS) |
2212 (1 << LOOPBACK_XAUI));
2213 if (efx->phy_op->macs & EFX_GMAC)
2214 efx->loopback_modes |= (1 << LOOPBACK_GMAC);
2215 efx->loopback_modes |= efx->phy_op->loopbacks;
2216
2217 return 0;
2218}
2219
2220int falcon_switch_mac(struct efx_nic *efx) 2177int falcon_switch_mac(struct efx_nic *efx)
2221{ 2178{
2222 struct efx_mac_operations *old_mac_op = efx->mac_op; 2179 struct efx_mac_operations *old_mac_op = efx->mac_op;
@@ -2242,16 +2199,17 @@ int falcon_switch_mac(struct efx_nic *efx)
2242 2199
2243 /* Always push the NIC_STAT_REG setting even if the mac hasn't 2200 /* Always push the NIC_STAT_REG setting even if the mac hasn't
2244 * changed, because this function is run post online reset */ 2201 * changed, because this function is run post online reset */
2245 falcon_read(efx, &nic_stat, NIC_STAT_REG); 2202 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2246 strap_val = EFX_IS10G(efx) ? 5 : 3; 2203 strap_val = EFX_IS10G(efx) ? 5 : 3;
2247 if (falcon_rev(efx) >= FALCON_REV_B0) { 2204 if (falcon_rev(efx) >= FALCON_REV_B0) {
2248 EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_EN, 1); 2205 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP_EN, 1);
2249 EFX_SET_OWORD_FIELD(nic_stat, EE_STRAP_OVR, strap_val); 2206 EFX_SET_OWORD_FIELD(nic_stat, FRF_BB_EE_STRAP, strap_val);
2250 falcon_write(efx, &nic_stat, NIC_STAT_REG); 2207 efx_writeo(efx, &nic_stat, FR_AB_NIC_STAT);
2251 } else { 2208 } else {
2252 /* Falcon A1 does not support 1G/10G speed switching 2209 /* Falcon A1 does not support 1G/10G speed switching
2253 * and must not be used with a PHY that does. */ 2210 * and must not be used with a PHY that does. */
2254 BUG_ON(EFX_OWORD_FIELD(nic_stat, STRAP_PINS) != strap_val); 2211 BUG_ON(EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_PINS) !=
2212 strap_val);
2255 } 2213 }
2256 2214
2257 if (old_mac_op == efx->mac_op) 2215 if (old_mac_op == efx->mac_op)
@@ -2272,10 +2230,31 @@ int falcon_probe_port(struct efx_nic *efx)
2272{ 2230{
2273 int rc; 2231 int rc;
2274 2232
2275 /* Hook in PHY operations table */ 2233 switch (efx->phy_type) {
2276 rc = falcon_probe_phy(efx); 2234 case PHY_TYPE_SFX7101:
2277 if (rc) 2235 efx->phy_op = &falcon_sfx7101_phy_ops;
2278 return rc; 2236 break;
2237 case PHY_TYPE_SFT9001A:
2238 case PHY_TYPE_SFT9001B:
2239 efx->phy_op = &falcon_sft9001_phy_ops;
2240 break;
2241 case PHY_TYPE_QT2022C2:
2242 case PHY_TYPE_QT2025C:
2243 efx->phy_op = &falcon_qt202x_phy_ops;
2244 break;
2245 default:
2246 EFX_ERR(efx, "Unknown PHY type %d\n",
2247 efx->phy_type);
2248 return -ENODEV;
2249 }
2250
2251 if (efx->phy_op->macs & EFX_XMAC)
2252 efx->loopback_modes |= ((1 << LOOPBACK_XGMII) |
2253 (1 << LOOPBACK_XGXS) |
2254 (1 << LOOPBACK_XAUI));
2255 if (efx->phy_op->macs & EFX_GMAC)
2256 efx->loopback_modes |= (1 << LOOPBACK_GMAC);
2257 efx->loopback_modes |= efx->phy_op->loopbacks;
2279 2258
2280 /* Set up MDIO structure for PHY */ 2259 /* Set up MDIO structure for PHY */
2281 efx->mdio.mmds = efx->phy_op->mmds; 2260 efx->mdio.mmds = efx->phy_op->mmds;
@@ -2324,8 +2303,8 @@ void falcon_set_multicast_hash(struct efx_nic *efx)
2324 */ 2303 */
2325 set_bit_le(0xff, mc_hash->byte); 2304 set_bit_le(0xff, mc_hash->byte);
2326 2305
2327 falcon_write(efx, &mc_hash->oword[0], MAC_MCAST_HASH_REG0_KER); 2306 efx_writeo(efx, &mc_hash->oword[0], FR_AB_MAC_MC_HASH_REG0);
2328 falcon_write(efx, &mc_hash->oword[1], MAC_MCAST_HASH_REG1_KER); 2307 efx_writeo(efx, &mc_hash->oword[1], FR_AB_MAC_MC_HASH_REG1);
2329} 2308}
2330 2309
2331 2310
@@ -2351,7 +2330,7 @@ int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
2351 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL); 2330 region = kmalloc(FALCON_NVCONFIG_END, GFP_KERNEL);
2352 if (!region) 2331 if (!region)
2353 return -ENOMEM; 2332 return -ENOMEM;
2354 nvconfig = region + NVCONFIG_OFFSET; 2333 nvconfig = region + FALCON_NVCONFIG_OFFSET;
2355 2334
2356 mutex_lock(&efx->spi_lock); 2335 mutex_lock(&efx->spi_lock);
2357 rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region); 2336 rc = falcon_spi_read(spi, 0, FALCON_NVCONFIG_END, NULL, region);
@@ -2367,7 +2346,7 @@ int falcon_read_nvram(struct efx_nic *efx, struct falcon_nvconfig *nvconfig_out)
2367 struct_ver = le16_to_cpu(nvconfig->board_struct_ver); 2346 struct_ver = le16_to_cpu(nvconfig->board_struct_ver);
2368 2347
2369 rc = -EINVAL; 2348 rc = -EINVAL;
2370 if (magic_num != NVCONFIG_BOARD_MAGIC_NUM) { 2349 if (magic_num != FALCON_NVCONFIG_BOARD_MAGIC_NUM) {
2371 EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num); 2350 EFX_ERR(efx, "NVRAM bad magic 0x%x\n", magic_num);
2372 goto out; 2351 goto out;
2373 } 2352 }
@@ -2403,41 +2382,41 @@ static struct {
2403 unsigned address; 2382 unsigned address;
2404 efx_oword_t mask; 2383 efx_oword_t mask;
2405} efx_test_registers[] = { 2384} efx_test_registers[] = {
2406 { ADR_REGION_REG_KER, 2385 { FR_AZ_ADR_REGION,
2407 EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) }, 2386 EFX_OWORD32(0x0001FFFF, 0x0001FFFF, 0x0001FFFF, 0x0001FFFF) },
2408 { RX_CFG_REG_KER, 2387 { FR_AZ_RX_CFG,
2409 EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) }, 2388 EFX_OWORD32(0xFFFFFFFE, 0x00017FFF, 0x00000000, 0x00000000) },
2410 { TX_CFG_REG_KER, 2389 { FR_AZ_TX_CFG,
2411 EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) }, 2390 EFX_OWORD32(0x7FFF0037, 0x00000000, 0x00000000, 0x00000000) },
2412 { TX_CFG2_REG_KER, 2391 { FR_AZ_TX_RESERVED,
2413 EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) }, 2392 EFX_OWORD32(0xFFFEFE80, 0x1FFFFFFF, 0x020000FE, 0x007FFFFF) },
2414 { MAC0_CTRL_REG_KER, 2393 { FR_AB_MAC_CTRL,
2415 EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) }, 2394 EFX_OWORD32(0xFFFF0000, 0x00000000, 0x00000000, 0x00000000) },
2416 { SRM_TX_DC_CFG_REG_KER, 2395 { FR_AZ_SRM_TX_DC_CFG,
2417 EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) }, 2396 EFX_OWORD32(0x001FFFFF, 0x00000000, 0x00000000, 0x00000000) },
2418 { RX_DC_CFG_REG_KER, 2397 { FR_AZ_RX_DC_CFG,
2419 EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) }, 2398 EFX_OWORD32(0x0000000F, 0x00000000, 0x00000000, 0x00000000) },
2420 { RX_DC_PF_WM_REG_KER, 2399 { FR_AZ_RX_DC_PF_WM,
2421 EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) }, 2400 EFX_OWORD32(0x000003FF, 0x00000000, 0x00000000, 0x00000000) },
2422 { DP_CTRL_REG, 2401 { FR_BZ_DP_CTRL,
2423 EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) }, 2402 EFX_OWORD32(0x00000FFF, 0x00000000, 0x00000000, 0x00000000) },
2424 { GM_CFG2_REG, 2403 { FR_AB_GM_CFG2,
2425 EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) }, 2404 EFX_OWORD32(0x00007337, 0x00000000, 0x00000000, 0x00000000) },
2426 { GMF_CFG0_REG, 2405 { FR_AB_GMF_CFG0,
2427 EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) }, 2406 EFX_OWORD32(0x00001F1F, 0x00000000, 0x00000000, 0x00000000) },
2428 { XM_GLB_CFG_REG, 2407 { FR_AB_XM_GLB_CFG,
2429 EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) }, 2408 EFX_OWORD32(0x00000C68, 0x00000000, 0x00000000, 0x00000000) },
2430 { XM_TX_CFG_REG, 2409 { FR_AB_XM_TX_CFG,
2431 EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) }, 2410 EFX_OWORD32(0x00080164, 0x00000000, 0x00000000, 0x00000000) },
2432 { XM_RX_CFG_REG, 2411 { FR_AB_XM_RX_CFG,
2433 EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) }, 2412 EFX_OWORD32(0x07100A0C, 0x00000000, 0x00000000, 0x00000000) },
2434 { XM_RX_PARAM_REG, 2413 { FR_AB_XM_RX_PARAM,
2435 EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) }, 2414 EFX_OWORD32(0x00001FF8, 0x00000000, 0x00000000, 0x00000000) },
2436 { XM_FC_REG, 2415 { FR_AB_XM_FC,
2437 EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) }, 2416 EFX_OWORD32(0xFFFF0001, 0x00000000, 0x00000000, 0x00000000) },
2438 { XM_ADR_LO_REG, 2417 { FR_AB_XM_ADR_LO,
2439 EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) }, 2418 EFX_OWORD32(0xFFFFFFFF, 0x00000000, 0x00000000, 0x00000000) },
2440 { XX_SD_CTL_REG, 2419 { FR_AB_XX_SD_CTL,
2441 EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) }, 2420 EFX_OWORD32(0x0003FF0F, 0x00000000, 0x00000000, 0x00000000) },
2442}; 2421};
2443 2422
@@ -2461,7 +2440,7 @@ int falcon_test_registers(struct efx_nic *efx)
2461 mask = imask = efx_test_registers[i].mask; 2440 mask = imask = efx_test_registers[i].mask;
2462 EFX_INVERT_OWORD(imask); 2441 EFX_INVERT_OWORD(imask);
2463 2442
2464 falcon_read(efx, &original, address); 2443 efx_reado(efx, &original, address);
2465 2444
2466 /* bit sweep on and off */ 2445 /* bit sweep on and off */
2467 for (j = 0; j < 128; j++) { 2446 for (j = 0; j < 128; j++) {
@@ -2472,8 +2451,8 @@ int falcon_test_registers(struct efx_nic *efx)
2472 EFX_AND_OWORD(reg, original, mask); 2451 EFX_AND_OWORD(reg, original, mask);
2473 EFX_SET_OWORD32(reg, j, j, 1); 2452 EFX_SET_OWORD32(reg, j, j, 1);
2474 2453
2475 falcon_write(efx, &reg, address); 2454 efx_writeo(efx, &reg, address);
2476 falcon_read(efx, &buf, address); 2455 efx_reado(efx, &buf, address);
2477 2456
2478 if (efx_masked_compare_oword(&reg, &buf, &mask)) 2457 if (efx_masked_compare_oword(&reg, &buf, &mask))
2479 goto fail; 2458 goto fail;
@@ -2482,14 +2461,14 @@ int falcon_test_registers(struct efx_nic *efx)
2482 EFX_OR_OWORD(reg, original, mask); 2461 EFX_OR_OWORD(reg, original, mask);
2483 EFX_SET_OWORD32(reg, j, j, 0); 2462 EFX_SET_OWORD32(reg, j, j, 0);
2484 2463
2485 falcon_write(efx, &reg, address); 2464 efx_writeo(efx, &reg, address);
2486 falcon_read(efx, &buf, address); 2465 efx_reado(efx, &buf, address);
2487 2466
2488 if (efx_masked_compare_oword(&reg, &buf, &mask)) 2467 if (efx_masked_compare_oword(&reg, &buf, &mask))
2489 goto fail; 2468 goto fail;
2490 } 2469 }
2491 2470
2492 falcon_write(efx, &original, address); 2471 efx_writeo(efx, &original, address);
2493 } 2472 }
2494 2473
2495 return 0; 2474 return 0;
@@ -2537,22 +2516,24 @@ int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
2537 } 2516 }
2538 2517
2539 EFX_POPULATE_OWORD_2(glb_ctl_reg_ker, 2518 EFX_POPULATE_OWORD_2(glb_ctl_reg_ker,
2540 EXT_PHY_RST_DUR, 0x7, 2519 FRF_AB_EXT_PHY_RST_DUR,
2541 SWRST, 1); 2520 FFE_AB_EXT_PHY_RST_DUR_10240US,
2521 FRF_AB_SWRST, 1);
2542 } else { 2522 } else {
2543 int reset_phy = (method == RESET_TYPE_INVISIBLE ?
2544 EXCLUDE_FROM_RESET : 0);
2545
2546 EFX_POPULATE_OWORD_7(glb_ctl_reg_ker, 2523 EFX_POPULATE_OWORD_7(glb_ctl_reg_ker,
2547 EXT_PHY_RST_CTL, reset_phy, 2524 /* exclude PHY from "invisible" reset */
2548 PCIE_CORE_RST_CTL, EXCLUDE_FROM_RESET, 2525 FRF_AB_EXT_PHY_RST_CTL,
2549 PCIE_NSTCK_RST_CTL, EXCLUDE_FROM_RESET, 2526 method == RESET_TYPE_INVISIBLE,
2550 PCIE_SD_RST_CTL, EXCLUDE_FROM_RESET, 2527 /* exclude EEPROM/flash and PCIe */
2551 EE_RST_CTL, EXCLUDE_FROM_RESET, 2528 FRF_AB_PCIE_CORE_RST_CTL, 1,
2552 EXT_PHY_RST_DUR, 0x7 /* 10ms */, 2529 FRF_AB_PCIE_NSTKY_RST_CTL, 1,
2553 SWRST, 1); 2530 FRF_AB_PCIE_SD_RST_CTL, 1,
2554 } 2531 FRF_AB_EE_RST_CTL, 1,
2555 falcon_write(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER); 2532 FRF_AB_EXT_PHY_RST_DUR,
2533 FFE_AB_EXT_PHY_RST_DUR_10240US,
2534 FRF_AB_SWRST, 1);
2535 }
2536 efx_writeo(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
2556 2537
2557 EFX_LOG(efx, "waiting for hardware reset\n"); 2538 EFX_LOG(efx, "waiting for hardware reset\n");
2558 schedule_timeout_uninterruptible(HZ / 20); 2539 schedule_timeout_uninterruptible(HZ / 20);
@@ -2577,8 +2558,8 @@ int falcon_reset_hw(struct efx_nic *efx, enum reset_type method)
2577 } 2558 }
2578 2559
2579 /* Assert that reset complete */ 2560 /* Assert that reset complete */
2580 falcon_read(efx, &glb_ctl_reg_ker, GLB_CTL_REG_KER); 2561 efx_reado(efx, &glb_ctl_reg_ker, FR_AB_GLB_CTL);
2581 if (EFX_OWORD_FIELD(glb_ctl_reg_ker, SWRST) != 0) { 2562 if (EFX_OWORD_FIELD(glb_ctl_reg_ker, FRF_AB_SWRST) != 0) {
2582 rc = -ETIMEDOUT; 2563 rc = -ETIMEDOUT;
2583 EFX_ERR(efx, "timed out waiting for hardware reset\n"); 2564 EFX_ERR(efx, "timed out waiting for hardware reset\n");
2584 goto fail5; 2565 goto fail5;
@@ -2606,16 +2587,16 @@ static int falcon_reset_sram(struct efx_nic *efx)
2606 int count; 2587 int count;
2607 2588
2608 /* Set the SRAM wake/sleep GPIO appropriately. */ 2589 /* Set the SRAM wake/sleep GPIO appropriately. */
2609 falcon_read(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER); 2590 efx_reado(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
2610 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OEN, 1); 2591 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OEN, 1);
2611 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, GPIO1_OUT, 1); 2592 EFX_SET_OWORD_FIELD(gpio_cfg_reg_ker, FRF_AB_GPIO1_OUT, 1);
2612 falcon_write(efx, &gpio_cfg_reg_ker, GPIO_CTL_REG_KER); 2593 efx_writeo(efx, &gpio_cfg_reg_ker, FR_AB_GPIO_CTL);
2613 2594
2614 /* Initiate SRAM reset */ 2595 /* Initiate SRAM reset */
2615 EFX_POPULATE_OWORD_2(srm_cfg_reg_ker, 2596 EFX_POPULATE_OWORD_2(srm_cfg_reg_ker,
2616 SRAM_OOB_BT_INIT_EN, 1, 2597 FRF_AZ_SRM_INIT_EN, 1,
2617 SRM_NUM_BANKS_AND_BANK_SIZE, 0); 2598 FRF_AZ_SRM_NB_SZ, 0);
2618 falcon_write(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER); 2599 efx_writeo(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
2619 2600
2620 /* Wait for SRAM reset to complete */ 2601 /* Wait for SRAM reset to complete */
2621 count = 0; 2602 count = 0;
@@ -2626,8 +2607,8 @@ static int falcon_reset_sram(struct efx_nic *efx)
2626 schedule_timeout_uninterruptible(HZ / 50); 2607 schedule_timeout_uninterruptible(HZ / 50);
2627 2608
2628 /* Check for reset complete */ 2609 /* Check for reset complete */
2629 falcon_read(efx, &srm_cfg_reg_ker, SRM_CFG_REG_KER); 2610 efx_reado(efx, &srm_cfg_reg_ker, FR_AZ_SRM_CFG);
2630 if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, SRAM_OOB_BT_INIT_EN)) { 2611 if (!EFX_OWORD_FIELD(srm_cfg_reg_ker, FRF_AZ_SRM_INIT_EN)) {
2631 EFX_LOG(efx, "SRAM reset complete\n"); 2612 EFX_LOG(efx, "SRAM reset complete\n");
2632 2613
2633 return 0; 2614 return 0;
@@ -2712,16 +2693,16 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
2712 board_rev = le16_to_cpu(v2->board_revision); 2693 board_rev = le16_to_cpu(v2->board_revision);
2713 2694
2714 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) { 2695 if (le16_to_cpu(nvconfig->board_struct_ver) >= 3) {
2715 __le32 fl = v3->spi_device_type[EE_SPI_FLASH]; 2696 rc = falcon_spi_device_init(
2716 __le32 ee = v3->spi_device_type[EE_SPI_EEPROM]; 2697 efx, &efx->spi_flash, FFE_AB_SPI_DEVICE_FLASH,
2717 rc = falcon_spi_device_init(efx, &efx->spi_flash, 2698 le32_to_cpu(v3->spi_device_type
2718 EE_SPI_FLASH, 2699 [FFE_AB_SPI_DEVICE_FLASH]));
2719 le32_to_cpu(fl));
2720 if (rc) 2700 if (rc)
2721 goto fail2; 2701 goto fail2;
2722 rc = falcon_spi_device_init(efx, &efx->spi_eeprom, 2702 rc = falcon_spi_device_init(
2723 EE_SPI_EEPROM, 2703 efx, &efx->spi_eeprom, FFE_AB_SPI_DEVICE_EEPROM,
2724 le32_to_cpu(ee)); 2704 le32_to_cpu(v3->spi_device_type
2705 [FFE_AB_SPI_DEVICE_EEPROM]));
2725 if (rc) 2706 if (rc)
2726 goto fail2; 2707 goto fail2;
2727 } 2708 }
@@ -2732,7 +2713,7 @@ static int falcon_probe_nvconfig(struct efx_nic *efx)
2732 2713
2733 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad); 2714 EFX_LOG(efx, "PHY is %d phy_id %d\n", efx->phy_type, efx->mdio.prtad);
2734 2715
2735 efx_set_board_info(efx, board_rev); 2716 falcon_probe_board(efx, board_rev);
2736 2717
2737 kfree(nvconfig); 2718 kfree(nvconfig);
2738 return 0; 2719 return 0;
@@ -2752,13 +2733,13 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
2752 efx_oword_t altera_build; 2733 efx_oword_t altera_build;
2753 efx_oword_t nic_stat; 2734 efx_oword_t nic_stat;
2754 2735
2755 falcon_read(efx, &altera_build, ALTERA_BUILD_REG_KER); 2736 efx_reado(efx, &altera_build, FR_AZ_ALTERA_BUILD);
2756 if (EFX_OWORD_FIELD(altera_build, VER_ALL)) { 2737 if (EFX_OWORD_FIELD(altera_build, FRF_AZ_ALTERA_BUILD_VER)) {
2757 EFX_ERR(efx, "Falcon FPGA not supported\n"); 2738 EFX_ERR(efx, "Falcon FPGA not supported\n");
2758 return -ENODEV; 2739 return -ENODEV;
2759 } 2740 }
2760 2741
2761 falcon_read(efx, &nic_stat, NIC_STAT_REG); 2742 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2762 2743
2763 switch (falcon_rev(efx)) { 2744 switch (falcon_rev(efx)) {
2764 case FALCON_REV_A0: 2745 case FALCON_REV_A0:
@@ -2767,7 +2748,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
2767 return -ENODEV; 2748 return -ENODEV;
2768 2749
2769 case FALCON_REV_A1: 2750 case FALCON_REV_A1:
2770 if (EFX_OWORD_FIELD(nic_stat, STRAP_PCIE) == 0) { 2751 if (EFX_OWORD_FIELD(nic_stat, FRF_AA_STRAP_PCIE) == 0) {
2771 EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n"); 2752 EFX_ERR(efx, "Falcon rev A1 PCI-X not supported\n");
2772 return -ENODEV; 2753 return -ENODEV;
2773 } 2754 }
@@ -2782,7 +2763,7 @@ static int falcon_probe_nic_variant(struct efx_nic *efx)
2782 } 2763 }
2783 2764
2784 /* Initial assumed speed */ 2765 /* Initial assumed speed */
2785 efx->link_speed = EFX_OWORD_FIELD(nic_stat, STRAP_10G) ? 10000 : 1000; 2766 efx->link_speed = EFX_OWORD_FIELD(nic_stat, FRF_AB_STRAP_10G) ? 10000 : 1000;
2786 2767
2787 return 0; 2768 return 0;
2788} 2769}
@@ -2793,34 +2774,36 @@ static void falcon_probe_spi_devices(struct efx_nic *efx)
2793 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg; 2774 efx_oword_t nic_stat, gpio_ctl, ee_vpd_cfg;
2794 int boot_dev; 2775 int boot_dev;
2795 2776
2796 falcon_read(efx, &gpio_ctl, GPIO_CTL_REG_KER); 2777 efx_reado(efx, &gpio_ctl, FR_AB_GPIO_CTL);
2797 falcon_read(efx, &nic_stat, NIC_STAT_REG); 2778 efx_reado(efx, &nic_stat, FR_AB_NIC_STAT);
2798 falcon_read(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER); 2779 efx_reado(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
2799 2780
2800 if (EFX_OWORD_FIELD(gpio_ctl, BOOTED_USING_NVDEVICE)) { 2781 if (EFX_OWORD_FIELD(gpio_ctl, FRF_AB_GPIO3_PWRUP_VALUE)) {
2801 boot_dev = (EFX_OWORD_FIELD(nic_stat, SF_PRST) ? 2782 boot_dev = (EFX_OWORD_FIELD(nic_stat, FRF_AB_SF_PRST) ?
2802 EE_SPI_FLASH : EE_SPI_EEPROM); 2783 FFE_AB_SPI_DEVICE_FLASH : FFE_AB_SPI_DEVICE_EEPROM);
2803 EFX_LOG(efx, "Booted from %s\n", 2784 EFX_LOG(efx, "Booted from %s\n",
2804 boot_dev == EE_SPI_FLASH ? "flash" : "EEPROM"); 2785 boot_dev == FFE_AB_SPI_DEVICE_FLASH ? "flash" : "EEPROM");
2805 } else { 2786 } else {
2806 /* Disable VPD and set clock dividers to safe 2787 /* Disable VPD and set clock dividers to safe
2807 * values for initial programming. */ 2788 * values for initial programming. */
2808 boot_dev = -1; 2789 boot_dev = -1;
2809 EFX_LOG(efx, "Booted from internal ASIC settings;" 2790 EFX_LOG(efx, "Booted from internal ASIC settings;"
2810 " setting SPI config\n"); 2791 " setting SPI config\n");
2811 EFX_POPULATE_OWORD_3(ee_vpd_cfg, EE_VPD_EN, 0, 2792 EFX_POPULATE_OWORD_3(ee_vpd_cfg, FRF_AB_EE_VPD_EN, 0,
2812 /* 125 MHz / 7 ~= 20 MHz */ 2793 /* 125 MHz / 7 ~= 20 MHz */
2813 EE_SF_CLOCK_DIV, 7, 2794 FRF_AB_EE_SF_CLOCK_DIV, 7,
2814 /* 125 MHz / 63 ~= 2 MHz */ 2795 /* 125 MHz / 63 ~= 2 MHz */
2815 EE_EE_CLOCK_DIV, 63); 2796 FRF_AB_EE_EE_CLOCK_DIV, 63);
2816 falcon_write(efx, &ee_vpd_cfg, EE_VPD_CFG_REG_KER); 2797 efx_writeo(efx, &ee_vpd_cfg, FR_AB_EE_VPD_CFG0);
2817 } 2798 }
2818 2799
2819 if (boot_dev == EE_SPI_FLASH) 2800 if (boot_dev == FFE_AB_SPI_DEVICE_FLASH)
2820 falcon_spi_device_init(efx, &efx->spi_flash, EE_SPI_FLASH, 2801 falcon_spi_device_init(efx, &efx->spi_flash,
2802 FFE_AB_SPI_DEVICE_FLASH,
2821 default_flash_type); 2803 default_flash_type);
2822 if (boot_dev == EE_SPI_EEPROM) 2804 if (boot_dev == FFE_AB_SPI_DEVICE_EEPROM)
2823 falcon_spi_device_init(efx, &efx->spi_eeprom, EE_SPI_EEPROM, 2805 falcon_spi_device_init(efx, &efx->spi_eeprom,
2806 FFE_AB_SPI_DEVICE_EEPROM,
2824 large_eeprom_type); 2807 large_eeprom_type);
2825} 2808}
2826 2809
@@ -2911,6 +2894,52 @@ int falcon_probe_nic(struct efx_nic *efx)
2911 return rc; 2894 return rc;
2912} 2895}
2913 2896
2897static void falcon_init_rx_cfg(struct efx_nic *efx)
2898{
2899 /* Prior to Siena the RX DMA engine will split each frame at
2900 * intervals of RX_USR_BUF_SIZE (32-byte units). We set it to
2901 * be so large that that never happens. */
2902 const unsigned huge_buf_size = (3 * 4096) >> 5;
2903 /* RX control FIFO thresholds (32 entries) */
2904 const unsigned ctrl_xon_thr = 20;
2905 const unsigned ctrl_xoff_thr = 25;
2906 /* RX data FIFO thresholds (256-byte units; size varies) */
2907 int data_xon_thr = rx_xon_thresh_bytes >> 8;
2908 int data_xoff_thr = rx_xoff_thresh_bytes >> 8;
2909 efx_oword_t reg;
2910
2911 efx_reado(efx, &reg, FR_AZ_RX_CFG);
2912 if (falcon_rev(efx) <= FALCON_REV_A1) {
2913 /* Data FIFO size is 5.5K */
2914 if (data_xon_thr < 0)
2915 data_xon_thr = 512 >> 8;
2916 if (data_xoff_thr < 0)
2917 data_xoff_thr = 2048 >> 8;
2918 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_DESC_PUSH_EN, 0);
2919 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_USR_BUF_SIZE,
2920 huge_buf_size);
2921 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_MAC_TH, data_xon_thr);
2922 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_MAC_TH, data_xoff_thr);
2923 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XON_TX_TH, ctrl_xon_thr);
2924 EFX_SET_OWORD_FIELD(reg, FRF_AA_RX_XOFF_TX_TH, ctrl_xoff_thr);
2925 } else {
2926 /* Data FIFO size is 80K; register fields moved */
2927 if (data_xon_thr < 0)
2928 data_xon_thr = 27648 >> 8; /* ~3*max MTU */
2929 if (data_xoff_thr < 0)
2930 data_xoff_thr = 54272 >> 8; /* ~80Kb - 3*max MTU */
2931 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_DESC_PUSH_EN, 0);
2932 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_USR_BUF_SIZE,
2933 huge_buf_size);
2934 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_MAC_TH, data_xon_thr);
2935 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_MAC_TH, data_xoff_thr);
2936 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XON_TX_TH, ctrl_xon_thr);
2937 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_XOFF_TX_TH, ctrl_xoff_thr);
2938 EFX_SET_OWORD_FIELD(reg, FRF_BZ_RX_INGR_EN, 1);
2939 }
2940 efx_writeo(efx, &reg, FR_AZ_RX_CFG);
2941}
2942
2914/* This call performs hardware-specific global initialisation, such as 2943/* This call performs hardware-specific global initialisation, such as
2915 * defining the descriptor cache sizes and number of RSS channels. 2944 * defining the descriptor cache sizes and number of RSS channels.
2916 * It does not set up any buffers, descriptor rings or event queues. 2945 * It does not set up any buffers, descriptor rings or event queues.
@@ -2918,56 +2947,51 @@ int falcon_probe_nic(struct efx_nic *efx)
2918int falcon_init_nic(struct efx_nic *efx) 2947int falcon_init_nic(struct efx_nic *efx)
2919{ 2948{
2920 efx_oword_t temp; 2949 efx_oword_t temp;
2921 unsigned thresh;
2922 int rc; 2950 int rc;
2923 2951
2924 /* Use on-chip SRAM */ 2952 /* Use on-chip SRAM */
2925 falcon_read(efx, &temp, NIC_STAT_REG); 2953 efx_reado(efx, &temp, FR_AB_NIC_STAT);
2926 EFX_SET_OWORD_FIELD(temp, ONCHIP_SRAM, 1); 2954 EFX_SET_OWORD_FIELD(temp, FRF_AB_ONCHIP_SRAM, 1);
2927 falcon_write(efx, &temp, NIC_STAT_REG); 2955 efx_writeo(efx, &temp, FR_AB_NIC_STAT);
2928 2956
2929 /* Set the source of the GMAC clock */ 2957 /* Set the source of the GMAC clock */
2930 if (falcon_rev(efx) == FALCON_REV_B0) { 2958 if (falcon_rev(efx) == FALCON_REV_B0) {
2931 falcon_read(efx, &temp, GPIO_CTL_REG_KER); 2959 efx_reado(efx, &temp, FR_AB_GPIO_CTL);
2932 EFX_SET_OWORD_FIELD(temp, GPIO_USE_NIC_CLK, true); 2960 EFX_SET_OWORD_FIELD(temp, FRF_AB_USE_NIC_CLK, true);
2933 falcon_write(efx, &temp, GPIO_CTL_REG_KER); 2961 efx_writeo(efx, &temp, FR_AB_GPIO_CTL);
2934 } 2962 }
2935 2963
2936 /* Set buffer table mode */
2937 EFX_POPULATE_OWORD_1(temp, BUF_TBL_MODE, BUF_TBL_MODE_FULL);
2938 falcon_write(efx, &temp, BUF_TBL_CFG_REG_KER);
2939
2940 rc = falcon_reset_sram(efx); 2964 rc = falcon_reset_sram(efx);
2941 if (rc) 2965 if (rc)
2942 return rc; 2966 return rc;
2943 2967
2944 /* Set positions of descriptor caches in SRAM. */ 2968 /* Set positions of descriptor caches in SRAM. */
2945 EFX_POPULATE_OWORD_1(temp, SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8); 2969 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_TX_DC_BASE_ADR, TX_DC_BASE / 8);
2946 falcon_write(efx, &temp, SRM_TX_DC_CFG_REG_KER); 2970 efx_writeo(efx, &temp, FR_AZ_SRM_TX_DC_CFG);
2947 EFX_POPULATE_OWORD_1(temp, SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8); 2971 EFX_POPULATE_OWORD_1(temp, FRF_AZ_SRM_RX_DC_BASE_ADR, RX_DC_BASE / 8);
2948 falcon_write(efx, &temp, SRM_RX_DC_CFG_REG_KER); 2972 efx_writeo(efx, &temp, FR_AZ_SRM_RX_DC_CFG);
2949 2973
2950 /* Set TX descriptor cache size. */ 2974 /* Set TX descriptor cache size. */
2951 BUILD_BUG_ON(TX_DC_ENTRIES != (16 << TX_DC_ENTRIES_ORDER)); 2975 BUILD_BUG_ON(TX_DC_ENTRIES != (16 << TX_DC_ENTRIES_ORDER));
2952 EFX_POPULATE_OWORD_1(temp, TX_DC_SIZE, TX_DC_ENTRIES_ORDER); 2976 EFX_POPULATE_OWORD_1(temp, FRF_AZ_TX_DC_SIZE, TX_DC_ENTRIES_ORDER);
2953 falcon_write(efx, &temp, TX_DC_CFG_REG_KER); 2977 efx_writeo(efx, &temp, FR_AZ_TX_DC_CFG);
2954 2978
2955 /* Set RX descriptor cache size. Set low watermark to size-8, as 2979 /* Set RX descriptor cache size. Set low watermark to size-8, as
2956 * this allows most efficient prefetching. 2980 * this allows most efficient prefetching.
2957 */ 2981 */
2958 BUILD_BUG_ON(RX_DC_ENTRIES != (16 << RX_DC_ENTRIES_ORDER)); 2982 BUILD_BUG_ON(RX_DC_ENTRIES != (16 << RX_DC_ENTRIES_ORDER));
2959 EFX_POPULATE_OWORD_1(temp, RX_DC_SIZE, RX_DC_ENTRIES_ORDER); 2983 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_SIZE, RX_DC_ENTRIES_ORDER);
2960 falcon_write(efx, &temp, RX_DC_CFG_REG_KER); 2984 efx_writeo(efx, &temp, FR_AZ_RX_DC_CFG);
2961 EFX_POPULATE_OWORD_1(temp, RX_DC_PF_LWM, RX_DC_ENTRIES - 8); 2985 EFX_POPULATE_OWORD_1(temp, FRF_AZ_RX_DC_PF_LWM, RX_DC_ENTRIES - 8);
2962 falcon_write(efx, &temp, RX_DC_PF_WM_REG_KER); 2986 efx_writeo(efx, &temp, FR_AZ_RX_DC_PF_WM);
2963 2987
2964 /* Clear the parity enables on the TX data fifos as 2988 /* Clear the parity enables on the TX data fifos as
2965 * they produce false parity errors because of timing issues 2989 * they produce false parity errors because of timing issues
2966 */ 2990 */
2967 if (EFX_WORKAROUND_5129(efx)) { 2991 if (EFX_WORKAROUND_5129(efx)) {
2968 falcon_read(efx, &temp, SPARE_REG_KER); 2992 efx_reado(efx, &temp, FR_AZ_CSR_SPARE);
2969 EFX_SET_OWORD_FIELD(temp, MEM_PERR_EN_TX_DATA, 0); 2993 EFX_SET_OWORD_FIELD(temp, FRF_AB_MEM_PERR_EN_TX_DATA, 0);
2970 falcon_write(efx, &temp, SPARE_REG_KER); 2994 efx_writeo(efx, &temp, FR_AZ_CSR_SPARE);
2971 } 2995 }
2972 2996
2973 /* Enable all the genuinely fatal interrupts. (They are still 2997 /* Enable all the genuinely fatal interrupts. (They are still
@@ -2977,83 +3001,65 @@ int falcon_init_nic(struct efx_nic *efx)
2977 * Note: All other fatal interrupts are enabled 3001 * Note: All other fatal interrupts are enabled
2978 */ 3002 */
2979 EFX_POPULATE_OWORD_3(temp, 3003 EFX_POPULATE_OWORD_3(temp,
2980 ILL_ADR_INT_KER_EN, 1, 3004 FRF_AZ_ILL_ADR_INT_KER_EN, 1,
2981 RBUF_OWN_INT_KER_EN, 1, 3005 FRF_AZ_RBUF_OWN_INT_KER_EN, 1,
2982 TBUF_OWN_INT_KER_EN, 1); 3006 FRF_AZ_TBUF_OWN_INT_KER_EN, 1);
2983 EFX_INVERT_OWORD(temp); 3007 EFX_INVERT_OWORD(temp);
2984 falcon_write(efx, &temp, FATAL_INTR_REG_KER); 3008 efx_writeo(efx, &temp, FR_AZ_FATAL_INTR_KER);
2985 3009
2986 if (EFX_WORKAROUND_7244(efx)) { 3010 if (EFX_WORKAROUND_7244(efx)) {
2987 falcon_read(efx, &temp, RX_FILTER_CTL_REG); 3011 efx_reado(efx, &temp, FR_BZ_RX_FILTER_CTL);
2988 EFX_SET_OWORD_FIELD(temp, UDP_FULL_SRCH_LIMIT, 8); 3012 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_FULL_SRCH_LIMIT, 8);
2989 EFX_SET_OWORD_FIELD(temp, UDP_WILD_SRCH_LIMIT, 8); 3013 EFX_SET_OWORD_FIELD(temp, FRF_BZ_UDP_WILD_SRCH_LIMIT, 8);
2990 EFX_SET_OWORD_FIELD(temp, TCP_FULL_SRCH_LIMIT, 8); 3014 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_FULL_SRCH_LIMIT, 8);
2991 EFX_SET_OWORD_FIELD(temp, TCP_WILD_SRCH_LIMIT, 8); 3015 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TCP_WILD_SRCH_LIMIT, 8);
2992 falcon_write(efx, &temp, RX_FILTER_CTL_REG); 3016 efx_writeo(efx, &temp, FR_BZ_RX_FILTER_CTL);
2993 } 3017 }
2994 3018
2995 falcon_setup_rss_indir_table(efx); 3019 falcon_setup_rss_indir_table(efx);
2996 3020
3021 /* XXX This is documented only for Falcon A0/A1 */
2997 /* Setup RX. Wait for descriptor is broken and must 3022 /* Setup RX. Wait for descriptor is broken and must
2998 * be disabled. RXDP recovery shouldn't be needed, but is. 3023 * be disabled. RXDP recovery shouldn't be needed, but is.
2999 */ 3024 */
3000 falcon_read(efx, &temp, RX_SELF_RST_REG_KER); 3025 efx_reado(efx, &temp, FR_AA_RX_SELF_RST);
3001 EFX_SET_OWORD_FIELD(temp, RX_NODESC_WAIT_DIS, 1); 3026 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_NODESC_WAIT_DIS, 1);
3002 EFX_SET_OWORD_FIELD(temp, RX_RECOVERY_EN, 1); 3027 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_SELF_RST_EN, 1);
3003 if (EFX_WORKAROUND_5583(efx)) 3028 if (EFX_WORKAROUND_5583(efx))
3004 EFX_SET_OWORD_FIELD(temp, RX_ISCSI_DIS, 1); 3029 EFX_SET_OWORD_FIELD(temp, FRF_AA_RX_ISCSI_DIS, 1);
3005 falcon_write(efx, &temp, RX_SELF_RST_REG_KER); 3030 efx_writeo(efx, &temp, FR_AA_RX_SELF_RST);
3006 3031
3007 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be 3032 /* Disable the ugly timer-based TX DMA backoff and allow TX DMA to be
3008 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q. 3033 * controlled by the RX FIFO fill level. Set arbitration to one pkt/Q.
3009 */ 3034 */
3010 falcon_read(efx, &temp, TX_CFG2_REG_KER); 3035 efx_reado(efx, &temp, FR_AZ_TX_RESERVED);
3011 EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER, 0xfe); 3036 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER, 0xfe);
3012 EFX_SET_OWORD_FIELD(temp, TX_RX_SPACER_EN, 1); 3037 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_RX_SPACER_EN, 1);
3013 EFX_SET_OWORD_FIELD(temp, TX_ONE_PKT_PER_Q, 1); 3038 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_ONE_PKT_PER_Q, 1);
3014 EFX_SET_OWORD_FIELD(temp, TX_CSR_PUSH_EN, 0); 3039 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PUSH_EN, 0);
3015 EFX_SET_OWORD_FIELD(temp, TX_DIS_NON_IP_EV, 1); 3040 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_DIS_NON_IP_EV, 1);
3016 /* Enable SW_EV to inherit in char driver - assume harmless here */ 3041 /* Enable SW_EV to inherit in char driver - assume harmless here */
3017 EFX_SET_OWORD_FIELD(temp, TX_SW_EV_EN, 1); 3042 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_SOFT_EVT_EN, 1);
3018 /* Prefetch threshold 2 => fetch when descriptor cache half empty */ 3043 /* Prefetch threshold 2 => fetch when descriptor cache half empty */
3019 EFX_SET_OWORD_FIELD(temp, TX_PREF_THRESHOLD, 2); 3044 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_PREF_THRESHOLD, 2);
3020 /* Squash TX of packets of 16 bytes or less */ 3045 /* Squash TX of packets of 16 bytes or less */
3021 if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx)) 3046 if (falcon_rev(efx) >= FALCON_REV_B0 && EFX_WORKAROUND_9141(efx))
3022 EFX_SET_OWORD_FIELD(temp, TX_FLUSH_MIN_LEN_EN_B0, 1); 3047 EFX_SET_OWORD_FIELD(temp, FRF_BZ_TX_FLUSH_MIN_LEN_EN, 1);
3023 falcon_write(efx, &temp, TX_CFG2_REG_KER); 3048 efx_writeo(efx, &temp, FR_AZ_TX_RESERVED);
3024 3049
3025 /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16 3050 /* Do not enable TX_NO_EOP_DISC_EN, since it limits packets to 16
3026 * descriptors (which is bad). 3051 * descriptors (which is bad).
3027 */ 3052 */
3028 falcon_read(efx, &temp, TX_CFG_REG_KER); 3053 efx_reado(efx, &temp, FR_AZ_TX_CFG);
3029 EFX_SET_OWORD_FIELD(temp, TX_NO_EOP_DISC_EN, 0); 3054 EFX_SET_OWORD_FIELD(temp, FRF_AZ_TX_NO_EOP_DISC_EN, 0);
3030 falcon_write(efx, &temp, TX_CFG_REG_KER); 3055 efx_writeo(efx, &temp, FR_AZ_TX_CFG);
3031 3056
3032 /* RX config */ 3057 falcon_init_rx_cfg(efx);
3033 falcon_read(efx, &temp, RX_CFG_REG_KER);
3034 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_DESC_PUSH_EN, 0);
3035 if (EFX_WORKAROUND_7575(efx))
3036 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_USR_BUF_SIZE,
3037 (3 * 4096) / 32);
3038 if (falcon_rev(efx) >= FALCON_REV_B0)
3039 EFX_SET_OWORD_FIELD(temp, RX_INGR_EN_B0, 1);
3040
3041 /* RX FIFO flow control thresholds */
3042 thresh = ((rx_xon_thresh_bytes >= 0) ?
3043 rx_xon_thresh_bytes : efx->type->rx_xon_thresh);
3044 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_MAC_TH, thresh / 256);
3045 thresh = ((rx_xoff_thresh_bytes >= 0) ?
3046 rx_xoff_thresh_bytes : efx->type->rx_xoff_thresh);
3047 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_MAC_TH, thresh / 256);
3048 /* RX control FIFO thresholds [32 entries] */
3049 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XON_TX_TH, 20);
3050 EFX_SET_OWORD_FIELD_VER(efx, temp, RX_XOFF_TX_TH, 25);
3051 falcon_write(efx, &temp, RX_CFG_REG_KER);
3052 3058
3053 /* Set destination of both TX and RX Flush events */ 3059 /* Set destination of both TX and RX Flush events */
3054 if (falcon_rev(efx) >= FALCON_REV_B0) { 3060 if (falcon_rev(efx) >= FALCON_REV_B0) {
3055 EFX_POPULATE_OWORD_1(temp, FLS_EVQ_ID, 0); 3061 EFX_POPULATE_OWORD_1(temp, FRF_BZ_FLS_EVQ_ID, 0);
3056 falcon_write(efx, &temp, DP_CTRL_REG); 3062 efx_writeo(efx, &temp, FR_BZ_DP_CTRL);
3057 } 3063 }
3058 3064
3059 return 0; 3065 return 0;
@@ -3089,8 +3095,9 @@ void falcon_update_nic_stats(struct efx_nic *efx)
3089{ 3095{
3090 efx_oword_t cnt; 3096 efx_oword_t cnt;
3091 3097
3092 falcon_read(efx, &cnt, RX_NODESC_DROP_REG_KER); 3098 efx_reado(efx, &cnt, FR_AZ_RX_NODESC_DROP);
3093 efx->n_rx_nodesc_drop_cnt += EFX_OWORD_FIELD(cnt, RX_NODESC_DROP_CNT); 3099 efx->n_rx_nodesc_drop_cnt +=
3100 EFX_OWORD_FIELD(cnt, FRF_AB_RX_NODESC_DROP_CNT);
3094} 3101}
3095 3102
3096/************************************************************************** 3103/**************************************************************************
@@ -3101,45 +3108,31 @@ void falcon_update_nic_stats(struct efx_nic *efx)
3101 */ 3108 */
3102 3109
3103struct efx_nic_type falcon_a_nic_type = { 3110struct efx_nic_type falcon_a_nic_type = {
3104 .mem_bar = 2,
3105 .mem_map_size = 0x20000, 3111 .mem_map_size = 0x20000,
3106 .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_A1, 3112 .txd_ptr_tbl_base = FR_AA_TX_DESC_PTR_TBL_KER,
3107 .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_A1, 3113 .rxd_ptr_tbl_base = FR_AA_RX_DESC_PTR_TBL_KER,
3108 .buf_tbl_base = BUF_TBL_KER_A1, 3114 .buf_tbl_base = FR_AA_BUF_FULL_TBL_KER,
3109 .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_A1, 3115 .evq_ptr_tbl_base = FR_AA_EVQ_PTR_TBL_KER,
3110 .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_A1, 3116 .evq_rptr_tbl_base = FR_AA_EVQ_RPTR_KER,
3111 .txd_ring_mask = FALCON_TXD_RING_MASK, 3117 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
3112 .rxd_ring_mask = FALCON_RXD_RING_MASK,
3113 .evq_size = FALCON_EVQ_SIZE,
3114 .max_dma_mask = FALCON_DMA_MASK,
3115 .tx_dma_mask = FALCON_TX_DMA_MASK,
3116 .bug5391_mask = 0xf,
3117 .rx_xoff_thresh = 2048,
3118 .rx_xon_thresh = 512,
3119 .rx_buffer_padding = 0x24, 3118 .rx_buffer_padding = 0x24,
3120 .max_interrupt_mode = EFX_INT_MODE_MSI, 3119 .max_interrupt_mode = EFX_INT_MODE_MSI,
3121 .phys_addr_channels = 4, 3120 .phys_addr_channels = 4,
3122}; 3121};
3123 3122
3124struct efx_nic_type falcon_b_nic_type = { 3123struct efx_nic_type falcon_b_nic_type = {
3125 .mem_bar = 2,
3126 /* Map everything up to and including the RSS indirection 3124 /* Map everything up to and including the RSS indirection
3127 * table. Don't map MSI-X table, MSI-X PBA since Linux 3125 * table. Don't map MSI-X table, MSI-X PBA since Linux
3128 * requires that they not be mapped. */ 3126 * requires that they not be mapped. */
3129 .mem_map_size = RX_RSS_INDIR_TBL_B0 + 0x800, 3127 .mem_map_size = (FR_BZ_RX_INDIRECTION_TBL +
3130 .txd_ptr_tbl_base = TX_DESC_PTR_TBL_KER_B0, 3128 FR_BZ_RX_INDIRECTION_TBL_STEP *
3131 .rxd_ptr_tbl_base = RX_DESC_PTR_TBL_KER_B0, 3129 FR_BZ_RX_INDIRECTION_TBL_ROWS),
3132 .buf_tbl_base = BUF_TBL_KER_B0, 3130 .txd_ptr_tbl_base = FR_BZ_TX_DESC_PTR_TBL,
3133 .evq_ptr_tbl_base = EVQ_PTR_TBL_KER_B0, 3131 .rxd_ptr_tbl_base = FR_BZ_RX_DESC_PTR_TBL,
3134 .evq_rptr_tbl_base = EVQ_RPTR_REG_KER_B0, 3132 .buf_tbl_base = FR_BZ_BUF_FULL_TBL,
3135 .txd_ring_mask = FALCON_TXD_RING_MASK, 3133 .evq_ptr_tbl_base = FR_BZ_EVQ_PTR_TBL,
3136 .rxd_ring_mask = FALCON_RXD_RING_MASK, 3134 .evq_rptr_tbl_base = FR_BZ_EVQ_RPTR,
3137 .evq_size = FALCON_EVQ_SIZE, 3135 .max_dma_mask = DMA_BIT_MASK(FSF_AZ_TX_KER_BUF_ADDR_WIDTH),
3138 .max_dma_mask = FALCON_DMA_MASK,
3139 .tx_dma_mask = FALCON_TX_DMA_MASK,
3140 .bug5391_mask = 0,
3141 .rx_xoff_thresh = 54272, /* ~80Kb - 3*max MTU */
3142 .rx_xon_thresh = 27648, /* ~3*max MTU */
3143 .rx_buffer_padding = 0, 3136 .rx_buffer_padding = 0,
3144 .max_interrupt_mode = EFX_INT_MODE_MSIX, 3137 .max_interrupt_mode = EFX_INT_MODE_MSIX,
3145 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy 3138 .phys_addr_channels = 32, /* Hardware limit is 64, but the legacy
diff --git a/drivers/net/sfc/falcon.h b/drivers/net/sfc/falcon.h
index 77f2e0db7ca1..4dd965774a90 100644
--- a/drivers/net/sfc/falcon.h
+++ b/drivers/net/sfc/falcon.h
@@ -39,6 +39,8 @@ extern struct efx_nic_type falcon_b_nic_type;
39 ************************************************************************** 39 **************************************************************************
40 */ 40 */
41 41
42extern void falcon_probe_board(struct efx_nic *efx, u16 revision_info);
43
42/* TX data path */ 44/* TX data path */
43extern int falcon_probe_tx(struct efx_tx_queue *tx_queue); 45extern int falcon_probe_tx(struct efx_tx_queue *tx_queue);
44extern void falcon_init_tx(struct efx_tx_queue *tx_queue); 46extern void falcon_init_tx(struct efx_tx_queue *tx_queue);
@@ -89,11 +91,9 @@ extern void falcon_fini_interrupt(struct efx_nic *efx);
89 91
90/* Global Resources */ 92/* Global Resources */
91extern int falcon_probe_nic(struct efx_nic *efx); 93extern int falcon_probe_nic(struct efx_nic *efx);
92extern int falcon_probe_resources(struct efx_nic *efx);
93extern int falcon_init_nic(struct efx_nic *efx); 94extern int falcon_init_nic(struct efx_nic *efx);
94extern int falcon_flush_queues(struct efx_nic *efx); 95extern int falcon_flush_queues(struct efx_nic *efx);
95extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method); 96extern int falcon_reset_hw(struct efx_nic *efx, enum reset_type method);
96extern void falcon_remove_resources(struct efx_nic *efx);
97extern void falcon_remove_nic(struct efx_nic *efx); 97extern void falcon_remove_nic(struct efx_nic *efx);
98extern void falcon_update_nic_stats(struct efx_nic *efx); 98extern void falcon_update_nic_stats(struct efx_nic *efx);
99extern void falcon_set_multicast_hash(struct efx_nic *efx); 99extern void falcon_set_multicast_hash(struct efx_nic *efx);
diff --git a/drivers/net/sfc/sfe4001.c b/drivers/net/sfc/falcon_boards.c
index cee00ad49b57..99f737223b10 100644
--- a/drivers/net/sfc/sfe4001.c
+++ b/drivers/net/sfc/falcon_boards.c
@@ -7,6 +7,159 @@
7 * by the Free Software Foundation, incorporated herein by reference. 7 * by the Free Software Foundation, incorporated herein by reference.
8 */ 8 */
9 9
10#include <linux/rtnetlink.h>
11
12#include "net_driver.h"
13#include "phy.h"
14#include "efx.h"
15#include "falcon.h"
16#include "regs.h"
17#include "io.h"
18#include "workarounds.h"
19
20/* Macros for unpacking the board revision */
21/* The revision info is in host byte order. */
22#define FALCON_BOARD_TYPE(_rev) (_rev >> 8)
23#define FALCON_BOARD_MAJOR(_rev) ((_rev >> 4) & 0xf)
24#define FALCON_BOARD_MINOR(_rev) (_rev & 0xf)
25
26/* Board types */
27#define FALCON_BOARD_SFE4001 0x01
28#define FALCON_BOARD_SFE4002 0x02
29#define FALCON_BOARD_SFN4111T 0x51
30#define FALCON_BOARD_SFN4112F 0x52
31
32/* Blink support. If the PHY has no auto-blink mode so we hang it off a timer */
33#define BLINK_INTERVAL (HZ/2)
34
35static void blink_led_timer(unsigned long context)
36{
37 struct efx_nic *efx = (struct efx_nic *)context;
38 struct efx_board *board = &efx->board_info;
39
40 board->set_id_led(efx, board->blink_state);
41 board->blink_state = !board->blink_state;
42 if (board->blink_resubmit)
43 mod_timer(&board->blink_timer, jiffies + BLINK_INTERVAL);
44}
45
46static void board_blink(struct efx_nic *efx, bool blink)
47{
48 struct efx_board *board = &efx->board_info;
49
50 /* The rtnl mutex serialises all ethtool ioctls, so
51 * nothing special needs doing here. */
52 if (blink) {
53 board->blink_resubmit = true;
54 board->blink_state = false;
55 setup_timer(&board->blink_timer, blink_led_timer,
56 (unsigned long)efx);
57 mod_timer(&board->blink_timer, jiffies + BLINK_INTERVAL);
58 } else {
59 board->blink_resubmit = false;
60 if (board->blink_timer.function)
61 del_timer_sync(&board->blink_timer);
62 board->init_leds(efx);
63 }
64}
65
66/*****************************************************************************
67 * Support for LM87 sensor chip used on several boards
68 */
69#define LM87_REG_ALARMS1 0x41
70#define LM87_REG_ALARMS2 0x42
71#define LM87_IN_LIMITS(nr, _min, _max) \
72 0x2B + (nr) * 2, _max, 0x2C + (nr) * 2, _min
73#define LM87_AIN_LIMITS(nr, _min, _max) \
74 0x3B + (nr), _max, 0x1A + (nr), _min
75#define LM87_TEMP_INT_LIMITS(_min, _max) \
76 0x39, _max, 0x3A, _min
77#define LM87_TEMP_EXT1_LIMITS(_min, _max) \
78 0x37, _max, 0x38, _min
79
80#define LM87_ALARM_TEMP_INT 0x10
81#define LM87_ALARM_TEMP_EXT1 0x20
82
83#if defined(CONFIG_SENSORS_LM87) || defined(CONFIG_SENSORS_LM87_MODULE)
84
85static int efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
86 const u8 *reg_values)
87{
88 struct i2c_client *client = i2c_new_device(&efx->i2c_adap, info);
89 int rc;
90
91 if (!client)
92 return -EIO;
93
94 while (*reg_values) {
95 u8 reg = *reg_values++;
96 u8 value = *reg_values++;
97 rc = i2c_smbus_write_byte_data(client, reg, value);
98 if (rc)
99 goto err;
100 }
101
102 efx->board_info.hwmon_client = client;
103 return 0;
104
105err:
106 i2c_unregister_device(client);
107 return rc;
108}
109
110static void efx_fini_lm87(struct efx_nic *efx)
111{
112 i2c_unregister_device(efx->board_info.hwmon_client);
113}
114
115static int efx_check_lm87(struct efx_nic *efx, unsigned mask)
116{
117 struct i2c_client *client = efx->board_info.hwmon_client;
118 s32 alarms1, alarms2;
119
120 /* If link is up then do not monitor temperature */
121 if (EFX_WORKAROUND_7884(efx) && efx->link_up)
122 return 0;
123
124 alarms1 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS1);
125 alarms2 = i2c_smbus_read_byte_data(client, LM87_REG_ALARMS2);
126 if (alarms1 < 0)
127 return alarms1;
128 if (alarms2 < 0)
129 return alarms2;
130 alarms1 &= mask;
131 alarms2 &= mask >> 8;
132 if (alarms1 || alarms2) {
133 EFX_ERR(efx,
134 "LM87 detected a hardware failure (status %02x:%02x)"
135 "%s%s\n",
136 alarms1, alarms2,
137 (alarms1 & LM87_ALARM_TEMP_INT) ? " INTERNAL" : "",
138 (alarms1 & LM87_ALARM_TEMP_EXT1) ? " EXTERNAL" : "");
139 return -ERANGE;
140 }
141
142 return 0;
143}
144
145#else /* !CONFIG_SENSORS_LM87 */
146
147static inline int
148efx_init_lm87(struct efx_nic *efx, struct i2c_board_info *info,
149 const u8 *reg_values)
150{
151 return 0;
152}
153static inline void efx_fini_lm87(struct efx_nic *efx)
154{
155}
156static inline int efx_check_lm87(struct efx_nic *efx, unsigned mask)
157{
158 return 0;
159}
160
161#endif /* CONFIG_SENSORS_LM87 */
162
10/***************************************************************************** 163/*****************************************************************************
11 * Support for the SFE4001 and SFN4111T NICs. 164 * Support for the SFE4001 and SFN4111T NICs.
12 * 165 *
@@ -23,23 +176,9 @@
23 * exclusive with the network device being open. 176 * exclusive with the network device being open.
24 */ 177 */
25 178
26#include <linux/delay.h>
27#include <linux/rtnetlink.h>
28#include "net_driver.h"
29#include "efx.h"
30#include "phy.h"
31#include "boards.h"
32#include "falcon.h"
33#include "falcon_hwdefs.h"
34#include "falcon_io.h"
35#include "mac.h"
36#include "workarounds.h"
37
38/************************************************************************** 179/**************************************************************************
39 * 180 * Support for I2C IO Expander device on SFE40001
40 * I2C IO Expander device 181 */
41 *
42 **************************************************************************/
43#define PCA9539 0x74 182#define PCA9539 0x74
44 183
45#define P0_IN 0x00 184#define P0_IN 0x00
@@ -194,14 +333,14 @@ static int sfn4111t_reset(struct efx_nic *efx)
194 * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the 333 * FLASH_CFG_1 strap (GPIO 3) appropriately. Only change the
195 * output enables; the output levels should always be 0 (low) 334 * output enables; the output levels should always be 0 (low)
196 * and we rely on external pull-ups. */ 335 * and we rely on external pull-ups. */
197 falcon_read(efx, &reg, GPIO_CTL_REG_KER); 336 efx_reado(efx, &reg, FR_AB_GPIO_CTL);
198 EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, true); 337 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO2_OEN, true);
199 falcon_write(efx, &reg, GPIO_CTL_REG_KER); 338 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
200 msleep(1000); 339 msleep(1000);
201 EFX_SET_OWORD_FIELD(reg, GPIO2_OEN, false); 340 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO2_OEN, false);
202 EFX_SET_OWORD_FIELD(reg, GPIO3_OEN, 341 EFX_SET_OWORD_FIELD(reg, FRF_AB_GPIO3_OEN,
203 !!(efx->phy_mode & PHY_MODE_SPECIAL)); 342 !!(efx->phy_mode & PHY_MODE_SPECIAL));
204 falcon_write(efx, &reg, GPIO_CTL_REG_KER); 343 efx_writeo(efx, &reg, FR_AB_GPIO_CTL);
205 msleep(1); 344 msleep(1);
206 345
207 mutex_unlock(&efx->i2c_adap.bus_lock); 346 mutex_unlock(&efx->i2c_adap.bus_lock);
@@ -241,7 +380,7 @@ static ssize_t set_phy_flash_cfg(struct device *dev,
241 efx->phy_mode = new_mode; 380 efx->phy_mode = new_mode;
242 if (new_mode & PHY_MODE_SPECIAL) 381 if (new_mode & PHY_MODE_SPECIAL)
243 efx_stats_disable(efx); 382 efx_stats_disable(efx);
244 if (efx->board_info.type == EFX_BOARD_SFE4001) 383 if (efx->board_info.type == FALCON_BOARD_SFE4001)
245 err = sfe4001_poweron(efx); 384 err = sfe4001_poweron(efx);
246 else 385 else
247 err = sfn4111t_reset(efx); 386 err = sfn4111t_reset(efx);
@@ -302,7 +441,7 @@ static struct i2c_board_info sfe4001_hwmon_info = {
302 * be turned on before the PHY can be used. 441 * be turned on before the PHY can be used.
303 * Context: Process context, rtnl lock held 442 * Context: Process context, rtnl lock held
304 */ 443 */
305int sfe4001_init(struct efx_nic *efx) 444static int sfe4001_init(struct efx_nic *efx)
306{ 445{
307 int rc; 446 int rc;
308 447
@@ -394,7 +533,7 @@ static struct i2c_board_info sfn4111t_r5_hwmon_info = {
394 I2C_BOARD_INFO("max6646", 0x4d), 533 I2C_BOARD_INFO("max6646", 0x4d),
395}; 534};
396 535
397int sfn4111t_init(struct efx_nic *efx) 536static int sfn4111t_init(struct efx_nic *efx)
398{ 537{
399 int i = 0; 538 int i = 0;
400 int rc; 539 int rc;
@@ -433,3 +572,181 @@ fail_hwmon:
433 i2c_unregister_device(efx->board_info.hwmon_client); 572 i2c_unregister_device(efx->board_info.hwmon_client);
434 return rc; 573 return rc;
435} 574}
575
576/*****************************************************************************
577 * Support for the SFE4002
578 *
579 */
580static u8 sfe4002_lm87_channel = 0x03; /* use AIN not FAN inputs */
581
582static const u8 sfe4002_lm87_regs[] = {
583 LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */
584 LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */
585 LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */
586 LM87_IN_LIMITS(3, 0xb0, 0xc9), /* 5V: 4.6-5.2V */
587 LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */
588 LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */
589 LM87_AIN_LIMITS(0, 0xa0, 0xb2), /* AIN1: 1.66V +/- 5% */
590 LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */
591 LM87_TEMP_INT_LIMITS(10, 60), /* board */
592 LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */
593 0
594};
595
596static struct i2c_board_info sfe4002_hwmon_info = {
597 I2C_BOARD_INFO("lm87", 0x2e),
598 .platform_data = &sfe4002_lm87_channel,
599};
600
601/****************************************************************************/
602/* LED allocations. Note that on rev A0 boards the schematic and the reality
603 * differ: red and green are swapped. Below is the fixed (A1) layout (there
604 * are only 3 A0 boards in existence, so no real reason to make this
605 * conditional).
606 */
607#define SFE4002_FAULT_LED (2) /* Red */
608#define SFE4002_RX_LED (0) /* Green */
609#define SFE4002_TX_LED (1) /* Amber */
610
611static void sfe4002_init_leds(struct efx_nic *efx)
612{
613 /* Set the TX and RX LEDs to reflect status and activity, and the
614 * fault LED off */
615 falcon_qt202x_set_led(efx, SFE4002_TX_LED,
616 QUAKE_LED_TXLINK | QUAKE_LED_LINK_ACTSTAT);
617 falcon_qt202x_set_led(efx, SFE4002_RX_LED,
618 QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACTSTAT);
619 falcon_qt202x_set_led(efx, SFE4002_FAULT_LED, QUAKE_LED_OFF);
620}
621
622static void sfe4002_set_id_led(struct efx_nic *efx, bool state)
623{
624 falcon_qt202x_set_led(efx, SFE4002_FAULT_LED, state ? QUAKE_LED_ON :
625 QUAKE_LED_OFF);
626}
627
628static int sfe4002_check_hw(struct efx_nic *efx)
629{
630 /* A0 board rev. 4002s report a temperature fault the whole time
631 * (bad sensor) so we mask it out. */
632 unsigned alarm_mask =
633 (efx->board_info.major == 0 && efx->board_info.minor == 0) ?
634 ~LM87_ALARM_TEMP_EXT1 : ~0;
635
636 return efx_check_lm87(efx, alarm_mask);
637}
638
639static int sfe4002_init(struct efx_nic *efx)
640{
641 int rc = efx_init_lm87(efx, &sfe4002_hwmon_info, sfe4002_lm87_regs);
642 if (rc)
643 return rc;
644 efx->board_info.monitor = sfe4002_check_hw;
645 efx->board_info.init_leds = sfe4002_init_leds;
646 efx->board_info.set_id_led = sfe4002_set_id_led;
647 efx->board_info.blink = board_blink;
648 efx->board_info.fini = efx_fini_lm87;
649 return 0;
650}
651
652/*****************************************************************************
653 * Support for the SFN4112F
654 *
655 */
656static u8 sfn4112f_lm87_channel = 0x03; /* use AIN not FAN inputs */
657
658static const u8 sfn4112f_lm87_regs[] = {
659 LM87_IN_LIMITS(0, 0x83, 0x91), /* 2.5V: 1.8V +/- 5% */
660 LM87_IN_LIMITS(1, 0x51, 0x5a), /* Vccp1: 1.2V +/- 5% */
661 LM87_IN_LIMITS(2, 0xb6, 0xca), /* 3.3V: 3.3V +/- 5% */
662 LM87_IN_LIMITS(4, 0xb0, 0xe0), /* 12V: 11-14V */
663 LM87_IN_LIMITS(5, 0x44, 0x4b), /* Vccp2: 1.0V +/- 5% */
664 LM87_AIN_LIMITS(1, 0x91, 0xa1), /* AIN2: 1.5V +/- 5% */
665 LM87_TEMP_INT_LIMITS(10, 60), /* board */
666 LM87_TEMP_EXT1_LIMITS(10, 70), /* Falcon */
667 0
668};
669
670static struct i2c_board_info sfn4112f_hwmon_info = {
671 I2C_BOARD_INFO("lm87", 0x2e),
672 .platform_data = &sfn4112f_lm87_channel,
673};
674
675#define SFN4112F_ACT_LED 0
676#define SFN4112F_LINK_LED 1
677
678static void sfn4112f_init_leds(struct efx_nic *efx)
679{
680 falcon_qt202x_set_led(efx, SFN4112F_ACT_LED,
681 QUAKE_LED_RXLINK | QUAKE_LED_LINK_ACT);
682 falcon_qt202x_set_led(efx, SFN4112F_LINK_LED,
683 QUAKE_LED_RXLINK | QUAKE_LED_LINK_STAT);
684}
685
686static void sfn4112f_set_id_led(struct efx_nic *efx, bool state)
687{
688 falcon_qt202x_set_led(efx, SFN4112F_LINK_LED,
689 state ? QUAKE_LED_ON : QUAKE_LED_OFF);
690}
691
692static int sfn4112f_check_hw(struct efx_nic *efx)
693{
694 /* Mask out unused sensors */
695 return efx_check_lm87(efx, ~0x48);
696}
697
698static int sfn4112f_init(struct efx_nic *efx)
699{
700 int rc = efx_init_lm87(efx, &sfn4112f_hwmon_info, sfn4112f_lm87_regs);
701 if (rc)
702 return rc;
703 efx->board_info.monitor = sfn4112f_check_hw;
704 efx->board_info.init_leds = sfn4112f_init_leds;
705 efx->board_info.set_id_led = sfn4112f_set_id_led;
706 efx->board_info.blink = board_blink;
707 efx->board_info.fini = efx_fini_lm87;
708 return 0;
709}
710
711/* This will get expanded as board-specific details get moved out of the
712 * PHY drivers. */
713struct falcon_board_data {
714 u8 type;
715 const char *ref_model;
716 const char *gen_type;
717 int (*init) (struct efx_nic *nic);
718};
719
720
721static struct falcon_board_data board_data[] = {
722 { FALCON_BOARD_SFE4001, "SFE4001", "10GBASE-T adapter", sfe4001_init },
723 { FALCON_BOARD_SFE4002, "SFE4002", "XFP adapter", sfe4002_init },
724 { FALCON_BOARD_SFN4111T, "SFN4111T", "100/1000/10GBASE-T adapter",
725 sfn4111t_init },
726 { FALCON_BOARD_SFN4112F, "SFN4112F", "SFP+ adapter",
727 sfn4112f_init },
728};
729
730void falcon_probe_board(struct efx_nic *efx, u16 revision_info)
731{
732 struct falcon_board_data *data = NULL;
733 int i;
734
735 efx->board_info.type = FALCON_BOARD_TYPE(revision_info);
736 efx->board_info.major = FALCON_BOARD_MAJOR(revision_info);
737 efx->board_info.minor = FALCON_BOARD_MINOR(revision_info);
738
739 for (i = 0; i < ARRAY_SIZE(board_data); i++)
740 if (board_data[i].type == efx->board_info.type)
741 data = &board_data[i];
742
743 if (data) {
744 EFX_INFO(efx, "board is %s rev %c%d\n",
745 (efx->pci_dev->subsystem_vendor == EFX_VENDID_SFC)
746 ? data->ref_model : data->gen_type,
747 'A' + efx->board_info.major, efx->board_info.minor);
748 efx->board_info.init = data->init;
749 } else {
750 EFX_ERR(efx, "unknown board type %d\n", efx->board_info.type);
751 }
752}
diff --git a/drivers/net/sfc/falcon_gmac.c b/drivers/net/sfc/falcon_gmac.c
index 8865eae20ac5..8a1b80d1ff28 100644
--- a/drivers/net/sfc/falcon_gmac.c
+++ b/drivers/net/sfc/falcon_gmac.c
@@ -13,9 +13,8 @@
13#include "efx.h" 13#include "efx.h"
14#include "falcon.h" 14#include "falcon.h"
15#include "mac.h" 15#include "mac.h"
16#include "falcon_hwdefs.h" 16#include "regs.h"
17#include "falcon_io.h" 17#include "io.h"
18#include "gmii.h"
19 18
20/************************************************************************** 19/**************************************************************************
21 * 20 *
@@ -37,89 +36,89 @@ static void falcon_reconfigure_gmac(struct efx_nic *efx)
37 bytemode = (efx->link_speed == 1000); 36 bytemode = (efx->link_speed == 1000);
38 37
39 EFX_POPULATE_OWORD_5(reg, 38 EFX_POPULATE_OWORD_5(reg,
40 GM_LOOP, loopback, 39 FRF_AB_GM_LOOP, loopback,
41 GM_TX_EN, 1, 40 FRF_AB_GM_TX_EN, 1,
42 GM_TX_FC_EN, tx_fc, 41 FRF_AB_GM_TX_FC_EN, tx_fc,
43 GM_RX_EN, 1, 42 FRF_AB_GM_RX_EN, 1,
44 GM_RX_FC_EN, rx_fc); 43 FRF_AB_GM_RX_FC_EN, rx_fc);
45 falcon_write(efx, &reg, GM_CFG1_REG); 44 efx_writeo(efx, &reg, FR_AB_GM_CFG1);
46 udelay(10); 45 udelay(10);
47 46
48 /* Configuration register 2 */ 47 /* Configuration register 2 */
49 if_mode = (bytemode) ? 2 : 1; 48 if_mode = (bytemode) ? 2 : 1;
50 EFX_POPULATE_OWORD_5(reg, 49 EFX_POPULATE_OWORD_5(reg,
51 GM_IF_MODE, if_mode, 50 FRF_AB_GM_IF_MODE, if_mode,
52 GM_PAD_CRC_EN, 1, 51 FRF_AB_GM_PAD_CRC_EN, 1,
53 GM_LEN_CHK, 1, 52 FRF_AB_GM_LEN_CHK, 1,
54 GM_FD, efx->link_fd, 53 FRF_AB_GM_FD, efx->link_fd,
55 GM_PAMBL_LEN, 0x7/*datasheet recommended */); 54 FRF_AB_GM_PAMBL_LEN, 0x7/*datasheet recommended */);
56 55
57 falcon_write(efx, &reg, GM_CFG2_REG); 56 efx_writeo(efx, &reg, FR_AB_GM_CFG2);
58 udelay(10); 57 udelay(10);
59 58
60 /* Max frame len register */ 59 /* Max frame len register */
61 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu); 60 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
62 EFX_POPULATE_OWORD_1(reg, GM_MAX_FLEN, max_frame_len); 61 EFX_POPULATE_OWORD_1(reg, FRF_AB_GM_MAX_FLEN, max_frame_len);
63 falcon_write(efx, &reg, GM_MAX_FLEN_REG); 62 efx_writeo(efx, &reg, FR_AB_GM_MAX_FLEN);
64 udelay(10); 63 udelay(10);
65 64
66 /* FIFO configuration register 0 */ 65 /* FIFO configuration register 0 */
67 EFX_POPULATE_OWORD_5(reg, 66 EFX_POPULATE_OWORD_5(reg,
68 GMF_FTFENREQ, 1, 67 FRF_AB_GMF_FTFENREQ, 1,
69 GMF_STFENREQ, 1, 68 FRF_AB_GMF_STFENREQ, 1,
70 GMF_FRFENREQ, 1, 69 FRF_AB_GMF_FRFENREQ, 1,
71 GMF_SRFENREQ, 1, 70 FRF_AB_GMF_SRFENREQ, 1,
72 GMF_WTMENREQ, 1); 71 FRF_AB_GMF_WTMENREQ, 1);
73 falcon_write(efx, &reg, GMF_CFG0_REG); 72 efx_writeo(efx, &reg, FR_AB_GMF_CFG0);
74 udelay(10); 73 udelay(10);
75 74
76 /* FIFO configuration register 1 */ 75 /* FIFO configuration register 1 */
77 EFX_POPULATE_OWORD_2(reg, 76 EFX_POPULATE_OWORD_2(reg,
78 GMF_CFGFRTH, 0x12, 77 FRF_AB_GMF_CFGFRTH, 0x12,
79 GMF_CFGXOFFRTX, 0xffff); 78 FRF_AB_GMF_CFGXOFFRTX, 0xffff);
80 falcon_write(efx, &reg, GMF_CFG1_REG); 79 efx_writeo(efx, &reg, FR_AB_GMF_CFG1);
81 udelay(10); 80 udelay(10);
82 81
83 /* FIFO configuration register 2 */ 82 /* FIFO configuration register 2 */
84 EFX_POPULATE_OWORD_2(reg, 83 EFX_POPULATE_OWORD_2(reg,
85 GMF_CFGHWM, 0x3f, 84 FRF_AB_GMF_CFGHWM, 0x3f,
86 GMF_CFGLWM, 0xa); 85 FRF_AB_GMF_CFGLWM, 0xa);
87 falcon_write(efx, &reg, GMF_CFG2_REG); 86 efx_writeo(efx, &reg, FR_AB_GMF_CFG2);
88 udelay(10); 87 udelay(10);
89 88
90 /* FIFO configuration register 3 */ 89 /* FIFO configuration register 3 */
91 EFX_POPULATE_OWORD_2(reg, 90 EFX_POPULATE_OWORD_2(reg,
92 GMF_CFGHWMFT, 0x1c, 91 FRF_AB_GMF_CFGHWMFT, 0x1c,
93 GMF_CFGFTTH, 0x08); 92 FRF_AB_GMF_CFGFTTH, 0x08);
94 falcon_write(efx, &reg, GMF_CFG3_REG); 93 efx_writeo(efx, &reg, FR_AB_GMF_CFG3);
95 udelay(10); 94 udelay(10);
96 95
97 /* FIFO configuration register 4 */ 96 /* FIFO configuration register 4 */
98 EFX_POPULATE_OWORD_1(reg, GMF_HSTFLTRFRM_PAUSE, 1); 97 EFX_POPULATE_OWORD_1(reg, FRF_AB_GMF_HSTFLTRFRM_PAUSE, 1);
99 falcon_write(efx, &reg, GMF_CFG4_REG); 98 efx_writeo(efx, &reg, FR_AB_GMF_CFG4);
100 udelay(10); 99 udelay(10);
101 100
102 /* FIFO configuration register 5 */ 101 /* FIFO configuration register 5 */
103 falcon_read(efx, &reg, GMF_CFG5_REG); 102 efx_reado(efx, &reg, FR_AB_GMF_CFG5);
104 EFX_SET_OWORD_FIELD(reg, GMF_CFGBYTMODE, bytemode); 103 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_CFGBYTMODE, bytemode);
105 EFX_SET_OWORD_FIELD(reg, GMF_CFGHDPLX, !efx->link_fd); 104 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_CFGHDPLX, !efx->link_fd);
106 EFX_SET_OWORD_FIELD(reg, GMF_HSTDRPLT64, !efx->link_fd); 105 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_HSTDRPLT64, !efx->link_fd);
107 EFX_SET_OWORD_FIELD(reg, GMF_HSTFLTRFRMDC_PAUSE, 0); 106 EFX_SET_OWORD_FIELD(reg, FRF_AB_GMF_HSTFLTRFRMDC_PAUSE, 0);
108 falcon_write(efx, &reg, GMF_CFG5_REG); 107 efx_writeo(efx, &reg, FR_AB_GMF_CFG5);
109 udelay(10); 108 udelay(10);
110 109
111 /* MAC address */ 110 /* MAC address */
112 EFX_POPULATE_OWORD_4(reg, 111 EFX_POPULATE_OWORD_4(reg,
113 GM_HWADDR_5, efx->net_dev->dev_addr[5], 112 FRF_AB_GM_ADR_B0, efx->net_dev->dev_addr[5],
114 GM_HWADDR_4, efx->net_dev->dev_addr[4], 113 FRF_AB_GM_ADR_B1, efx->net_dev->dev_addr[4],
115 GM_HWADDR_3, efx->net_dev->dev_addr[3], 114 FRF_AB_GM_ADR_B2, efx->net_dev->dev_addr[3],
116 GM_HWADDR_2, efx->net_dev->dev_addr[2]); 115 FRF_AB_GM_ADR_B3, efx->net_dev->dev_addr[2]);
117 falcon_write(efx, &reg, GM_ADR1_REG); 116 efx_writeo(efx, &reg, FR_AB_GM_ADR1);
118 udelay(10); 117 udelay(10);
119 EFX_POPULATE_OWORD_2(reg, 118 EFX_POPULATE_OWORD_2(reg,
120 GM_HWADDR_1, efx->net_dev->dev_addr[1], 119 FRF_AB_GM_ADR_B4, efx->net_dev->dev_addr[1],
121 GM_HWADDR_0, efx->net_dev->dev_addr[0]); 120 FRF_AB_GM_ADR_B5, efx->net_dev->dev_addr[0]);
122 falcon_write(efx, &reg, GM_ADR2_REG); 121 efx_writeo(efx, &reg, FR_AB_GM_ADR2);
123 udelay(10); 122 udelay(10);
124 123
125 falcon_reconfigure_mac_wrapper(efx); 124 falcon_reconfigure_mac_wrapper(efx);
diff --git a/drivers/net/sfc/falcon_hwdefs.h b/drivers/net/sfc/falcon_hwdefs.h
deleted file mode 100644
index 2d2261117ace..000000000000
--- a/drivers/net/sfc/falcon_hwdefs.h
+++ /dev/null
@@ -1,1333 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_FALCON_HWDEFS_H
12#define EFX_FALCON_HWDEFS_H
13
14/*
15 * Falcon hardware value definitions.
16 * Falcon is the internal codename for the SFC4000 controller that is
17 * present in SFE400X evaluation boards
18 */
19
20/**************************************************************************
21 *
22 * Falcon registers
23 *
24 **************************************************************************
25 */
26
27/* Address region register */
28#define ADR_REGION_REG_KER 0x00
29#define ADR_REGION0_LBN 0
30#define ADR_REGION0_WIDTH 18
31#define ADR_REGION1_LBN 32
32#define ADR_REGION1_WIDTH 18
33#define ADR_REGION2_LBN 64
34#define ADR_REGION2_WIDTH 18
35#define ADR_REGION3_LBN 96
36#define ADR_REGION3_WIDTH 18
37
38/* Interrupt enable register */
39#define INT_EN_REG_KER 0x0010
40#define KER_INT_KER_LBN 3
41#define KER_INT_KER_WIDTH 1
42#define DRV_INT_EN_KER_LBN 0
43#define DRV_INT_EN_KER_WIDTH 1
44
45/* Interrupt status address register */
46#define INT_ADR_REG_KER 0x0030
47#define NORM_INT_VEC_DIS_KER_LBN 64
48#define NORM_INT_VEC_DIS_KER_WIDTH 1
49#define INT_ADR_KER_LBN 0
50#define INT_ADR_KER_WIDTH EFX_DMA_TYPE_WIDTH(64) /* not 46 for this one */
51
52/* Interrupt status register (B0 only) */
53#define INT_ISR0_B0 0x90
54#define INT_ISR1_B0 0xA0
55
56/* Interrupt acknowledge register (A0/A1 only) */
57#define INT_ACK_REG_KER_A1 0x0050
58#define INT_ACK_DUMMY_DATA_LBN 0
59#define INT_ACK_DUMMY_DATA_WIDTH 32
60
61/* Interrupt acknowledge work-around register (A0/A1 only )*/
62#define WORK_AROUND_BROKEN_PCI_READS_REG_KER_A1 0x0070
63
64/* SPI host command register */
65#define EE_SPI_HCMD_REG_KER 0x0100
66#define EE_SPI_HCMD_CMD_EN_LBN 31
67#define EE_SPI_HCMD_CMD_EN_WIDTH 1
68#define EE_WR_TIMER_ACTIVE_LBN 28
69#define EE_WR_TIMER_ACTIVE_WIDTH 1
70#define EE_SPI_HCMD_SF_SEL_LBN 24
71#define EE_SPI_HCMD_SF_SEL_WIDTH 1
72#define EE_SPI_EEPROM 0
73#define EE_SPI_FLASH 1
74#define EE_SPI_HCMD_DABCNT_LBN 16
75#define EE_SPI_HCMD_DABCNT_WIDTH 5
76#define EE_SPI_HCMD_READ_LBN 15
77#define EE_SPI_HCMD_READ_WIDTH 1
78#define EE_SPI_READ 1
79#define EE_SPI_WRITE 0
80#define EE_SPI_HCMD_DUBCNT_LBN 12
81#define EE_SPI_HCMD_DUBCNT_WIDTH 2
82#define EE_SPI_HCMD_ADBCNT_LBN 8
83#define EE_SPI_HCMD_ADBCNT_WIDTH 2
84#define EE_SPI_HCMD_ENC_LBN 0
85#define EE_SPI_HCMD_ENC_WIDTH 8
86
87/* SPI host address register */
88#define EE_SPI_HADR_REG_KER 0x0110
89#define EE_SPI_HADR_ADR_LBN 0
90#define EE_SPI_HADR_ADR_WIDTH 24
91
92/* SPI host data register */
93#define EE_SPI_HDATA_REG_KER 0x0120
94
95/* SPI/VPD config register */
96#define EE_VPD_CFG_REG_KER 0x0140
97#define EE_VPD_EN_LBN 0
98#define EE_VPD_EN_WIDTH 1
99#define EE_VPD_EN_AD9_MODE_LBN 1
100#define EE_VPD_EN_AD9_MODE_WIDTH 1
101#define EE_EE_CLOCK_DIV_LBN 112
102#define EE_EE_CLOCK_DIV_WIDTH 7
103#define EE_SF_CLOCK_DIV_LBN 120
104#define EE_SF_CLOCK_DIV_WIDTH 7
105
106/* PCIE CORE ACCESS REG */
107#define PCIE_CORE_ADDR_PCIE_DEVICE_CTRL_STAT 0x68
108#define PCIE_CORE_ADDR_PCIE_LINK_CTRL_STAT 0x70
109#define PCIE_CORE_ADDR_ACK_RPL_TIMER 0x700
110#define PCIE_CORE_ADDR_ACK_FREQ 0x70C
111
112/* NIC status register */
113#define NIC_STAT_REG 0x0200
114#define EE_STRAP_EN_LBN 31
115#define EE_STRAP_EN_WIDTH 1
116#define EE_STRAP_OVR_LBN 24
117#define EE_STRAP_OVR_WIDTH 4
118#define ONCHIP_SRAM_LBN 16
119#define ONCHIP_SRAM_WIDTH 1
120#define SF_PRST_LBN 9
121#define SF_PRST_WIDTH 1
122#define EE_PRST_LBN 8
123#define EE_PRST_WIDTH 1
124#define STRAP_PINS_LBN 0
125#define STRAP_PINS_WIDTH 3
126/* These bit definitions are extrapolated from the list of numerical
127 * values for STRAP_PINS.
128 */
129#define STRAP_10G_LBN 2
130#define STRAP_10G_WIDTH 1
131#define STRAP_PCIE_LBN 0
132#define STRAP_PCIE_WIDTH 1
133
134#define BOOTED_USING_NVDEVICE_LBN 3
135#define BOOTED_USING_NVDEVICE_WIDTH 1
136
137/* GPIO control register */
138#define GPIO_CTL_REG_KER 0x0210
139#define GPIO_USE_NIC_CLK_LBN (30)
140#define GPIO_USE_NIC_CLK_WIDTH (1)
141#define GPIO_OUTPUTS_LBN (16)
142#define GPIO_OUTPUTS_WIDTH (4)
143#define GPIO_INPUTS_LBN (8)
144#define GPIO_DIRECTION_LBN (24)
145#define GPIO_DIRECTION_WIDTH (4)
146#define GPIO_DIRECTION_OUT (1)
147#define GPIO_SRAM_SLEEP (1 << 1)
148
149#define GPIO3_OEN_LBN (GPIO_DIRECTION_LBN + 3)
150#define GPIO3_OEN_WIDTH 1
151#define GPIO2_OEN_LBN (GPIO_DIRECTION_LBN + 2)
152#define GPIO2_OEN_WIDTH 1
153#define GPIO1_OEN_LBN (GPIO_DIRECTION_LBN + 1)
154#define GPIO1_OEN_WIDTH 1
155#define GPIO0_OEN_LBN (GPIO_DIRECTION_LBN + 0)
156#define GPIO0_OEN_WIDTH 1
157
158#define GPIO3_OUT_LBN (GPIO_OUTPUTS_LBN + 3)
159#define GPIO3_OUT_WIDTH 1
160#define GPIO2_OUT_LBN (GPIO_OUTPUTS_LBN + 2)
161#define GPIO2_OUT_WIDTH 1
162#define GPIO1_OUT_LBN (GPIO_OUTPUTS_LBN + 1)
163#define GPIO1_OUT_WIDTH 1
164#define GPIO0_OUT_LBN (GPIO_OUTPUTS_LBN + 0)
165#define GPIO0_OUT_WIDTH 1
166
167#define GPIO3_IN_LBN (GPIO_INPUTS_LBN + 3)
168#define GPIO3_IN_WIDTH 1
169#define GPIO2_IN_WIDTH 1
170#define GPIO1_IN_WIDTH 1
171#define GPIO0_IN_LBN (GPIO_INPUTS_LBN + 0)
172#define GPIO0_IN_WIDTH 1
173
174/* Global control register */
175#define GLB_CTL_REG_KER 0x0220
176#define EXT_PHY_RST_CTL_LBN 63
177#define EXT_PHY_RST_CTL_WIDTH 1
178#define PCIE_SD_RST_CTL_LBN 61
179#define PCIE_SD_RST_CTL_WIDTH 1
180
181#define PCIE_NSTCK_RST_CTL_LBN 58
182#define PCIE_NSTCK_RST_CTL_WIDTH 1
183#define PCIE_CORE_RST_CTL_LBN 57
184#define PCIE_CORE_RST_CTL_WIDTH 1
185#define EE_RST_CTL_LBN 49
186#define EE_RST_CTL_WIDTH 1
187#define RST_XGRX_LBN 24
188#define RST_XGRX_WIDTH 1
189#define RST_XGTX_LBN 23
190#define RST_XGTX_WIDTH 1
191#define RST_EM_LBN 22
192#define RST_EM_WIDTH 1
193#define EXT_PHY_RST_DUR_LBN 1
194#define EXT_PHY_RST_DUR_WIDTH 3
195#define SWRST_LBN 0
196#define SWRST_WIDTH 1
197#define INCLUDE_IN_RESET 0
198#define EXCLUDE_FROM_RESET 1
199
200/* Fatal interrupt register */
201#define FATAL_INTR_REG_KER 0x0230
202#define RBUF_OWN_INT_KER_EN_LBN 39
203#define RBUF_OWN_INT_KER_EN_WIDTH 1
204#define TBUF_OWN_INT_KER_EN_LBN 38
205#define TBUF_OWN_INT_KER_EN_WIDTH 1
206#define ILL_ADR_INT_KER_EN_LBN 33
207#define ILL_ADR_INT_KER_EN_WIDTH 1
208#define MEM_PERR_INT_KER_LBN 8
209#define MEM_PERR_INT_KER_WIDTH 1
210#define INT_KER_ERROR_LBN 0
211#define INT_KER_ERROR_WIDTH 12
212
213#define DP_CTRL_REG 0x250
214#define FLS_EVQ_ID_LBN 0
215#define FLS_EVQ_ID_WIDTH 11
216
217#define MEM_STAT_REG_KER 0x260
218
219/* Debug probe register */
220#define DEBUG_BLK_SEL_MISC 7
221#define DEBUG_BLK_SEL_SERDES 6
222#define DEBUG_BLK_SEL_EM 5
223#define DEBUG_BLK_SEL_SR 4
224#define DEBUG_BLK_SEL_EV 3
225#define DEBUG_BLK_SEL_RX 2
226#define DEBUG_BLK_SEL_TX 1
227#define DEBUG_BLK_SEL_BIU 0
228
229/* FPGA build version */
230#define ALTERA_BUILD_REG_KER 0x0300
231#define VER_ALL_LBN 0
232#define VER_ALL_WIDTH 32
233
234/* Spare EEPROM bits register (flash 0x390) */
235#define SPARE_REG_KER 0x310
236#define MEM_PERR_EN_TX_DATA_LBN 72
237#define MEM_PERR_EN_TX_DATA_WIDTH 2
238
239/* Timer table for kernel access */
240#define TIMER_CMD_REG_KER 0x420
241#define TIMER_MODE_LBN 12
242#define TIMER_MODE_WIDTH 2
243#define TIMER_MODE_DIS 0
244#define TIMER_MODE_INT_HLDOFF 2
245#define TIMER_VAL_LBN 0
246#define TIMER_VAL_WIDTH 12
247
248/* Driver generated event register */
249#define DRV_EV_REG_KER 0x440
250#define DRV_EV_QID_LBN 64
251#define DRV_EV_QID_WIDTH 12
252#define DRV_EV_DATA_LBN 0
253#define DRV_EV_DATA_WIDTH 64
254
255/* Buffer table configuration register */
256#define BUF_TBL_CFG_REG_KER 0x600
257#define BUF_TBL_MODE_LBN 3
258#define BUF_TBL_MODE_WIDTH 1
259#define BUF_TBL_MODE_HALF 0
260#define BUF_TBL_MODE_FULL 1
261
262/* SRAM receive descriptor cache configuration register */
263#define SRM_RX_DC_CFG_REG_KER 0x610
264#define SRM_RX_DC_BASE_ADR_LBN 0
265#define SRM_RX_DC_BASE_ADR_WIDTH 21
266
267/* SRAM transmit descriptor cache configuration register */
268#define SRM_TX_DC_CFG_REG_KER 0x620
269#define SRM_TX_DC_BASE_ADR_LBN 0
270#define SRM_TX_DC_BASE_ADR_WIDTH 21
271
272/* SRAM configuration register */
273#define SRM_CFG_REG_KER 0x630
274#define SRAM_OOB_BT_INIT_EN_LBN 3
275#define SRAM_OOB_BT_INIT_EN_WIDTH 1
276#define SRM_NUM_BANKS_AND_BANK_SIZE_LBN 0
277#define SRM_NUM_BANKS_AND_BANK_SIZE_WIDTH 3
278#define SRM_NB_BSZ_1BANKS_2M 0
279#define SRM_NB_BSZ_1BANKS_4M 1
280#define SRM_NB_BSZ_1BANKS_8M 2
281#define SRM_NB_BSZ_DEFAULT 3 /* char driver will set the default */
282#define SRM_NB_BSZ_2BANKS_4M 4
283#define SRM_NB_BSZ_2BANKS_8M 5
284#define SRM_NB_BSZ_2BANKS_16M 6
285#define SRM_NB_BSZ_RESERVED 7
286
287/* Special buffer table update register */
288#define BUF_TBL_UPD_REG_KER 0x0650
289#define BUF_UPD_CMD_LBN 63
290#define BUF_UPD_CMD_WIDTH 1
291#define BUF_CLR_CMD_LBN 62
292#define BUF_CLR_CMD_WIDTH 1
293#define BUF_CLR_END_ID_LBN 32
294#define BUF_CLR_END_ID_WIDTH 20
295#define BUF_CLR_START_ID_LBN 0
296#define BUF_CLR_START_ID_WIDTH 20
297
298/* Receive configuration register */
299#define RX_CFG_REG_KER 0x800
300
301/* B0 */
302#define RX_INGR_EN_B0_LBN 47
303#define RX_INGR_EN_B0_WIDTH 1
304#define RX_DESC_PUSH_EN_B0_LBN 43
305#define RX_DESC_PUSH_EN_B0_WIDTH 1
306#define RX_XON_TX_TH_B0_LBN 33
307#define RX_XON_TX_TH_B0_WIDTH 5
308#define RX_XOFF_TX_TH_B0_LBN 28
309#define RX_XOFF_TX_TH_B0_WIDTH 5
310#define RX_USR_BUF_SIZE_B0_LBN 19
311#define RX_USR_BUF_SIZE_B0_WIDTH 9
312#define RX_XON_MAC_TH_B0_LBN 10
313#define RX_XON_MAC_TH_B0_WIDTH 9
314#define RX_XOFF_MAC_TH_B0_LBN 1
315#define RX_XOFF_MAC_TH_B0_WIDTH 9
316#define RX_XOFF_MAC_EN_B0_LBN 0
317#define RX_XOFF_MAC_EN_B0_WIDTH 1
318
319/* A1 */
320#define RX_DESC_PUSH_EN_A1_LBN 35
321#define RX_DESC_PUSH_EN_A1_WIDTH 1
322#define RX_XON_TX_TH_A1_LBN 25
323#define RX_XON_TX_TH_A1_WIDTH 5
324#define RX_XOFF_TX_TH_A1_LBN 20
325#define RX_XOFF_TX_TH_A1_WIDTH 5
326#define RX_USR_BUF_SIZE_A1_LBN 11
327#define RX_USR_BUF_SIZE_A1_WIDTH 9
328#define RX_XON_MAC_TH_A1_LBN 6
329#define RX_XON_MAC_TH_A1_WIDTH 5
330#define RX_XOFF_MAC_TH_A1_LBN 1
331#define RX_XOFF_MAC_TH_A1_WIDTH 5
332#define RX_XOFF_MAC_EN_A1_LBN 0
333#define RX_XOFF_MAC_EN_A1_WIDTH 1
334
335/* Receive filter control register */
336#define RX_FILTER_CTL_REG 0x810
337#define UDP_FULL_SRCH_LIMIT_LBN 32
338#define UDP_FULL_SRCH_LIMIT_WIDTH 8
339#define NUM_KER_LBN 24
340#define NUM_KER_WIDTH 2
341#define UDP_WILD_SRCH_LIMIT_LBN 16
342#define UDP_WILD_SRCH_LIMIT_WIDTH 8
343#define TCP_WILD_SRCH_LIMIT_LBN 8
344#define TCP_WILD_SRCH_LIMIT_WIDTH 8
345#define TCP_FULL_SRCH_LIMIT_LBN 0
346#define TCP_FULL_SRCH_LIMIT_WIDTH 8
347
348/* RX queue flush register */
349#define RX_FLUSH_DESCQ_REG_KER 0x0820
350#define RX_FLUSH_DESCQ_CMD_LBN 24
351#define RX_FLUSH_DESCQ_CMD_WIDTH 1
352#define RX_FLUSH_DESCQ_LBN 0
353#define RX_FLUSH_DESCQ_WIDTH 12
354
355/* Receive descriptor update register */
356#define RX_DESC_UPD_REG_KER_DWORD (0x830 + 12)
357#define RX_DESC_WPTR_DWORD_LBN 0
358#define RX_DESC_WPTR_DWORD_WIDTH 12
359
360/* Receive descriptor cache configuration register */
361#define RX_DC_CFG_REG_KER 0x840
362#define RX_DC_SIZE_LBN 0
363#define RX_DC_SIZE_WIDTH 2
364
365#define RX_DC_PF_WM_REG_KER 0x850
366#define RX_DC_PF_LWM_LBN 0
367#define RX_DC_PF_LWM_WIDTH 6
368
369/* RX no descriptor drop counter */
370#define RX_NODESC_DROP_REG_KER 0x880
371#define RX_NODESC_DROP_CNT_LBN 0
372#define RX_NODESC_DROP_CNT_WIDTH 16
373
374/* RX black magic register */
375#define RX_SELF_RST_REG_KER 0x890
376#define RX_ISCSI_DIS_LBN 17
377#define RX_ISCSI_DIS_WIDTH 1
378#define RX_NODESC_WAIT_DIS_LBN 9
379#define RX_NODESC_WAIT_DIS_WIDTH 1
380#define RX_RECOVERY_EN_LBN 8
381#define RX_RECOVERY_EN_WIDTH 1
382
383/* TX queue flush register */
384#define TX_FLUSH_DESCQ_REG_KER 0x0a00
385#define TX_FLUSH_DESCQ_CMD_LBN 12
386#define TX_FLUSH_DESCQ_CMD_WIDTH 1
387#define TX_FLUSH_DESCQ_LBN 0
388#define TX_FLUSH_DESCQ_WIDTH 12
389
390/* Transmit descriptor update register */
391#define TX_DESC_UPD_REG_KER_DWORD (0xa10 + 12)
392#define TX_DESC_WPTR_DWORD_LBN 0
393#define TX_DESC_WPTR_DWORD_WIDTH 12
394
395/* Transmit descriptor cache configuration register */
396#define TX_DC_CFG_REG_KER 0xa20
397#define TX_DC_SIZE_LBN 0
398#define TX_DC_SIZE_WIDTH 2
399
400/* Transmit checksum configuration register (A0/A1 only) */
401#define TX_CHKSM_CFG_REG_KER_A1 0xa30
402
403/* Transmit configuration register */
404#define TX_CFG_REG_KER 0xa50
405#define TX_NO_EOP_DISC_EN_LBN 5
406#define TX_NO_EOP_DISC_EN_WIDTH 1
407
408/* Transmit configuration register 2 */
409#define TX_CFG2_REG_KER 0xa80
410#define TX_CSR_PUSH_EN_LBN 89
411#define TX_CSR_PUSH_EN_WIDTH 1
412#define TX_RX_SPACER_LBN 64
413#define TX_RX_SPACER_WIDTH 8
414#define TX_SW_EV_EN_LBN 59
415#define TX_SW_EV_EN_WIDTH 1
416#define TX_RX_SPACER_EN_LBN 57
417#define TX_RX_SPACER_EN_WIDTH 1
418#define TX_PREF_THRESHOLD_LBN 19
419#define TX_PREF_THRESHOLD_WIDTH 2
420#define TX_ONE_PKT_PER_Q_LBN 18
421#define TX_ONE_PKT_PER_Q_WIDTH 1
422#define TX_DIS_NON_IP_EV_LBN 17
423#define TX_DIS_NON_IP_EV_WIDTH 1
424#define TX_FLUSH_MIN_LEN_EN_B0_LBN 7
425#define TX_FLUSH_MIN_LEN_EN_B0_WIDTH 1
426
427/* PHY management transmit data register */
428#define MD_TXD_REG_KER 0xc00
429#define MD_TXD_LBN 0
430#define MD_TXD_WIDTH 16
431
432/* PHY management receive data register */
433#define MD_RXD_REG_KER 0xc10
434#define MD_RXD_LBN 0
435#define MD_RXD_WIDTH 16
436
437/* PHY management configuration & status register */
438#define MD_CS_REG_KER 0xc20
439#define MD_GC_LBN 4
440#define MD_GC_WIDTH 1
441#define MD_RIC_LBN 2
442#define MD_RIC_WIDTH 1
443#define MD_RDC_LBN 1
444#define MD_RDC_WIDTH 1
445#define MD_WRC_LBN 0
446#define MD_WRC_WIDTH 1
447
448/* PHY management PHY address register */
449#define MD_PHY_ADR_REG_KER 0xc30
450#define MD_PHY_ADR_LBN 0
451#define MD_PHY_ADR_WIDTH 16
452
453/* PHY management ID register */
454#define MD_ID_REG_KER 0xc40
455#define MD_PRT_ADR_LBN 11
456#define MD_PRT_ADR_WIDTH 5
457#define MD_DEV_ADR_LBN 6
458#define MD_DEV_ADR_WIDTH 5
459
460/* PHY management status & mask register (DWORD read only) */
461#define MD_STAT_REG_KER 0xc50
462#define MD_BSERR_LBN 2
463#define MD_BSERR_WIDTH 1
464#define MD_LNFL_LBN 1
465#define MD_LNFL_WIDTH 1
466#define MD_BSY_LBN 0
467#define MD_BSY_WIDTH 1
468
469/* Port 0 and 1 MAC stats registers */
470#define MAC0_STAT_DMA_REG_KER 0xc60
471#define MAC_STAT_DMA_CMD_LBN 48
472#define MAC_STAT_DMA_CMD_WIDTH 1
473#define MAC_STAT_DMA_ADR_LBN 0
474#define MAC_STAT_DMA_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
475
476/* Port 0 and 1 MAC control registers */
477#define MAC0_CTRL_REG_KER 0xc80
478#define MAC_XOFF_VAL_LBN 16
479#define MAC_XOFF_VAL_WIDTH 16
480#define TXFIFO_DRAIN_EN_B0_LBN 7
481#define TXFIFO_DRAIN_EN_B0_WIDTH 1
482#define MAC_BCAD_ACPT_LBN 4
483#define MAC_BCAD_ACPT_WIDTH 1
484#define MAC_UC_PROM_LBN 3
485#define MAC_UC_PROM_WIDTH 1
486#define MAC_LINK_STATUS_LBN 2
487#define MAC_LINK_STATUS_WIDTH 1
488#define MAC_SPEED_LBN 0
489#define MAC_SPEED_WIDTH 2
490
491/* 10G XAUI XGXS default values */
492#define XX_TXDRV_DEQ_DEFAULT 0xe /* deq=.6 */
493#define XX_TXDRV_DTX_DEFAULT 0x5 /* 1.25 */
494#define XX_SD_CTL_DRV_DEFAULT 0 /* 20mA */
495
496/* Multicast address hash table */
497#define MAC_MCAST_HASH_REG0_KER 0xca0
498#define MAC_MCAST_HASH_REG1_KER 0xcb0
499
500/* GMAC configuration register 1 */
501#define GM_CFG1_REG 0xe00
502#define GM_SW_RST_LBN 31
503#define GM_SW_RST_WIDTH 1
504#define GM_LOOP_LBN 8
505#define GM_LOOP_WIDTH 1
506#define GM_RX_FC_EN_LBN 5
507#define GM_RX_FC_EN_WIDTH 1
508#define GM_TX_FC_EN_LBN 4
509#define GM_TX_FC_EN_WIDTH 1
510#define GM_RX_EN_LBN 2
511#define GM_RX_EN_WIDTH 1
512#define GM_TX_EN_LBN 0
513#define GM_TX_EN_WIDTH 1
514
515/* GMAC configuration register 2 */
516#define GM_CFG2_REG 0xe10
517#define GM_PAMBL_LEN_LBN 12
518#define GM_PAMBL_LEN_WIDTH 4
519#define GM_IF_MODE_LBN 8
520#define GM_IF_MODE_WIDTH 2
521#define GM_LEN_CHK_LBN 4
522#define GM_LEN_CHK_WIDTH 1
523#define GM_PAD_CRC_EN_LBN 2
524#define GM_PAD_CRC_EN_WIDTH 1
525#define GM_FD_LBN 0
526#define GM_FD_WIDTH 1
527
528/* GMAC maximum frame length register */
529#define GM_MAX_FLEN_REG 0xe40
530#define GM_MAX_FLEN_LBN 0
531#define GM_MAX_FLEN_WIDTH 16
532
533/* GMAC station address register 1 */
534#define GM_ADR1_REG 0xf00
535#define GM_HWADDR_5_LBN 24
536#define GM_HWADDR_5_WIDTH 8
537#define GM_HWADDR_4_LBN 16
538#define GM_HWADDR_4_WIDTH 8
539#define GM_HWADDR_3_LBN 8
540#define GM_HWADDR_3_WIDTH 8
541#define GM_HWADDR_2_LBN 0
542#define GM_HWADDR_2_WIDTH 8
543
544/* GMAC station address register 2 */
545#define GM_ADR2_REG 0xf10
546#define GM_HWADDR_1_LBN 24
547#define GM_HWADDR_1_WIDTH 8
548#define GM_HWADDR_0_LBN 16
549#define GM_HWADDR_0_WIDTH 8
550
551/* GMAC FIFO configuration register 0 */
552#define GMF_CFG0_REG 0xf20
553#define GMF_FTFENREQ_LBN 12
554#define GMF_FTFENREQ_WIDTH 1
555#define GMF_STFENREQ_LBN 11
556#define GMF_STFENREQ_WIDTH 1
557#define GMF_FRFENREQ_LBN 10
558#define GMF_FRFENREQ_WIDTH 1
559#define GMF_SRFENREQ_LBN 9
560#define GMF_SRFENREQ_WIDTH 1
561#define GMF_WTMENREQ_LBN 8
562#define GMF_WTMENREQ_WIDTH 1
563
564/* GMAC FIFO configuration register 1 */
565#define GMF_CFG1_REG 0xf30
566#define GMF_CFGFRTH_LBN 16
567#define GMF_CFGFRTH_WIDTH 5
568#define GMF_CFGXOFFRTX_LBN 0
569#define GMF_CFGXOFFRTX_WIDTH 16
570
571/* GMAC FIFO configuration register 2 */
572#define GMF_CFG2_REG 0xf40
573#define GMF_CFGHWM_LBN 16
574#define GMF_CFGHWM_WIDTH 6
575#define GMF_CFGLWM_LBN 0
576#define GMF_CFGLWM_WIDTH 6
577
578/* GMAC FIFO configuration register 3 */
579#define GMF_CFG3_REG 0xf50
580#define GMF_CFGHWMFT_LBN 16
581#define GMF_CFGHWMFT_WIDTH 6
582#define GMF_CFGFTTH_LBN 0
583#define GMF_CFGFTTH_WIDTH 6
584
585/* GMAC FIFO configuration register 4 */
586#define GMF_CFG4_REG 0xf60
587#define GMF_HSTFLTRFRM_PAUSE_LBN 12
588#define GMF_HSTFLTRFRM_PAUSE_WIDTH 12
589
590/* GMAC FIFO configuration register 5 */
591#define GMF_CFG5_REG 0xf70
592#define GMF_CFGHDPLX_LBN 22
593#define GMF_CFGHDPLX_WIDTH 1
594#define GMF_CFGBYTMODE_LBN 19
595#define GMF_CFGBYTMODE_WIDTH 1
596#define GMF_HSTDRPLT64_LBN 18
597#define GMF_HSTDRPLT64_WIDTH 1
598#define GMF_HSTFLTRFRMDC_PAUSE_LBN 12
599#define GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1
600
601/* XGMAC address register low */
602#define XM_ADR_LO_REG 0x1200
603#define XM_ADR_3_LBN 24
604#define XM_ADR_3_WIDTH 8
605#define XM_ADR_2_LBN 16
606#define XM_ADR_2_WIDTH 8
607#define XM_ADR_1_LBN 8
608#define XM_ADR_1_WIDTH 8
609#define XM_ADR_0_LBN 0
610#define XM_ADR_0_WIDTH 8
611
612/* XGMAC address register high */
613#define XM_ADR_HI_REG 0x1210
614#define XM_ADR_5_LBN 8
615#define XM_ADR_5_WIDTH 8
616#define XM_ADR_4_LBN 0
617#define XM_ADR_4_WIDTH 8
618
619/* XGMAC global configuration */
620#define XM_GLB_CFG_REG 0x1220
621#define XM_RX_STAT_EN_LBN 11
622#define XM_RX_STAT_EN_WIDTH 1
623#define XM_TX_STAT_EN_LBN 10
624#define XM_TX_STAT_EN_WIDTH 1
625#define XM_RX_JUMBO_MODE_LBN 6
626#define XM_RX_JUMBO_MODE_WIDTH 1
627#define XM_INTCLR_MODE_LBN 3
628#define XM_INTCLR_MODE_WIDTH 1
629#define XM_CORE_RST_LBN 0
630#define XM_CORE_RST_WIDTH 1
631
632/* XGMAC transmit configuration */
633#define XM_TX_CFG_REG 0x1230
634#define XM_IPG_LBN 16
635#define XM_IPG_WIDTH 4
636#define XM_FCNTL_LBN 10
637#define XM_FCNTL_WIDTH 1
638#define XM_TXCRC_LBN 8
639#define XM_TXCRC_WIDTH 1
640#define XM_AUTO_PAD_LBN 5
641#define XM_AUTO_PAD_WIDTH 1
642#define XM_TX_PRMBL_LBN 2
643#define XM_TX_PRMBL_WIDTH 1
644#define XM_TXEN_LBN 1
645#define XM_TXEN_WIDTH 1
646
647/* XGMAC receive configuration */
648#define XM_RX_CFG_REG 0x1240
649#define XM_PASS_CRC_ERR_LBN 25
650#define XM_PASS_CRC_ERR_WIDTH 1
651#define XM_ACPT_ALL_MCAST_LBN 11
652#define XM_ACPT_ALL_MCAST_WIDTH 1
653#define XM_ACPT_ALL_UCAST_LBN 9
654#define XM_ACPT_ALL_UCAST_WIDTH 1
655#define XM_AUTO_DEPAD_LBN 8
656#define XM_AUTO_DEPAD_WIDTH 1
657#define XM_RXEN_LBN 1
658#define XM_RXEN_WIDTH 1
659
660/* XGMAC management interrupt mask register */
661#define XM_MGT_INT_MSK_REG_B0 0x1250
662#define XM_MSK_PRMBLE_ERR_LBN 2
663#define XM_MSK_PRMBLE_ERR_WIDTH 1
664#define XM_MSK_RMTFLT_LBN 1
665#define XM_MSK_RMTFLT_WIDTH 1
666#define XM_MSK_LCLFLT_LBN 0
667#define XM_MSK_LCLFLT_WIDTH 1
668
669/* XGMAC flow control register */
670#define XM_FC_REG 0x1270
671#define XM_PAUSE_TIME_LBN 16
672#define XM_PAUSE_TIME_WIDTH 16
673#define XM_DIS_FCNTL_LBN 0
674#define XM_DIS_FCNTL_WIDTH 1
675
676/* XGMAC pause time count register */
677#define XM_PAUSE_TIME_REG 0x1290
678
679/* XGMAC transmit parameter register */
680#define XM_TX_PARAM_REG 0x012d0
681#define XM_TX_JUMBO_MODE_LBN 31
682#define XM_TX_JUMBO_MODE_WIDTH 1
683#define XM_MAX_TX_FRM_SIZE_LBN 16
684#define XM_MAX_TX_FRM_SIZE_WIDTH 14
685
686/* XGMAC receive parameter register */
687#define XM_RX_PARAM_REG 0x12e0
688#define XM_MAX_RX_FRM_SIZE_LBN 0
689#define XM_MAX_RX_FRM_SIZE_WIDTH 14
690
691/* XGMAC management interrupt status register */
692#define XM_MGT_INT_REG_B0 0x12f0
693#define XM_PRMBLE_ERR 2
694#define XM_PRMBLE_WIDTH 1
695#define XM_RMTFLT_LBN 1
696#define XM_RMTFLT_WIDTH 1
697#define XM_LCLFLT_LBN 0
698#define XM_LCLFLT_WIDTH 1
699
700/* XGXS/XAUI powerdown/reset register */
701#define XX_PWR_RST_REG 0x1300
702
703#define XX_SD_RST_ACT_LBN 16
704#define XX_SD_RST_ACT_WIDTH 1
705#define XX_PWRDND_EN_LBN 15
706#define XX_PWRDND_EN_WIDTH 1
707#define XX_PWRDNC_EN_LBN 14
708#define XX_PWRDNC_EN_WIDTH 1
709#define XX_PWRDNB_EN_LBN 13
710#define XX_PWRDNB_EN_WIDTH 1
711#define XX_PWRDNA_EN_LBN 12
712#define XX_PWRDNA_EN_WIDTH 1
713#define XX_RSTPLLCD_EN_LBN 9
714#define XX_RSTPLLCD_EN_WIDTH 1
715#define XX_RSTPLLAB_EN_LBN 8
716#define XX_RSTPLLAB_EN_WIDTH 1
717#define XX_RESETD_EN_LBN 7
718#define XX_RESETD_EN_WIDTH 1
719#define XX_RESETC_EN_LBN 6
720#define XX_RESETC_EN_WIDTH 1
721#define XX_RESETB_EN_LBN 5
722#define XX_RESETB_EN_WIDTH 1
723#define XX_RESETA_EN_LBN 4
724#define XX_RESETA_EN_WIDTH 1
725#define XX_RSTXGXSRX_EN_LBN 2
726#define XX_RSTXGXSRX_EN_WIDTH 1
727#define XX_RSTXGXSTX_EN_LBN 1
728#define XX_RSTXGXSTX_EN_WIDTH 1
729#define XX_RST_XX_EN_LBN 0
730#define XX_RST_XX_EN_WIDTH 1
731
732/* XGXS/XAUI powerdown/reset control register */
733#define XX_SD_CTL_REG 0x1310
734#define XX_HIDRVD_LBN 15
735#define XX_HIDRVD_WIDTH 1
736#define XX_LODRVD_LBN 14
737#define XX_LODRVD_WIDTH 1
738#define XX_HIDRVC_LBN 13
739#define XX_HIDRVC_WIDTH 1
740#define XX_LODRVC_LBN 12
741#define XX_LODRVC_WIDTH 1
742#define XX_HIDRVB_LBN 11
743#define XX_HIDRVB_WIDTH 1
744#define XX_LODRVB_LBN 10
745#define XX_LODRVB_WIDTH 1
746#define XX_HIDRVA_LBN 9
747#define XX_HIDRVA_WIDTH 1
748#define XX_LODRVA_LBN 8
749#define XX_LODRVA_WIDTH 1
750#define XX_LPBKD_LBN 3
751#define XX_LPBKD_WIDTH 1
752#define XX_LPBKC_LBN 2
753#define XX_LPBKC_WIDTH 1
754#define XX_LPBKB_LBN 1
755#define XX_LPBKB_WIDTH 1
756#define XX_LPBKA_LBN 0
757#define XX_LPBKA_WIDTH 1
758
759#define XX_TXDRV_CTL_REG 0x1320
760#define XX_DEQD_LBN 28
761#define XX_DEQD_WIDTH 4
762#define XX_DEQC_LBN 24
763#define XX_DEQC_WIDTH 4
764#define XX_DEQB_LBN 20
765#define XX_DEQB_WIDTH 4
766#define XX_DEQA_LBN 16
767#define XX_DEQA_WIDTH 4
768#define XX_DTXD_LBN 12
769#define XX_DTXD_WIDTH 4
770#define XX_DTXC_LBN 8
771#define XX_DTXC_WIDTH 4
772#define XX_DTXB_LBN 4
773#define XX_DTXB_WIDTH 4
774#define XX_DTXA_LBN 0
775#define XX_DTXA_WIDTH 4
776
777/* XAUI XGXS core status register */
778#define XX_CORE_STAT_REG 0x1360
779#define XX_FORCE_SIG_LBN 24
780#define XX_FORCE_SIG_WIDTH 8
781#define XX_FORCE_SIG_DECODE_FORCED 0xff
782#define XX_XGXS_LB_EN_LBN 23
783#define XX_XGXS_LB_EN_WIDTH 1
784#define XX_XGMII_LB_EN_LBN 22
785#define XX_XGMII_LB_EN_WIDTH 1
786#define XX_ALIGN_DONE_LBN 20
787#define XX_ALIGN_DONE_WIDTH 1
788#define XX_SYNC_STAT_LBN 16
789#define XX_SYNC_STAT_WIDTH 4
790#define XX_SYNC_STAT_DECODE_SYNCED 0xf
791#define XX_COMMA_DET_LBN 12
792#define XX_COMMA_DET_WIDTH 4
793#define XX_COMMA_DET_DECODE_DETECTED 0xf
794#define XX_COMMA_DET_RESET 0xf
795#define XX_CHARERR_LBN 4
796#define XX_CHARERR_WIDTH 4
797#define XX_CHARERR_RESET 0xf
798#define XX_DISPERR_LBN 0
799#define XX_DISPERR_WIDTH 4
800#define XX_DISPERR_RESET 0xf
801
802/* Receive filter table */
803#define RX_FILTER_TBL0 0xF00000
804
805/* Receive descriptor pointer table */
806#define RX_DESC_PTR_TBL_KER_A1 0x11800
807#define RX_DESC_PTR_TBL_KER_B0 0xF40000
808#define RX_DESC_PTR_TBL_KER_P0 0x900
809#define RX_ISCSI_DDIG_EN_LBN 88
810#define RX_ISCSI_DDIG_EN_WIDTH 1
811#define RX_ISCSI_HDIG_EN_LBN 87
812#define RX_ISCSI_HDIG_EN_WIDTH 1
813#define RX_DESCQ_BUF_BASE_ID_LBN 36
814#define RX_DESCQ_BUF_BASE_ID_WIDTH 20
815#define RX_DESCQ_EVQ_ID_LBN 24
816#define RX_DESCQ_EVQ_ID_WIDTH 12
817#define RX_DESCQ_OWNER_ID_LBN 10
818#define RX_DESCQ_OWNER_ID_WIDTH 14
819#define RX_DESCQ_LABEL_LBN 5
820#define RX_DESCQ_LABEL_WIDTH 5
821#define RX_DESCQ_SIZE_LBN 3
822#define RX_DESCQ_SIZE_WIDTH 2
823#define RX_DESCQ_SIZE_4K 3
824#define RX_DESCQ_SIZE_2K 2
825#define RX_DESCQ_SIZE_1K 1
826#define RX_DESCQ_SIZE_512 0
827#define RX_DESCQ_TYPE_LBN 2
828#define RX_DESCQ_TYPE_WIDTH 1
829#define RX_DESCQ_JUMBO_LBN 1
830#define RX_DESCQ_JUMBO_WIDTH 1
831#define RX_DESCQ_EN_LBN 0
832#define RX_DESCQ_EN_WIDTH 1
833
834/* Transmit descriptor pointer table */
835#define TX_DESC_PTR_TBL_KER_A1 0x11900
836#define TX_DESC_PTR_TBL_KER_B0 0xF50000
837#define TX_DESC_PTR_TBL_KER_P0 0xa40
838#define TX_NON_IP_DROP_DIS_B0_LBN 91
839#define TX_NON_IP_DROP_DIS_B0_WIDTH 1
840#define TX_IP_CHKSM_DIS_B0_LBN 90
841#define TX_IP_CHKSM_DIS_B0_WIDTH 1
842#define TX_TCP_CHKSM_DIS_B0_LBN 89
843#define TX_TCP_CHKSM_DIS_B0_WIDTH 1
844#define TX_DESCQ_EN_LBN 88
845#define TX_DESCQ_EN_WIDTH 1
846#define TX_ISCSI_DDIG_EN_LBN 87
847#define TX_ISCSI_DDIG_EN_WIDTH 1
848#define TX_ISCSI_HDIG_EN_LBN 86
849#define TX_ISCSI_HDIG_EN_WIDTH 1
850#define TX_DESCQ_BUF_BASE_ID_LBN 36
851#define TX_DESCQ_BUF_BASE_ID_WIDTH 20
852#define TX_DESCQ_EVQ_ID_LBN 24
853#define TX_DESCQ_EVQ_ID_WIDTH 12
854#define TX_DESCQ_OWNER_ID_LBN 10
855#define TX_DESCQ_OWNER_ID_WIDTH 14
856#define TX_DESCQ_LABEL_LBN 5
857#define TX_DESCQ_LABEL_WIDTH 5
858#define TX_DESCQ_SIZE_LBN 3
859#define TX_DESCQ_SIZE_WIDTH 2
860#define TX_DESCQ_SIZE_4K 3
861#define TX_DESCQ_SIZE_2K 2
862#define TX_DESCQ_SIZE_1K 1
863#define TX_DESCQ_SIZE_512 0
864#define TX_DESCQ_TYPE_LBN 1
865#define TX_DESCQ_TYPE_WIDTH 2
866
867/* Event queue pointer */
868#define EVQ_PTR_TBL_KER_A1 0x11a00
869#define EVQ_PTR_TBL_KER_B0 0xf60000
870#define EVQ_PTR_TBL_KER_P0 0x500
871#define EVQ_EN_LBN 23
872#define EVQ_EN_WIDTH 1
873#define EVQ_SIZE_LBN 20
874#define EVQ_SIZE_WIDTH 3
875#define EVQ_SIZE_32K 6
876#define EVQ_SIZE_16K 5
877#define EVQ_SIZE_8K 4
878#define EVQ_SIZE_4K 3
879#define EVQ_SIZE_2K 2
880#define EVQ_SIZE_1K 1
881#define EVQ_SIZE_512 0
882#define EVQ_BUF_BASE_ID_LBN 0
883#define EVQ_BUF_BASE_ID_WIDTH 20
884
885/* Event queue read pointer */
886#define EVQ_RPTR_REG_KER_A1 0x11b00
887#define EVQ_RPTR_REG_KER_B0 0xfa0000
888#define EVQ_RPTR_REG_KER_DWORD (EVQ_RPTR_REG_KER + 0)
889#define EVQ_RPTR_DWORD_LBN 0
890#define EVQ_RPTR_DWORD_WIDTH 14
891
892/* RSS indirection table */
893#define RX_RSS_INDIR_TBL_B0 0xFB0000
894#define RX_RSS_INDIR_ENT_B0_LBN 0
895#define RX_RSS_INDIR_ENT_B0_WIDTH 6
896
897/* Special buffer descriptors (full-mode) */
898#define BUF_FULL_TBL_KER_A1 0x8000
899#define BUF_FULL_TBL_KER_B0 0x800000
900#define IP_DAT_BUF_SIZE_LBN 50
901#define IP_DAT_BUF_SIZE_WIDTH 1
902#define IP_DAT_BUF_SIZE_8K 1
903#define IP_DAT_BUF_SIZE_4K 0
904#define BUF_ADR_REGION_LBN 48
905#define BUF_ADR_REGION_WIDTH 2
906#define BUF_ADR_FBUF_LBN 14
907#define BUF_ADR_FBUF_WIDTH 34
908#define BUF_OWNER_ID_FBUF_LBN 0
909#define BUF_OWNER_ID_FBUF_WIDTH 14
910
911/* Transmit descriptor */
912#define TX_KER_PORT_LBN 63
913#define TX_KER_PORT_WIDTH 1
914#define TX_KER_CONT_LBN 62
915#define TX_KER_CONT_WIDTH 1
916#define TX_KER_BYTE_CNT_LBN 48
917#define TX_KER_BYTE_CNT_WIDTH 14
918#define TX_KER_BUF_REGION_LBN 46
919#define TX_KER_BUF_REGION_WIDTH 2
920#define TX_KER_BUF_REGION0_DECODE 0
921#define TX_KER_BUF_REGION1_DECODE 1
922#define TX_KER_BUF_REGION2_DECODE 2
923#define TX_KER_BUF_REGION3_DECODE 3
924#define TX_KER_BUF_ADR_LBN 0
925#define TX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
926
927/* Receive descriptor */
928#define RX_KER_BUF_SIZE_LBN 48
929#define RX_KER_BUF_SIZE_WIDTH 14
930#define RX_KER_BUF_REGION_LBN 46
931#define RX_KER_BUF_REGION_WIDTH 2
932#define RX_KER_BUF_REGION0_DECODE 0
933#define RX_KER_BUF_REGION1_DECODE 1
934#define RX_KER_BUF_REGION2_DECODE 2
935#define RX_KER_BUF_REGION3_DECODE 3
936#define RX_KER_BUF_ADR_LBN 0
937#define RX_KER_BUF_ADR_WIDTH EFX_DMA_TYPE_WIDTH(46)
938
939/**************************************************************************
940 *
941 * Falcon events
942 *
943 **************************************************************************
944 */
945
946/* Event queue entries */
947#define EV_CODE_LBN 60
948#define EV_CODE_WIDTH 4
949#define RX_IP_EV_DECODE 0
950#define TX_IP_EV_DECODE 2
951#define DRIVER_EV_DECODE 5
952#define GLOBAL_EV_DECODE 6
953#define DRV_GEN_EV_DECODE 7
954#define WHOLE_EVENT_LBN 0
955#define WHOLE_EVENT_WIDTH 64
956
957/* Receive events */
958#define RX_EV_PKT_OK_LBN 56
959#define RX_EV_PKT_OK_WIDTH 1
960#define RX_EV_PAUSE_FRM_ERR_LBN 55
961#define RX_EV_PAUSE_FRM_ERR_WIDTH 1
962#define RX_EV_BUF_OWNER_ID_ERR_LBN 54
963#define RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
964#define RX_EV_IF_FRAG_ERR_LBN 53
965#define RX_EV_IF_FRAG_ERR_WIDTH 1
966#define RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
967#define RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
968#define RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
969#define RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
970#define RX_EV_ETH_CRC_ERR_LBN 50
971#define RX_EV_ETH_CRC_ERR_WIDTH 1
972#define RX_EV_FRM_TRUNC_LBN 49
973#define RX_EV_FRM_TRUNC_WIDTH 1
974#define RX_EV_DRIB_NIB_LBN 48
975#define RX_EV_DRIB_NIB_WIDTH 1
976#define RX_EV_TOBE_DISC_LBN 47
977#define RX_EV_TOBE_DISC_WIDTH 1
978#define RX_EV_PKT_TYPE_LBN 44
979#define RX_EV_PKT_TYPE_WIDTH 3
980#define RX_EV_PKT_TYPE_ETH_DECODE 0
981#define RX_EV_PKT_TYPE_LLC_DECODE 1
982#define RX_EV_PKT_TYPE_JUMBO_DECODE 2
983#define RX_EV_PKT_TYPE_VLAN_DECODE 3
984#define RX_EV_PKT_TYPE_VLAN_LLC_DECODE 4
985#define RX_EV_PKT_TYPE_VLAN_JUMBO_DECODE 5
986#define RX_EV_HDR_TYPE_LBN 42
987#define RX_EV_HDR_TYPE_WIDTH 2
988#define RX_EV_HDR_TYPE_TCP_IPV4_DECODE 0
989#define RX_EV_HDR_TYPE_UDP_IPV4_DECODE 1
990#define RX_EV_HDR_TYPE_OTHER_IP_DECODE 2
991#define RX_EV_HDR_TYPE_NON_IP_DECODE 3
992#define RX_EV_HDR_TYPE_HAS_CHECKSUMS(hdr_type) \
993 ((hdr_type) <= RX_EV_HDR_TYPE_UDP_IPV4_DECODE)
994#define RX_EV_MCAST_HASH_MATCH_LBN 40
995#define RX_EV_MCAST_HASH_MATCH_WIDTH 1
996#define RX_EV_MCAST_PKT_LBN 39
997#define RX_EV_MCAST_PKT_WIDTH 1
998#define RX_EV_Q_LABEL_LBN 32
999#define RX_EV_Q_LABEL_WIDTH 5
1000#define RX_EV_JUMBO_CONT_LBN 31
1001#define RX_EV_JUMBO_CONT_WIDTH 1
1002#define RX_EV_BYTE_CNT_LBN 16
1003#define RX_EV_BYTE_CNT_WIDTH 14
1004#define RX_EV_SOP_LBN 15
1005#define RX_EV_SOP_WIDTH 1
1006#define RX_EV_DESC_PTR_LBN 0
1007#define RX_EV_DESC_PTR_WIDTH 12
1008
1009/* Transmit events */
1010#define TX_EV_PKT_ERR_LBN 38
1011#define TX_EV_PKT_ERR_WIDTH 1
1012#define TX_EV_Q_LABEL_LBN 32
1013#define TX_EV_Q_LABEL_WIDTH 5
1014#define TX_EV_WQ_FF_FULL_LBN 15
1015#define TX_EV_WQ_FF_FULL_WIDTH 1
1016#define TX_EV_COMP_LBN 12
1017#define TX_EV_COMP_WIDTH 1
1018#define TX_EV_DESC_PTR_LBN 0
1019#define TX_EV_DESC_PTR_WIDTH 12
1020
1021/* Driver events */
1022#define DRIVER_EV_SUB_CODE_LBN 56
1023#define DRIVER_EV_SUB_CODE_WIDTH 4
1024#define DRIVER_EV_SUB_DATA_LBN 0
1025#define DRIVER_EV_SUB_DATA_WIDTH 14
1026#define TX_DESCQ_FLS_DONE_EV_DECODE 0
1027#define RX_DESCQ_FLS_DONE_EV_DECODE 1
1028#define EVQ_INIT_DONE_EV_DECODE 2
1029#define EVQ_NOT_EN_EV_DECODE 3
1030#define RX_DESCQ_FLSFF_OVFL_EV_DECODE 4
1031#define SRM_UPD_DONE_EV_DECODE 5
1032#define WAKE_UP_EV_DECODE 6
1033#define TX_PKT_NON_TCP_UDP_DECODE 9
1034#define TIMER_EV_DECODE 10
1035#define RX_RECOVERY_EV_DECODE 11
1036#define RX_DSC_ERROR_EV_DECODE 14
1037#define TX_DSC_ERROR_EV_DECODE 15
1038#define DRIVER_EV_TX_DESCQ_ID_LBN 0
1039#define DRIVER_EV_TX_DESCQ_ID_WIDTH 12
1040#define DRIVER_EV_RX_FLUSH_FAIL_LBN 12
1041#define DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
1042#define DRIVER_EV_RX_DESCQ_ID_LBN 0
1043#define DRIVER_EV_RX_DESCQ_ID_WIDTH 12
1044#define SRM_CLR_EV_DECODE 0
1045#define SRM_UPD_EV_DECODE 1
1046#define SRM_ILLCLR_EV_DECODE 2
1047
1048/* Global events */
1049#define RX_RECOVERY_B0_LBN 12
1050#define RX_RECOVERY_B0_WIDTH 1
1051#define XG_MNT_INTR_B0_LBN 11
1052#define XG_MNT_INTR_B0_WIDTH 1
1053#define RX_RECOVERY_A1_LBN 11
1054#define RX_RECOVERY_A1_WIDTH 1
1055#define XFP_PHY_INTR_LBN 10
1056#define XFP_PHY_INTR_WIDTH 1
1057#define XG_PHY_INTR_LBN 9
1058#define XG_PHY_INTR_WIDTH 1
1059#define G_PHY1_INTR_LBN 8
1060#define G_PHY1_INTR_WIDTH 1
1061#define G_PHY0_INTR_LBN 7
1062#define G_PHY0_INTR_WIDTH 1
1063
1064/* Driver-generated test events */
1065#define EVQ_MAGIC_LBN 0
1066#define EVQ_MAGIC_WIDTH 32
1067
1068/**************************************************************************
1069 *
1070 * Falcon MAC stats
1071 *
1072 **************************************************************************
1073 *
1074 */
1075
1076#define GRxGoodOct_offset 0x0
1077#define GRxGoodOct_WIDTH 48
1078#define GRxBadOct_offset 0x8
1079#define GRxBadOct_WIDTH 48
1080#define GRxMissPkt_offset 0x10
1081#define GRxMissPkt_WIDTH 32
1082#define GRxFalseCRS_offset 0x14
1083#define GRxFalseCRS_WIDTH 32
1084#define GRxPausePkt_offset 0x18
1085#define GRxPausePkt_WIDTH 32
1086#define GRxBadPkt_offset 0x1C
1087#define GRxBadPkt_WIDTH 32
1088#define GRxUcastPkt_offset 0x20
1089#define GRxUcastPkt_WIDTH 32
1090#define GRxMcastPkt_offset 0x24
1091#define GRxMcastPkt_WIDTH 32
1092#define GRxBcastPkt_offset 0x28
1093#define GRxBcastPkt_WIDTH 32
1094#define GRxGoodLt64Pkt_offset 0x2C
1095#define GRxGoodLt64Pkt_WIDTH 32
1096#define GRxBadLt64Pkt_offset 0x30
1097#define GRxBadLt64Pkt_WIDTH 32
1098#define GRx64Pkt_offset 0x34
1099#define GRx64Pkt_WIDTH 32
1100#define GRx65to127Pkt_offset 0x38
1101#define GRx65to127Pkt_WIDTH 32
1102#define GRx128to255Pkt_offset 0x3C
1103#define GRx128to255Pkt_WIDTH 32
1104#define GRx256to511Pkt_offset 0x40
1105#define GRx256to511Pkt_WIDTH 32
1106#define GRx512to1023Pkt_offset 0x44
1107#define GRx512to1023Pkt_WIDTH 32
1108#define GRx1024to15xxPkt_offset 0x48
1109#define GRx1024to15xxPkt_WIDTH 32
1110#define GRx15xxtoJumboPkt_offset 0x4C
1111#define GRx15xxtoJumboPkt_WIDTH 32
1112#define GRxGtJumboPkt_offset 0x50
1113#define GRxGtJumboPkt_WIDTH 32
1114#define GRxFcsErr64to15xxPkt_offset 0x54
1115#define GRxFcsErr64to15xxPkt_WIDTH 32
1116#define GRxFcsErr15xxtoJumboPkt_offset 0x58
1117#define GRxFcsErr15xxtoJumboPkt_WIDTH 32
1118#define GRxFcsErrGtJumboPkt_offset 0x5C
1119#define GRxFcsErrGtJumboPkt_WIDTH 32
1120#define GTxGoodBadOct_offset 0x80
1121#define GTxGoodBadOct_WIDTH 48
1122#define GTxGoodOct_offset 0x88
1123#define GTxGoodOct_WIDTH 48
1124#define GTxSglColPkt_offset 0x90
1125#define GTxSglColPkt_WIDTH 32
1126#define GTxMultColPkt_offset 0x94
1127#define GTxMultColPkt_WIDTH 32
1128#define GTxExColPkt_offset 0x98
1129#define GTxExColPkt_WIDTH 32
1130#define GTxDefPkt_offset 0x9C
1131#define GTxDefPkt_WIDTH 32
1132#define GTxLateCol_offset 0xA0
1133#define GTxLateCol_WIDTH 32
1134#define GTxExDefPkt_offset 0xA4
1135#define GTxExDefPkt_WIDTH 32
1136#define GTxPausePkt_offset 0xA8
1137#define GTxPausePkt_WIDTH 32
1138#define GTxBadPkt_offset 0xAC
1139#define GTxBadPkt_WIDTH 32
1140#define GTxUcastPkt_offset 0xB0
1141#define GTxUcastPkt_WIDTH 32
1142#define GTxMcastPkt_offset 0xB4
1143#define GTxMcastPkt_WIDTH 32
1144#define GTxBcastPkt_offset 0xB8
1145#define GTxBcastPkt_WIDTH 32
1146#define GTxLt64Pkt_offset 0xBC
1147#define GTxLt64Pkt_WIDTH 32
1148#define GTx64Pkt_offset 0xC0
1149#define GTx64Pkt_WIDTH 32
1150#define GTx65to127Pkt_offset 0xC4
1151#define GTx65to127Pkt_WIDTH 32
1152#define GTx128to255Pkt_offset 0xC8
1153#define GTx128to255Pkt_WIDTH 32
1154#define GTx256to511Pkt_offset 0xCC
1155#define GTx256to511Pkt_WIDTH 32
1156#define GTx512to1023Pkt_offset 0xD0
1157#define GTx512to1023Pkt_WIDTH 32
1158#define GTx1024to15xxPkt_offset 0xD4
1159#define GTx1024to15xxPkt_WIDTH 32
1160#define GTx15xxtoJumboPkt_offset 0xD8
1161#define GTx15xxtoJumboPkt_WIDTH 32
1162#define GTxGtJumboPkt_offset 0xDC
1163#define GTxGtJumboPkt_WIDTH 32
1164#define GTxNonTcpUdpPkt_offset 0xE0
1165#define GTxNonTcpUdpPkt_WIDTH 16
1166#define GTxMacSrcErrPkt_offset 0xE4
1167#define GTxMacSrcErrPkt_WIDTH 16
1168#define GTxIpSrcErrPkt_offset 0xE8
1169#define GTxIpSrcErrPkt_WIDTH 16
1170#define GDmaDone_offset 0xEC
1171#define GDmaDone_WIDTH 32
1172
1173#define XgRxOctets_offset 0x0
1174#define XgRxOctets_WIDTH 48
1175#define XgRxOctetsOK_offset 0x8
1176#define XgRxOctetsOK_WIDTH 48
1177#define XgRxPkts_offset 0x10
1178#define XgRxPkts_WIDTH 32
1179#define XgRxPktsOK_offset 0x14
1180#define XgRxPktsOK_WIDTH 32
1181#define XgRxBroadcastPkts_offset 0x18
1182#define XgRxBroadcastPkts_WIDTH 32
1183#define XgRxMulticastPkts_offset 0x1C
1184#define XgRxMulticastPkts_WIDTH 32
1185#define XgRxUnicastPkts_offset 0x20
1186#define XgRxUnicastPkts_WIDTH 32
1187#define XgRxUndersizePkts_offset 0x24
1188#define XgRxUndersizePkts_WIDTH 32
1189#define XgRxOversizePkts_offset 0x28
1190#define XgRxOversizePkts_WIDTH 32
1191#define XgRxJabberPkts_offset 0x2C
1192#define XgRxJabberPkts_WIDTH 32
1193#define XgRxUndersizeFCSerrorPkts_offset 0x30
1194#define XgRxUndersizeFCSerrorPkts_WIDTH 32
1195#define XgRxDropEvents_offset 0x34
1196#define XgRxDropEvents_WIDTH 32
1197#define XgRxFCSerrorPkts_offset 0x38
1198#define XgRxFCSerrorPkts_WIDTH 32
1199#define XgRxAlignError_offset 0x3C
1200#define XgRxAlignError_WIDTH 32
1201#define XgRxSymbolError_offset 0x40
1202#define XgRxSymbolError_WIDTH 32
1203#define XgRxInternalMACError_offset 0x44
1204#define XgRxInternalMACError_WIDTH 32
1205#define XgRxControlPkts_offset 0x48
1206#define XgRxControlPkts_WIDTH 32
1207#define XgRxPausePkts_offset 0x4C
1208#define XgRxPausePkts_WIDTH 32
1209#define XgRxPkts64Octets_offset 0x50
1210#define XgRxPkts64Octets_WIDTH 32
1211#define XgRxPkts65to127Octets_offset 0x54
1212#define XgRxPkts65to127Octets_WIDTH 32
1213#define XgRxPkts128to255Octets_offset 0x58
1214#define XgRxPkts128to255Octets_WIDTH 32
1215#define XgRxPkts256to511Octets_offset 0x5C
1216#define XgRxPkts256to511Octets_WIDTH 32
1217#define XgRxPkts512to1023Octets_offset 0x60
1218#define XgRxPkts512to1023Octets_WIDTH 32
1219#define XgRxPkts1024to15xxOctets_offset 0x64
1220#define XgRxPkts1024to15xxOctets_WIDTH 32
1221#define XgRxPkts15xxtoMaxOctets_offset 0x68
1222#define XgRxPkts15xxtoMaxOctets_WIDTH 32
1223#define XgRxLengthError_offset 0x6C
1224#define XgRxLengthError_WIDTH 32
1225#define XgTxPkts_offset 0x80
1226#define XgTxPkts_WIDTH 32
1227#define XgTxOctets_offset 0x88
1228#define XgTxOctets_WIDTH 48
1229#define XgTxMulticastPkts_offset 0x90
1230#define XgTxMulticastPkts_WIDTH 32
1231#define XgTxBroadcastPkts_offset 0x94
1232#define XgTxBroadcastPkts_WIDTH 32
1233#define XgTxUnicastPkts_offset 0x98
1234#define XgTxUnicastPkts_WIDTH 32
1235#define XgTxControlPkts_offset 0x9C
1236#define XgTxControlPkts_WIDTH 32
1237#define XgTxPausePkts_offset 0xA0
1238#define XgTxPausePkts_WIDTH 32
1239#define XgTxPkts64Octets_offset 0xA4
1240#define XgTxPkts64Octets_WIDTH 32
1241#define XgTxPkts65to127Octets_offset 0xA8
1242#define XgTxPkts65to127Octets_WIDTH 32
1243#define XgTxPkts128to255Octets_offset 0xAC
1244#define XgTxPkts128to255Octets_WIDTH 32
1245#define XgTxPkts256to511Octets_offset 0xB0
1246#define XgTxPkts256to511Octets_WIDTH 32
1247#define XgTxPkts512to1023Octets_offset 0xB4
1248#define XgTxPkts512to1023Octets_WIDTH 32
1249#define XgTxPkts1024to15xxOctets_offset 0xB8
1250#define XgTxPkts1024to15xxOctets_WIDTH 32
1251#define XgTxPkts1519toMaxOctets_offset 0xBC
1252#define XgTxPkts1519toMaxOctets_WIDTH 32
1253#define XgTxUndersizePkts_offset 0xC0
1254#define XgTxUndersizePkts_WIDTH 32
1255#define XgTxOversizePkts_offset 0xC4
1256#define XgTxOversizePkts_WIDTH 32
1257#define XgTxNonTcpUdpPkt_offset 0xC8
1258#define XgTxNonTcpUdpPkt_WIDTH 16
1259#define XgTxMacSrcErrPkt_offset 0xCC
1260#define XgTxMacSrcErrPkt_WIDTH 16
1261#define XgTxIpSrcErrPkt_offset 0xD0
1262#define XgTxIpSrcErrPkt_WIDTH 16
1263#define XgDmaDone_offset 0xD4
1264
1265#define FALCON_STATS_NOT_DONE 0x00000000
1266#define FALCON_STATS_DONE 0xffffffff
1267
1268/* Interrupt status register bits */
1269#define FATAL_INT_LBN 64
1270#define FATAL_INT_WIDTH 1
1271#define INT_EVQS_LBN 40
1272#define INT_EVQS_WIDTH 4
1273
1274/**************************************************************************
1275 *
1276 * Falcon non-volatile configuration
1277 *
1278 **************************************************************************
1279 */
1280
1281/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
1282struct falcon_nvconfig_board_v2 {
1283 __le16 nports;
1284 u8 port0_phy_addr;
1285 u8 port0_phy_type;
1286 u8 port1_phy_addr;
1287 u8 port1_phy_type;
1288 __le16 asic_sub_revision;
1289 __le16 board_revision;
1290} __packed;
1291
1292/* Board configuration v3 extra information */
1293struct falcon_nvconfig_board_v3 {
1294 __le32 spi_device_type[2];
1295} __packed;
1296
1297/* Bit numbers for spi_device_type */
1298#define SPI_DEV_TYPE_SIZE_LBN 0
1299#define SPI_DEV_TYPE_SIZE_WIDTH 5
1300#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
1301#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
1302#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
1303#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
1304#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
1305#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
1306#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
1307#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
1308#define SPI_DEV_TYPE_FIELD(type, field) \
1309 (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
1310
1311#define NVCONFIG_OFFSET 0x300
1312
1313#define NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
1314struct falcon_nvconfig {
1315 efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
1316 u8 mac_address[2][8]; /* 0x310 */
1317 efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
1318 efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
1319 efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
1320 efx_oword_t hw_init_reg; /* 0x350 */
1321 efx_oword_t nic_stat_reg; /* 0x360 */
1322 efx_oword_t glb_ctl_reg; /* 0x370 */
1323 efx_oword_t srm_cfg_reg; /* 0x380 */
1324 efx_oword_t spare_reg; /* 0x390 */
1325 __le16 board_magic_num; /* 0x3A0 */
1326 __le16 board_struct_ver;
1327 __le16 board_checksum;
1328 struct falcon_nvconfig_board_v2 board_v2;
1329 efx_oword_t ee_base_page_reg; /* 0x3B0 */
1330 struct falcon_nvconfig_board_v3 board_v3;
1331} __packed;
1332
1333#endif /* EFX_FALCON_HWDEFS_H */
diff --git a/drivers/net/sfc/falcon_io.h b/drivers/net/sfc/falcon_io.h
deleted file mode 100644
index 8883092dae97..000000000000
--- a/drivers/net/sfc/falcon_io.h
+++ /dev/null
@@ -1,258 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_FALCON_IO_H
12#define EFX_FALCON_IO_H
13
14#include <linux/io.h>
15#include <linux/spinlock.h>
16
17/**************************************************************************
18 *
19 * Falcon hardware access
20 *
21 **************************************************************************
22 *
23 * Notes on locking strategy:
24 *
25 * Most Falcon registers require 16-byte (or 8-byte, for SRAM
26 * registers) atomic writes which necessitates locking.
27 * Under normal operation few writes to the Falcon BAR are made and these
28 * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special
29 * cased to allow 4-byte (hence lockless) accesses.
30 *
31 * It *is* safe to write to these 4-byte registers in the middle of an
32 * access to an 8-byte or 16-byte register. We therefore use a
33 * spinlock to protect accesses to the larger registers, but no locks
34 * for the 4-byte registers.
35 *
36 * A write barrier is needed to ensure that DW3 is written after DW0/1/2
37 * due to the way the 16byte registers are "collected" in the Falcon BIU
38 *
39 * We also lock when carrying out reads, to ensure consistency of the
40 * data (made possible since the BIU reads all 128 bits into a cache).
41 * Reads are very rare, so this isn't a significant performance
42 * impact. (Most data transferred from NIC to host is DMAed directly
43 * into host memory).
44 *
45 * I/O BAR access uses locks for both reads and writes (but is only provided
46 * for testing purposes).
47 */
48
49/* Special buffer descriptors (Falcon SRAM) */
50#define BUF_TBL_KER_A1 0x18000
51#define BUF_TBL_KER_B0 0x800000
52
53
54#if BITS_PER_LONG == 64
55#define FALCON_USE_QWORD_IO 1
56#endif
57
58#ifdef FALCON_USE_QWORD_IO
59static inline void _falcon_writeq(struct efx_nic *efx, __le64 value,
60 unsigned int reg)
61{
62 __raw_writeq((__force u64)value, efx->membase + reg);
63}
64static inline __le64 _falcon_readq(struct efx_nic *efx, unsigned int reg)
65{
66 return (__force __le64)__raw_readq(efx->membase + reg);
67}
68#endif
69
70static inline void _falcon_writel(struct efx_nic *efx, __le32 value,
71 unsigned int reg)
72{
73 __raw_writel((__force u32)value, efx->membase + reg);
74}
75static inline __le32 _falcon_readl(struct efx_nic *efx, unsigned int reg)
76{
77 return (__force __le32)__raw_readl(efx->membase + reg);
78}
79
80/* Writes to a normal 16-byte Falcon register, locking as appropriate. */
81static inline void falcon_write(struct efx_nic *efx, efx_oword_t *value,
82 unsigned int reg)
83{
84 unsigned long flags;
85
86 EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg,
87 EFX_OWORD_VAL(*value));
88
89 spin_lock_irqsave(&efx->biu_lock, flags);
90#ifdef FALCON_USE_QWORD_IO
91 _falcon_writeq(efx, value->u64[0], reg + 0);
92 wmb();
93 _falcon_writeq(efx, value->u64[1], reg + 8);
94#else
95 _falcon_writel(efx, value->u32[0], reg + 0);
96 _falcon_writel(efx, value->u32[1], reg + 4);
97 _falcon_writel(efx, value->u32[2], reg + 8);
98 wmb();
99 _falcon_writel(efx, value->u32[3], reg + 12);
100#endif
101 mmiowb();
102 spin_unlock_irqrestore(&efx->biu_lock, flags);
103}
104
105/* Writes to an 8-byte Falcon SRAM register, locking as appropriate. */
106static inline void falcon_write_sram(struct efx_nic *efx, efx_qword_t *value,
107 unsigned int index)
108{
109 unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value));
110 unsigned long flags;
111
112 EFX_REGDUMP(efx, "writing SRAM register %x with " EFX_QWORD_FMT "\n",
113 reg, EFX_QWORD_VAL(*value));
114
115 spin_lock_irqsave(&efx->biu_lock, flags);
116#ifdef FALCON_USE_QWORD_IO
117 _falcon_writeq(efx, value->u64[0], reg + 0);
118#else
119 _falcon_writel(efx, value->u32[0], reg + 0);
120 wmb();
121 _falcon_writel(efx, value->u32[1], reg + 4);
122#endif
123 mmiowb();
124 spin_unlock_irqrestore(&efx->biu_lock, flags);
125}
126
127/* Write dword to Falcon register that allows partial writes
128 *
129 * Some Falcon registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
130 * TX_DESC_UPD_REG) can be written to as a single dword. This allows
131 * for lockless writes.
132 */
133static inline void falcon_writel(struct efx_nic *efx, efx_dword_t *value,
134 unsigned int reg)
135{
136 EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n",
137 reg, EFX_DWORD_VAL(*value));
138
139 /* No lock required */
140 _falcon_writel(efx, value->u32[0], reg);
141}
142
143/* Read from a Falcon register
144 *
145 * This reads an entire 16-byte Falcon register in one go, locking as
146 * appropriate. It is essential to read the first dword first, as this
147 * prompts Falcon to load the current value into the shadow register.
148 */
149static inline void falcon_read(struct efx_nic *efx, efx_oword_t *value,
150 unsigned int reg)
151{
152 unsigned long flags;
153
154 spin_lock_irqsave(&efx->biu_lock, flags);
155 value->u32[0] = _falcon_readl(efx, reg + 0);
156 rmb();
157 value->u32[1] = _falcon_readl(efx, reg + 4);
158 value->u32[2] = _falcon_readl(efx, reg + 8);
159 value->u32[3] = _falcon_readl(efx, reg + 12);
160 spin_unlock_irqrestore(&efx->biu_lock, flags);
161
162 EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg,
163 EFX_OWORD_VAL(*value));
164}
165
166/* This reads an 8-byte Falcon SRAM entry in one go. */
167static inline void falcon_read_sram(struct efx_nic *efx, efx_qword_t *value,
168 unsigned int index)
169{
170 unsigned int reg = efx->type->buf_tbl_base + (index * sizeof(*value));
171 unsigned long flags;
172
173 spin_lock_irqsave(&efx->biu_lock, flags);
174#ifdef FALCON_USE_QWORD_IO
175 value->u64[0] = _falcon_readq(efx, reg + 0);
176#else
177 value->u32[0] = _falcon_readl(efx, reg + 0);
178 rmb();
179 value->u32[1] = _falcon_readl(efx, reg + 4);
180#endif
181 spin_unlock_irqrestore(&efx->biu_lock, flags);
182
183 EFX_REGDUMP(efx, "read from SRAM register %x, got "EFX_QWORD_FMT"\n",
184 reg, EFX_QWORD_VAL(*value));
185}
186
187/* Read dword from Falcon register that allows partial writes (sic) */
188static inline void falcon_readl(struct efx_nic *efx, efx_dword_t *value,
189 unsigned int reg)
190{
191 value->u32[0] = _falcon_readl(efx, reg);
192 EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n",
193 reg, EFX_DWORD_VAL(*value));
194}
195
196/* Write to a register forming part of a table */
197static inline void falcon_write_table(struct efx_nic *efx, efx_oword_t *value,
198 unsigned int reg, unsigned int index)
199{
200 falcon_write(efx, value, reg + index * sizeof(efx_oword_t));
201}
202
203/* Read to a register forming part of a table */
204static inline void falcon_read_table(struct efx_nic *efx, efx_oword_t *value,
205 unsigned int reg, unsigned int index)
206{
207 falcon_read(efx, value, reg + index * sizeof(efx_oword_t));
208}
209
210/* Write to a dword register forming part of a table */
211static inline void falcon_writel_table(struct efx_nic *efx, efx_dword_t *value,
212 unsigned int reg, unsigned int index)
213{
214 falcon_writel(efx, value, reg + index * sizeof(efx_oword_t));
215}
216
217/* Page-mapped register block size */
218#define FALCON_PAGE_BLOCK_SIZE 0x2000
219
220/* Calculate offset to page-mapped register block */
221#define FALCON_PAGED_REG(page, reg) \
222 ((page) * FALCON_PAGE_BLOCK_SIZE + (reg))
223
224/* As for falcon_write(), but for a page-mapped register. */
225static inline void falcon_write_page(struct efx_nic *efx, efx_oword_t *value,
226 unsigned int reg, unsigned int page)
227{
228 falcon_write(efx, value, FALCON_PAGED_REG(page, reg));
229}
230
231/* As for falcon_writel(), but for a page-mapped register. */
232static inline void falcon_writel_page(struct efx_nic *efx, efx_dword_t *value,
233 unsigned int reg, unsigned int page)
234{
235 falcon_writel(efx, value, FALCON_PAGED_REG(page, reg));
236}
237
238/* Write dword to Falcon page-mapped register with an extra lock.
239 *
240 * As for falcon_writel_page(), but for a register that suffers from
241 * SFC bug 3181. If writing to page 0, take out a lock so the BIU
242 * collector cannot be confused.
243 */
244static inline void falcon_writel_page_locked(struct efx_nic *efx,
245 efx_dword_t *value,
246 unsigned int reg,
247 unsigned int page)
248{
249 unsigned long flags = 0;
250
251 if (page == 0)
252 spin_lock_irqsave(&efx->biu_lock, flags);
253 falcon_writel(efx, value, FALCON_PAGED_REG(page, reg));
254 if (page == 0)
255 spin_unlock_irqrestore(&efx->biu_lock, flags);
256}
257
258#endif /* EFX_FALCON_IO_H */
diff --git a/drivers/net/sfc/falcon_xmac.c b/drivers/net/sfc/falcon_xmac.c
index bec52ca37eee..7e57b4a54b37 100644
--- a/drivers/net/sfc/falcon_xmac.c
+++ b/drivers/net/sfc/falcon_xmac.c
@@ -12,12 +12,11 @@
12#include "net_driver.h" 12#include "net_driver.h"
13#include "efx.h" 13#include "efx.h"
14#include "falcon.h" 14#include "falcon.h"
15#include "falcon_hwdefs.h" 15#include "regs.h"
16#include "falcon_io.h" 16#include "io.h"
17#include "mac.h" 17#include "mac.h"
18#include "mdio_10g.h" 18#include "mdio_10g.h"
19#include "phy.h" 19#include "phy.h"
20#include "boards.h"
21#include "workarounds.h" 20#include "workarounds.h"
22 21
23/************************************************************************** 22/**************************************************************************
@@ -36,27 +35,27 @@ static void falcon_setup_xaui(struct efx_nic *efx)
36 if (efx->phy_type == PHY_TYPE_NONE) 35 if (efx->phy_type == PHY_TYPE_NONE)
37 return; 36 return;
38 37
39 falcon_read(efx, &sdctl, XX_SD_CTL_REG); 38 efx_reado(efx, &sdctl, FR_AB_XX_SD_CTL);
40 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVD, XX_SD_CTL_DRV_DEFAULT); 39 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
41 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVD, XX_SD_CTL_DRV_DEFAULT); 40 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVD, FFE_AB_XX_SD_CTL_DRV_DEF);
42 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVC, XX_SD_CTL_DRV_DEFAULT); 41 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
43 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVC, XX_SD_CTL_DRV_DEFAULT); 42 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVC, FFE_AB_XX_SD_CTL_DRV_DEF);
44 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVB, XX_SD_CTL_DRV_DEFAULT); 43 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
45 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVB, XX_SD_CTL_DRV_DEFAULT); 44 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVB, FFE_AB_XX_SD_CTL_DRV_DEF);
46 EFX_SET_OWORD_FIELD(sdctl, XX_HIDRVA, XX_SD_CTL_DRV_DEFAULT); 45 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_HIDRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
47 EFX_SET_OWORD_FIELD(sdctl, XX_LODRVA, XX_SD_CTL_DRV_DEFAULT); 46 EFX_SET_OWORD_FIELD(sdctl, FRF_AB_XX_LODRVA, FFE_AB_XX_SD_CTL_DRV_DEF);
48 falcon_write(efx, &sdctl, XX_SD_CTL_REG); 47 efx_writeo(efx, &sdctl, FR_AB_XX_SD_CTL);
49 48
50 EFX_POPULATE_OWORD_8(txdrv, 49 EFX_POPULATE_OWORD_8(txdrv,
51 XX_DEQD, XX_TXDRV_DEQ_DEFAULT, 50 FRF_AB_XX_DEQD, FFE_AB_XX_TXDRV_DEQ_DEF,
52 XX_DEQC, XX_TXDRV_DEQ_DEFAULT, 51 FRF_AB_XX_DEQC, FFE_AB_XX_TXDRV_DEQ_DEF,
53 XX_DEQB, XX_TXDRV_DEQ_DEFAULT, 52 FRF_AB_XX_DEQB, FFE_AB_XX_TXDRV_DEQ_DEF,
54 XX_DEQA, XX_TXDRV_DEQ_DEFAULT, 53 FRF_AB_XX_DEQA, FFE_AB_XX_TXDRV_DEQ_DEF,
55 XX_DTXD, XX_TXDRV_DTX_DEFAULT, 54 FRF_AB_XX_DTXD, FFE_AB_XX_TXDRV_DTX_DEF,
56 XX_DTXC, XX_TXDRV_DTX_DEFAULT, 55 FRF_AB_XX_DTXC, FFE_AB_XX_TXDRV_DTX_DEF,
57 XX_DTXB, XX_TXDRV_DTX_DEFAULT, 56 FRF_AB_XX_DTXB, FFE_AB_XX_TXDRV_DTX_DEF,
58 XX_DTXA, XX_TXDRV_DTX_DEFAULT); 57 FRF_AB_XX_DTXA, FFE_AB_XX_TXDRV_DTX_DEF);
59 falcon_write(efx, &txdrv, XX_TXDRV_CTL_REG); 58 efx_writeo(efx, &txdrv, FR_AB_XX_TXDRV_CTL);
60} 59}
61 60
62int falcon_reset_xaui(struct efx_nic *efx) 61int falcon_reset_xaui(struct efx_nic *efx)
@@ -65,14 +64,14 @@ int falcon_reset_xaui(struct efx_nic *efx)
65 int count; 64 int count;
66 65
67 /* Start reset sequence */ 66 /* Start reset sequence */
68 EFX_POPULATE_DWORD_1(reg, XX_RST_XX_EN, 1); 67 EFX_POPULATE_DWORD_1(reg, FRF_AB_XX_RST_XX_EN, 1);
69 falcon_write(efx, &reg, XX_PWR_RST_REG); 68 efx_writeo(efx, &reg, FR_AB_XX_PWR_RST);
70 69
71 /* Wait up to 10 ms for completion, then reinitialise */ 70 /* Wait up to 10 ms for completion, then reinitialise */
72 for (count = 0; count < 1000; count++) { 71 for (count = 0; count < 1000; count++) {
73 falcon_read(efx, &reg, XX_PWR_RST_REG); 72 efx_reado(efx, &reg, FR_AB_XX_PWR_RST);
74 if (EFX_OWORD_FIELD(reg, XX_RST_XX_EN) == 0 && 73 if (EFX_OWORD_FIELD(reg, FRF_AB_XX_RST_XX_EN) == 0 &&
75 EFX_OWORD_FIELD(reg, XX_SD_RST_ACT) == 0) { 74 EFX_OWORD_FIELD(reg, FRF_AB_XX_SD_RST_ACT) == 0) {
76 falcon_setup_xaui(efx); 75 falcon_setup_xaui(efx);
77 return 0; 76 return 0;
78 } 77 }
@@ -100,12 +99,12 @@ static void falcon_mask_status_intr(struct efx_nic *efx, bool enable)
100 99
101 /* Flush the ISR */ 100 /* Flush the ISR */
102 if (enable) 101 if (enable)
103 falcon_read(efx, &reg, XM_MGT_INT_REG_B0); 102 efx_reado(efx, &reg, FR_AB_XM_MGT_INT_MSK);
104 103
105 EFX_POPULATE_OWORD_2(reg, 104 EFX_POPULATE_OWORD_2(reg,
106 XM_MSK_RMTFLT, !enable, 105 FRF_AB_XM_MSK_RMTFLT, !enable,
107 XM_MSK_LCLFLT, !enable); 106 FRF_AB_XM_MSK_LCLFLT, !enable);
108 falcon_write(efx, &reg, XM_MGT_INT_MSK_REG_B0); 107 efx_writeo(efx, &reg, FR_AB_XM_MGT_INT_MASK);
109} 108}
110 109
111/* Get status of XAUI link */ 110/* Get status of XAUI link */
@@ -119,18 +118,18 @@ bool falcon_xaui_link_ok(struct efx_nic *efx)
119 return true; 118 return true;
120 119
121 /* Read link status */ 120 /* Read link status */
122 falcon_read(efx, &reg, XX_CORE_STAT_REG); 121 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
123 122
124 align_done = EFX_OWORD_FIELD(reg, XX_ALIGN_DONE); 123 align_done = EFX_OWORD_FIELD(reg, FRF_AB_XX_ALIGN_DONE);
125 sync_status = EFX_OWORD_FIELD(reg, XX_SYNC_STAT); 124 sync_status = EFX_OWORD_FIELD(reg, FRF_AB_XX_SYNC_STAT);
126 if (align_done && (sync_status == XX_SYNC_STAT_DECODE_SYNCED)) 125 if (align_done && (sync_status == FFE_AB_XX_STAT_ALL_LANES))
127 link_ok = true; 126 link_ok = true;
128 127
129 /* Clear link status ready for next read */ 128 /* Clear link status ready for next read */
130 EFX_SET_OWORD_FIELD(reg, XX_COMMA_DET, XX_COMMA_DET_RESET); 129 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_COMMA_DET, FFE_AB_XX_STAT_ALL_LANES);
131 EFX_SET_OWORD_FIELD(reg, XX_CHARERR, XX_CHARERR_RESET); 130 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_CHAR_ERR, FFE_AB_XX_STAT_ALL_LANES);
132 EFX_SET_OWORD_FIELD(reg, XX_DISPERR, XX_DISPERR_RESET); 131 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_DISPERR, FFE_AB_XX_STAT_ALL_LANES);
133 falcon_write(efx, &reg, XX_CORE_STAT_REG); 132 efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
134 133
135 /* If the link is up, then check the phy side of the xaui link */ 134 /* If the link is up, then check the phy side of the xaui link */
136 if (efx->link_up && link_ok) 135 if (efx->link_up && link_ok)
@@ -148,55 +147,49 @@ static void falcon_reconfigure_xmac_core(struct efx_nic *efx)
148 147
149 /* Configure MAC - cut-thru mode is hard wired on */ 148 /* Configure MAC - cut-thru mode is hard wired on */
150 EFX_POPULATE_DWORD_3(reg, 149 EFX_POPULATE_DWORD_3(reg,
151 XM_RX_JUMBO_MODE, 1, 150 FRF_AB_XM_RX_JUMBO_MODE, 1,
152 XM_TX_STAT_EN, 1, 151 FRF_AB_XM_TX_STAT_EN, 1,
153 XM_RX_STAT_EN, 1); 152 FRF_AB_XM_RX_STAT_EN, 1);
154 falcon_write(efx, &reg, XM_GLB_CFG_REG); 153 efx_writeo(efx, &reg, FR_AB_XM_GLB_CFG);
155 154
156 /* Configure TX */ 155 /* Configure TX */
157 EFX_POPULATE_DWORD_6(reg, 156 EFX_POPULATE_DWORD_6(reg,
158 XM_TXEN, 1, 157 FRF_AB_XM_TXEN, 1,
159 XM_TX_PRMBL, 1, 158 FRF_AB_XM_TX_PRMBL, 1,
160 XM_AUTO_PAD, 1, 159 FRF_AB_XM_AUTO_PAD, 1,
161 XM_TXCRC, 1, 160 FRF_AB_XM_TXCRC, 1,
162 XM_FCNTL, 1, 161 FRF_AB_XM_FCNTL, 1,
163 XM_IPG, 0x3); 162 FRF_AB_XM_IPG, 0x3);
164 falcon_write(efx, &reg, XM_TX_CFG_REG); 163 efx_writeo(efx, &reg, FR_AB_XM_TX_CFG);
165 164
166 /* Configure RX */ 165 /* Configure RX */
167 EFX_POPULATE_DWORD_5(reg, 166 EFX_POPULATE_DWORD_5(reg,
168 XM_RXEN, 1, 167 FRF_AB_XM_RXEN, 1,
169 XM_AUTO_DEPAD, 0, 168 FRF_AB_XM_AUTO_DEPAD, 0,
170 XM_ACPT_ALL_MCAST, 1, 169 FRF_AB_XM_ACPT_ALL_MCAST, 1,
171 XM_ACPT_ALL_UCAST, efx->promiscuous, 170 FRF_AB_XM_ACPT_ALL_UCAST, efx->promiscuous,
172 XM_PASS_CRC_ERR, 1); 171 FRF_AB_XM_PASS_CRC_ERR, 1);
173 falcon_write(efx, &reg, XM_RX_CFG_REG); 172 efx_writeo(efx, &reg, FR_AB_XM_RX_CFG);
174 173
175 /* Set frame length */ 174 /* Set frame length */
176 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu); 175 max_frame_len = EFX_MAX_FRAME_LEN(efx->net_dev->mtu);
177 EFX_POPULATE_DWORD_1(reg, XM_MAX_RX_FRM_SIZE, max_frame_len); 176 EFX_POPULATE_DWORD_1(reg, FRF_AB_XM_MAX_RX_FRM_SIZE, max_frame_len);
178 falcon_write(efx, &reg, XM_RX_PARAM_REG); 177 efx_writeo(efx, &reg, FR_AB_XM_RX_PARAM);
179 EFX_POPULATE_DWORD_2(reg, 178 EFX_POPULATE_DWORD_2(reg,
180 XM_MAX_TX_FRM_SIZE, max_frame_len, 179 FRF_AB_XM_MAX_TX_FRM_SIZE, max_frame_len,
181 XM_TX_JUMBO_MODE, 1); 180 FRF_AB_XM_TX_JUMBO_MODE, 1);
182 falcon_write(efx, &reg, XM_TX_PARAM_REG); 181 efx_writeo(efx, &reg, FR_AB_XM_TX_PARAM);
183 182
184 EFX_POPULATE_DWORD_2(reg, 183 EFX_POPULATE_DWORD_2(reg,
185 XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */ 184 FRF_AB_XM_PAUSE_TIME, 0xfffe, /* MAX PAUSE TIME */
186 XM_DIS_FCNTL, !rx_fc); 185 FRF_AB_XM_DIS_FCNTL, !rx_fc);
187 falcon_write(efx, &reg, XM_FC_REG); 186 efx_writeo(efx, &reg, FR_AB_XM_FC);
188 187
189 /* Set MAC address */ 188 /* Set MAC address */
190 EFX_POPULATE_DWORD_4(reg, 189 memcpy(&reg, &efx->net_dev->dev_addr[0], 4);
191 XM_ADR_0, efx->net_dev->dev_addr[0], 190 efx_writeo(efx, &reg, FR_AB_XM_ADR_LO);
192 XM_ADR_1, efx->net_dev->dev_addr[1], 191 memcpy(&reg, &efx->net_dev->dev_addr[4], 2);
193 XM_ADR_2, efx->net_dev->dev_addr[2], 192 efx_writeo(efx, &reg, FR_AB_XM_ADR_HI);
194 XM_ADR_3, efx->net_dev->dev_addr[3]);
195 falcon_write(efx, &reg, XM_ADR_LO_REG);
196 EFX_POPULATE_DWORD_2(reg,
197 XM_ADR_4, efx->net_dev->dev_addr[4],
198 XM_ADR_5, efx->net_dev->dev_addr[5]);
199 falcon_write(efx, &reg, XM_ADR_HI_REG);
200} 193}
201 194
202static void falcon_reconfigure_xgxs_core(struct efx_nic *efx) 195static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
@@ -212,12 +205,13 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
212 bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback; 205 bool old_xgmii_loopback, old_xgxs_loopback, old_xaui_loopback;
213 bool reset_xgxs; 206 bool reset_xgxs;
214 207
215 falcon_read(efx, &reg, XX_CORE_STAT_REG); 208 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
216 old_xgxs_loopback = EFX_OWORD_FIELD(reg, XX_XGXS_LB_EN); 209 old_xgxs_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN);
217 old_xgmii_loopback = EFX_OWORD_FIELD(reg, XX_XGMII_LB_EN); 210 old_xgmii_loopback =
211 EFX_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN);
218 212
219 falcon_read(efx, &reg, XX_SD_CTL_REG); 213 efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
220 old_xaui_loopback = EFX_OWORD_FIELD(reg, XX_LPBKA); 214 old_xaui_loopback = EFX_OWORD_FIELD(reg, FRF_AB_XX_LPBKA);
221 215
222 /* The PHY driver may have turned XAUI off */ 216 /* The PHY driver may have turned XAUI off */
223 reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) || 217 reset_xgxs = ((xgxs_loopback != old_xgxs_loopback) ||
@@ -228,20 +222,20 @@ static void falcon_reconfigure_xgxs_core(struct efx_nic *efx)
228 falcon_reset_xaui(efx); 222 falcon_reset_xaui(efx);
229 } 223 }
230 224
231 falcon_read(efx, &reg, XX_CORE_STAT_REG); 225 efx_reado(efx, &reg, FR_AB_XX_CORE_STAT);
232 EFX_SET_OWORD_FIELD(reg, XX_FORCE_SIG, 226 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_FORCE_SIG,
233 (xgxs_loopback || xaui_loopback) ? 227 (xgxs_loopback || xaui_loopback) ?
234 XX_FORCE_SIG_DECODE_FORCED : 0); 228 FFE_AB_XX_FORCE_SIG_ALL_LANES : 0);
235 EFX_SET_OWORD_FIELD(reg, XX_XGXS_LB_EN, xgxs_loopback); 229 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGXS_LB_EN, xgxs_loopback);
236 EFX_SET_OWORD_FIELD(reg, XX_XGMII_LB_EN, xgmii_loopback); 230 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_XGMII_LB_EN, xgmii_loopback);
237 falcon_write(efx, &reg, XX_CORE_STAT_REG); 231 efx_writeo(efx, &reg, FR_AB_XX_CORE_STAT);
238 232
239 falcon_read(efx, &reg, XX_SD_CTL_REG); 233 efx_reado(efx, &reg, FR_AB_XX_SD_CTL);
240 EFX_SET_OWORD_FIELD(reg, XX_LPBKD, xaui_loopback); 234 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKD, xaui_loopback);
241 EFX_SET_OWORD_FIELD(reg, XX_LPBKC, xaui_loopback); 235 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKC, xaui_loopback);
242 EFX_SET_OWORD_FIELD(reg, XX_LPBKB, xaui_loopback); 236 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKB, xaui_loopback);
243 EFX_SET_OWORD_FIELD(reg, XX_LPBKA, xaui_loopback); 237 EFX_SET_OWORD_FIELD(reg, FRF_AB_XX_LPBKA, xaui_loopback);
244 falcon_write(efx, &reg, XX_SD_CTL_REG); 238 efx_writeo(efx, &reg, FR_AB_XX_SD_CTL);
245} 239}
246 240
247 241
diff --git a/drivers/net/sfc/gmii.h b/drivers/net/sfc/gmii.h
deleted file mode 100644
index dfccaa7b573e..000000000000
--- a/drivers/net/sfc/gmii.h
+++ /dev/null
@@ -1,60 +0,0 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2008 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_GMII_H
12#define EFX_GMII_H
13
14/*
15 * GMII interface
16 */
17
18#include <linux/mii.h>
19
20/* GMII registers, excluding registers already defined as MII
21 * registers in mii.h
22 */
23#define GMII_IER 0x12 /* Interrupt enable register */
24#define GMII_ISR 0x13 /* Interrupt status register */
25
26/* Interrupt enable register */
27#define IER_ANEG_ERR 0x8000 /* Bit 15 - autonegotiation error */
28#define IER_SPEED_CHG 0x4000 /* Bit 14 - speed changed */
29#define IER_DUPLEX_CHG 0x2000 /* Bit 13 - duplex changed */
30#define IER_PAGE_RCVD 0x1000 /* Bit 12 - page received */
31#define IER_ANEG_DONE 0x0800 /* Bit 11 - autonegotiation complete */
32#define IER_LINK_CHG 0x0400 /* Bit 10 - link status changed */
33#define IER_SYM_ERR 0x0200 /* Bit 9 - symbol error */
34#define IER_FALSE_CARRIER 0x0100 /* Bit 8 - false carrier */
35#define IER_FIFO_ERR 0x0080 /* Bit 7 - FIFO over/underflow */
36#define IER_MDIX_CHG 0x0040 /* Bit 6 - MDI crossover changed */
37#define IER_DOWNSHIFT 0x0020 /* Bit 5 - downshift */
38#define IER_ENERGY 0x0010 /* Bit 4 - energy detect */
39#define IER_DTE_POWER 0x0004 /* Bit 2 - DTE power detect */
40#define IER_POLARITY_CHG 0x0002 /* Bit 1 - polarity changed */
41#define IER_JABBER 0x0001 /* Bit 0 - jabber */
42
43/* Interrupt status register */
44#define ISR_ANEG_ERR 0x8000 /* Bit 15 - autonegotiation error */
45#define ISR_SPEED_CHG 0x4000 /* Bit 14 - speed changed */
46#define ISR_DUPLEX_CHG 0x2000 /* Bit 13 - duplex changed */
47#define ISR_PAGE_RCVD 0x1000 /* Bit 12 - page received */
48#define ISR_ANEG_DONE 0x0800 /* Bit 11 - autonegotiation complete */
49#define ISR_LINK_CHG 0x0400 /* Bit 10 - link status changed */
50#define ISR_SYM_ERR 0x0200 /* Bit 9 - symbol error */
51#define ISR_FALSE_CARRIER 0x0100 /* Bit 8 - false carrier */
52#define ISR_FIFO_ERR 0x0080 /* Bit 7 - FIFO over/underflow */
53#define ISR_MDIX_CHG 0x0040 /* Bit 6 - MDI crossover changed */
54#define ISR_DOWNSHIFT 0x0020 /* Bit 5 - downshift */
55#define ISR_ENERGY 0x0010 /* Bit 4 - energy detect */
56#define ISR_DTE_POWER 0x0004 /* Bit 2 - DTE power detect */
57#define ISR_POLARITY_CHG 0x0002 /* Bit 1 - polarity changed */
58#define ISR_JABBER 0x0001 /* Bit 0 - jabber */
59
60#endif /* EFX_GMII_H */
diff --git a/drivers/net/sfc/io.h b/drivers/net/sfc/io.h
new file mode 100644
index 000000000000..b89177c27f4a
--- /dev/null
+++ b/drivers/net/sfc/io.h
@@ -0,0 +1,256 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_IO_H
12#define EFX_IO_H
13
14#include <linux/io.h>
15#include <linux/spinlock.h>
16
17/**************************************************************************
18 *
19 * NIC register I/O
20 *
21 **************************************************************************
22 *
23 * Notes on locking strategy:
24 *
25 * Most NIC registers require 16-byte (or 8-byte, for SRAM) atomic writes
26 * which necessitates locking.
27 * Under normal operation few writes to NIC registers are made and these
28 * registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and TX_DESC_UPD_REG) are special
29 * cased to allow 4-byte (hence lockless) accesses.
30 *
31 * It *is* safe to write to these 4-byte registers in the middle of an
32 * access to an 8-byte or 16-byte register. We therefore use a
33 * spinlock to protect accesses to the larger registers, but no locks
34 * for the 4-byte registers.
35 *
36 * A write barrier is needed to ensure that DW3 is written after DW0/1/2
37 * due to the way the 16byte registers are "collected" in the BIU.
38 *
39 * We also lock when carrying out reads, to ensure consistency of the
40 * data (made possible since the BIU reads all 128 bits into a cache).
41 * Reads are very rare, so this isn't a significant performance
42 * impact. (Most data transferred from NIC to host is DMAed directly
43 * into host memory).
44 *
45 * I/O BAR access uses locks for both reads and writes (but is only provided
46 * for testing purposes).
47 */
48
49#if BITS_PER_LONG == 64
50#define EFX_USE_QWORD_IO 1
51#endif
52
53#ifdef EFX_USE_QWORD_IO
54static inline void _efx_writeq(struct efx_nic *efx, __le64 value,
55 unsigned int reg)
56{
57 __raw_writeq((__force u64)value, efx->membase + reg);
58}
59static inline __le64 _efx_readq(struct efx_nic *efx, unsigned int reg)
60{
61 return (__force __le64)__raw_readq(efx->membase + reg);
62}
63#endif
64
65static inline void _efx_writed(struct efx_nic *efx, __le32 value,
66 unsigned int reg)
67{
68 __raw_writel((__force u32)value, efx->membase + reg);
69}
70static inline __le32 _efx_readd(struct efx_nic *efx, unsigned int reg)
71{
72 return (__force __le32)__raw_readl(efx->membase + reg);
73}
74
75/* Writes to a normal 16-byte Efx register, locking as appropriate. */
76static inline void efx_writeo(struct efx_nic *efx, efx_oword_t *value,
77 unsigned int reg)
78{
79 unsigned long flags __attribute__ ((unused));
80
81 EFX_REGDUMP(efx, "writing register %x with " EFX_OWORD_FMT "\n", reg,
82 EFX_OWORD_VAL(*value));
83
84 spin_lock_irqsave(&efx->biu_lock, flags);
85#ifdef EFX_USE_QWORD_IO
86 _efx_writeq(efx, value->u64[0], reg + 0);
87 wmb();
88 _efx_writeq(efx, value->u64[1], reg + 8);
89#else
90 _efx_writed(efx, value->u32[0], reg + 0);
91 _efx_writed(efx, value->u32[1], reg + 4);
92 _efx_writed(efx, value->u32[2], reg + 8);
93 wmb();
94 _efx_writed(efx, value->u32[3], reg + 12);
95#endif
96 mmiowb();
97 spin_unlock_irqrestore(&efx->biu_lock, flags);
98}
99
100/* Write an 8-byte NIC SRAM entry through the supplied mapping,
101 * locking as appropriate. */
102static inline void efx_sram_writeq(struct efx_nic *efx, void __iomem *membase,
103 efx_qword_t *value, unsigned int index)
104{
105 unsigned int addr = index * sizeof(*value);
106 unsigned long flags __attribute__ ((unused));
107
108 EFX_REGDUMP(efx, "writing SRAM address %x with " EFX_QWORD_FMT "\n",
109 addr, EFX_QWORD_VAL(*value));
110
111 spin_lock_irqsave(&efx->biu_lock, flags);
112#ifdef EFX_USE_QWORD_IO
113 __raw_writeq((__force u64)value->u64[0], membase + addr);
114#else
115 __raw_writel((__force u32)value->u32[0], membase + addr);
116 wmb();
117 __raw_writel((__force u32)value->u32[1], membase + addr + 4);
118#endif
119 mmiowb();
120 spin_unlock_irqrestore(&efx->biu_lock, flags);
121}
122
123/* Write dword to NIC register that allows partial writes
124 *
125 * Some registers (EVQ_RPTR_REG, RX_DESC_UPD_REG and
126 * TX_DESC_UPD_REG) can be written to as a single dword. This allows
127 * for lockless writes.
128 */
129static inline void efx_writed(struct efx_nic *efx, efx_dword_t *value,
130 unsigned int reg)
131{
132 EFX_REGDUMP(efx, "writing partial register %x with "EFX_DWORD_FMT"\n",
133 reg, EFX_DWORD_VAL(*value));
134
135 /* No lock required */
136 _efx_writed(efx, value->u32[0], reg);
137}
138
139/* Read from a NIC register
140 *
141 * This reads an entire 16-byte register in one go, locking as
142 * appropriate. It is essential to read the first dword first, as this
143 * prompts the NIC to load the current value into the shadow register.
144 */
145static inline void efx_reado(struct efx_nic *efx, efx_oword_t *value,
146 unsigned int reg)
147{
148 unsigned long flags __attribute__ ((unused));
149
150 spin_lock_irqsave(&efx->biu_lock, flags);
151 value->u32[0] = _efx_readd(efx, reg + 0);
152 rmb();
153 value->u32[1] = _efx_readd(efx, reg + 4);
154 value->u32[2] = _efx_readd(efx, reg + 8);
155 value->u32[3] = _efx_readd(efx, reg + 12);
156 spin_unlock_irqrestore(&efx->biu_lock, flags);
157
158 EFX_REGDUMP(efx, "read from register %x, got " EFX_OWORD_FMT "\n", reg,
159 EFX_OWORD_VAL(*value));
160}
161
162/* Read an 8-byte SRAM entry through supplied mapping,
163 * locking as appropriate. */
164static inline void efx_sram_readq(struct efx_nic *efx, void __iomem *membase,
165 efx_qword_t *value, unsigned int index)
166{
167 unsigned int addr = index * sizeof(*value);
168 unsigned long flags __attribute__ ((unused));
169
170 spin_lock_irqsave(&efx->biu_lock, flags);
171#ifdef EFX_USE_QWORD_IO
172 value->u64[0] = (__force __le64)__raw_readq(membase + addr);
173#else
174 value->u32[0] = (__force __le32)__raw_readl(membase + addr);
175 rmb();
176 value->u32[1] = (__force __le32)__raw_readl(membase + addr + 4);
177#endif
178 spin_unlock_irqrestore(&efx->biu_lock, flags);
179
180 EFX_REGDUMP(efx, "read from SRAM address %x, got "EFX_QWORD_FMT"\n",
181 addr, EFX_QWORD_VAL(*value));
182}
183
184/* Read dword from register that allows partial writes (sic) */
185static inline void efx_readd(struct efx_nic *efx, efx_dword_t *value,
186 unsigned int reg)
187{
188 value->u32[0] = _efx_readd(efx, reg);
189 EFX_REGDUMP(efx, "read from register %x, got "EFX_DWORD_FMT"\n",
190 reg, EFX_DWORD_VAL(*value));
191}
192
193/* Write to a register forming part of a table */
194static inline void efx_writeo_table(struct efx_nic *efx, efx_oword_t *value,
195 unsigned int reg, unsigned int index)
196{
197 efx_writeo(efx, value, reg + index * sizeof(efx_oword_t));
198}
199
200/* Read to a register forming part of a table */
201static inline void efx_reado_table(struct efx_nic *efx, efx_oword_t *value,
202 unsigned int reg, unsigned int index)
203{
204 efx_reado(efx, value, reg + index * sizeof(efx_oword_t));
205}
206
207/* Write to a dword register forming part of a table */
208static inline void efx_writed_table(struct efx_nic *efx, efx_dword_t *value,
209 unsigned int reg, unsigned int index)
210{
211 efx_writed(efx, value, reg + index * sizeof(efx_oword_t));
212}
213
214/* Page-mapped register block size */
215#define EFX_PAGE_BLOCK_SIZE 0x2000
216
217/* Calculate offset to page-mapped register block */
218#define EFX_PAGED_REG(page, reg) \
219 ((page) * EFX_PAGE_BLOCK_SIZE + (reg))
220
221/* As for efx_writeo(), but for a page-mapped register. */
222static inline void efx_writeo_page(struct efx_nic *efx, efx_oword_t *value,
223 unsigned int reg, unsigned int page)
224{
225 efx_writeo(efx, value, EFX_PAGED_REG(page, reg));
226}
227
228/* As for efx_writed(), but for a page-mapped register. */
229static inline void efx_writed_page(struct efx_nic *efx, efx_dword_t *value,
230 unsigned int reg, unsigned int page)
231{
232 efx_writed(efx, value, EFX_PAGED_REG(page, reg));
233}
234
235/* Write dword to page-mapped register with an extra lock.
236 *
237 * As for efx_writed_page(), but for a register that suffers from
238 * SFC bug 3181. Take out a lock so the BIU collector cannot be
239 * confused. */
240static inline void efx_writed_page_locked(struct efx_nic *efx,
241 efx_dword_t *value,
242 unsigned int reg,
243 unsigned int page)
244{
245 unsigned long flags __attribute__ ((unused));
246
247 if (page == 0) {
248 spin_lock_irqsave(&efx->biu_lock, flags);
249 efx_writed(efx, value, EFX_PAGED_REG(page, reg));
250 spin_unlock_irqrestore(&efx->biu_lock, flags);
251 } else {
252 efx_writed(efx, value, EFX_PAGED_REG(page, reg));
253 }
254}
255
256#endif /* EFX_IO_H */
diff --git a/drivers/net/sfc/mdio_10g.c b/drivers/net/sfc/mdio_10g.c
index 6c33459f9ea9..231e580acc9a 100644
--- a/drivers/net/sfc/mdio_10g.c
+++ b/drivers/net/sfc/mdio_10g.c
@@ -14,7 +14,6 @@
14#include <linux/delay.h> 14#include <linux/delay.h>
15#include "net_driver.h" 15#include "net_driver.h"
16#include "mdio_10g.h" 16#include "mdio_10g.h"
17#include "boards.h"
18#include "workarounds.h" 17#include "workarounds.h"
19 18
20unsigned efx_mdio_id_oui(u32 id) 19unsigned efx_mdio_id_oui(u32 id)
@@ -249,7 +248,7 @@ void efx_mdio_set_mmds_lpower(struct efx_nic *efx,
249int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 248int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
250{ 249{
251 struct ethtool_cmd prev; 250 struct ethtool_cmd prev;
252 u32 required; 251 bool xnp;
253 int reg; 252 int reg;
254 253
255 efx->phy_op->get_settings(efx, &prev); 254 efx->phy_op->get_settings(efx, &prev);
@@ -266,86 +265,60 @@ int efx_mdio_set_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
266 return -EINVAL; 265 return -EINVAL;
267 266
268 /* Check that PHY supports these settings */ 267 /* Check that PHY supports these settings */
269 if (ecmd->autoneg) { 268 if (!ecmd->autoneg ||
270 required = SUPPORTED_Autoneg; 269 (ecmd->advertising | SUPPORTED_Autoneg) & ~prev.supported)
271 } else if (ecmd->duplex) {
272 switch (ecmd->speed) {
273 case SPEED_10: required = SUPPORTED_10baseT_Full; break;
274 case SPEED_100: required = SUPPORTED_100baseT_Full; break;
275 default: return -EINVAL;
276 }
277 } else {
278 switch (ecmd->speed) {
279 case SPEED_10: required = SUPPORTED_10baseT_Half; break;
280 case SPEED_100: required = SUPPORTED_100baseT_Half; break;
281 default: return -EINVAL;
282 }
283 }
284 required |= ecmd->advertising;
285 if (required & ~prev.supported)
286 return -EINVAL; 270 return -EINVAL;
287 271
288 if (ecmd->autoneg) { 272 xnp = (ecmd->advertising & ADVERTISED_10000baseT_Full
289 bool xnp = (ecmd->advertising & ADVERTISED_10000baseT_Full 273 || EFX_WORKAROUND_13204(efx));
290 || EFX_WORKAROUND_13204(efx)); 274
291 275 /* Set up the base page */
292 /* Set up the base page */ 276 reg = ADVERTISE_CSMA;
293 reg = ADVERTISE_CSMA; 277 if (ecmd->advertising & ADVERTISED_10baseT_Half)
294 if (ecmd->advertising & ADVERTISED_10baseT_Half) 278 reg |= ADVERTISE_10HALF;
295 reg |= ADVERTISE_10HALF; 279 if (ecmd->advertising & ADVERTISED_10baseT_Full)
296 if (ecmd->advertising & ADVERTISED_10baseT_Full) 280 reg |= ADVERTISE_10FULL;
297 reg |= ADVERTISE_10FULL; 281 if (ecmd->advertising & ADVERTISED_100baseT_Half)
298 if (ecmd->advertising & ADVERTISED_100baseT_Half) 282 reg |= ADVERTISE_100HALF;
299 reg |= ADVERTISE_100HALF; 283 if (ecmd->advertising & ADVERTISED_100baseT_Full)
300 if (ecmd->advertising & ADVERTISED_100baseT_Full) 284 reg |= ADVERTISE_100FULL;
301 reg |= ADVERTISE_100FULL; 285 if (xnp)
302 if (xnp) 286 reg |= ADVERTISE_RESV;
303 reg |= ADVERTISE_RESV; 287 else if (ecmd->advertising & (ADVERTISED_1000baseT_Half |
304 else if (ecmd->advertising & (ADVERTISED_1000baseT_Half | 288 ADVERTISED_1000baseT_Full))
305 ADVERTISED_1000baseT_Full)) 289 reg |= ADVERTISE_NPAGE;
306 reg |= ADVERTISE_NPAGE; 290 reg |= mii_advertise_flowctrl(efx->wanted_fc);
307 reg |= mii_advertise_flowctrl(efx->wanted_fc); 291 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg);
308 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, reg); 292
309 293 /* Set up the (extended) next page if necessary */
310 /* Set up the (extended) next page if necessary */ 294 if (efx->phy_op->set_npage_adv)
311 if (efx->phy_op->set_npage_adv) 295 efx->phy_op->set_npage_adv(efx, ecmd->advertising);
312 efx->phy_op->set_npage_adv(efx, ecmd->advertising); 296
313 297 /* Enable and restart AN */
314 /* Enable and restart AN */ 298 reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1);
315 reg = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_CTRL1); 299 reg |= MDIO_AN_CTRL1_ENABLE;
316 reg |= MDIO_AN_CTRL1_ENABLE; 300 if (!(EFX_WORKAROUND_15195(efx) &&
317 if (!(EFX_WORKAROUND_15195(efx) && 301 LOOPBACK_MASK(efx) & efx->phy_op->loopbacks))
318 LOOPBACK_MASK(efx) & efx->phy_op->loopbacks)) 302 reg |= MDIO_AN_CTRL1_RESTART;
319 reg |= MDIO_AN_CTRL1_RESTART; 303 if (xnp)
320 if (xnp) 304 reg |= MDIO_AN_CTRL1_XNP;
321 reg |= MDIO_AN_CTRL1_XNP; 305 else
322 else 306 reg &= ~MDIO_AN_CTRL1_XNP;
323 reg &= ~MDIO_AN_CTRL1_XNP; 307 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
324 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_CTRL1, reg);
325 } else {
326 /* Disable AN */
327 efx_mdio_set_flag(efx, MDIO_MMD_AN, MDIO_CTRL1,
328 MDIO_AN_CTRL1_ENABLE, false);
329
330 /* Set the basic control bits */
331 reg = efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1);
332 reg &= ~(MDIO_CTRL1_SPEEDSEL | MDIO_CTRL1_FULLDPLX);
333 if (ecmd->speed == SPEED_100)
334 reg |= MDIO_PMA_CTRL1_SPEED100;
335 if (ecmd->duplex)
336 reg |= MDIO_CTRL1_FULLDPLX;
337 efx_mdio_write(efx, MDIO_MMD_PMAPMD, MDIO_CTRL1, reg);
338 }
339 308
340 return 0; 309 return 0;
341} 310}
342 311
343enum efx_fc_type efx_mdio_get_pause(struct efx_nic *efx) 312enum efx_fc_type efx_mdio_get_pause(struct efx_nic *efx)
344{ 313{
345 int lpa; 314 BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX));
346 315
347 if (!(efx->phy_op->mmds & MDIO_DEVS_AN)) 316 if (!(efx->wanted_fc & EFX_FC_AUTO))
348 return efx->wanted_fc; 317 return efx->wanted_fc;
349 lpa = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA); 318
350 return efx_fc_resolve(efx->wanted_fc, lpa); 319 WARN_ON(!(efx->mdio.mmds & MDIO_DEVS_AN));
320
321 return mii_resolve_flowctrl_fdx(
322 mii_advertise_flowctrl(efx->wanted_fc),
323 efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_LPA));
351} 324}
diff --git a/drivers/net/sfc/mdio_10g.h b/drivers/net/sfc/mdio_10g.h
index 6b14421a7444..75b37f101231 100644
--- a/drivers/net/sfc/mdio_10g.h
+++ b/drivers/net/sfc/mdio_10g.h
@@ -17,7 +17,6 @@
17 */ 17 */
18 18
19#include "efx.h" 19#include "efx.h"
20#include "boards.h"
21 20
22static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; } 21static inline unsigned efx_mdio_id_rev(u32 id) { return id & 0xf; }
23static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; } 22static inline unsigned efx_mdio_id_model(u32 id) { return (id >> 4) & 0x3f; }
diff --git a/drivers/net/sfc/net_driver.h b/drivers/net/sfc/net_driver.h
index 298566da638b..bb3d258bd5e8 100644
--- a/drivers/net/sfc/net_driver.h
+++ b/drivers/net/sfc/net_driver.h
@@ -327,7 +327,7 @@ enum efx_rx_alloc_method {
327 * @used_flags: Channel is used by net driver 327 * @used_flags: Channel is used by net driver
328 * @enabled: Channel enabled indicator 328 * @enabled: Channel enabled indicator
329 * @irq: IRQ number (MSI and MSI-X only) 329 * @irq: IRQ number (MSI and MSI-X only)
330 * @irq_moderation: IRQ moderation value (in us) 330 * @irq_moderation: IRQ moderation value (in hardware ticks)
331 * @napi_dev: Net device used with NAPI 331 * @napi_dev: Net device used with NAPI
332 * @napi_str: NAPI control structure 332 * @napi_str: NAPI control structure
333 * @reset_work: Scheduled reset work thread 333 * @reset_work: Scheduled reset work thread
@@ -389,19 +389,6 @@ struct efx_channel {
389}; 389};
390 390
391/** 391/**
392 * struct efx_blinker - S/W LED blinking context
393 * @state: Current state - on or off
394 * @resubmit: Timer resubmission flag
395 * @timer: Control timer for blinking
396 */
397struct efx_blinker {
398 bool state;
399 bool resubmit;
400 struct timer_list timer;
401};
402
403
404/**
405 * struct efx_board - board information 392 * struct efx_board - board information
406 * @type: Board model type 393 * @type: Board model type
407 * @major: Major rev. ('A', 'B' ...) 394 * @major: Major rev. ('A', 'B' ...)
@@ -412,7 +399,9 @@ struct efx_blinker {
412 * @blink: Starts/stops blinking 399 * @blink: Starts/stops blinking
413 * @monitor: Board-specific health check function 400 * @monitor: Board-specific health check function
414 * @fini: Cleanup function 401 * @fini: Cleanup function
415 * @blinker: used to blink LEDs in software 402 * @blink_state: Current blink state
403 * @blink_resubmit: Blink timer resubmission flag
404 * @blink_timer: Blink timer
416 * @hwmon_client: I2C client for hardware monitor 405 * @hwmon_client: I2C client for hardware monitor
417 * @ioexp_client: I2C client for power/port control 406 * @ioexp_client: I2C client for power/port control
418 */ 407 */
@@ -429,7 +418,9 @@ struct efx_board {
429 int (*monitor) (struct efx_nic *nic); 418 int (*monitor) (struct efx_nic *nic);
430 void (*blink) (struct efx_nic *efx, bool start); 419 void (*blink) (struct efx_nic *efx, bool start);
431 void (*fini) (struct efx_nic *nic); 420 void (*fini) (struct efx_nic *nic);
432 struct efx_blinker blinker; 421 bool blink_state;
422 bool blink_resubmit;
423 struct timer_list blink_timer;
433 struct i2c_client *hwmon_client, *ioexp_client; 424 struct i2c_client *hwmon_client, *ioexp_client;
434}; 425};
435 426
@@ -506,17 +497,6 @@ enum efx_mac_type {
506 EFX_XMAC = 2, 497 EFX_XMAC = 2,
507}; 498};
508 499
509static inline enum efx_fc_type efx_fc_resolve(enum efx_fc_type wanted_fc,
510 unsigned int lpa)
511{
512 BUILD_BUG_ON(EFX_FC_AUTO & (EFX_FC_RX | EFX_FC_TX));
513
514 if (!(wanted_fc & EFX_FC_AUTO))
515 return wanted_fc;
516
517 return mii_resolve_flowctrl_fdx(mii_advertise_flowctrl(wanted_fc), lpa);
518}
519
520/** 500/**
521 * struct efx_mac_operations - Efx MAC operations table 501 * struct efx_mac_operations - Efx MAC operations table
522 * @reconfigure: Reconfigure MAC. Serialised by the mac_lock 502 * @reconfigure: Reconfigure MAC. Serialised by the mac_lock
@@ -537,7 +517,6 @@ struct efx_mac_operations {
537 * @fini: Shut down PHY 517 * @fini: Shut down PHY
538 * @reconfigure: Reconfigure PHY (e.g. for new link parameters) 518 * @reconfigure: Reconfigure PHY (e.g. for new link parameters)
539 * @clear_interrupt: Clear down interrupt 519 * @clear_interrupt: Clear down interrupt
540 * @blink: Blink LEDs
541 * @poll: Poll for hardware state. Serialised by the mac_lock. 520 * @poll: Poll for hardware state. Serialised by the mac_lock.
542 * @get_settings: Get ethtool settings. Serialised by the mac_lock. 521 * @get_settings: Get ethtool settings. Serialised by the mac_lock.
543 * @set_settings: Set ethtool settings. Serialised by the mac_lock. 522 * @set_settings: Set ethtool settings. Serialised by the mac_lock.
@@ -697,10 +676,13 @@ union efx_multicast_hash {
697 * @tx_queue: TX DMA queues 676 * @tx_queue: TX DMA queues
698 * @rx_queue: RX DMA queues 677 * @rx_queue: RX DMA queues
699 * @channel: Channels 678 * @channel: Channels
679 * @next_buffer_table: First available buffer table id
700 * @n_rx_queues: Number of RX queues 680 * @n_rx_queues: Number of RX queues
701 * @n_channels: Number of channels in use 681 * @n_channels: Number of channels in use
702 * @rx_buffer_len: RX buffer length 682 * @rx_buffer_len: RX buffer length
703 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer 683 * @rx_buffer_order: Order (log2) of number of pages for each RX buffer
684 * @int_error_count: Number of internal errors seen recently
685 * @int_error_expire: Time at which error count will be expired
704 * @irq_status: Interrupt status buffer 686 * @irq_status: Interrupt status buffer
705 * @last_irq_cpu: Last CPU to handle interrupt. 687 * @last_irq_cpu: Last CPU to handle interrupt.
706 * This register is written with the SMP processor ID whenever an 688 * This register is written with the SMP processor ID whenever an
@@ -784,11 +766,15 @@ struct efx_nic {
784 struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES]; 766 struct efx_rx_queue rx_queue[EFX_MAX_RX_QUEUES];
785 struct efx_channel channel[EFX_MAX_CHANNELS]; 767 struct efx_channel channel[EFX_MAX_CHANNELS];
786 768
769 unsigned next_buffer_table;
787 int n_rx_queues; 770 int n_rx_queues;
788 int n_channels; 771 int n_channels;
789 unsigned int rx_buffer_len; 772 unsigned int rx_buffer_len;
790 unsigned int rx_buffer_order; 773 unsigned int rx_buffer_order;
791 774
775 unsigned int_error_count;
776 unsigned long int_error_expire;
777
792 struct efx_buffer irq_status; 778 struct efx_buffer irq_status;
793 volatile signed int last_irq_cpu; 779 volatile signed int last_irq_cpu;
794 780
@@ -869,14 +855,7 @@ static inline const char *efx_dev_name(struct efx_nic *efx)
869 * @buf_tbl_base: Buffer table base address 855 * @buf_tbl_base: Buffer table base address
870 * @evq_ptr_tbl_base: Event queue pointer table base address 856 * @evq_ptr_tbl_base: Event queue pointer table base address
871 * @evq_rptr_tbl_base: Event queue read-pointer table base address 857 * @evq_rptr_tbl_base: Event queue read-pointer table base address
872 * @txd_ring_mask: TX descriptor ring size - 1 (must be a power of two - 1)
873 * @rxd_ring_mask: RX descriptor ring size - 1 (must be a power of two - 1)
874 * @evq_size: Event queue size (must be a power of two)
875 * @max_dma_mask: Maximum possible DMA mask 858 * @max_dma_mask: Maximum possible DMA mask
876 * @tx_dma_mask: TX DMA mask
877 * @bug5391_mask: Address mask for bug 5391 workaround
878 * @rx_xoff_thresh: RX FIFO XOFF watermark (bytes)
879 * @rx_xon_thresh: RX FIFO XON watermark (bytes)
880 * @rx_buffer_padding: Padding added to each RX buffer 859 * @rx_buffer_padding: Padding added to each RX buffer
881 * @max_interrupt_mode: Highest capability interrupt mode supported 860 * @max_interrupt_mode: Highest capability interrupt mode supported
882 * from &enum efx_init_mode. 861 * from &enum efx_init_mode.
@@ -892,15 +871,8 @@ struct efx_nic_type {
892 unsigned int evq_ptr_tbl_base; 871 unsigned int evq_ptr_tbl_base;
893 unsigned int evq_rptr_tbl_base; 872 unsigned int evq_rptr_tbl_base;
894 873
895 unsigned int txd_ring_mask;
896 unsigned int rxd_ring_mask;
897 unsigned int evq_size;
898 u64 max_dma_mask; 874 u64 max_dma_mask;
899 unsigned int tx_dma_mask;
900 unsigned bug5391_mask;
901 875
902 int rx_xoff_thresh;
903 int rx_xon_thresh;
904 unsigned int rx_buffer_padding; 876 unsigned int rx_buffer_padding;
905 unsigned int max_interrupt_mode; 877 unsigned int max_interrupt_mode;
906 unsigned int phys_addr_channels; 878 unsigned int phys_addr_channels;
diff --git a/drivers/net/sfc/phy.h b/drivers/net/sfc/phy.h
index c1cff9c0c173..b5150f3bca31 100644
--- a/drivers/net/sfc/phy.h
+++ b/drivers/net/sfc/phy.h
@@ -23,9 +23,9 @@ extern void tenxpress_phy_blink(struct efx_nic *efx, bool blink);
23extern int sft9001_wait_boot(struct efx_nic *efx); 23extern int sft9001_wait_boot(struct efx_nic *efx);
24 24
25/**************************************************************************** 25/****************************************************************************
26 * AMCC/Quake QT20xx PHYs 26 * AMCC/Quake QT202x PHYs
27 */ 27 */
28extern struct efx_phy_operations falcon_xfp_phy_ops; 28extern struct efx_phy_operations falcon_qt202x_phy_ops;
29 29
30/* These PHYs provide various H/W control states for LEDs */ 30/* These PHYs provide various H/W control states for LEDs */
31#define QUAKE_LED_LINK_INVAL (0) 31#define QUAKE_LED_LINK_INVAL (0)
@@ -39,6 +39,6 @@ extern struct efx_phy_operations falcon_xfp_phy_ops;
39#define QUAKE_LED_TXLINK (0) 39#define QUAKE_LED_TXLINK (0)
40#define QUAKE_LED_RXLINK (8) 40#define QUAKE_LED_RXLINK (8)
41 41
42extern void xfp_set_led(struct efx_nic *p, int led, int state); 42extern void falcon_qt202x_set_led(struct efx_nic *p, int led, int state);
43 43
44#endif 44#endif
diff --git a/drivers/net/sfc/xfp_phy.c b/drivers/net/sfc/qt202x_phy.c
index e6b3d5eaddba..560eb18280e1 100644
--- a/drivers/net/sfc/xfp_phy.c
+++ b/drivers/net/sfc/qt202x_phy.c
@@ -7,8 +7,7 @@
7 * by the Free Software Foundation, incorporated herein by reference. 7 * by the Free Software Foundation, incorporated herein by reference.
8 */ 8 */
9/* 9/*
10 * Driver for SFP+ and XFP optical PHYs plus some support specific to the 10 * Driver for AMCC QT202x SFP+ and XFP adapters; see www.amcc.com for details
11 * AMCC QT20xx adapters; see www.amcc.com for details
12 */ 11 */
13 12
14#include <linux/timer.h> 13#include <linux/timer.h>
@@ -18,13 +17,13 @@
18#include "phy.h" 17#include "phy.h"
19#include "falcon.h" 18#include "falcon.h"
20 19
21#define XFP_REQUIRED_DEVS (MDIO_DEVS_PCS | \ 20#define QT202X_REQUIRED_DEVS (MDIO_DEVS_PCS | \
22 MDIO_DEVS_PMAPMD | \ 21 MDIO_DEVS_PMAPMD | \
23 MDIO_DEVS_PHYXS) 22 MDIO_DEVS_PHYXS)
24 23
25#define XFP_LOOPBACKS ((1 << LOOPBACK_PCS) | \ 24#define QT202X_LOOPBACKS ((1 << LOOPBACK_PCS) | \
26 (1 << LOOPBACK_PMAPMD) | \ 25 (1 << LOOPBACK_PMAPMD) | \
27 (1 << LOOPBACK_NETWORK)) 26 (1 << LOOPBACK_NETWORK))
28 27
29/****************************************************************************/ 28/****************************************************************************/
30/* Quake-specific MDIO registers */ 29/* Quake-specific MDIO registers */
@@ -45,18 +44,18 @@
45#define PCS_VEND1_REG 0xc000 44#define PCS_VEND1_REG 0xc000
46#define PCS_VEND1_LBTXD_LBN 5 45#define PCS_VEND1_LBTXD_LBN 5
47 46
48void xfp_set_led(struct efx_nic *p, int led, int mode) 47void falcon_qt202x_set_led(struct efx_nic *p, int led, int mode)
49{ 48{
50 int addr = MDIO_QUAKE_LED0_REG + led; 49 int addr = MDIO_QUAKE_LED0_REG + led;
51 efx_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode); 50 efx_mdio_write(p, MDIO_MMD_PMAPMD, addr, mode);
52} 51}
53 52
54struct xfp_phy_data { 53struct qt202x_phy_data {
55 enum efx_phy_mode phy_mode; 54 enum efx_phy_mode phy_mode;
56}; 55};
57 56
58#define XFP_MAX_RESET_TIME 500 57#define QT2022C2_MAX_RESET_TIME 500
59#define XFP_RESET_WAIT 10 58#define QT2022C2_RESET_WAIT 10
60 59
61static int qt2025c_wait_reset(struct efx_nic *efx) 60static int qt2025c_wait_reset(struct efx_nic *efx)
62{ 61{
@@ -97,7 +96,7 @@ static int qt2025c_wait_reset(struct efx_nic *efx)
97 return 0; 96 return 0;
98} 97}
99 98
100static int xfp_reset_phy(struct efx_nic *efx) 99static int qt202x_reset_phy(struct efx_nic *efx)
101{ 100{
102 int rc; 101 int rc;
103 102
@@ -111,8 +110,9 @@ static int xfp_reset_phy(struct efx_nic *efx)
111 /* Reset the PHYXS MMD. This is documented as doing 110 /* Reset the PHYXS MMD. This is documented as doing
112 * a complete soft reset. */ 111 * a complete soft reset. */
113 rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PHYXS, 112 rc = efx_mdio_reset_mmd(efx, MDIO_MMD_PHYXS,
114 XFP_MAX_RESET_TIME / XFP_RESET_WAIT, 113 QT2022C2_MAX_RESET_TIME /
115 XFP_RESET_WAIT); 114 QT2022C2_RESET_WAIT,
115 QT2022C2_RESET_WAIT);
116 if (rc < 0) 116 if (rc < 0)
117 goto fail; 117 goto fail;
118 } 118 }
@@ -122,7 +122,7 @@ static int xfp_reset_phy(struct efx_nic *efx)
122 122
123 /* Check that all the MMDs we expect are present and responding. We 123 /* Check that all the MMDs we expect are present and responding. We
124 * expect faults on some if the link is down, but not on the PHY XS */ 124 * expect faults on some if the link is down, but not on the PHY XS */
125 rc = efx_mdio_check_mmds(efx, XFP_REQUIRED_DEVS, MDIO_DEVS_PHYXS); 125 rc = efx_mdio_check_mmds(efx, QT202X_REQUIRED_DEVS, MDIO_DEVS_PHYXS);
126 if (rc < 0) 126 if (rc < 0)
127 goto fail; 127 goto fail;
128 128
@@ -135,13 +135,13 @@ static int xfp_reset_phy(struct efx_nic *efx)
135 return rc; 135 return rc;
136} 136}
137 137
138static int xfp_phy_init(struct efx_nic *efx) 138static int qt202x_phy_init(struct efx_nic *efx)
139{ 139{
140 struct xfp_phy_data *phy_data; 140 struct qt202x_phy_data *phy_data;
141 u32 devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS); 141 u32 devid = efx_mdio_read_id(efx, MDIO_MMD_PHYXS);
142 int rc; 142 int rc;
143 143
144 phy_data = kzalloc(sizeof(struct xfp_phy_data), GFP_KERNEL); 144 phy_data = kzalloc(sizeof(struct qt202x_phy_data), GFP_KERNEL);
145 if (!phy_data) 145 if (!phy_data)
146 return -ENOMEM; 146 return -ENOMEM;
147 efx->phy_data = phy_data; 147 efx->phy_data = phy_data;
@@ -152,7 +152,7 @@ static int xfp_phy_init(struct efx_nic *efx)
152 152
153 phy_data->phy_mode = efx->phy_mode; 153 phy_data->phy_mode = efx->phy_mode;
154 154
155 rc = xfp_reset_phy(efx); 155 rc = qt202x_reset_phy(efx);
156 156
157 EFX_INFO(efx, "PHY init %s.\n", 157 EFX_INFO(efx, "PHY init %s.\n",
158 rc ? "failed" : "successful"); 158 rc ? "failed" : "successful");
@@ -167,28 +167,28 @@ static int xfp_phy_init(struct efx_nic *efx)
167 return rc; 167 return rc;
168} 168}
169 169
170static void xfp_phy_clear_interrupt(struct efx_nic *efx) 170static void qt202x_phy_clear_interrupt(struct efx_nic *efx)
171{ 171{
172 /* Read to clear link status alarm */ 172 /* Read to clear link status alarm */
173 efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT); 173 efx_mdio_read(efx, MDIO_MMD_PMAPMD, MDIO_PMA_LASI_STAT);
174} 174}
175 175
176static int xfp_link_ok(struct efx_nic *efx) 176static int qt202x_link_ok(struct efx_nic *efx)
177{ 177{
178 return efx_mdio_links_ok(efx, XFP_REQUIRED_DEVS); 178 return efx_mdio_links_ok(efx, QT202X_REQUIRED_DEVS);
179} 179}
180 180
181static void xfp_phy_poll(struct efx_nic *efx) 181static void qt202x_phy_poll(struct efx_nic *efx)
182{ 182{
183 int link_up = xfp_link_ok(efx); 183 int link_up = qt202x_link_ok(efx);
184 /* Simulate a PHY event if link state has changed */ 184 /* Simulate a PHY event if link state has changed */
185 if (link_up != efx->link_up) 185 if (link_up != efx->link_up)
186 falcon_sim_phy_event(efx); 186 falcon_sim_phy_event(efx);
187} 187}
188 188
189static void xfp_phy_reconfigure(struct efx_nic *efx) 189static void qt202x_phy_reconfigure(struct efx_nic *efx)
190{ 190{
191 struct xfp_phy_data *phy_data = efx->phy_data; 191 struct qt202x_phy_data *phy_data = efx->phy_data;
192 192
193 if (efx->phy_type == PHY_TYPE_QT2025C) { 193 if (efx->phy_type == PHY_TYPE_QT2025C) {
194 /* There are several different register bits which can 194 /* There are several different register bits which can
@@ -207,7 +207,7 @@ static void xfp_phy_reconfigure(struct efx_nic *efx)
207 /* Reset the PHY when moving from tx off to tx on */ 207 /* Reset the PHY when moving from tx off to tx on */
208 if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) && 208 if (!(efx->phy_mode & PHY_MODE_TX_DISABLED) &&
209 (phy_data->phy_mode & PHY_MODE_TX_DISABLED)) 209 (phy_data->phy_mode & PHY_MODE_TX_DISABLED))
210 xfp_reset_phy(efx); 210 qt202x_reset_phy(efx);
211 211
212 efx_mdio_transmit_disable(efx); 212 efx_mdio_transmit_disable(efx);
213 } 213 }
@@ -215,18 +215,18 @@ static void xfp_phy_reconfigure(struct efx_nic *efx)
215 efx_mdio_phy_reconfigure(efx); 215 efx_mdio_phy_reconfigure(efx);
216 216
217 phy_data->phy_mode = efx->phy_mode; 217 phy_data->phy_mode = efx->phy_mode;
218 efx->link_up = xfp_link_ok(efx); 218 efx->link_up = qt202x_link_ok(efx);
219 efx->link_speed = 10000; 219 efx->link_speed = 10000;
220 efx->link_fd = true; 220 efx->link_fd = true;
221 efx->link_fc = efx->wanted_fc; 221 efx->link_fc = efx->wanted_fc;
222} 222}
223 223
224static void xfp_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd) 224static void qt202x_phy_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
225{ 225{
226 mdio45_ethtool_gset(&efx->mdio, ecmd); 226 mdio45_ethtool_gset(&efx->mdio, ecmd);
227} 227}
228 228
229static void xfp_phy_fini(struct efx_nic *efx) 229static void qt202x_phy_fini(struct efx_nic *efx)
230{ 230{
231 /* Clobber the LED if it was blinking */ 231 /* Clobber the LED if it was blinking */
232 efx->board_info.blink(efx, false); 232 efx->board_info.blink(efx, false);
@@ -236,15 +236,15 @@ static void xfp_phy_fini(struct efx_nic *efx)
236 efx->phy_data = NULL; 236 efx->phy_data = NULL;
237} 237}
238 238
239struct efx_phy_operations falcon_xfp_phy_ops = { 239struct efx_phy_operations falcon_qt202x_phy_ops = {
240 .macs = EFX_XMAC, 240 .macs = EFX_XMAC,
241 .init = xfp_phy_init, 241 .init = qt202x_phy_init,
242 .reconfigure = xfp_phy_reconfigure, 242 .reconfigure = qt202x_phy_reconfigure,
243 .poll = xfp_phy_poll, 243 .poll = qt202x_phy_poll,
244 .fini = xfp_phy_fini, 244 .fini = qt202x_phy_fini,
245 .clear_interrupt = xfp_phy_clear_interrupt, 245 .clear_interrupt = qt202x_phy_clear_interrupt,
246 .get_settings = xfp_phy_get_settings, 246 .get_settings = qt202x_phy_get_settings,
247 .set_settings = efx_mdio_set_settings, 247 .set_settings = efx_mdio_set_settings,
248 .mmds = XFP_REQUIRED_DEVS, 248 .mmds = QT202X_REQUIRED_DEVS,
249 .loopbacks = XFP_LOOPBACKS, 249 .loopbacks = QT202X_LOOPBACKS,
250}; 250};
diff --git a/drivers/net/sfc/regs.h b/drivers/net/sfc/regs.h
new file mode 100644
index 000000000000..f336d83d5fa0
--- /dev/null
+++ b/drivers/net/sfc/regs.h
@@ -0,0 +1,3180 @@
1/****************************************************************************
2 * Driver for Solarflare Solarstorm network controllers and boards
3 * Copyright 2005-2006 Fen Systems Ltd.
4 * Copyright 2006-2009 Solarflare Communications Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms of the GNU General Public License version 2 as published
8 * by the Free Software Foundation, incorporated herein by reference.
9 */
10
11#ifndef EFX_REGS_H
12#define EFX_REGS_H
13
14/*
15 * Falcon hardware architecture definitions have a name prefix following
16 * the format:
17 *
18 * F<type>_<min-rev><max-rev>_
19 *
20 * The following <type> strings are used:
21 *
22 * MMIO register MC register Host memory structure
23 * -------------------------------------------------------------
24 * Address R MCR
25 * Bitfield RF MCRF SF
26 * Enumerator FE MCFE SE
27 *
28 * <min-rev> is the first revision to which the definition applies:
29 *
30 * A: Falcon A1 (SFC4000AB)
31 * B: Falcon B0 (SFC4000BA)
32 * C: Siena A0 (SFL9021AA)
33 *
34 * If the definition has been changed or removed in later revisions
35 * then <max-rev> is the last revision to which the definition applies;
36 * otherwise it is "Z".
37 */
38
39/**************************************************************************
40 *
41 * Falcon/Siena registers and descriptors
42 *
43 **************************************************************************
44 */
45
46/* ADR_REGION_REG: Address region register */
47#define FR_AZ_ADR_REGION 0x00000000
48#define FRF_AZ_ADR_REGION3_LBN 96
49#define FRF_AZ_ADR_REGION3_WIDTH 18
50#define FRF_AZ_ADR_REGION2_LBN 64
51#define FRF_AZ_ADR_REGION2_WIDTH 18
52#define FRF_AZ_ADR_REGION1_LBN 32
53#define FRF_AZ_ADR_REGION1_WIDTH 18
54#define FRF_AZ_ADR_REGION0_LBN 0
55#define FRF_AZ_ADR_REGION0_WIDTH 18
56
57/* INT_EN_REG_KER: Kernel driver Interrupt enable register */
58#define FR_AZ_INT_EN_KER 0x00000010
59#define FRF_AZ_KER_INT_LEVE_SEL_LBN 8
60#define FRF_AZ_KER_INT_LEVE_SEL_WIDTH 6
61#define FRF_AZ_KER_INT_CHAR_LBN 4
62#define FRF_AZ_KER_INT_CHAR_WIDTH 1
63#define FRF_AZ_KER_INT_KER_LBN 3
64#define FRF_AZ_KER_INT_KER_WIDTH 1
65#define FRF_AZ_DRV_INT_EN_KER_LBN 0
66#define FRF_AZ_DRV_INT_EN_KER_WIDTH 1
67
68/* INT_EN_REG_CHAR: Char Driver interrupt enable register */
69#define FR_BZ_INT_EN_CHAR 0x00000020
70#define FRF_BZ_CHAR_INT_LEVE_SEL_LBN 8
71#define FRF_BZ_CHAR_INT_LEVE_SEL_WIDTH 6
72#define FRF_BZ_CHAR_INT_CHAR_LBN 4
73#define FRF_BZ_CHAR_INT_CHAR_WIDTH 1
74#define FRF_BZ_CHAR_INT_KER_LBN 3
75#define FRF_BZ_CHAR_INT_KER_WIDTH 1
76#define FRF_BZ_DRV_INT_EN_CHAR_LBN 0
77#define FRF_BZ_DRV_INT_EN_CHAR_WIDTH 1
78
79/* INT_ADR_REG_KER: Interrupt host address for Kernel driver */
80#define FR_AZ_INT_ADR_KER 0x00000030
81#define FRF_AZ_NORM_INT_VEC_DIS_KER_LBN 64
82#define FRF_AZ_NORM_INT_VEC_DIS_KER_WIDTH 1
83#define FRF_AZ_INT_ADR_KER_LBN 0
84#define FRF_AZ_INT_ADR_KER_WIDTH 64
85
86/* INT_ADR_REG_CHAR: Interrupt host address for Char driver */
87#define FR_BZ_INT_ADR_CHAR 0x00000040
88#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_LBN 64
89#define FRF_BZ_NORM_INT_VEC_DIS_CHAR_WIDTH 1
90#define FRF_BZ_INT_ADR_CHAR_LBN 0
91#define FRF_BZ_INT_ADR_CHAR_WIDTH 64
92
93/* INT_ACK_KER: Kernel interrupt acknowledge register */
94#define FR_AA_INT_ACK_KER 0x00000050
95#define FRF_AA_INT_ACK_KER_FIELD_LBN 0
96#define FRF_AA_INT_ACK_KER_FIELD_WIDTH 32
97
98/* INT_ISR0_REG: Function 0 Interrupt Acknowlege Status register */
99#define FR_BZ_INT_ISR0 0x00000090
100#define FRF_BZ_INT_ISR_REG_LBN 0
101#define FRF_BZ_INT_ISR_REG_WIDTH 64
102
103/* HW_INIT_REG: Hardware initialization register */
104#define FR_AZ_HW_INIT 0x000000c0
105#define FRF_BB_BDMRD_CPLF_FULL_LBN 124
106#define FRF_BB_BDMRD_CPLF_FULL_WIDTH 1
107#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_LBN 121
108#define FRF_BB_PCIE_CPL_TIMEOUT_CTRL_WIDTH 3
109#define FRF_CZ_TX_MRG_TAGS_LBN 120
110#define FRF_CZ_TX_MRG_TAGS_WIDTH 1
111#define FRF_AB_TRGT_MASK_ALL_LBN 100
112#define FRF_AB_TRGT_MASK_ALL_WIDTH 1
113#define FRF_AZ_DOORBELL_DROP_LBN 92
114#define FRF_AZ_DOORBELL_DROP_WIDTH 8
115#define FRF_AB_TX_RREQ_MASK_EN_LBN 76
116#define FRF_AB_TX_RREQ_MASK_EN_WIDTH 1
117#define FRF_AB_PE_EIDLE_DIS_LBN 75
118#define FRF_AB_PE_EIDLE_DIS_WIDTH 1
119#define FRF_AA_FC_BLOCKING_EN_LBN 45
120#define FRF_AA_FC_BLOCKING_EN_WIDTH 1
121#define FRF_BZ_B2B_REQ_EN_LBN 45
122#define FRF_BZ_B2B_REQ_EN_WIDTH 1
123#define FRF_AA_B2B_REQ_EN_LBN 44
124#define FRF_AA_B2B_REQ_EN_WIDTH 1
125#define FRF_BB_FC_BLOCKING_EN_LBN 44
126#define FRF_BB_FC_BLOCKING_EN_WIDTH 1
127#define FRF_AZ_POST_WR_MASK_LBN 40
128#define FRF_AZ_POST_WR_MASK_WIDTH 4
129#define FRF_AZ_TLP_TC_LBN 34
130#define FRF_AZ_TLP_TC_WIDTH 3
131#define FRF_AZ_TLP_ATTR_LBN 32
132#define FRF_AZ_TLP_ATTR_WIDTH 2
133#define FRF_AB_INTB_VEC_LBN 24
134#define FRF_AB_INTB_VEC_WIDTH 5
135#define FRF_AB_INTA_VEC_LBN 16
136#define FRF_AB_INTA_VEC_WIDTH 5
137#define FRF_AZ_WD_TIMER_LBN 8
138#define FRF_AZ_WD_TIMER_WIDTH 8
139#define FRF_AZ_US_DISABLE_LBN 5
140#define FRF_AZ_US_DISABLE_WIDTH 1
141#define FRF_AZ_TLP_EP_LBN 4
142#define FRF_AZ_TLP_EP_WIDTH 1
143#define FRF_AZ_ATTR_SEL_LBN 3
144#define FRF_AZ_ATTR_SEL_WIDTH 1
145#define FRF_AZ_TD_SEL_LBN 1
146#define FRF_AZ_TD_SEL_WIDTH 1
147#define FRF_AZ_TLP_TD_LBN 0
148#define FRF_AZ_TLP_TD_WIDTH 1
149
150/* EE_SPI_HCMD_REG: SPI host command register */
151#define FR_AB_EE_SPI_HCMD 0x00000100
152#define FRF_AB_EE_SPI_HCMD_CMD_EN_LBN 31
153#define FRF_AB_EE_SPI_HCMD_CMD_EN_WIDTH 1
154#define FRF_AB_EE_WR_TIMER_ACTIVE_LBN 28
155#define FRF_AB_EE_WR_TIMER_ACTIVE_WIDTH 1
156#define FRF_AB_EE_SPI_HCMD_SF_SEL_LBN 24
157#define FRF_AB_EE_SPI_HCMD_SF_SEL_WIDTH 1
158#define FRF_AB_EE_SPI_HCMD_DABCNT_LBN 16
159#define FRF_AB_EE_SPI_HCMD_DABCNT_WIDTH 5
160#define FRF_AB_EE_SPI_HCMD_READ_LBN 15
161#define FRF_AB_EE_SPI_HCMD_READ_WIDTH 1
162#define FRF_AB_EE_SPI_HCMD_DUBCNT_LBN 12
163#define FRF_AB_EE_SPI_HCMD_DUBCNT_WIDTH 2
164#define FRF_AB_EE_SPI_HCMD_ADBCNT_LBN 8
165#define FRF_AB_EE_SPI_HCMD_ADBCNT_WIDTH 2
166#define FRF_AB_EE_SPI_HCMD_ENC_LBN 0
167#define FRF_AB_EE_SPI_HCMD_ENC_WIDTH 8
168
169/* USR_EV_CFG: User Level Event Configuration register */
170#define FR_CZ_USR_EV_CFG 0x00000100
171#define FRF_CZ_USREV_DIS_LBN 16
172#define FRF_CZ_USREV_DIS_WIDTH 1
173#define FRF_CZ_DFLT_EVQ_LBN 0
174#define FRF_CZ_DFLT_EVQ_WIDTH 10
175
176/* EE_SPI_HADR_REG: SPI host address register */
177#define FR_AB_EE_SPI_HADR 0x00000110
178#define FRF_AB_EE_SPI_HADR_DUBYTE_LBN 24
179#define FRF_AB_EE_SPI_HADR_DUBYTE_WIDTH 8
180#define FRF_AB_EE_SPI_HADR_ADR_LBN 0
181#define FRF_AB_EE_SPI_HADR_ADR_WIDTH 24
182
183/* EE_SPI_HDATA_REG: SPI host data register */
184#define FR_AB_EE_SPI_HDATA 0x00000120
185#define FRF_AB_EE_SPI_HDATA3_LBN 96
186#define FRF_AB_EE_SPI_HDATA3_WIDTH 32
187#define FRF_AB_EE_SPI_HDATA2_LBN 64
188#define FRF_AB_EE_SPI_HDATA2_WIDTH 32
189#define FRF_AB_EE_SPI_HDATA1_LBN 32
190#define FRF_AB_EE_SPI_HDATA1_WIDTH 32
191#define FRF_AB_EE_SPI_HDATA0_LBN 0
192#define FRF_AB_EE_SPI_HDATA0_WIDTH 32
193
194/* EE_BASE_PAGE_REG: Expansion ROM base mirror register */
195#define FR_AB_EE_BASE_PAGE 0x00000130
196#define FRF_AB_EE_EXPROM_MASK_LBN 16
197#define FRF_AB_EE_EXPROM_MASK_WIDTH 13
198#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_LBN 0
199#define FRF_AB_EE_EXP_ROM_WINDOW_BASE_WIDTH 13
200
201/* EE_VPD_CFG0_REG: SPI/VPD configuration register 0 */
202#define FR_AB_EE_VPD_CFG0 0x00000140
203#define FRF_AB_EE_SF_FASTRD_EN_LBN 127
204#define FRF_AB_EE_SF_FASTRD_EN_WIDTH 1
205#define FRF_AB_EE_SF_CLOCK_DIV_LBN 120
206#define FRF_AB_EE_SF_CLOCK_DIV_WIDTH 7
207#define FRF_AB_EE_VPD_WIP_POLL_LBN 119
208#define FRF_AB_EE_VPD_WIP_POLL_WIDTH 1
209#define FRF_AB_EE_EE_CLOCK_DIV_LBN 112
210#define FRF_AB_EE_EE_CLOCK_DIV_WIDTH 7
211#define FRF_AB_EE_EE_WR_TMR_VALUE_LBN 96
212#define FRF_AB_EE_EE_WR_TMR_VALUE_WIDTH 16
213#define FRF_AB_EE_VPDW_LENGTH_LBN 80
214#define FRF_AB_EE_VPDW_LENGTH_WIDTH 15
215#define FRF_AB_EE_VPDW_BASE_LBN 64
216#define FRF_AB_EE_VPDW_BASE_WIDTH 15
217#define FRF_AB_EE_VPD_WR_CMD_EN_LBN 56
218#define FRF_AB_EE_VPD_WR_CMD_EN_WIDTH 8
219#define FRF_AB_EE_VPD_BASE_LBN 32
220#define FRF_AB_EE_VPD_BASE_WIDTH 24
221#define FRF_AB_EE_VPD_LENGTH_LBN 16
222#define FRF_AB_EE_VPD_LENGTH_WIDTH 15
223#define FRF_AB_EE_VPD_AD_SIZE_LBN 8
224#define FRF_AB_EE_VPD_AD_SIZE_WIDTH 5
225#define FRF_AB_EE_VPD_ACCESS_ON_LBN 5
226#define FRF_AB_EE_VPD_ACCESS_ON_WIDTH 1
227#define FRF_AB_EE_VPD_ACCESS_BLOCK_LBN 4
228#define FRF_AB_EE_VPD_ACCESS_BLOCK_WIDTH 1
229#define FRF_AB_EE_VPD_DEV_SF_SEL_LBN 2
230#define FRF_AB_EE_VPD_DEV_SF_SEL_WIDTH 1
231#define FRF_AB_EE_VPD_EN_AD9_MODE_LBN 1
232#define FRF_AB_EE_VPD_EN_AD9_MODE_WIDTH 1
233#define FRF_AB_EE_VPD_EN_LBN 0
234#define FRF_AB_EE_VPD_EN_WIDTH 1
235
236/* EE_VPD_SW_CNTL_REG: VPD access SW control register */
237#define FR_AB_EE_VPD_SW_CNTL 0x00000150
238#define FRF_AB_EE_VPD_CYCLE_PENDING_LBN 31
239#define FRF_AB_EE_VPD_CYCLE_PENDING_WIDTH 1
240#define FRF_AB_EE_VPD_CYC_WRITE_LBN 28
241#define FRF_AB_EE_VPD_CYC_WRITE_WIDTH 1
242#define FRF_AB_EE_VPD_CYC_ADR_LBN 0
243#define FRF_AB_EE_VPD_CYC_ADR_WIDTH 15
244
245/* EE_VPD_SW_DATA_REG: VPD access SW data register */
246#define FR_AB_EE_VPD_SW_DATA 0x00000160
247#define FRF_AB_EE_VPD_CYC_DAT_LBN 0
248#define FRF_AB_EE_VPD_CYC_DAT_WIDTH 32
249
250/* PBMX_DBG_IADDR_REG: Capture Module address register */
251#define FR_CZ_PBMX_DBG_IADDR 0x000001f0
252#define FRF_CZ_PBMX_DBG_IADDR_LBN 0
253#define FRF_CZ_PBMX_DBG_IADDR_WIDTH 32
254
255/* PCIE_CORE_INDIRECT_REG: Indirect Access to PCIE Core registers */
256#define FR_BB_PCIE_CORE_INDIRECT 0x000001f0
257#define FRF_BB_PCIE_CORE_TARGET_DATA_LBN 32
258#define FRF_BB_PCIE_CORE_TARGET_DATA_WIDTH 32
259#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_LBN 15
260#define FRF_BB_PCIE_CORE_INDIRECT_ACCESS_DIR_WIDTH 1
261#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_LBN 0
262#define FRF_BB_PCIE_CORE_TARGET_REG_ADRS_WIDTH 12
263
264/* PBMX_DBG_IDATA_REG: Capture Module data register */
265#define FR_CZ_PBMX_DBG_IDATA 0x000001f8
266#define FRF_CZ_PBMX_DBG_IDATA_LBN 0
267#define FRF_CZ_PBMX_DBG_IDATA_WIDTH 64
268
269/* NIC_STAT_REG: NIC status register */
270#define FR_AB_NIC_STAT 0x00000200
271#define FRF_BB_AER_DIS_LBN 34
272#define FRF_BB_AER_DIS_WIDTH 1
273#define FRF_BB_EE_STRAP_EN_LBN 31
274#define FRF_BB_EE_STRAP_EN_WIDTH 1
275#define FRF_BB_EE_STRAP_LBN 24
276#define FRF_BB_EE_STRAP_WIDTH 4
277#define FRF_BB_REVISION_ID_LBN 17
278#define FRF_BB_REVISION_ID_WIDTH 7
279#define FRF_AB_ONCHIP_SRAM_LBN 16
280#define FRF_AB_ONCHIP_SRAM_WIDTH 1
281#define FRF_AB_SF_PRST_LBN 9
282#define FRF_AB_SF_PRST_WIDTH 1
283#define FRF_AB_EE_PRST_LBN 8
284#define FRF_AB_EE_PRST_WIDTH 1
285#define FRF_AB_ATE_MODE_LBN 3
286#define FRF_AB_ATE_MODE_WIDTH 1
287#define FRF_AB_STRAP_PINS_LBN 0
288#define FRF_AB_STRAP_PINS_WIDTH 3
289
290/* GPIO_CTL_REG: GPIO control register */
291#define FR_AB_GPIO_CTL 0x00000210
292#define FRF_AB_GPIO_OUT3_LBN 112
293#define FRF_AB_GPIO_OUT3_WIDTH 16
294#define FRF_AB_GPIO_IN3_LBN 104
295#define FRF_AB_GPIO_IN3_WIDTH 8
296#define FRF_AB_GPIO_PWRUP_VALUE3_LBN 96
297#define FRF_AB_GPIO_PWRUP_VALUE3_WIDTH 8
298#define FRF_AB_GPIO_OUT2_LBN 80
299#define FRF_AB_GPIO_OUT2_WIDTH 16
300#define FRF_AB_GPIO_IN2_LBN 72
301#define FRF_AB_GPIO_IN2_WIDTH 8
302#define FRF_AB_GPIO_PWRUP_VALUE2_LBN 64
303#define FRF_AB_GPIO_PWRUP_VALUE2_WIDTH 8
304#define FRF_AB_GPIO15_OEN_LBN 63
305#define FRF_AB_GPIO15_OEN_WIDTH 1
306#define FRF_AB_GPIO14_OEN_LBN 62
307#define FRF_AB_GPIO14_OEN_WIDTH 1
308#define FRF_AB_GPIO13_OEN_LBN 61
309#define FRF_AB_GPIO13_OEN_WIDTH 1
310#define FRF_AB_GPIO12_OEN_LBN 60
311#define FRF_AB_GPIO12_OEN_WIDTH 1
312#define FRF_AB_GPIO11_OEN_LBN 59
313#define FRF_AB_GPIO11_OEN_WIDTH 1
314#define FRF_AB_GPIO10_OEN_LBN 58
315#define FRF_AB_GPIO10_OEN_WIDTH 1
316#define FRF_AB_GPIO9_OEN_LBN 57
317#define FRF_AB_GPIO9_OEN_WIDTH 1
318#define FRF_AB_GPIO8_OEN_LBN 56
319#define FRF_AB_GPIO8_OEN_WIDTH 1
320#define FRF_AB_GPIO15_OUT_LBN 55
321#define FRF_AB_GPIO15_OUT_WIDTH 1
322#define FRF_AB_GPIO14_OUT_LBN 54
323#define FRF_AB_GPIO14_OUT_WIDTH 1
324#define FRF_AB_GPIO13_OUT_LBN 53
325#define FRF_AB_GPIO13_OUT_WIDTH 1
326#define FRF_AB_GPIO12_OUT_LBN 52
327#define FRF_AB_GPIO12_OUT_WIDTH 1
328#define FRF_AB_GPIO11_OUT_LBN 51
329#define FRF_AB_GPIO11_OUT_WIDTH 1
330#define FRF_AB_GPIO10_OUT_LBN 50
331#define FRF_AB_GPIO10_OUT_WIDTH 1
332#define FRF_AB_GPIO9_OUT_LBN 49
333#define FRF_AB_GPIO9_OUT_WIDTH 1
334#define FRF_AB_GPIO8_OUT_LBN 48
335#define FRF_AB_GPIO8_OUT_WIDTH 1
336#define FRF_AB_GPIO15_IN_LBN 47
337#define FRF_AB_GPIO15_IN_WIDTH 1
338#define FRF_AB_GPIO14_IN_LBN 46
339#define FRF_AB_GPIO14_IN_WIDTH 1
340#define FRF_AB_GPIO13_IN_LBN 45
341#define FRF_AB_GPIO13_IN_WIDTH 1
342#define FRF_AB_GPIO12_IN_LBN 44
343#define FRF_AB_GPIO12_IN_WIDTH 1
344#define FRF_AB_GPIO11_IN_LBN 43
345#define FRF_AB_GPIO11_IN_WIDTH 1
346#define FRF_AB_GPIO10_IN_LBN 42
347#define FRF_AB_GPIO10_IN_WIDTH 1
348#define FRF_AB_GPIO9_IN_LBN 41
349#define FRF_AB_GPIO9_IN_WIDTH 1
350#define FRF_AB_GPIO8_IN_LBN 40
351#define FRF_AB_GPIO8_IN_WIDTH 1
352#define FRF_AB_GPIO15_PWRUP_VALUE_LBN 39
353#define FRF_AB_GPIO15_PWRUP_VALUE_WIDTH 1
354#define FRF_AB_GPIO14_PWRUP_VALUE_LBN 38
355#define FRF_AB_GPIO14_PWRUP_VALUE_WIDTH 1
356#define FRF_AB_GPIO13_PWRUP_VALUE_LBN 37
357#define FRF_AB_GPIO13_PWRUP_VALUE_WIDTH 1
358#define FRF_AB_GPIO12_PWRUP_VALUE_LBN 36
359#define FRF_AB_GPIO12_PWRUP_VALUE_WIDTH 1
360#define FRF_AB_GPIO11_PWRUP_VALUE_LBN 35
361#define FRF_AB_GPIO11_PWRUP_VALUE_WIDTH 1
362#define FRF_AB_GPIO10_PWRUP_VALUE_LBN 34
363#define FRF_AB_GPIO10_PWRUP_VALUE_WIDTH 1
364#define FRF_AB_GPIO9_PWRUP_VALUE_LBN 33
365#define FRF_AB_GPIO9_PWRUP_VALUE_WIDTH 1
366#define FRF_AB_GPIO8_PWRUP_VALUE_LBN 32
367#define FRF_AB_GPIO8_PWRUP_VALUE_WIDTH 1
368#define FRF_AB_CLK156_OUT_EN_LBN 31
369#define FRF_AB_CLK156_OUT_EN_WIDTH 1
370#define FRF_AB_USE_NIC_CLK_LBN 30
371#define FRF_AB_USE_NIC_CLK_WIDTH 1
372#define FRF_AB_GPIO5_OEN_LBN 29
373#define FRF_AB_GPIO5_OEN_WIDTH 1
374#define FRF_AB_GPIO4_OEN_LBN 28
375#define FRF_AB_GPIO4_OEN_WIDTH 1
376#define FRF_AB_GPIO3_OEN_LBN 27
377#define FRF_AB_GPIO3_OEN_WIDTH 1
378#define FRF_AB_GPIO2_OEN_LBN 26
379#define FRF_AB_GPIO2_OEN_WIDTH 1
380#define FRF_AB_GPIO1_OEN_LBN 25
381#define FRF_AB_GPIO1_OEN_WIDTH 1
382#define FRF_AB_GPIO0_OEN_LBN 24
383#define FRF_AB_GPIO0_OEN_WIDTH 1
384#define FRF_AB_GPIO7_OUT_LBN 23
385#define FRF_AB_GPIO7_OUT_WIDTH 1
386#define FRF_AB_GPIO6_OUT_LBN 22
387#define FRF_AB_GPIO6_OUT_WIDTH 1
388#define FRF_AB_GPIO5_OUT_LBN 21
389#define FRF_AB_GPIO5_OUT_WIDTH 1
390#define FRF_AB_GPIO4_OUT_LBN 20
391#define FRF_AB_GPIO4_OUT_WIDTH 1
392#define FRF_AB_GPIO3_OUT_LBN 19
393#define FRF_AB_GPIO3_OUT_WIDTH 1
394#define FRF_AB_GPIO2_OUT_LBN 18
395#define FRF_AB_GPIO2_OUT_WIDTH 1
396#define FRF_AB_GPIO1_OUT_LBN 17
397#define FRF_AB_GPIO1_OUT_WIDTH 1
398#define FRF_AB_GPIO0_OUT_LBN 16
399#define FRF_AB_GPIO0_OUT_WIDTH 1
400#define FRF_AB_GPIO7_IN_LBN 15
401#define FRF_AB_GPIO7_IN_WIDTH 1
402#define FRF_AB_GPIO6_IN_LBN 14
403#define FRF_AB_GPIO6_IN_WIDTH 1
404#define FRF_AB_GPIO5_IN_LBN 13
405#define FRF_AB_GPIO5_IN_WIDTH 1
406#define FRF_AB_GPIO4_IN_LBN 12
407#define FRF_AB_GPIO4_IN_WIDTH 1
408#define FRF_AB_GPIO3_IN_LBN 11
409#define FRF_AB_GPIO3_IN_WIDTH 1
410#define FRF_AB_GPIO2_IN_LBN 10
411#define FRF_AB_GPIO2_IN_WIDTH 1
412#define FRF_AB_GPIO1_IN_LBN 9
413#define FRF_AB_GPIO1_IN_WIDTH 1
414#define FRF_AB_GPIO0_IN_LBN 8
415#define FRF_AB_GPIO0_IN_WIDTH 1
416#define FRF_AB_GPIO7_PWRUP_VALUE_LBN 7
417#define FRF_AB_GPIO7_PWRUP_VALUE_WIDTH 1
418#define FRF_AB_GPIO6_PWRUP_VALUE_LBN 6
419#define FRF_AB_GPIO6_PWRUP_VALUE_WIDTH 1
420#define FRF_AB_GPIO5_PWRUP_VALUE_LBN 5
421#define FRF_AB_GPIO5_PWRUP_VALUE_WIDTH 1
422#define FRF_AB_GPIO4_PWRUP_VALUE_LBN 4
423#define FRF_AB_GPIO4_PWRUP_VALUE_WIDTH 1
424#define FRF_AB_GPIO3_PWRUP_VALUE_LBN 3
425#define FRF_AB_GPIO3_PWRUP_VALUE_WIDTH 1
426#define FRF_AB_GPIO2_PWRUP_VALUE_LBN 2
427#define FRF_AB_GPIO2_PWRUP_VALUE_WIDTH 1
428#define FRF_AB_GPIO1_PWRUP_VALUE_LBN 1
429#define FRF_AB_GPIO1_PWRUP_VALUE_WIDTH 1
430#define FRF_AB_GPIO0_PWRUP_VALUE_LBN 0
431#define FRF_AB_GPIO0_PWRUP_VALUE_WIDTH 1
432
/* GLB_CTL_REG: Global control register */
/* Field macros come in _LBN/_WIDTH pairs: bit position and width in bits,
 * per the sfc register-field naming convention. */
#define FR_AB_GLB_CTL 0x00000220
#define FRF_AB_EXT_PHY_RST_CTL_LBN 63
#define FRF_AB_EXT_PHY_RST_CTL_WIDTH 1
#define FRF_AB_XAUI_SD_RST_CTL_LBN 62
#define FRF_AB_XAUI_SD_RST_CTL_WIDTH 1
#define FRF_AB_PCIE_SD_RST_CTL_LBN 61
#define FRF_AB_PCIE_SD_RST_CTL_WIDTH 1
#define FRF_AA_PCIX_RST_CTL_LBN 60
#define FRF_AA_PCIX_RST_CTL_WIDTH 1
#define FRF_BB_BIU_RST_CTL_LBN 60
#define FRF_BB_BIU_RST_CTL_WIDTH 1
#define FRF_AB_PCIE_STKY_RST_CTL_LBN 59
#define FRF_AB_PCIE_STKY_RST_CTL_WIDTH 1
#define FRF_AB_PCIE_NSTKY_RST_CTL_LBN 58
#define FRF_AB_PCIE_NSTKY_RST_CTL_WIDTH 1
#define FRF_AB_PCIE_CORE_RST_CTL_LBN 57
#define FRF_AB_PCIE_CORE_RST_CTL_WIDTH 1
#define FRF_AB_XGRX_RST_CTL_LBN 56
#define FRF_AB_XGRX_RST_CTL_WIDTH 1
#define FRF_AB_XGTX_RST_CTL_LBN 55
#define FRF_AB_XGTX_RST_CTL_WIDTH 1
#define FRF_AB_EM_RST_CTL_LBN 54
#define FRF_AB_EM_RST_CTL_WIDTH 1
#define FRF_AB_EV_RST_CTL_LBN 53
#define FRF_AB_EV_RST_CTL_WIDTH 1
#define FRF_AB_SR_RST_CTL_LBN 52
#define FRF_AB_SR_RST_CTL_WIDTH 1
#define FRF_AB_RX_RST_CTL_LBN 51
#define FRF_AB_RX_RST_CTL_WIDTH 1
#define FRF_AB_TX_RST_CTL_LBN 50
#define FRF_AB_TX_RST_CTL_WIDTH 1
#define FRF_AB_EE_RST_CTL_LBN 49
#define FRF_AB_EE_RST_CTL_WIDTH 1
#define FRF_AB_CS_RST_CTL_LBN 48
#define FRF_AB_CS_RST_CTL_WIDTH 1
#define FRF_AB_HOT_RST_CTL_LBN 40
#define FRF_AB_HOT_RST_CTL_WIDTH 2
#define FRF_AB_RST_EXT_PHY_LBN 31
#define FRF_AB_RST_EXT_PHY_WIDTH 1
#define FRF_AB_RST_XAUI_SD_LBN 30
#define FRF_AB_RST_XAUI_SD_WIDTH 1
#define FRF_AB_RST_PCIE_SD_LBN 29
#define FRF_AB_RST_PCIE_SD_WIDTH 1
#define FRF_AA_RST_PCIX_LBN 28
#define FRF_AA_RST_PCIX_WIDTH 1
#define FRF_BB_RST_BIU_LBN 28
#define FRF_BB_RST_BIU_WIDTH 1
#define FRF_AB_RST_PCIE_STKY_LBN 27
#define FRF_AB_RST_PCIE_STKY_WIDTH 1
#define FRF_AB_RST_PCIE_NSTKY_LBN 26
#define FRF_AB_RST_PCIE_NSTKY_WIDTH 1
#define FRF_AB_RST_PCIE_CORE_LBN 25
#define FRF_AB_RST_PCIE_CORE_WIDTH 1
#define FRF_AB_RST_XGRX_LBN 24
#define FRF_AB_RST_XGRX_WIDTH 1
#define FRF_AB_RST_XGTX_LBN 23
#define FRF_AB_RST_XGTX_WIDTH 1
#define FRF_AB_RST_EM_LBN 22
#define FRF_AB_RST_EM_WIDTH 1
#define FRF_AB_RST_EV_LBN 21
#define FRF_AB_RST_EV_WIDTH 1
#define FRF_AB_RST_SR_LBN 20
#define FRF_AB_RST_SR_WIDTH 1
#define FRF_AB_RST_RX_LBN 19
#define FRF_AB_RST_RX_WIDTH 1
#define FRF_AB_RST_TX_LBN 18
#define FRF_AB_RST_TX_WIDTH 1
#define FRF_AB_RST_SF_LBN 17
#define FRF_AB_RST_SF_WIDTH 1
#define FRF_AB_RST_CS_LBN 16
#define FRF_AB_RST_CS_WIDTH 1
#define FRF_AB_INT_RST_DUR_LBN 4
#define FRF_AB_INT_RST_DUR_WIDTH 3
#define FRF_AB_EXT_PHY_RST_DUR_LBN 1
#define FRF_AB_EXT_PHY_RST_DUR_WIDTH 3
/* FFE_* are enumerated field values for EXT_PHY_RST_DUR */
#define FFE_AB_EXT_PHY_RST_DUR_10240US 7
#define FFE_AB_EXT_PHY_RST_DUR_5120US 6
#define FFE_AB_EXT_PHY_RST_DUR_2560US 5
#define FFE_AB_EXT_PHY_RST_DUR_1280US 4
#define FFE_AB_EXT_PHY_RST_DUR_640US 3
#define FFE_AB_EXT_PHY_RST_DUR_320US 2
#define FFE_AB_EXT_PHY_RST_DUR_160US 1
#define FFE_AB_EXT_PHY_RST_DUR_80US 0
#define FRF_AB_SWRST_LBN 0
#define FRF_AB_SWRST_WIDTH 1

/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
#define FR_AZ_FATAL_INTR_KER 0x00000230
#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_LBN 44
#define FRF_CZ_SRAM_PERR_INT_P_KER_EN_WIDTH 1
#define FRF_AB_PCI_BUSERR_INT_KER_EN_LBN 43
#define FRF_AB_PCI_BUSERR_INT_KER_EN_WIDTH 1
#define FRF_CZ_MBU_PERR_INT_KER_EN_LBN 43
#define FRF_CZ_MBU_PERR_INT_KER_EN_WIDTH 1
#define FRF_AZ_SRAM_OOB_INT_KER_EN_LBN 42
#define FRF_AZ_SRAM_OOB_INT_KER_EN_WIDTH 1
#define FRF_AZ_BUFID_OOB_INT_KER_EN_LBN 41
#define FRF_AZ_BUFID_OOB_INT_KER_EN_WIDTH 1
#define FRF_AZ_MEM_PERR_INT_KER_EN_LBN 40
#define FRF_AZ_MEM_PERR_INT_KER_EN_WIDTH 1
#define FRF_AZ_RBUF_OWN_INT_KER_EN_LBN 39
#define FRF_AZ_RBUF_OWN_INT_KER_EN_WIDTH 1
#define FRF_AZ_TBUF_OWN_INT_KER_EN_LBN 38
#define FRF_AZ_TBUF_OWN_INT_KER_EN_WIDTH 1
#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_LBN 37
#define FRF_AZ_RDESCQ_OWN_INT_KER_EN_WIDTH 1
#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_LBN 36
#define FRF_AZ_TDESCQ_OWN_INT_KER_EN_WIDTH 1
#define FRF_AZ_EVQ_OWN_INT_KER_EN_LBN 35
#define FRF_AZ_EVQ_OWN_INT_KER_EN_WIDTH 1
#define FRF_AZ_EVF_OFLO_INT_KER_EN_LBN 34
#define FRF_AZ_EVF_OFLO_INT_KER_EN_WIDTH 1
#define FRF_AZ_ILL_ADR_INT_KER_EN_LBN 33
#define FRF_AZ_ILL_ADR_INT_KER_EN_WIDTH 1
#define FRF_AZ_SRM_PERR_INT_KER_EN_LBN 32
#define FRF_AZ_SRM_PERR_INT_KER_EN_WIDTH 1
#define FRF_CZ_SRAM_PERR_INT_P_KER_LBN 12
#define FRF_CZ_SRAM_PERR_INT_P_KER_WIDTH 1
#define FRF_AB_PCI_BUSERR_INT_KER_LBN 11
#define FRF_AB_PCI_BUSERR_INT_KER_WIDTH 1
#define FRF_CZ_MBU_PERR_INT_KER_LBN 11
#define FRF_CZ_MBU_PERR_INT_KER_WIDTH 1
#define FRF_AZ_SRAM_OOB_INT_KER_LBN 10
#define FRF_AZ_SRAM_OOB_INT_KER_WIDTH 1
#define FRF_AZ_BUFID_DC_OOB_INT_KER_LBN 9
#define FRF_AZ_BUFID_DC_OOB_INT_KER_WIDTH 1
#define FRF_AZ_MEM_PERR_INT_KER_LBN 8
#define FRF_AZ_MEM_PERR_INT_KER_WIDTH 1
#define FRF_AZ_RBUF_OWN_INT_KER_LBN 7
#define FRF_AZ_RBUF_OWN_INT_KER_WIDTH 1
#define FRF_AZ_TBUF_OWN_INT_KER_LBN 6
#define FRF_AZ_TBUF_OWN_INT_KER_WIDTH 1
#define FRF_AZ_RDESCQ_OWN_INT_KER_LBN 5
#define FRF_AZ_RDESCQ_OWN_INT_KER_WIDTH 1
#define FRF_AZ_TDESCQ_OWN_INT_KER_LBN 4
#define FRF_AZ_TDESCQ_OWN_INT_KER_WIDTH 1
#define FRF_AZ_EVQ_OWN_INT_KER_LBN 3
#define FRF_AZ_EVQ_OWN_INT_KER_WIDTH 1
#define FRF_AZ_EVF_OFLO_INT_KER_LBN 2
#define FRF_AZ_EVF_OFLO_INT_KER_WIDTH 1
#define FRF_AZ_ILL_ADR_INT_KER_LBN 1
#define FRF_AZ_ILL_ADR_INT_KER_WIDTH 1
#define FRF_AZ_SRM_PERR_INT_KER_LBN 0
#define FRF_AZ_SRM_PERR_INT_KER_WIDTH 1

/* FATAL_INTR_REG_CHAR: Fatal interrupt register for Char */
#define FR_BZ_FATAL_INTR_CHAR 0x00000240
#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_LBN 44
#define FRF_CZ_SRAM_PERR_INT_P_CHAR_EN_WIDTH 1
#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_LBN 43
#define FRF_BB_PCI_BUSERR_INT_CHAR_EN_WIDTH 1
#define FRF_CZ_MBU_PERR_INT_CHAR_EN_LBN 43
#define FRF_CZ_MBU_PERR_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_LBN 42
#define FRF_BZ_SRAM_OOB_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_LBN 41
#define FRF_BZ_BUFID_OOB_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_MEM_PERR_INT_CHAR_EN_LBN 40
#define FRF_BZ_MEM_PERR_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_LBN 39
#define FRF_BZ_RBUF_OWN_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_LBN 38
#define FRF_BZ_TBUF_OWN_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_LBN 37
#define FRF_BZ_RDESCQ_OWN_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_LBN 36
#define FRF_BZ_TDESCQ_OWN_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_LBN 35
#define FRF_BZ_EVQ_OWN_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_LBN 34
#define FRF_BZ_EVF_OFLO_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_ILL_ADR_INT_CHAR_EN_LBN 33
#define FRF_BZ_ILL_ADR_INT_CHAR_EN_WIDTH 1
#define FRF_BZ_SRM_PERR_INT_CHAR_EN_LBN 32
#define FRF_BZ_SRM_PERR_INT_CHAR_EN_WIDTH 1
#define FRF_CZ_SRAM_PERR_INT_P_CHAR_LBN 12
#define FRF_CZ_SRAM_PERR_INT_P_CHAR_WIDTH 1
#define FRF_BB_PCI_BUSERR_INT_CHAR_LBN 11
#define FRF_BB_PCI_BUSERR_INT_CHAR_WIDTH 1
#define FRF_CZ_MBU_PERR_INT_CHAR_LBN 11
#define FRF_CZ_MBU_PERR_INT_CHAR_WIDTH 1
#define FRF_BZ_SRAM_OOB_INT_CHAR_LBN 10
#define FRF_BZ_SRAM_OOB_INT_CHAR_WIDTH 1
#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_LBN 9
#define FRF_BZ_BUFID_DC_OOB_INT_CHAR_WIDTH 1
#define FRF_BZ_MEM_PERR_INT_CHAR_LBN 8
#define FRF_BZ_MEM_PERR_INT_CHAR_WIDTH 1
#define FRF_BZ_RBUF_OWN_INT_CHAR_LBN 7
#define FRF_BZ_RBUF_OWN_INT_CHAR_WIDTH 1
#define FRF_BZ_TBUF_OWN_INT_CHAR_LBN 6
#define FRF_BZ_TBUF_OWN_INT_CHAR_WIDTH 1
#define FRF_BZ_RDESCQ_OWN_INT_CHAR_LBN 5
#define FRF_BZ_RDESCQ_OWN_INT_CHAR_WIDTH 1
#define FRF_BZ_TDESCQ_OWN_INT_CHAR_LBN 4
#define FRF_BZ_TDESCQ_OWN_INT_CHAR_WIDTH 1
#define FRF_BZ_EVQ_OWN_INT_CHAR_LBN 3
#define FRF_BZ_EVQ_OWN_INT_CHAR_WIDTH 1
#define FRF_BZ_EVF_OFLO_INT_CHAR_LBN 2
#define FRF_BZ_EVF_OFLO_INT_CHAR_WIDTH 1
#define FRF_BZ_ILL_ADR_INT_CHAR_LBN 1
#define FRF_BZ_ILL_ADR_INT_CHAR_WIDTH 1
#define FRF_BZ_SRM_PERR_INT_CHAR_LBN 0
#define FRF_BZ_SRM_PERR_INT_CHAR_WIDTH 1

/* DP_CTRL_REG: Datapath control register */
#define FR_BZ_DP_CTRL 0x00000250
#define FRF_BZ_FLS_EVQ_ID_LBN 0
#define FRF_BZ_FLS_EVQ_ID_WIDTH 12

/* MEM_STAT_REG: Memory status register */
#define FR_AZ_MEM_STAT 0x00000260
#define FRF_AB_MEM_PERR_VEC_LBN 53
#define FRF_AB_MEM_PERR_VEC_WIDTH 38
#define FRF_AB_MBIST_CORR_LBN 38
#define FRF_AB_MBIST_CORR_WIDTH 15
#define FRF_AB_MBIST_ERR_LBN 0
#define FRF_AB_MBIST_ERR_WIDTH 40
#define FRF_CZ_MEM_PERR_VEC_LBN 0
#define FRF_CZ_MEM_PERR_VEC_WIDTH 35

/* CS_DEBUG_REG: Debug register */
#define FR_AZ_CS_DEBUG 0x00000270
#define FRF_AB_GLB_DEBUG2_SEL_LBN 50
#define FRF_AB_GLB_DEBUG2_SEL_WIDTH 3
#define FRF_AB_DEBUG_BLK_SEL2_LBN 47
#define FRF_AB_DEBUG_BLK_SEL2_WIDTH 3
#define FRF_AB_DEBUG_BLK_SEL1_LBN 44
#define FRF_AB_DEBUG_BLK_SEL1_WIDTH 3
#define FRF_AB_DEBUG_BLK_SEL0_LBN 41
#define FRF_AB_DEBUG_BLK_SEL0_WIDTH 3
#define FRF_CZ_CS_PORT_NUM_LBN 40
#define FRF_CZ_CS_PORT_NUM_WIDTH 2
#define FRF_AB_MISC_DEBUG_ADDR_LBN 36
#define FRF_AB_MISC_DEBUG_ADDR_WIDTH 5
#define FRF_AB_SERDES_DEBUG_ADDR_LBN 31
#define FRF_AB_SERDES_DEBUG_ADDR_WIDTH 5
#define FRF_CZ_CS_PORT_FPE_LBN 1
#define FRF_CZ_CS_PORT_FPE_WIDTH 35
#define FRF_AB_EM_DEBUG_ADDR_LBN 26
#define FRF_AB_EM_DEBUG_ADDR_WIDTH 5
#define FRF_AB_SR_DEBUG_ADDR_LBN 21
#define FRF_AB_SR_DEBUG_ADDR_WIDTH 5
#define FRF_AB_EV_DEBUG_ADDR_LBN 16
#define FRF_AB_EV_DEBUG_ADDR_WIDTH 5
#define FRF_AB_RX_DEBUG_ADDR_LBN 11
#define FRF_AB_RX_DEBUG_ADDR_WIDTH 5
#define FRF_AB_TX_DEBUG_ADDR_LBN 6
#define FRF_AB_TX_DEBUG_ADDR_WIDTH 5
#define FRF_AB_CS_BIU_DEBUG_ADDR_LBN 1
#define FRF_AB_CS_BIU_DEBUG_ADDR_WIDTH 5
#define FRF_AZ_CS_DEBUG_EN_LBN 0
#define FRF_AZ_CS_DEBUG_EN_WIDTH 1

/* DRIVER_REG: Driver scratch register [0-7] */
#define FR_AZ_DRIVER 0x00000280
#define FR_AZ_DRIVER_STEP 16
#define FR_AZ_DRIVER_ROWS 8
#define FRF_AZ_DRIVER_DW0_LBN 0
#define FRF_AZ_DRIVER_DW0_WIDTH 32

/* ALTERA_BUILD_REG: Altera build register */
#define FR_AZ_ALTERA_BUILD 0x00000300
#define FRF_AZ_ALTERA_BUILD_VER_LBN 0
#define FRF_AZ_ALTERA_BUILD_VER_WIDTH 32

/* CSR_SPARE_REG: Spare register */
#define FR_AZ_CSR_SPARE 0x00000310
#define FRF_AB_MEM_PERR_EN_LBN 64
#define FRF_AB_MEM_PERR_EN_WIDTH 38
#define FRF_CZ_MEM_PERR_EN_LBN 64
#define FRF_CZ_MEM_PERR_EN_WIDTH 35
#define FRF_AB_MEM_PERR_EN_TX_DATA_LBN 72
#define FRF_AB_MEM_PERR_EN_TX_DATA_WIDTH 2
#define FRF_AZ_CSR_SPARE_BITS_LBN 0
#define FRF_AZ_CSR_SPARE_BITS_WIDTH 32

/* PCIE_SD_CTL0123_REG: PCIE SerDes control register 0 to 3 */
#define FR_AB_PCIE_SD_CTL0123 0x00000320
#define FRF_AB_PCIE_TESTSIG_H_LBN 96
#define FRF_AB_PCIE_TESTSIG_H_WIDTH 19
#define FRF_AB_PCIE_TESTSIG_L_LBN 64
#define FRF_AB_PCIE_TESTSIG_L_WIDTH 19
#define FRF_AB_PCIE_OFFSET_LBN 56
#define FRF_AB_PCIE_OFFSET_WIDTH 8
#define FRF_AB_PCIE_OFFSETEN_H_LBN 55
#define FRF_AB_PCIE_OFFSETEN_H_WIDTH 1
#define FRF_AB_PCIE_OFFSETEN_L_LBN 54
#define FRF_AB_PCIE_OFFSETEN_L_WIDTH 1
#define FRF_AB_PCIE_HIVMODE_H_LBN 53
#define FRF_AB_PCIE_HIVMODE_H_WIDTH 1
#define FRF_AB_PCIE_HIVMODE_L_LBN 52
#define FRF_AB_PCIE_HIVMODE_L_WIDTH 1
#define FRF_AB_PCIE_PARRESET_H_LBN 51
#define FRF_AB_PCIE_PARRESET_H_WIDTH 1
#define FRF_AB_PCIE_PARRESET_L_LBN 50
#define FRF_AB_PCIE_PARRESET_L_WIDTH 1
#define FRF_AB_PCIE_LPBKWDRV_H_LBN 49
#define FRF_AB_PCIE_LPBKWDRV_H_WIDTH 1
#define FRF_AB_PCIE_LPBKWDRV_L_LBN 48
#define FRF_AB_PCIE_LPBKWDRV_L_WIDTH 1
#define FRF_AB_PCIE_LPBK_LBN 40
#define FRF_AB_PCIE_LPBK_WIDTH 8
#define FRF_AB_PCIE_PARLPBK_LBN 32
#define FRF_AB_PCIE_PARLPBK_WIDTH 8
#define FRF_AB_PCIE_RXTERMADJ_H_LBN 30
#define FRF_AB_PCIE_RXTERMADJ_H_WIDTH 2
#define FRF_AB_PCIE_RXTERMADJ_L_LBN 28
#define FRF_AB_PCIE_RXTERMADJ_L_WIDTH 2
#define FFE_AB_PCIE_RXTERMADJ_MIN15PCNT 3
#define FFE_AB_PCIE_RXTERMADJ_PL10PCNT 2
#define FFE_AB_PCIE_RXTERMADJ_MIN17PCNT 1
#define FFE_AB_PCIE_RXTERMADJ_NOMNL 0
#define FRF_AB_PCIE_TXTERMADJ_H_LBN 26
#define FRF_AB_PCIE_TXTERMADJ_H_WIDTH 2
#define FRF_AB_PCIE_TXTERMADJ_L_LBN 24
#define FRF_AB_PCIE_TXTERMADJ_L_WIDTH 2
#define FFE_AB_PCIE_TXTERMADJ_MIN15PCNT 3
#define FFE_AB_PCIE_TXTERMADJ_PL10PCNT 2
#define FFE_AB_PCIE_TXTERMADJ_MIN17PCNT 1
#define FFE_AB_PCIE_TXTERMADJ_NOMNL 0
#define FRF_AB_PCIE_RXEQCTL_H_LBN 18
#define FRF_AB_PCIE_RXEQCTL_H_WIDTH 2
#define FRF_AB_PCIE_RXEQCTL_L_LBN 16
#define FRF_AB_PCIE_RXEQCTL_L_WIDTH 2
#define FFE_AB_PCIE_RXEQCTL_OFF_ALT 3
#define FFE_AB_PCIE_RXEQCTL_OFF 2
#define FFE_AB_PCIE_RXEQCTL_MIN 1
#define FFE_AB_PCIE_RXEQCTL_MAX 0
#define FRF_AB_PCIE_HIDRV_LBN 8
#define FRF_AB_PCIE_HIDRV_WIDTH 8
#define FRF_AB_PCIE_LODRV_LBN 0
#define FRF_AB_PCIE_LODRV_WIDTH 8

/* PCIE_SD_CTL45_REG: PCIE SerDes control register 4 and 5 */
#define FR_AB_PCIE_SD_CTL45 0x00000330
#define FRF_AB_PCIE_DTX7_LBN 60
#define FRF_AB_PCIE_DTX7_WIDTH 4
#define FRF_AB_PCIE_DTX6_LBN 56
#define FRF_AB_PCIE_DTX6_WIDTH 4
#define FRF_AB_PCIE_DTX5_LBN 52
#define FRF_AB_PCIE_DTX5_WIDTH 4
#define FRF_AB_PCIE_DTX4_LBN 48
#define FRF_AB_PCIE_DTX4_WIDTH 4
#define FRF_AB_PCIE_DTX3_LBN 44
#define FRF_AB_PCIE_DTX3_WIDTH 4
#define FRF_AB_PCIE_DTX2_LBN 40
#define FRF_AB_PCIE_DTX2_WIDTH 4
#define FRF_AB_PCIE_DTX1_LBN 36
#define FRF_AB_PCIE_DTX1_WIDTH 4
#define FRF_AB_PCIE_DTX0_LBN 32
#define FRF_AB_PCIE_DTX0_WIDTH 4
#define FRF_AB_PCIE_DEQ7_LBN 28
#define FRF_AB_PCIE_DEQ7_WIDTH 4
#define FRF_AB_PCIE_DEQ6_LBN 24
#define FRF_AB_PCIE_DEQ6_WIDTH 4
#define FRF_AB_PCIE_DEQ5_LBN 20
#define FRF_AB_PCIE_DEQ5_WIDTH 4
#define FRF_AB_PCIE_DEQ4_LBN 16
#define FRF_AB_PCIE_DEQ4_WIDTH 4
#define FRF_AB_PCIE_DEQ3_LBN 12
#define FRF_AB_PCIE_DEQ3_WIDTH 4
#define FRF_AB_PCIE_DEQ2_LBN 8
#define FRF_AB_PCIE_DEQ2_WIDTH 4
#define FRF_AB_PCIE_DEQ1_LBN 4
#define FRF_AB_PCIE_DEQ1_WIDTH 4
#define FRF_AB_PCIE_DEQ0_LBN 0
#define FRF_AB_PCIE_DEQ0_WIDTH 4

/* PCIE_PCS_CTL_STAT_REG: PCIE PCS control and status register */
#define FR_AB_PCIE_PCS_CTL_STAT 0x00000340
#define FRF_AB_PCIE_PRBSERRCOUNT0_H_LBN 52
#define FRF_AB_PCIE_PRBSERRCOUNT0_H_WIDTH 4
#define FRF_AB_PCIE_PRBSERRCOUNT0_L_LBN 48
#define FRF_AB_PCIE_PRBSERRCOUNT0_L_WIDTH 4
#define FRF_AB_PCIE_PRBSERR_LBN 40
#define FRF_AB_PCIE_PRBSERR_WIDTH 8
#define FRF_AB_PCIE_PRBSERRH0_LBN 32
#define FRF_AB_PCIE_PRBSERRH0_WIDTH 8
#define FRF_AB_PCIE_FASTINIT_H_LBN 15
#define FRF_AB_PCIE_FASTINIT_H_WIDTH 1
#define FRF_AB_PCIE_FASTINIT_L_LBN 14
#define FRF_AB_PCIE_FASTINIT_L_WIDTH 1
#define FRF_AB_PCIE_CTCDISABLE_H_LBN 13
#define FRF_AB_PCIE_CTCDISABLE_H_WIDTH 1
#define FRF_AB_PCIE_CTCDISABLE_L_LBN 12
#define FRF_AB_PCIE_CTCDISABLE_L_WIDTH 1
#define FRF_AB_PCIE_PRBSSYNC_H_LBN 11
#define FRF_AB_PCIE_PRBSSYNC_H_WIDTH 1
#define FRF_AB_PCIE_PRBSSYNC_L_LBN 10
#define FRF_AB_PCIE_PRBSSYNC_L_WIDTH 1
#define FRF_AB_PCIE_PRBSERRACK_H_LBN 9
#define FRF_AB_PCIE_PRBSERRACK_H_WIDTH 1
#define FRF_AB_PCIE_PRBSERRACK_L_LBN 8
#define FRF_AB_PCIE_PRBSERRACK_L_WIDTH 1
#define FRF_AB_PCIE_PRBSSEL_LBN 0
#define FRF_AB_PCIE_PRBSSEL_WIDTH 8

/* DEBUG_DATA_OUT_REG: Live Debug and Debug 2 out ports */
#define FR_BB_DEBUG_DATA_OUT 0x00000350
#define FRF_BB_DEBUG2_PORT_LBN 25
#define FRF_BB_DEBUG2_PORT_WIDTH 15
#define FRF_BB_DEBUG1_PORT_LBN 0
#define FRF_BB_DEBUG1_PORT_WIDTH 25

/* EVQ_RPTR_REGP0: Event queue read pointer register */
#define FR_BZ_EVQ_RPTR_P0 0x00000400
#define FR_BZ_EVQ_RPTR_P0_STEP 8192
#define FR_BZ_EVQ_RPTR_P0_ROWS 1024
/* EVQ_RPTR_REG_KER: Event queue read pointer register */
#define FR_AA_EVQ_RPTR_KER 0x00011b00
#define FR_AA_EVQ_RPTR_KER_STEP 4
#define FR_AA_EVQ_RPTR_KER_ROWS 4
/* EVQ_RPTR_REG: Event queue read pointer register */
#define FR_BZ_EVQ_RPTR 0x00fa0000
#define FR_BZ_EVQ_RPTR_STEP 16
#define FR_BB_EVQ_RPTR_ROWS 4096
#define FR_CZ_EVQ_RPTR_ROWS 1024
/* EVQ_RPTR_REGP123: Event queue read pointer register */
#define FR_BB_EVQ_RPTR_P123 0x01000400
#define FR_BB_EVQ_RPTR_P123_STEP 8192
#define FR_BB_EVQ_RPTR_P123_ROWS 3072
#define FRF_AZ_EVQ_RPTR_VLD_LBN 15
#define FRF_AZ_EVQ_RPTR_VLD_WIDTH 1
#define FRF_AZ_EVQ_RPTR_LBN 0
#define FRF_AZ_EVQ_RPTR_WIDTH 15

/* TIMER_COMMAND_REGP0: Timer Command Registers */
#define FR_BZ_TIMER_COMMAND_P0 0x00000420
#define FR_BZ_TIMER_COMMAND_P0_STEP 8192
#define FR_BZ_TIMER_COMMAND_P0_ROWS 1024
/* TIMER_COMMAND_REG_KER: Timer Command Registers */
#define FR_AA_TIMER_COMMAND_KER 0x00000420
#define FR_AA_TIMER_COMMAND_KER_STEP 8192
#define FR_AA_TIMER_COMMAND_KER_ROWS 4
/* TIMER_COMMAND_REGP123: Timer Command Registers */
#define FR_BB_TIMER_COMMAND_P123 0x01000420
#define FR_BB_TIMER_COMMAND_P123_STEP 8192
#define FR_BB_TIMER_COMMAND_P123_ROWS 3072
#define FRF_CZ_TC_TIMER_MODE_LBN 14
#define FRF_CZ_TC_TIMER_MODE_WIDTH 2
#define FRF_AB_TC_TIMER_MODE_LBN 12
#define FRF_AB_TC_TIMER_MODE_WIDTH 2
#define FRF_CZ_TC_TIMER_VAL_LBN 0
#define FRF_CZ_TC_TIMER_VAL_WIDTH 14
#define FRF_AB_TC_TIMER_VAL_LBN 0
#define FRF_AB_TC_TIMER_VAL_WIDTH 12

/* DRV_EV_REG: Driver generated event register */
#define FR_AZ_DRV_EV 0x00000440
#define FRF_AZ_DRV_EV_QID_LBN 64
#define FRF_AZ_DRV_EV_QID_WIDTH 12
#define FRF_AZ_DRV_EV_DATA_LBN 0
#define FRF_AZ_DRV_EV_DATA_WIDTH 64

/* EVQ_CTL_REG: Event queue control register */
#define FR_AZ_EVQ_CTL 0x00000450
#define FRF_CZ_RX_EVQ_WAKEUP_MASK_LBN 15
#define FRF_CZ_RX_EVQ_WAKEUP_MASK_WIDTH 10
#define FRF_BB_RX_EVQ_WAKEUP_MASK_LBN 15
#define FRF_BB_RX_EVQ_WAKEUP_MASK_WIDTH 6
#define FRF_AZ_EVQ_OWNERR_CTL_LBN 14
#define FRF_AZ_EVQ_OWNERR_CTL_WIDTH 1
#define FRF_AZ_EVQ_FIFO_AF_TH_LBN 7
#define FRF_AZ_EVQ_FIFO_AF_TH_WIDTH 7
#define FRF_AZ_EVQ_FIFO_NOTAF_TH_LBN 0
#define FRF_AZ_EVQ_FIFO_NOTAF_TH_WIDTH 7

/* EVQ_CNT1_REG: Event counter 1 register */
#define FR_AZ_EVQ_CNT1 0x00000460
#define FRF_AZ_EVQ_CNT_PRE_FIFO_LBN 120
#define FRF_AZ_EVQ_CNT_PRE_FIFO_WIDTH 7
#define FRF_AZ_EVQ_CNT_TOBIU_LBN 100
#define FRF_AZ_EVQ_CNT_TOBIU_WIDTH 20
#define FRF_AZ_EVQ_TX_REQ_CNT_LBN 80
#define FRF_AZ_EVQ_TX_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_RX_REQ_CNT_LBN 60
#define FRF_AZ_EVQ_RX_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_EM_REQ_CNT_LBN 40
#define FRF_AZ_EVQ_EM_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_CSR_REQ_CNT_LBN 20
#define FRF_AZ_EVQ_CSR_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_ERR_REQ_CNT_LBN 0
#define FRF_AZ_EVQ_ERR_REQ_CNT_WIDTH 20

/* EVQ_CNT2_REG: Event counter 2 register */
#define FR_AZ_EVQ_CNT2 0x00000470
#define FRF_AZ_EVQ_UPD_REQ_CNT_LBN 104
#define FRF_AZ_EVQ_UPD_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_CLR_REQ_CNT_LBN 84
#define FRF_AZ_EVQ_CLR_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_RDY_CNT_LBN 80
#define FRF_AZ_EVQ_RDY_CNT_WIDTH 4
#define FRF_AZ_EVQ_WU_REQ_CNT_LBN 60
#define FRF_AZ_EVQ_WU_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_WET_REQ_CNT_LBN 40
#define FRF_AZ_EVQ_WET_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_INIT_REQ_CNT_LBN 20
#define FRF_AZ_EVQ_INIT_REQ_CNT_WIDTH 20
#define FRF_AZ_EVQ_TM_REQ_CNT_LBN 0
#define FRF_AZ_EVQ_TM_REQ_CNT_WIDTH 20

/* USR_EV_REG: Event mailbox register */
#define FR_CZ_USR_EV 0x00000540
#define FR_CZ_USR_EV_STEP 8192
#define FR_CZ_USR_EV_ROWS 1024
#define FRF_CZ_USR_EV_DATA_LBN 0
#define FRF_CZ_USR_EV_DATA_WIDTH 32

/* BUF_TBL_CFG_REG: Buffer table configuration register */
#define FR_AZ_BUF_TBL_CFG 0x00000600
#define FRF_AZ_BUF_TBL_MODE_LBN 3
#define FRF_AZ_BUF_TBL_MODE_WIDTH 1

/* SRM_RX_DC_CFG_REG: SRAM receive descriptor cache configuration register */
#define FR_AZ_SRM_RX_DC_CFG 0x00000610
#define FRF_AZ_SRM_CLK_TMP_EN_LBN 21
#define FRF_AZ_SRM_CLK_TMP_EN_WIDTH 1
#define FRF_AZ_SRM_RX_DC_BASE_ADR_LBN 0
#define FRF_AZ_SRM_RX_DC_BASE_ADR_WIDTH 21

/* SRM_TX_DC_CFG_REG: SRAM transmit descriptor cache configuration register */
#define FR_AZ_SRM_TX_DC_CFG 0x00000620
#define FRF_AZ_SRM_TX_DC_BASE_ADR_LBN 0
#define FRF_AZ_SRM_TX_DC_BASE_ADR_WIDTH 21

/* SRM_CFG_REG: SRAM configuration register */
#define FR_AZ_SRM_CFG 0x00000630
#define FRF_AZ_SRM_OOB_ADR_INTEN_LBN 5
#define FRF_AZ_SRM_OOB_ADR_INTEN_WIDTH 1
#define FRF_AZ_SRM_OOB_BUF_INTEN_LBN 4
#define FRF_AZ_SRM_OOB_BUF_INTEN_WIDTH 1
#define FRF_AZ_SRM_INIT_EN_LBN 3
#define FRF_AZ_SRM_INIT_EN_WIDTH 1
#define FRF_AZ_SRM_NUM_BANK_LBN 2
#define FRF_AZ_SRM_NUM_BANK_WIDTH 1
#define FRF_AZ_SRM_BANK_SIZE_LBN 0
#define FRF_AZ_SRM_BANK_SIZE_WIDTH 2

/* BUF_TBL_UPD_REG: Buffer table update register */
#define FR_AZ_BUF_TBL_UPD 0x00000650
#define FRF_AZ_BUF_UPD_CMD_LBN 63
#define FRF_AZ_BUF_UPD_CMD_WIDTH 1
#define FRF_AZ_BUF_CLR_CMD_LBN 62
#define FRF_AZ_BUF_CLR_CMD_WIDTH 1
#define FRF_AZ_BUF_CLR_END_ID_LBN 32
#define FRF_AZ_BUF_CLR_END_ID_WIDTH 20
#define FRF_AZ_BUF_CLR_START_ID_LBN 0
#define FRF_AZ_BUF_CLR_START_ID_WIDTH 20

/* SRM_UPD_EVQ_REG: Buffer table update register */
#define FR_AZ_SRM_UPD_EVQ 0x00000660
#define FRF_AZ_SRM_UPD_EVQ_ID_LBN 0
#define FRF_AZ_SRM_UPD_EVQ_ID_WIDTH 12

/* SRAM_PARITY_REG: SRAM parity register. */
#define FR_AZ_SRAM_PARITY 0x00000670
#define FRF_CZ_BYPASS_ECC_LBN 3
#define FRF_CZ_BYPASS_ECC_WIDTH 1
#define FRF_CZ_SEC_INT_LBN 2
#define FRF_CZ_SEC_INT_WIDTH 1
#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_LBN 1
#define FRF_CZ_FORCE_SRAM_DOUBLE_ERR_WIDTH 1
#define FRF_AB_FORCE_SRAM_PERR_LBN 0
#define FRF_AB_FORCE_SRAM_PERR_WIDTH 1
#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_LBN 0
#define FRF_CZ_FORCE_SRAM_SINGLE_ERR_WIDTH 1

/* RX_CFG_REG: Receive configuration register */
#define FR_AZ_RX_CFG 0x00000800
#define FRF_CZ_RX_MIN_KBUF_SIZE_LBN 72
#define FRF_CZ_RX_MIN_KBUF_SIZE_WIDTH 14
#define FRF_CZ_RX_HDR_SPLIT_EN_LBN 71
#define FRF_CZ_RX_HDR_SPLIT_EN_WIDTH 1
#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_LBN 62
#define FRF_CZ_RX_HDR_SPLIT_PLD_BUF_SIZE_WIDTH 9
#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_LBN 53
#define FRF_CZ_RX_HDR_SPLIT_HDR_BUF_SIZE_WIDTH 9
#define FRF_CZ_RX_PRE_RFF_IPG_LBN 49
#define FRF_CZ_RX_PRE_RFF_IPG_WIDTH 4
#define FRF_BZ_RX_TCP_SUP_LBN 48
#define FRF_BZ_RX_TCP_SUP_WIDTH 1
#define FRF_BZ_RX_INGR_EN_LBN 47
#define FRF_BZ_RX_INGR_EN_WIDTH 1
#define FRF_BZ_RX_IP_HASH_LBN 46
#define FRF_BZ_RX_IP_HASH_WIDTH 1
#define FRF_BZ_RX_HASH_ALG_LBN 45
#define FRF_BZ_RX_HASH_ALG_WIDTH 1
#define FRF_BZ_RX_HASH_INSRT_HDR_LBN 44
#define FRF_BZ_RX_HASH_INSRT_HDR_WIDTH 1
#define FRF_BZ_RX_DESC_PUSH_EN_LBN 43
#define FRF_BZ_RX_DESC_PUSH_EN_WIDTH 1
#define FRF_BZ_RX_RDW_PATCH_EN_LBN 42
#define FRF_BZ_RX_RDW_PATCH_EN_WIDTH 1
#define FRF_BB_RX_PCI_BURST_SIZE_LBN 39
#define FRF_BB_RX_PCI_BURST_SIZE_WIDTH 3
#define FRF_BZ_RX_OWNERR_CTL_LBN 38
#define FRF_BZ_RX_OWNERR_CTL_WIDTH 1
#define FRF_BZ_RX_XON_TX_TH_LBN 33
#define FRF_BZ_RX_XON_TX_TH_WIDTH 5
#define FRF_AA_RX_DESC_PUSH_EN_LBN 35
#define FRF_AA_RX_DESC_PUSH_EN_WIDTH 1
#define FRF_AA_RX_RDW_PATCH_EN_LBN 34
#define FRF_AA_RX_RDW_PATCH_EN_WIDTH 1
#define FRF_AA_RX_PCI_BURST_SIZE_LBN 31
#define FRF_AA_RX_PCI_BURST_SIZE_WIDTH 3
#define FRF_BZ_RX_XOFF_TX_TH_LBN 28
#define FRF_BZ_RX_XOFF_TX_TH_WIDTH 5
#define FRF_AA_RX_OWNERR_CTL_LBN 30
#define FRF_AA_RX_OWNERR_CTL_WIDTH 1
#define FRF_AA_RX_XON_TX_TH_LBN 25
#define FRF_AA_RX_XON_TX_TH_WIDTH 5
#define FRF_BZ_RX_USR_BUF_SIZE_LBN 19
#define FRF_BZ_RX_USR_BUF_SIZE_WIDTH 9
#define FRF_AA_RX_XOFF_TX_TH_LBN 20
#define FRF_AA_RX_XOFF_TX_TH_WIDTH 5
#define FRF_AA_RX_USR_BUF_SIZE_LBN 11
#define FRF_AA_RX_USR_BUF_SIZE_WIDTH 9
#define FRF_BZ_RX_XON_MAC_TH_LBN 10
#define FRF_BZ_RX_XON_MAC_TH_WIDTH 9
#define FRF_AA_RX_XON_MAC_TH_LBN 6
#define FRF_AA_RX_XON_MAC_TH_WIDTH 5
#define FRF_BZ_RX_XOFF_MAC_TH_LBN 1
#define FRF_BZ_RX_XOFF_MAC_TH_WIDTH 9
#define FRF_AA_RX_XOFF_MAC_TH_LBN 1
#define FRF_AA_RX_XOFF_MAC_TH_WIDTH 5
#define FRF_AZ_RX_XOFF_MAC_EN_LBN 0
#define FRF_AZ_RX_XOFF_MAC_EN_WIDTH 1

/* RX_FILTER_CTL_REG: Receive filter control registers */
#define FR_BZ_RX_FILTER_CTL 0x00000810
#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_LBN 94
#define FRF_CZ_ETHERNET_WILDCARD_SEARCH_LIMIT_WIDTH 8
#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_LBN 86
#define FRF_CZ_ETHERNET_FULL_SEARCH_LIMIT_WIDTH 8
#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_LBN 85
#define FRF_CZ_RX_FILTER_ALL_VLAN_ETHERTYPES_WIDTH 1
#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_LBN 69
#define FRF_CZ_RX_VLAN_MATCH_ETHERTYPE_WIDTH 16
#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_LBN 57
#define FRF_CZ_MULTICAST_NOMATCH_Q_ID_WIDTH 12
#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_LBN 56
#define FRF_CZ_MULTICAST_NOMATCH_RSS_ENABLED_WIDTH 1
#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_LBN 55
#define FRF_CZ_MULTICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
#define FRF_CZ_UNICAST_NOMATCH_Q_ID_LBN 43
#define FRF_CZ_UNICAST_NOMATCH_Q_ID_WIDTH 12
#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_LBN 42
#define FRF_CZ_UNICAST_NOMATCH_RSS_ENABLED_WIDTH 1
#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_LBN 41
#define FRF_CZ_UNICAST_NOMATCH_IP_OVERRIDE_WIDTH 1
#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_LBN 40
#define FRF_BZ_SCATTER_ENBL_NO_MATCH_Q_WIDTH 1
#define FRF_BZ_UDP_FULL_SRCH_LIMIT_LBN 32
#define FRF_BZ_UDP_FULL_SRCH_LIMIT_WIDTH 8
#define FRF_BZ_NUM_KER_LBN 24
#define FRF_BZ_NUM_KER_WIDTH 2
#define FRF_BZ_UDP_WILD_SRCH_LIMIT_LBN 16
#define FRF_BZ_UDP_WILD_SRCH_LIMIT_WIDTH 8
#define FRF_BZ_TCP_WILD_SRCH_LIMIT_LBN 8
#define FRF_BZ_TCP_WILD_SRCH_LIMIT_WIDTH 8
#define FRF_BZ_TCP_FULL_SRCH_LIMIT_LBN 0
#define FRF_BZ_TCP_FULL_SRCH_LIMIT_WIDTH 8

/* RX_FLUSH_DESCQ_REG: Receive flush descriptor queue register */
#define FR_AZ_RX_FLUSH_DESCQ 0x00000820
#define FRF_AZ_RX_FLUSH_DESCQ_CMD_LBN 24
#define FRF_AZ_RX_FLUSH_DESCQ_CMD_WIDTH 1
#define FRF_AZ_RX_FLUSH_DESCQ_LBN 0
#define FRF_AZ_RX_FLUSH_DESCQ_WIDTH 12

/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
#define FR_BZ_RX_DESC_UPD_P0 0x00000830
#define FR_BZ_RX_DESC_UPD_P0_STEP 8192
#define FR_BZ_RX_DESC_UPD_P0_ROWS 1024
/* RX_DESC_UPD_REG_KER: Receive descriptor update register. */
#define FR_AA_RX_DESC_UPD_KER 0x00000830
#define FR_AA_RX_DESC_UPD_KER_STEP 8192
#define FR_AA_RX_DESC_UPD_KER_ROWS 4
/* RX_DESC_UPD_REGP123: Receive descriptor update register. */
#define FR_BB_RX_DESC_UPD_P123 0x01000830
#define FR_BB_RX_DESC_UPD_P123_STEP 8192
#define FR_BB_RX_DESC_UPD_P123_ROWS 3072
#define FRF_AZ_RX_DESC_WPTR_LBN 96
#define FRF_AZ_RX_DESC_WPTR_WIDTH 12
#define FRF_AZ_RX_DESC_PUSH_CMD_LBN 95
#define FRF_AZ_RX_DESC_PUSH_CMD_WIDTH 1
#define FRF_AZ_RX_DESC_LBN 0
#define FRF_AZ_RX_DESC_WIDTH 64

/* RX_DC_CFG_REG: Receive descriptor cache configuration register */
#define FR_AZ_RX_DC_CFG 0x00000840
#define FRF_AB_RX_MAX_PF_LBN 2
#define FRF_AB_RX_MAX_PF_WIDTH 2
#define FRF_AZ_RX_DC_SIZE_LBN 0
#define FRF_AZ_RX_DC_SIZE_WIDTH 2
#define FFE_AZ_RX_DC_SIZE_64 3
#define FFE_AZ_RX_DC_SIZE_32 2
#define FFE_AZ_RX_DC_SIZE_16 1
#define FFE_AZ_RX_DC_SIZE_8 0

/* RX_DC_PF_WM_REG: Receive descriptor cache pre-fetch watermark register */
#define FR_AZ_RX_DC_PF_WM 0x00000850
#define FRF_AZ_RX_DC_PF_HWM_LBN 6
#define FRF_AZ_RX_DC_PF_HWM_WIDTH 6
#define FRF_AZ_RX_DC_PF_LWM_LBN 0
#define FRF_AZ_RX_DC_PF_LWM_WIDTH 6

/* RX_RSS_TKEY_REG: RSS Toeplitz hash key */
#define FR_BZ_RX_RSS_TKEY 0x00000860
#define FRF_BZ_RX_RSS_TKEY_HI_LBN 64
#define FRF_BZ_RX_RSS_TKEY_HI_WIDTH 64
#define FRF_BZ_RX_RSS_TKEY_LO_LBN 0
#define FRF_BZ_RX_RSS_TKEY_LO_WIDTH 64

/* RX_NODESC_DROP_REG: Receive dropped packet counter register */
#define FR_AZ_RX_NODESC_DROP 0x00000880
#define FRF_CZ_RX_NODESC_DROP_CNT_LBN 0
#define FRF_CZ_RX_NODESC_DROP_CNT_WIDTH 32
#define FRF_AB_RX_NODESC_DROP_CNT_LBN 0
#define FRF_AB_RX_NODESC_DROP_CNT_WIDTH 16

/* RX_SELF_RST_REG: Receive self reset register */
#define FR_AA_RX_SELF_RST 0x00000890
#define FRF_AA_RX_ISCSI_DIS_LBN 17
#define FRF_AA_RX_ISCSI_DIS_WIDTH 1
#define FRF_AA_RX_SW_RST_REG_LBN 16
#define FRF_AA_RX_SW_RST_REG_WIDTH 1
#define FRF_AA_RX_NODESC_WAIT_DIS_LBN 9
#define FRF_AA_RX_NODESC_WAIT_DIS_WIDTH 1
#define FRF_AA_RX_SELF_RST_EN_LBN 8
#define FRF_AA_RX_SELF_RST_EN_WIDTH 1
#define FRF_AA_RX_MAX_PF_LAT_LBN 4
#define FRF_AA_RX_MAX_PF_LAT_WIDTH 4
#define FRF_AA_RX_MAX_LU_LAT_LBN 0
#define FRF_AA_RX_MAX_LU_LAT_WIDTH 4

/* RX_DEBUG_REG: undocumented register */
#define FR_AZ_RX_DEBUG 0x000008a0
#define FRF_AZ_RX_DEBUG_LBN 0
#define FRF_AZ_RX_DEBUG_WIDTH 64

/* RX_PUSH_DROP_REG: Receive descriptor push dropped counter register */
#define FR_AZ_RX_PUSH_DROP 0x000008b0
#define FRF_AZ_RX_PUSH_DROP_CNT_LBN 0
#define FRF_AZ_RX_PUSH_DROP_CNT_WIDTH 32

/* RX_RSS_IPV6_REG1: IPv6 RSS Toeplitz hash key low bytes */
#define FR_CZ_RX_RSS_IPV6_REG1 0x000008d0
#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_LBN 0
#define FRF_CZ_RX_RSS_IPV6_TKEY_LO_WIDTH 128

/* RX_RSS_IPV6_REG2: IPv6 RSS Toeplitz hash key middle bytes */
#define FR_CZ_RX_RSS_IPV6_REG2 0x000008e0
#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_LBN 0
#define FRF_CZ_RX_RSS_IPV6_TKEY_MID_WIDTH 128

/* RX_RSS_IPV6_REG3: IPv6 RSS Toeplitz hash key upper bytes and IPv6 RSS settings */
#define FR_CZ_RX_RSS_IPV6_REG3 0x000008f0
#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_LBN 66
#define FRF_CZ_RX_RSS_IPV6_THASH_ENABLE_WIDTH 1
#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_LBN 65
#define FRF_CZ_RX_RSS_IPV6_IP_THASH_ENABLE_WIDTH 1
#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_LBN 64
#define FRF_CZ_RX_RSS_IPV6_TCP_SUPPRESS_WIDTH 1
#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_LBN 0
#define FRF_CZ_RX_RSS_IPV6_TKEY_HI_WIDTH 64

/* TX_FLUSH_DESCQ_REG: Transmit flush descriptor queue register */
#define FR_AZ_TX_FLUSH_DESCQ 0x00000a00
#define FRF_AZ_TX_FLUSH_DESCQ_CMD_LBN 12
#define FRF_AZ_TX_FLUSH_DESCQ_CMD_WIDTH 1
#define FRF_AZ_TX_FLUSH_DESCQ_LBN 0
#define FRF_AZ_TX_FLUSH_DESCQ_WIDTH 12

/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
#define FR_BZ_TX_DESC_UPD_P0 0x00000a10
#define FR_BZ_TX_DESC_UPD_P0_STEP 8192
#define FR_BZ_TX_DESC_UPD_P0_ROWS 1024
/* TX_DESC_UPD_REG_KER: Transmit descriptor update register. */
#define FR_AA_TX_DESC_UPD_KER 0x00000a10
#define FR_AA_TX_DESC_UPD_KER_STEP 8192
#define FR_AA_TX_DESC_UPD_KER_ROWS 8
/* TX_DESC_UPD_REGP123: Transmit descriptor update register. */
#define FR_BB_TX_DESC_UPD_P123 0x01000a10
#define FR_BB_TX_DESC_UPD_P123_STEP 8192
#define FR_BB_TX_DESC_UPD_P123_ROWS 3072
#define FRF_AZ_TX_DESC_WPTR_LBN 96
#define FRF_AZ_TX_DESC_WPTR_WIDTH 12
#define FRF_AZ_TX_DESC_PUSH_CMD_LBN 95
#define FRF_AZ_TX_DESC_PUSH_CMD_WIDTH 1
#define FRF_AZ_TX_DESC_LBN 0
#define FRF_AZ_TX_DESC_WIDTH 95

/* TX_DC_CFG_REG: Transmit descriptor cache configuration register */
#define FR_AZ_TX_DC_CFG 0x00000a20
#define FRF_AZ_TX_DC_SIZE_LBN 0
#define FRF_AZ_TX_DC_SIZE_WIDTH 2
#define FFE_AZ_TX_DC_SIZE_32 2
#define FFE_AZ_TX_DC_SIZE_16 1
#define FFE_AZ_TX_DC_SIZE_8 0

/* TX_CHKSM_CFG_REG: Transmit checksum configuration register */
#define FR_AA_TX_CHKSM_CFG 0x00000a30
#define FRF_AA_TX_Q_CHKSM_DIS_96_127_LBN 96
#define FRF_AA_TX_Q_CHKSM_DIS_96_127_WIDTH 32
#define FRF_AA_TX_Q_CHKSM_DIS_64_95_LBN 64
#define FRF_AA_TX_Q_CHKSM_DIS_64_95_WIDTH 32
#define FRF_AA_TX_Q_CHKSM_DIS_32_63_LBN 32
#define FRF_AA_TX_Q_CHKSM_DIS_32_63_WIDTH 32
#define FRF_AA_TX_Q_CHKSM_DIS_0_31_LBN 0
#define FRF_AA_TX_Q_CHKSM_DIS_0_31_WIDTH 32

/* TX_CFG_REG: Transmit configuration register */
#define FR_AZ_TX_CFG 0x00000a50
#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_LBN 114
#define FRF_CZ_TX_CONT_LOOKUP_THRESH_RANGE_WIDTH 8
#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_LBN 113
#define FRF_CZ_TX_FILTER_TEST_MODE_BIT_WIDTH 1
#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_LBN 105
#define FRF_CZ_TX_ETH_FILTER_WILD_SEARCH_RANGE_WIDTH 8
#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_LBN 97
#define FRF_CZ_TX_ETH_FILTER_FULL_SEARCH_RANGE_WIDTH 8
#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_LBN 89
#define FRF_CZ_TX_UDPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_LBN 81
#define FRF_CZ_TX_UDPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_LBN 73
#define FRF_CZ_TX_TCPIP_FILTER_WILD_SEARCH_RANGE_WIDTH 8
#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_LBN 65
#define FRF_CZ_TX_TCPIP_FILTER_FULL_SEARCH_RANGE_WIDTH 8
#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_LBN 64
#define FRF_CZ_TX_FILTER_ALL_VLAN_ETHERTYPES_BIT_WIDTH 1
#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_LBN 48
#define FRF_CZ_TX_VLAN_MATCH_ETHERTYPE_RANGE_WIDTH 16
#define FRF_CZ_TX_FILTER_EN_BIT_LBN 47
#define FRF_CZ_TX_FILTER_EN_BIT_WIDTH 1
#define FRF_AZ_TX_IP_ID_P0_OFS_LBN 16
#define FRF_AZ_TX_IP_ID_P0_OFS_WIDTH 15
#define FRF_AZ_TX_NO_EOP_DISC_EN_LBN 5
#define FRF_AZ_TX_NO_EOP_DISC_EN_WIDTH 1
#define FRF_AZ_TX_P1_PRI_EN_LBN 4
#define FRF_AZ_TX_P1_PRI_EN_WIDTH 1
#define FRF_AZ_TX_OWNERR_CTL_LBN 2
#define FRF_AZ_TX_OWNERR_CTL_WIDTH 1
#define FRF_AA_TX_NON_IP_DROP_DIS_LBN 1
#define FRF_AA_TX_NON_IP_DROP_DIS_WIDTH 1
#define FRF_AZ_TX_IP_ID_REP_EN_LBN 0
#define FRF_AZ_TX_IP_ID_REP_EN_WIDTH 1

/* TX_PUSH_DROP_REG: Transmit push dropped register */
#define FR_AZ_TX_PUSH_DROP 0x00000a60
#define FRF_AZ_TX_PUSH_DROP_CNT_LBN 0
#define FRF_AZ_TX_PUSH_DROP_CNT_WIDTH 32

/* TX_RESERVED_REG: Transmit configuration register */
#define FR_AZ_TX_RESERVED 0x00000a80
#define FRF_AZ_TX_EVT_CNT_LBN 121
#define FRF_AZ_TX_EVT_CNT_WIDTH 7
#define FRF_AZ_TX_PREF_AGE_CNT_LBN 119
#define FRF_AZ_TX_PREF_AGE_CNT_WIDTH 2
#define FRF_AZ_TX_RD_COMP_TMR_LBN 96
#define FRF_AZ_TX_RD_COMP_TMR_WIDTH 23
#define FRF_AZ_TX_PUSH_EN_LBN 89
#define FRF_AZ_TX_PUSH_EN_WIDTH 1
#define FRF_AZ_TX_PUSH_CHK_DIS_LBN 88
#define FRF_AZ_TX_PUSH_CHK_DIS_WIDTH 1
#define FRF_AZ_TX_D_FF_FULL_P0_LBN 85
#define FRF_AZ_TX_D_FF_FULL_P0_WIDTH 1
#define FRF_AZ_TX_DMAR_ST_P0_LBN 81
#define FRF_AZ_TX_DMAR_ST_P0_WIDTH 1
#define FRF_AZ_TX_DMAQ_ST_LBN 78
#define FRF_AZ_TX_DMAQ_ST_WIDTH 1
#define FRF_AZ_TX_RX_SPACER_LBN 64
#define FRF_AZ_TX_RX_SPACER_WIDTH 8
#define FRF_AZ_TX_DROP_ABORT_EN_LBN 60
#define FRF_AZ_TX_DROP_ABORT_EN_WIDTH 1
#define FRF_AZ_TX_SOFT_EVT_EN_LBN 59
#define FRF_AZ_TX_SOFT_EVT_EN_WIDTH 1
#define FRF_AZ_TX_PS_EVT_DIS_LBN 58
#define FRF_AZ_TX_PS_EVT_DIS_WIDTH 1
#define FRF_AZ_TX_RX_SPACER_EN_LBN 57
#define FRF_AZ_TX_RX_SPACER_EN_WIDTH 1
#define FRF_AZ_TX_XP_TIMER_LBN 52
#define FRF_AZ_TX_XP_TIMER_WIDTH 5
#define FRF_AZ_TX_PREF_SPACER_LBN 44
#define FRF_AZ_TX_PREF_SPACER_WIDTH 8
#define FRF_AZ_TX_PREF_WD_TMR_LBN 22
#define FRF_AZ_TX_PREF_WD_TMR_WIDTH 22
#define FRF_AZ_TX_ONLY1TAG_LBN 21
#define FRF_AZ_TX_ONLY1TAG_WIDTH 1
#define FRF_AZ_TX_PREF_THRESHOLD_LBN 19
#define FRF_AZ_TX_PREF_THRESHOLD_WIDTH 2
#define FRF_AZ_TX_ONE_PKT_PER_Q_LBN 18
#define FRF_AZ_TX_ONE_PKT_PER_Q_WIDTH 1
#define FRF_AZ_TX_DIS_NON_IP_EV_LBN 17
#define FRF_AZ_TX_DIS_NON_IP_EV_WIDTH 1
#define FRF_AA_TX_DMA_FF_THR_LBN 16
#define FRF_AA_TX_DMA_FF_THR_WIDTH 1
#define FRF_AZ_TX_DMA_SPACER_LBN 8
#define FRF_AZ_TX_DMA_SPACER_WIDTH 8
#define FRF_AA_TX_TCP_DIS_LBN 7
#define FRF_AA_TX_TCP_DIS_WIDTH 1
#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_LBN 7
#define FRF_BZ_TX_FLUSH_MIN_LEN_EN_WIDTH 1
#define FRF_AA_TX_IP_DIS_LBN 6
#define FRF_AA_TX_IP_DIS_WIDTH 1
#define FRF_AZ_TX_MAX_CPL_LBN 2
#define FRF_AZ_TX_MAX_CPL_WIDTH 2
#define FFE_AZ_TX_MAX_CPL_16 3
#define FFE_AZ_TX_MAX_CPL_8 2
#define FFE_AZ_TX_MAX_CPL_4 1
#define FFE_AZ_TX_MAX_CPL_NOLIMIT 0
#define FRF_AZ_TX_MAX_PREF_LBN 0
#define FRF_AZ_TX_MAX_PREF_WIDTH 2
#define FFE_AZ_TX_MAX_PREF_32 3
#define FFE_AZ_TX_MAX_PREF_16 2
#define FFE_AZ_TX_MAX_PREF_8 1
#define FFE_AZ_TX_MAX_PREF_OFF 0

/* TX_PACE_REG: Transmit pace control register */
#define FR_BZ_TX_PACE 0x00000a90
#define FRF_BZ_TX_PACE_SB_NOT_AF_LBN 19
#define FRF_BZ_TX_PACE_SB_NOT_AF_WIDTH 10
#define FRF_BZ_TX_PACE_SB_AF_LBN 9
#define FRF_BZ_TX_PACE_SB_AF_WIDTH 10
#define FRF_BZ_TX_PACE_FB_BASE_LBN 5
#define FRF_BZ_TX_PACE_FB_BASE_WIDTH 4
#define FRF_BZ_TX_PACE_BIN_TH_LBN 0
#define FRF_BZ_TX_PACE_BIN_TH_WIDTH 5

/* TX_PACE_DROP_QID_REG: PACE Drop QID Counter */
#define FR_BZ_TX_PACE_DROP_QID 0x00000aa0
#define FRF_BZ_TX_PACE_QID_DRP_CNT_LBN 0
#define FRF_BZ_TX_PACE_QID_DRP_CNT_WIDTH 16

/* TX_VLAN_REG: Transmit VLAN tag register */
#define FR_BB_TX_VLAN 0x00000ae0
#define FRF_BB_TX_VLAN_EN_LBN 127
#define FRF_BB_TX_VLAN_EN_WIDTH 1
#define FRF_BB_TX_VLAN7_PORT1_EN_LBN 125
#define FRF_BB_TX_VLAN7_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN7_PORT0_EN_LBN 124
#define FRF_BB_TX_VLAN7_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN7_LBN 112
#define FRF_BB_TX_VLAN7_WIDTH 12
#define FRF_BB_TX_VLAN6_PORT1_EN_LBN 109
#define FRF_BB_TX_VLAN6_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN6_PORT0_EN_LBN 108
#define FRF_BB_TX_VLAN6_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN6_LBN 96
#define FRF_BB_TX_VLAN6_WIDTH 12
#define FRF_BB_TX_VLAN5_PORT1_EN_LBN 93
#define FRF_BB_TX_VLAN5_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN5_PORT0_EN_LBN 92
#define FRF_BB_TX_VLAN5_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN5_LBN 80
#define FRF_BB_TX_VLAN5_WIDTH 12
#define FRF_BB_TX_VLAN4_PORT1_EN_LBN 77
#define FRF_BB_TX_VLAN4_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN4_PORT0_EN_LBN 76
#define FRF_BB_TX_VLAN4_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN4_LBN 64
#define FRF_BB_TX_VLAN4_WIDTH 12
#define FRF_BB_TX_VLAN3_PORT1_EN_LBN 61
#define FRF_BB_TX_VLAN3_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN3_PORT0_EN_LBN 60
#define FRF_BB_TX_VLAN3_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN3_LBN 48
#define FRF_BB_TX_VLAN3_WIDTH 12
#define FRF_BB_TX_VLAN2_PORT1_EN_LBN 45
#define FRF_BB_TX_VLAN2_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN2_PORT0_EN_LBN 44
#define FRF_BB_TX_VLAN2_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN2_LBN 32
#define FRF_BB_TX_VLAN2_WIDTH 12
#define FRF_BB_TX_VLAN1_PORT1_EN_LBN 29
#define FRF_BB_TX_VLAN1_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN1_PORT0_EN_LBN 28
#define FRF_BB_TX_VLAN1_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN1_LBN 16
#define FRF_BB_TX_VLAN1_WIDTH 12
#define FRF_BB_TX_VLAN0_PORT1_EN_LBN 13
#define FRF_BB_TX_VLAN0_PORT1_EN_WIDTH 1
#define FRF_BB_TX_VLAN0_PORT0_EN_LBN 12
#define FRF_BB_TX_VLAN0_PORT0_EN_WIDTH 1
#define FRF_BB_TX_VLAN0_LBN 0
#define FRF_BB_TX_VLAN0_WIDTH 12

/* TX_IPFIL_PORTEN_REG: Transmit filter control register */
#define FR_BZ_TX_IPFIL_PORTEN 0x00000af0
#define FRF_BZ_TX_MADR0_FIL_EN_LBN 64
#define FRF_BZ_TX_MADR0_FIL_EN_WIDTH 1
#define FRF_BB_TX_IPFIL31_PORT_EN_LBN 62
#define FRF_BB_TX_IPFIL31_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL30_PORT_EN_LBN 60
#define FRF_BB_TX_IPFIL30_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL29_PORT_EN_LBN 58
#define FRF_BB_TX_IPFIL29_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL28_PORT_EN_LBN 56
#define FRF_BB_TX_IPFIL28_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL27_PORT_EN_LBN 54
#define FRF_BB_TX_IPFIL27_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL26_PORT_EN_LBN 52
#define FRF_BB_TX_IPFIL26_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL25_PORT_EN_LBN 50
#define FRF_BB_TX_IPFIL25_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL24_PORT_EN_LBN 48
#define FRF_BB_TX_IPFIL24_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL23_PORT_EN_LBN 46
#define FRF_BB_TX_IPFIL23_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL22_PORT_EN_LBN 44
#define FRF_BB_TX_IPFIL22_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL21_PORT_EN_LBN 42
#define FRF_BB_TX_IPFIL21_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL20_PORT_EN_LBN 40
#define FRF_BB_TX_IPFIL20_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL19_PORT_EN_LBN 38
#define FRF_BB_TX_IPFIL19_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL18_PORT_EN_LBN 36
#define FRF_BB_TX_IPFIL18_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL17_PORT_EN_LBN 34
#define FRF_BB_TX_IPFIL17_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL16_PORT_EN_LBN 32
#define FRF_BB_TX_IPFIL16_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL15_PORT_EN_LBN 30
#define FRF_BB_TX_IPFIL15_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL14_PORT_EN_LBN 28
#define FRF_BB_TX_IPFIL14_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL13_PORT_EN_LBN 26
#define FRF_BB_TX_IPFIL13_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL12_PORT_EN_LBN 24
#define FRF_BB_TX_IPFIL12_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL11_PORT_EN_LBN 22
#define FRF_BB_TX_IPFIL11_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL10_PORT_EN_LBN 20
#define FRF_BB_TX_IPFIL10_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL9_PORT_EN_LBN 18
#define FRF_BB_TX_IPFIL9_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL8_PORT_EN_LBN 16
#define FRF_BB_TX_IPFIL8_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL7_PORT_EN_LBN 14
#define FRF_BB_TX_IPFIL7_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL6_PORT_EN_LBN 12
#define FRF_BB_TX_IPFIL6_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL5_PORT_EN_LBN 10
#define FRF_BB_TX_IPFIL5_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL4_PORT_EN_LBN 8
#define FRF_BB_TX_IPFIL4_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL3_PORT_EN_LBN 6
#define FRF_BB_TX_IPFIL3_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL2_PORT_EN_LBN 4
#define FRF_BB_TX_IPFIL2_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL1_PORT_EN_LBN 2
#define FRF_BB_TX_IPFIL1_PORT_EN_WIDTH 1
#define FRF_BB_TX_IPFIL0_PORT_EN_LBN 0
#define FRF_BB_TX_IPFIL0_PORT_EN_WIDTH 1

/* TX_IPFIL_TBL: Transmit IP source address filter table */
#define FR_BB_TX_IPFIL_TBL 0x00000b00
#define FR_BB_TX_IPFIL_TBL_STEP 16
#define FR_BB_TX_IPFIL_TBL_ROWS 16
#define FRF_BB_TX_IPFIL_MASK_1_LBN 96
#define FRF_BB_TX_IPFIL_MASK_1_WIDTH 32
#define FRF_BB_TX_IP_SRC_ADR_1_LBN 64
#define FRF_BB_TX_IP_SRC_ADR_1_WIDTH 32
#define FRF_BB_TX_IPFIL_MASK_0_LBN 32
#define FRF_BB_TX_IPFIL_MASK_0_WIDTH 32
#define FRF_BB_TX_IP_SRC_ADR_0_LBN 0
#define FRF_BB_TX_IP_SRC_ADR_0_WIDTH 32

/* MD_TXD_REG: PHY management transmit data register */
#define FR_AB_MD_TXD 0x00000c00
#define FRF_AB_MD_TXD_LBN 0
#define FRF_AB_MD_TXD_WIDTH 16

/* MD_RXD_REG: PHY management receive data register */
#define FR_AB_MD_RXD 0x00000c10
#define FRF_AB_MD_RXD_LBN 0
#define FRF_AB_MD_RXD_WIDTH 16

/* MD_CS_REG: PHY management configuration & status register */
#define FR_AB_MD_CS 0x00000c20
#define FRF_AB_MD_RD_EN_CMD_LBN 15
#define FRF_AB_MD_RD_EN_CMD_WIDTH 1
#define FRF_AB_MD_WR_EN_CMD_LBN 14
#define FRF_AB_MD_WR_EN_CMD_WIDTH 1
#define FRF_AB_MD_ADDR_CMD_LBN 13
#define FRF_AB_MD_ADDR_CMD_WIDTH 1
#define FRF_AB_MD_PT_LBN 7
#define FRF_AB_MD_PT_WIDTH 3
#define FRF_AB_MD_PL_LBN 6
#define FRF_AB_MD_PL_WIDTH 1
#define FRF_AB_MD_INT_CLR_LBN 5
#define FRF_AB_MD_INT_CLR_WIDTH 1
#define FRF_AB_MD_GC_LBN 4
#define FRF_AB_MD_GC_WIDTH 1
#define FRF_AB_MD_PRSP_LBN 3
#define FRF_AB_MD_PRSP_WIDTH 1
#define FRF_AB_MD_RIC_LBN 2
#define FRF_AB_MD_RIC_WIDTH 1
#define FRF_AB_MD_RDC_LBN 1
#define FRF_AB_MD_RDC_WIDTH 1
#define FRF_AB_MD_WRC_LBN 0
#define FRF_AB_MD_WRC_WIDTH 1

/* MD_PHY_ADR_REG: PHY management PHY address register */
#define FR_AB_MD_PHY_ADR 0x00000c30
#define FRF_AB_MD_PHY_ADR_LBN 0
#define FRF_AB_MD_PHY_ADR_WIDTH 16

/* MD_ID_REG: PHY management ID register */
#define FR_AB_MD_ID 0x00000c40
#define FRF_AB_MD_PRT_ADR_LBN 11
#define FRF_AB_MD_PRT_ADR_WIDTH 5
#define FRF_AB_MD_DEV_ADR_LBN 6
#define FRF_AB_MD_DEV_ADR_WIDTH 5

/* MD_STAT_REG: PHY management status & mask register */
#define FR_AB_MD_STAT 0x00000c50
#define FRF_AB_MD_PINT_LBN 4
#define FRF_AB_MD_PINT_WIDTH 1
#define FRF_AB_MD_DONE_LBN 3
#define FRF_AB_MD_DONE_WIDTH 1
#define FRF_AB_MD_BSERR_LBN 2
#define FRF_AB_MD_BSERR_WIDTH 1
#define FRF_AB_MD_LNFL_LBN 1
#define FRF_AB_MD_LNFL_WIDTH 1
#define FRF_AB_MD_BSY_LBN 0
#define FRF_AB_MD_BSY_WIDTH 1

/* MAC_STAT_DMA_REG: Port MAC statistical counter DMA register */
#define FR_AB_MAC_STAT_DMA 0x00000c60
#define FRF_AB_MAC_STAT_DMA_CMD_LBN 48
#define FRF_AB_MAC_STAT_DMA_CMD_WIDTH 1
#define FRF_AB_MAC_STAT_DMA_ADR_LBN 0
#define FRF_AB_MAC_STAT_DMA_ADR_WIDTH 48

/* MAC_CTRL_REG: Port MAC control register */
#define FR_AB_MAC_CTRL 0x00000c80
#define FRF_AB_MAC_XOFF_VAL_LBN 16
#define FRF_AB_MAC_XOFF_VAL_WIDTH 16
#define FRF_BB_TXFIFO_DRAIN_EN_LBN 7
#define FRF_BB_TXFIFO_DRAIN_EN_WIDTH 1
#define FRF_AB_MAC_XG_DISTXCRC_LBN 5
#define FRF_AB_MAC_XG_DISTXCRC_WIDTH 1
#define FRF_AB_MAC_BCAD_ACPT_LBN 4
#define FRF_AB_MAC_BCAD_ACPT_WIDTH 1
#define FRF_AB_MAC_UC_PROM_LBN 3
#define FRF_AB_MAC_UC_PROM_WIDTH 1
#define FRF_AB_MAC_LINK_STATUS_LBN 2
#define FRF_AB_MAC_LINK_STATUS_WIDTH 1
#define FRF_AB_MAC_SPEED_LBN 0
#define FRF_AB_MAC_SPEED_WIDTH 2
#define FFE_AB_MAC_SPEED_10G 3
#define FFE_AB_MAC_SPEED_1G 2
#define FFE_AB_MAC_SPEED_100M 1
#define FFE_AB_MAC_SPEED_10M 0

/* GEN_MODE_REG: General Purpose mode register (external interrupt mask) */
#define FR_BB_GEN_MODE 0x00000c90
#define FRF_BB_XFP_PHY_INT_POL_SEL_LBN 3
#define FRF_BB_XFP_PHY_INT_POL_SEL_WIDTH 1
#define FRF_BB_XG_PHY_INT_POL_SEL_LBN 2
#define FRF_BB_XG_PHY_INT_POL_SEL_WIDTH 1
#define FRF_BB_XFP_PHY_INT_MASK_LBN 1
#define FRF_BB_XFP_PHY_INT_MASK_WIDTH 1
#define FRF_BB_XG_PHY_INT_MASK_LBN 0
#define FRF_BB_XG_PHY_INT_MASK_WIDTH 1

/* MAC_MC_HASH_REG0: Multicast address hash table */
#define FR_AB_MAC_MC_HASH_REG0 0x00000ca0
#define FRF_AB_MAC_MCAST_HASH0_LBN 0
#define FRF_AB_MAC_MCAST_HASH0_WIDTH 128

/* MAC_MC_HASH_REG1: Multicast address hash table */
#define FR_AB_MAC_MC_HASH_REG1 0x00000cb0
#define FRF_AB_MAC_MCAST_HASH1_LBN 0
#define FRF_AB_MAC_MCAST_HASH1_WIDTH 128

/* GM_CFG1_REG: GMAC configuration register 1 */
#define FR_AB_GM_CFG1 0x00000e00
#define FRF_AB_GM_SW_RST_LBN 31
#define FRF_AB_GM_SW_RST_WIDTH 1
#define FRF_AB_GM_SIM_RST_LBN 30
#define FRF_AB_GM_SIM_RST_WIDTH 1
#define FRF_AB_GM_RST_RX_MAC_CTL_LBN 19
#define FRF_AB_GM_RST_RX_MAC_CTL_WIDTH 1
#define FRF_AB_GM_RST_TX_MAC_CTL_LBN 18
#define FRF_AB_GM_RST_TX_MAC_CTL_WIDTH 1
#define FRF_AB_GM_RST_RX_FUNC_LBN 17
#define FRF_AB_GM_RST_RX_FUNC_WIDTH 1
#define FRF_AB_GM_RST_TX_FUNC_LBN 16
#define FRF_AB_GM_RST_TX_FUNC_WIDTH 1
#define FRF_AB_GM_LOOP_LBN 8
#define FRF_AB_GM_LOOP_WIDTH 1
#define FRF_AB_GM_RX_FC_EN_LBN 5
#define FRF_AB_GM_RX_FC_EN_WIDTH 1
#define FRF_AB_GM_TX_FC_EN_LBN 4
#define FRF_AB_GM_TX_FC_EN_WIDTH 1
#define FRF_AB_GM_SYNC_RXEN_LBN 3
#define FRF_AB_GM_SYNC_RXEN_WIDTH 1
#define FRF_AB_GM_RX_EN_LBN 2
#define FRF_AB_GM_RX_EN_WIDTH 1
#define FRF_AB_GM_SYNC_TXEN_LBN 1
#define FRF_AB_GM_SYNC_TXEN_WIDTH 1
#define FRF_AB_GM_TX_EN_LBN 0
#define FRF_AB_GM_TX_EN_WIDTH 1

/* GM_CFG2_REG: GMAC configuration register 2 */
#define FR_AB_GM_CFG2 0x00000e10
#define FRF_AB_GM_PAMBL_LEN_LBN 12
#define FRF_AB_GM_PAMBL_LEN_WIDTH 4
#define FRF_AB_GM_IF_MODE_LBN 8
#define FRF_AB_GM_IF_MODE_WIDTH 2
#define FFE_AB_IF_MODE_BYTE_MODE 2
#define FFE_AB_IF_MODE_NIBBLE_MODE 1
#define FRF_AB_GM_HUGE_FRM_EN_LBN 5
#define FRF_AB_GM_HUGE_FRM_EN_WIDTH 1
#define FRF_AB_GM_LEN_CHK_LBN 4
#define FRF_AB_GM_LEN_CHK_WIDTH 1
#define FRF_AB_GM_PAD_CRC_EN_LBN 2
#define FRF_AB_GM_PAD_CRC_EN_WIDTH 1
#define FRF_AB_GM_CRC_EN_LBN 1
#define FRF_AB_GM_CRC_EN_WIDTH 1
#define FRF_AB_GM_FD_LBN 0
#define FRF_AB_GM_FD_WIDTH 1

/* GM_IPG_REG: GMAC IPG register */
#define FR_AB_GM_IPG 0x00000e20
#define FRF_AB_GM_NONB2B_IPG1_LBN 24
#define FRF_AB_GM_NONB2B_IPG1_WIDTH 7
#define FRF_AB_GM_NONB2B_IPG2_LBN 16
#define FRF_AB_GM_NONB2B_IPG2_WIDTH 7
#define FRF_AB_GM_MIN_IPG_ENF_LBN 8
#define FRF_AB_GM_MIN_IPG_ENF_WIDTH 8
#define FRF_AB_GM_B2B_IPG_LBN 0
#define FRF_AB_GM_B2B_IPG_WIDTH 7

/* GM_HD_REG: GMAC half duplex register */
#define FR_AB_GM_HD 0x00000e30
#define FRF_AB_GM_ALT_BOFF_VAL_LBN 20
#define FRF_AB_GM_ALT_BOFF_VAL_WIDTH 4
#define FRF_AB_GM_ALT_BOFF_EN_LBN 19
#define FRF_AB_GM_ALT_BOFF_EN_WIDTH 1
#define FRF_AB_GM_BP_NO_BOFF_LBN 18
#define FRF_AB_GM_BP_NO_BOFF_WIDTH 1
#define FRF_AB_GM_DIS_BOFF_LBN 17
#define FRF_AB_GM_DIS_BOFF_WIDTH 1
#define FRF_AB_GM_EXDEF_TX_EN_LBN 16
#define FRF_AB_GM_EXDEF_TX_EN_WIDTH 1
#define FRF_AB_GM_RTRY_LIMIT_LBN 12
#define FRF_AB_GM_RTRY_LIMIT_WIDTH 4
#define FRF_AB_GM_COL_WIN_LBN 0
#define FRF_AB_GM_COL_WIN_WIDTH 10

/* GM_MAX_FLEN_REG: GMAC maximum frame length register */
#define FR_AB_GM_MAX_FLEN 0x00000e40
#define FRF_AB_GM_MAX_FLEN_LBN 0
#define FRF_AB_GM_MAX_FLEN_WIDTH 16

/* GM_TEST_REG: GMAC test register */
#define FR_AB_GM_TEST 0x00000e70
#define FRF_AB_GM_MAX_BOFF_LBN 3
#define FRF_AB_GM_MAX_BOFF_WIDTH 1
#define FRF_AB_GM_REG_TX_FLOW_EN_LBN 2
#define FRF_AB_GM_REG_TX_FLOW_EN_WIDTH 1
#define FRF_AB_GM_TEST_PAUSE_LBN 1
#define FRF_AB_GM_TEST_PAUSE_WIDTH 1
#define FRF_AB_GM_SHORT_SLOT_LBN 0
#define FRF_AB_GM_SHORT_SLOT_WIDTH 1

/* GM_ADR1_REG: GMAC station address register 1 */
#define FR_AB_GM_ADR1 0x00000f00
#define FRF_AB_GM_ADR_B0_LBN 24
#define FRF_AB_GM_ADR_B0_WIDTH 8
#define FRF_AB_GM_ADR_B1_LBN 16
#define FRF_AB_GM_ADR_B1_WIDTH 8
#define FRF_AB_GM_ADR_B2_LBN 8
#define FRF_AB_GM_ADR_B2_WIDTH 8
#define FRF_AB_GM_ADR_B3_LBN 0
#define FRF_AB_GM_ADR_B3_WIDTH 8

/* GM_ADR2_REG: GMAC station address register 2 */
#define FR_AB_GM_ADR2 0x00000f10
#define FRF_AB_GM_ADR_B4_LBN 24
#define FRF_AB_GM_ADR_B4_WIDTH 8
#define FRF_AB_GM_ADR_B5_LBN 16
#define FRF_AB_GM_ADR_B5_WIDTH 8

/* GMF_CFG0_REG: GMAC FIFO configuration register 0 */
#define FR_AB_GMF_CFG0 0x00000f20
#define FRF_AB_GMF_FTFENRPLY_LBN 20
#define FRF_AB_GMF_FTFENRPLY_WIDTH 1
#define FRF_AB_GMF_STFENRPLY_LBN 19
#define FRF_AB_GMF_STFENRPLY_WIDTH 1
#define FRF_AB_GMF_FRFENRPLY_LBN 18
#define FRF_AB_GMF_FRFENRPLY_WIDTH 1
#define FRF_AB_GMF_SRFENRPLY_LBN 17
#define FRF_AB_GMF_SRFENRPLY_WIDTH 1
#define FRF_AB_GMF_WTMENRPLY_LBN 16
#define FRF_AB_GMF_WTMENRPLY_WIDTH 1
#define FRF_AB_GMF_FTFENREQ_LBN 12
#define FRF_AB_GMF_FTFENREQ_WIDTH 1
#define FRF_AB_GMF_STFENREQ_LBN 11
#define FRF_AB_GMF_STFENREQ_WIDTH 1
#define FRF_AB_GMF_FRFENREQ_LBN 10
#define FRF_AB_GMF_FRFENREQ_WIDTH 1
#define FRF_AB_GMF_SRFENREQ_LBN 9
#define FRF_AB_GMF_SRFENREQ_WIDTH 1
#define FRF_AB_GMF_WTMENREQ_LBN 8
#define FRF_AB_GMF_WTMENREQ_WIDTH 1
#define FRF_AB_GMF_HSTRSTFT_LBN 4
#define FRF_AB_GMF_HSTRSTFT_WIDTH 1
#define FRF_AB_GMF_HSTRSTST_LBN 3
#define FRF_AB_GMF_HSTRSTST_WIDTH 1
#define FRF_AB_GMF_HSTRSTFR_LBN 2
#define FRF_AB_GMF_HSTRSTFR_WIDTH 1
#define FRF_AB_GMF_HSTRSTSR_LBN 1
#define FRF_AB_GMF_HSTRSTSR_WIDTH 1
#define FRF_AB_GMF_HSTRSTWT_LBN 0
#define FRF_AB_GMF_HSTRSTWT_WIDTH 1

/* GMF_CFG1_REG: GMAC FIFO configuration register 1 */
#define FR_AB_GMF_CFG1 0x00000f30
#define FRF_AB_GMF_CFGFRTH_LBN 16
#define FRF_AB_GMF_CFGFRTH_WIDTH 5
#define FRF_AB_GMF_CFGXOFFRTX_LBN 0
#define FRF_AB_GMF_CFGXOFFRTX_WIDTH 16

/* GMF_CFG2_REG: GMAC FIFO configuration register 2 */
#define FR_AB_GMF_CFG2 0x00000f40
#define FRF_AB_GMF_CFGHWM_LBN 16
#define FRF_AB_GMF_CFGHWM_WIDTH 6
#define FRF_AB_GMF_CFGLWM_LBN 0
#define FRF_AB_GMF_CFGLWM_WIDTH 6

/* GMF_CFG3_REG: GMAC FIFO configuration register 3 */
#define FR_AB_GMF_CFG3 0x00000f50
#define FRF_AB_GMF_CFGHWMFT_LBN 16
#define FRF_AB_GMF_CFGHWMFT_WIDTH 6
#define FRF_AB_GMF_CFGFTTH_LBN 0
#define FRF_AB_GMF_CFGFTTH_WIDTH 6

/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
#define FR_AB_GMF_CFG4 0x00000f60
#define FRF_AB_GMF_HSTFLTRFRM_LBN 0
#define FRF_AB_GMF_HSTFLTRFRM_WIDTH 18

/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
#define FR_AB_GMF_CFG5 0x00000f70
#define FRF_AB_GMF_CFGHDPLX_LBN 22
#define FRF_AB_GMF_CFGHDPLX_WIDTH 1
#define FRF_AB_GMF_SRFULL_LBN 21
#define FRF_AB_GMF_SRFULL_WIDTH 1
#define FRF_AB_GMF_HSTSRFULLCLR_LBN 20
#define FRF_AB_GMF_HSTSRFULLCLR_WIDTH 1
#define FRF_AB_GMF_CFGBYTMODE_LBN 19
#define FRF_AB_GMF_CFGBYTMODE_WIDTH 1
#define FRF_AB_GMF_HSTDRPLT64_LBN 18
#define FRF_AB_GMF_HSTDRPLT64_WIDTH 1
#define FRF_AB_GMF_HSTFLTRFRMDC_LBN 0
#define FRF_AB_GMF_HSTFLTRFRMDC_WIDTH 18

/* TX_SRC_MAC_TBL: Transmit IP source address filter table */
#define FR_BB_TX_SRC_MAC_TBL 0x00001000
#define FR_BB_TX_SRC_MAC_TBL_STEP 16
#define FR_BB_TX_SRC_MAC_TBL_ROWS 16
#define FRF_BB_TX_SRC_MAC_ADR_1_LBN 64
#define FRF_BB_TX_SRC_MAC_ADR_1_WIDTH 48
#define FRF_BB_TX_SRC_MAC_ADR_0_LBN 0
#define FRF_BB_TX_SRC_MAC_ADR_0_WIDTH 48

/* TX_SRC_MAC_CTL_REG: Transmit MAC source address filter control */
#define FR_BB_TX_SRC_MAC_CTL 0x00001100
#define FRF_BB_TX_SRC_DROP_CTR_LBN 16
#define FRF_BB_TX_SRC_DROP_CTR_WIDTH 16
#define FRF_BB_TX_SRC_FLTR_EN_LBN 15
#define FRF_BB_TX_SRC_FLTR_EN_WIDTH 1
#define FRF_BB_TX_DROP_CTR_CLR_LBN 12
#define FRF_BB_TX_DROP_CTR_CLR_WIDTH 1
#define FRF_BB_TX_MAC_QID_SEL_LBN 0
#define FRF_BB_TX_MAC_QID_SEL_WIDTH 3

/* XM_ADR_LO_REG: XGMAC address register low */
#define FR_AB_XM_ADR_LO 0x00001200
#define FRF_AB_XM_ADR_LO_LBN 0
#define FRF_AB_XM_ADR_LO_WIDTH 32

/* XM_ADR_HI_REG: XGMAC address register high */
#define FR_AB_XM_ADR_HI 0x00001210
#define FRF_AB_XM_ADR_HI_LBN 0
#define FRF_AB_XM_ADR_HI_WIDTH 16

/* XM_GLB_CFG_REG: XGMAC global configuration */
#define FR_AB_XM_GLB_CFG 0x00001220
#define FRF_AB_XM_RMTFLT_GEN_LBN 17
#define FRF_AB_XM_RMTFLT_GEN_WIDTH 1
#define FRF_AB_XM_DEBUG_MODE_LBN 16
#define FRF_AB_XM_DEBUG_MODE_WIDTH 1
#define FRF_AB_XM_RX_STAT_EN_LBN 11
#define FRF_AB_XM_RX_STAT_EN_WIDTH 1
#define FRF_AB_XM_TX_STAT_EN_LBN 10
#define FRF_AB_XM_TX_STAT_EN_WIDTH 1
#define FRF_AB_XM_RX_JUMBO_MODE_LBN 6
#define FRF_AB_XM_RX_JUMBO_MODE_WIDTH 1
#define FRF_AB_XM_WAN_MODE_LBN 5
#define FRF_AB_XM_WAN_MODE_WIDTH 1
#define FRF_AB_XM_INTCLR_MODE_LBN 3
#define FRF_AB_XM_INTCLR_MODE_WIDTH 1
#define FRF_AB_XM_CORE_RST_LBN 0
#define FRF_AB_XM_CORE_RST_WIDTH 1

/* XM_TX_CFG_REG: XGMAC transmit configuration */
#define FR_AB_XM_TX_CFG 0x00001230
#define FRF_AB_XM_TX_PROG_LBN 24
#define FRF_AB_XM_TX_PROG_WIDTH 1
#define FRF_AB_XM_IPG_LBN 16
#define FRF_AB_XM_IPG_WIDTH 4
#define FRF_AB_XM_FCNTL_LBN 10
#define FRF_AB_XM_FCNTL_WIDTH 1
#define FRF_AB_XM_TXCRC_LBN 8
#define FRF_AB_XM_TXCRC_WIDTH 1
#define FRF_AB_XM_EDRC_LBN 6
#define FRF_AB_XM_EDRC_WIDTH 1
#define FRF_AB_XM_AUTO_PAD_LBN 5
#define FRF_AB_XM_AUTO_PAD_WIDTH 1
#define FRF_AB_XM_TX_PRMBL_LBN 2
#define FRF_AB_XM_TX_PRMBL_WIDTH 1
#define FRF_AB_XM_TXEN_LBN 1
#define FRF_AB_XM_TXEN_WIDTH 1
#define FRF_AB_XM_TX_RST_LBN 0
#define FRF_AB_XM_TX_RST_WIDTH 1

/* XM_RX_CFG_REG: XGMAC receive configuration */
#define FR_AB_XM_RX_CFG 0x00001240
#define FRF_AB_XM_PASS_LENERR_LBN 26
#define FRF_AB_XM_PASS_LENERR_WIDTH 1
#define FRF_AB_XM_PASS_CRC_ERR_LBN 25
#define FRF_AB_XM_PASS_CRC_ERR_WIDTH 1
#define FRF_AB_XM_PASS_PRMBLE_ERR_LBN 24
#define FRF_AB_XM_PASS_PRMBLE_ERR_WIDTH 1
#define FRF_AB_XM_REJ_BCAST_LBN 20
#define FRF_AB_XM_REJ_BCAST_WIDTH 1
#define FRF_AB_XM_ACPT_ALL_MCAST_LBN 11
#define FRF_AB_XM_ACPT_ALL_MCAST_WIDTH 1
#define FRF_AB_XM_ACPT_ALL_UCAST_LBN 9
#define FRF_AB_XM_ACPT_ALL_UCAST_WIDTH 1
#define FRF_AB_XM_AUTO_DEPAD_LBN 8
#define FRF_AB_XM_AUTO_DEPAD_WIDTH 1
#define FRF_AB_XM_RXCRC_LBN 3
#define FRF_AB_XM_RXCRC_WIDTH 1
#define FRF_AB_XM_RX_PRMBL_LBN 2
#define FRF_AB_XM_RX_PRMBL_WIDTH 1
#define FRF_AB_XM_RXEN_LBN 1
#define FRF_AB_XM_RXEN_WIDTH 1
#define FRF_AB_XM_RX_RST_LBN 0
#define FRF_AB_XM_RX_RST_WIDTH 1

/* XM_MGT_INT_MASK: documentation to be written for sum_XM_MGT_INT_MASK */
#define FR_AB_XM_MGT_INT_MASK 0x00001250
#define FRF_AB_XM_MSK_STA_INTR_LBN 16
#define FRF_AB_XM_MSK_STA_INTR_WIDTH 1
#define FRF_AB_XM_MSK_STAT_CNTR_HF_LBN 9
#define FRF_AB_XM_MSK_STAT_CNTR_HF_WIDTH 1
#define FRF_AB_XM_MSK_STAT_CNTR_OF_LBN 8
#define FRF_AB_XM_MSK_STAT_CNTR_OF_WIDTH 1
#define FRF_AB_XM_MSK_PRMBLE_ERR_LBN 2
#define FRF_AB_XM_MSK_PRMBLE_ERR_WIDTH 1
#define FRF_AB_XM_MSK_RMTFLT_LBN 1
#define FRF_AB_XM_MSK_RMTFLT_WIDTH 1
#define FRF_AB_XM_MSK_LCLFLT_LBN 0
#define FRF_AB_XM_MSK_LCLFLT_WIDTH 1

/* XM_FC_REG: XGMAC flow control register */
#define FR_AB_XM_FC 0x00001270
#define FRF_AB_XM_PAUSE_TIME_LBN 16
#define FRF_AB_XM_PAUSE_TIME_WIDTH 16
#define FRF_AB_XM_RX_MAC_STAT_LBN 11
#define FRF_AB_XM_RX_MAC_STAT_WIDTH 1
#define FRF_AB_XM_TX_MAC_STAT_LBN 10
#define FRF_AB_XM_TX_MAC_STAT_WIDTH 1
#define FRF_AB_XM_MCNTL_PASS_LBN 8
#define FRF_AB_XM_MCNTL_PASS_WIDTH 2
#define FRF_AB_XM_REJ_CNTL_UCAST_LBN 6
#define FRF_AB_XM_REJ_CNTL_UCAST_WIDTH 1
#define FRF_AB_XM_REJ_CNTL_MCAST_LBN 5
#define FRF_AB_XM_REJ_CNTL_MCAST_WIDTH 1
#define FRF_AB_XM_ZPAUSE_LBN 2
#define FRF_AB_XM_ZPAUSE_WIDTH 1
#define FRF_AB_XM_XMIT_PAUSE_LBN 1
#define FRF_AB_XM_XMIT_PAUSE_WIDTH 1
#define FRF_AB_XM_DIS_FCNTL_LBN 0
#define FRF_AB_XM_DIS_FCNTL_WIDTH 1

/* XM_PAUSE_TIME_REG: XGMAC pause time register */
#define FR_AB_XM_PAUSE_TIME 0x00001290
#define FRF_AB_XM_TX_PAUSE_CNT_LBN 16
#define FRF_AB_XM_TX_PAUSE_CNT_WIDTH 16
#define FRF_AB_XM_RX_PAUSE_CNT_LBN 0
#define FRF_AB_XM_RX_PAUSE_CNT_WIDTH 16

/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
#define FR_AB_XM_TX_PARAM 0x000012d0
#define FRF_AB_XM_TX_JUMBO_MODE_LBN 31
#define FRF_AB_XM_TX_JUMBO_MODE_WIDTH 1
#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_LBN 19
#define FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH 11
#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN 16
#define FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH 3
#define FRF_AB_XM_PAD_CHAR_LBN 0
#define FRF_AB_XM_PAD_CHAR_WIDTH 8

/* XM_RX_PARAM_REG: XGMAC receive parameter register */
#define FR_AB_XM_RX_PARAM 0x000012e0
#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_LBN 3
#define FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH 11
#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN 0
#define FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH 3

/* XM_MGT_INT_MSK_REG: XGMAC management interrupt mask register */
#define FR_AB_XM_MGT_INT_MSK 0x000012f0
#define FRF_AB_XM_STAT_CNTR_OF_LBN 9
#define FRF_AB_XM_STAT_CNTR_OF_WIDTH 1
#define FRF_AB_XM_STAT_CNTR_HF_LBN 8
#define FRF_AB_XM_STAT_CNTR_HF_WIDTH 1
#define FRF_AB_XM_PRMBLE_ERR_LBN 2
#define FRF_AB_XM_PRMBLE_ERR_WIDTH 1
#define FRF_AB_XM_RMTFLT_LBN 1
#define FRF_AB_XM_RMTFLT_WIDTH 1
#define FRF_AB_XM_LCLFLT_LBN 0
#define FRF_AB_XM_LCLFLT_WIDTH 1

/* XX_PWR_RST_REG: XGXS/XAUI powerdown/reset register */
#define FR_AB_XX_PWR_RST 0x00001300
#define FRF_AB_XX_PWRDND_SIG_LBN 31
#define FRF_AB_XX_PWRDND_SIG_WIDTH 1
#define FRF_AB_XX_PWRDNC_SIG_LBN 30
#define FRF_AB_XX_PWRDNC_SIG_WIDTH 1
#define FRF_AB_XX_PWRDNB_SIG_LBN 29
#define FRF_AB_XX_PWRDNB_SIG_WIDTH 1
#define FRF_AB_XX_PWRDNA_SIG_LBN 28
#define FRF_AB_XX_PWRDNA_SIG_WIDTH 1
#define FRF_AB_XX_SIM_MODE_LBN 27
#define FRF_AB_XX_SIM_MODE_WIDTH 1
#define FRF_AB_XX_RSTPLLCD_SIG_LBN 25
#define FRF_AB_XX_RSTPLLCD_SIG_WIDTH 1
#define FRF_AB_XX_RSTPLLAB_SIG_LBN 24
#define FRF_AB_XX_RSTPLLAB_SIG_WIDTH 1
#define FRF_AB_XX_RESETD_SIG_LBN 23
#define FRF_AB_XX_RESETD_SIG_WIDTH 1
#define FRF_AB_XX_RESETC_SIG_LBN 22
#define FRF_AB_XX_RESETC_SIG_WIDTH 1
#define FRF_AB_XX_RESETB_SIG_LBN 21
#define FRF_AB_XX_RESETB_SIG_WIDTH 1
#define FRF_AB_XX_RESETA_SIG_LBN 20
#define FRF_AB_XX_RESETA_SIG_WIDTH 1
#define FRF_AB_XX_RSTXGXSRX_SIG_LBN 18
#define FRF_AB_XX_RSTXGXSRX_SIG_WIDTH 1
#define FRF_AB_XX_RSTXGXSTX_SIG_LBN 17
#define FRF_AB_XX_RSTXGXSTX_SIG_WIDTH 1
#define FRF_AB_XX_SD_RST_ACT_LBN 16
#define FRF_AB_XX_SD_RST_ACT_WIDTH 1
#define FRF_AB_XX_PWRDND_EN_LBN 15
#define FRF_AB_XX_PWRDND_EN_WIDTH 1
#define FRF_AB_XX_PWRDNC_EN_LBN 14
#define FRF_AB_XX_PWRDNC_EN_WIDTH 1
#define FRF_AB_XX_PWRDNB_EN_LBN 13
#define FRF_AB_XX_PWRDNB_EN_WIDTH 1
#define FRF_AB_XX_PWRDNA_EN_LBN 12
#define FRF_AB_XX_PWRDNA_EN_WIDTH 1
#define FRF_AB_XX_RSTPLLCD_EN_LBN 9
#define FRF_AB_XX_RSTPLLCD_EN_WIDTH 1
#define FRF_AB_XX_RSTPLLAB_EN_LBN 8
#define FRF_AB_XX_RSTPLLAB_EN_WIDTH 1
#define FRF_AB_XX_RESETD_EN_LBN 7
#define FRF_AB_XX_RESETD_EN_WIDTH 1
#define FRF_AB_XX_RESETC_EN_LBN 6
#define FRF_AB_XX_RESETC_EN_WIDTH 1
#define FRF_AB_XX_RESETB_EN_LBN 5
#define FRF_AB_XX_RESETB_EN_WIDTH 1
#define FRF_AB_XX_RESETA_EN_LBN 4
#define FRF_AB_XX_RESETA_EN_WIDTH 1
#define FRF_AB_XX_RSTXGXSRX_EN_LBN 2
#define FRF_AB_XX_RSTXGXSRX_EN_WIDTH 1
#define FRF_AB_XX_RSTXGXSTX_EN_LBN 1
#define FRF_AB_XX_RSTXGXSTX_EN_WIDTH 1
#define FRF_AB_XX_RST_XX_EN_LBN 0
#define FRF_AB_XX_RST_XX_EN_WIDTH 1

/* XX_SD_CTL_REG: XGXS/XAUI powerdown/reset control register */
#define FR_AB_XX_SD_CTL 0x00001310
#define FRF_AB_XX_TERMADJ1_LBN 17
#define FRF_AB_XX_TERMADJ1_WIDTH 1
#define FRF_AB_XX_TERMADJ0_LBN 16
#define FRF_AB_XX_TERMADJ0_WIDTH 1
#define FRF_AB_XX_HIDRVD_LBN 15
#define FRF_AB_XX_HIDRVD_WIDTH 1
#define FRF_AB_XX_LODRVD_LBN 14
#define FRF_AB_XX_LODRVD_WIDTH 1
#define FRF_AB_XX_HIDRVC_LBN 13
#define FRF_AB_XX_HIDRVC_WIDTH 1
#define FRF_AB_XX_LODRVC_LBN 12
#define FRF_AB_XX_LODRVC_WIDTH 1
#define FRF_AB_XX_HIDRVB_LBN 11
#define FRF_AB_XX_HIDRVB_WIDTH 1
#define FRF_AB_XX_LODRVB_LBN 10
#define FRF_AB_XX_LODRVB_WIDTH 1
#define FRF_AB_XX_HIDRVA_LBN 9
#define FRF_AB_XX_HIDRVA_WIDTH 1
#define FRF_AB_XX_LODRVA_LBN 8
#define FRF_AB_XX_LODRVA_WIDTH 1
#define FRF_AB_XX_LPBKD_LBN 3
#define FRF_AB_XX_LPBKD_WIDTH 1
#define FRF_AB_XX_LPBKC_LBN 2
#define FRF_AB_XX_LPBKC_WIDTH 1
#define FRF_AB_XX_LPBKB_LBN 1
#define FRF_AB_XX_LPBKB_WIDTH 1
#define FRF_AB_XX_LPBKA_LBN 0
#define FRF_AB_XX_LPBKA_WIDTH 1

2054/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
2055#define FR_AB_XX_TXDRV_CTL 0x00001320
2056#define FRF_AB_XX_DEQD_LBN 28
2057#define FRF_AB_XX_DEQD_WIDTH 4
2058#define FRF_AB_XX_DEQC_LBN 24
2059#define FRF_AB_XX_DEQC_WIDTH 4
2060#define FRF_AB_XX_DEQB_LBN 20
2061#define FRF_AB_XX_DEQB_WIDTH 4
2062#define FRF_AB_XX_DEQA_LBN 16
2063#define FRF_AB_XX_DEQA_WIDTH 4
2064#define FRF_AB_XX_DTXD_LBN 12
2065#define FRF_AB_XX_DTXD_WIDTH 4
2066#define FRF_AB_XX_DTXC_LBN 8
2067#define FRF_AB_XX_DTXC_WIDTH 4
2068#define FRF_AB_XX_DTXB_LBN 4
2069#define FRF_AB_XX_DTXB_WIDTH 4
2070#define FRF_AB_XX_DTXA_LBN 0
2071#define FRF_AB_XX_DTXA_WIDTH 4
2072
/* XX_PRBS_CTL_REG: XGXS/XAUI per-lane PRBS generator/checker control (per-channel RX/TX pattern select, invert and check-enable fields) */
2074#define FR_AB_XX_PRBS_CTL 0x00001330
2075#define FRF_AB_XX_CH3_RX_PRBS_SEL_LBN 30
2076#define FRF_AB_XX_CH3_RX_PRBS_SEL_WIDTH 2
2077#define FRF_AB_XX_CH3_RX_PRBS_INV_LBN 29
2078#define FRF_AB_XX_CH3_RX_PRBS_INV_WIDTH 1
2079#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_LBN 28
2080#define FRF_AB_XX_CH3_RX_PRBS_CHKEN_WIDTH 1
2081#define FRF_AB_XX_CH2_RX_PRBS_SEL_LBN 26
2082#define FRF_AB_XX_CH2_RX_PRBS_SEL_WIDTH 2
2083#define FRF_AB_XX_CH2_RX_PRBS_INV_LBN 25
2084#define FRF_AB_XX_CH2_RX_PRBS_INV_WIDTH 1
2085#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_LBN 24
2086#define FRF_AB_XX_CH2_RX_PRBS_CHKEN_WIDTH 1
2087#define FRF_AB_XX_CH1_RX_PRBS_SEL_LBN 22
2088#define FRF_AB_XX_CH1_RX_PRBS_SEL_WIDTH 2
2089#define FRF_AB_XX_CH1_RX_PRBS_INV_LBN 21
2090#define FRF_AB_XX_CH1_RX_PRBS_INV_WIDTH 1
2091#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_LBN 20
2092#define FRF_AB_XX_CH1_RX_PRBS_CHKEN_WIDTH 1
2093#define FRF_AB_XX_CH0_RX_PRBS_SEL_LBN 18
2094#define FRF_AB_XX_CH0_RX_PRBS_SEL_WIDTH 2
2095#define FRF_AB_XX_CH0_RX_PRBS_INV_LBN 17
2096#define FRF_AB_XX_CH0_RX_PRBS_INV_WIDTH 1
2097#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_LBN 16
2098#define FRF_AB_XX_CH0_RX_PRBS_CHKEN_WIDTH 1
2099#define FRF_AB_XX_CH3_TX_PRBS_SEL_LBN 14
2100#define FRF_AB_XX_CH3_TX_PRBS_SEL_WIDTH 2
2101#define FRF_AB_XX_CH3_TX_PRBS_INV_LBN 13
2102#define FRF_AB_XX_CH3_TX_PRBS_INV_WIDTH 1
2103#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_LBN 12
2104#define FRF_AB_XX_CH3_TX_PRBS_CHKEN_WIDTH 1
2105#define FRF_AB_XX_CH2_TX_PRBS_SEL_LBN 10
2106#define FRF_AB_XX_CH2_TX_PRBS_SEL_WIDTH 2
2107#define FRF_AB_XX_CH2_TX_PRBS_INV_LBN 9
2108#define FRF_AB_XX_CH2_TX_PRBS_INV_WIDTH 1
2109#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_LBN 8
2110#define FRF_AB_XX_CH2_TX_PRBS_CHKEN_WIDTH 1
2111#define FRF_AB_XX_CH1_TX_PRBS_SEL_LBN 6
2112#define FRF_AB_XX_CH1_TX_PRBS_SEL_WIDTH 2
2113#define FRF_AB_XX_CH1_TX_PRBS_INV_LBN 5
2114#define FRF_AB_XX_CH1_TX_PRBS_INV_WIDTH 1
2115#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_LBN 4
2116#define FRF_AB_XX_CH1_TX_PRBS_CHKEN_WIDTH 1
2117#define FRF_AB_XX_CH0_TX_PRBS_SEL_LBN 2
2118#define FRF_AB_XX_CH0_TX_PRBS_SEL_WIDTH 2
2119#define FRF_AB_XX_CH0_TX_PRBS_INV_LBN 1
2120#define FRF_AB_XX_CH0_TX_PRBS_INV_WIDTH 1
2121#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_LBN 0
2122#define FRF_AB_XX_CH0_TX_PRBS_CHKEN_WIDTH 1
2123
/* XX_PRBS_CHK_REG: XGXS/XAUI per-lane PRBS checker status (per-channel degrade-detect, LFSR lock, free-run and error-check fields) */
2125#define FR_AB_XX_PRBS_CHK 0x00001340
2126#define FRF_AB_XX_REV_LB_EN_LBN 16
2127#define FRF_AB_XX_REV_LB_EN_WIDTH 1
2128#define FRF_AB_XX_CH3_DEG_DET_LBN 15
2129#define FRF_AB_XX_CH3_DEG_DET_WIDTH 1
2130#define FRF_AB_XX_CH3_LFSR_LOCK_IND_LBN 14
2131#define FRF_AB_XX_CH3_LFSR_LOCK_IND_WIDTH 1
2132#define FRF_AB_XX_CH3_PRBS_FRUN_LBN 13
2133#define FRF_AB_XX_CH3_PRBS_FRUN_WIDTH 1
2134#define FRF_AB_XX_CH3_ERR_CHK_LBN 12
2135#define FRF_AB_XX_CH3_ERR_CHK_WIDTH 1
2136#define FRF_AB_XX_CH2_DEG_DET_LBN 11
2137#define FRF_AB_XX_CH2_DEG_DET_WIDTH 1
2138#define FRF_AB_XX_CH2_LFSR_LOCK_IND_LBN 10
2139#define FRF_AB_XX_CH2_LFSR_LOCK_IND_WIDTH 1
2140#define FRF_AB_XX_CH2_PRBS_FRUN_LBN 9
2141#define FRF_AB_XX_CH2_PRBS_FRUN_WIDTH 1
2142#define FRF_AB_XX_CH2_ERR_CHK_LBN 8
2143#define FRF_AB_XX_CH2_ERR_CHK_WIDTH 1
2144#define FRF_AB_XX_CH1_DEG_DET_LBN 7
2145#define FRF_AB_XX_CH1_DEG_DET_WIDTH 1
2146#define FRF_AB_XX_CH1_LFSR_LOCK_IND_LBN 6
2147#define FRF_AB_XX_CH1_LFSR_LOCK_IND_WIDTH 1
2148#define FRF_AB_XX_CH1_PRBS_FRUN_LBN 5
2149#define FRF_AB_XX_CH1_PRBS_FRUN_WIDTH 1
2150#define FRF_AB_XX_CH1_ERR_CHK_LBN 4
2151#define FRF_AB_XX_CH1_ERR_CHK_WIDTH 1
2152#define FRF_AB_XX_CH0_DEG_DET_LBN 3
2153#define FRF_AB_XX_CH0_DEG_DET_WIDTH 1
2154#define FRF_AB_XX_CH0_LFSR_LOCK_IND_LBN 2
2155#define FRF_AB_XX_CH0_LFSR_LOCK_IND_WIDTH 1
2156#define FRF_AB_XX_CH0_PRBS_FRUN_LBN 1
2157#define FRF_AB_XX_CH0_PRBS_FRUN_WIDTH 1
2158#define FRF_AB_XX_CH0_ERR_CHK_LBN 0
2159#define FRF_AB_XX_CH0_ERR_CHK_WIDTH 1
2160
/* XX_PRBS_ERR_REG: XGXS/XAUI per-lane PRBS error counters (one 8-bit count per channel) */
2162#define FR_AB_XX_PRBS_ERR 0x00001350
2163#define FRF_AB_XX_CH3_PRBS_ERR_CNT_LBN 24
2164#define FRF_AB_XX_CH3_PRBS_ERR_CNT_WIDTH 8
2165#define FRF_AB_XX_CH2_PRBS_ERR_CNT_LBN 16
2166#define FRF_AB_XX_CH2_PRBS_ERR_CNT_WIDTH 8
2167#define FRF_AB_XX_CH1_PRBS_ERR_CNT_LBN 8
2168#define FRF_AB_XX_CH1_PRBS_ERR_CNT_WIDTH 8
2169#define FRF_AB_XX_CH0_PRBS_ERR_CNT_LBN 0
2170#define FRF_AB_XX_CH0_PRBS_ERR_CNT_WIDTH 8
2171
2172/* XX_CORE_STAT_REG: XAUI XGXS core status register */
2173#define FR_AB_XX_CORE_STAT 0x00001360
2174#define FRF_AB_XX_FORCE_SIG3_LBN 31
2175#define FRF_AB_XX_FORCE_SIG3_WIDTH 1
2176#define FRF_AB_XX_FORCE_SIG3_VAL_LBN 30
2177#define FRF_AB_XX_FORCE_SIG3_VAL_WIDTH 1
2178#define FRF_AB_XX_FORCE_SIG2_LBN 29
2179#define FRF_AB_XX_FORCE_SIG2_WIDTH 1
2180#define FRF_AB_XX_FORCE_SIG2_VAL_LBN 28
2181#define FRF_AB_XX_FORCE_SIG2_VAL_WIDTH 1
2182#define FRF_AB_XX_FORCE_SIG1_LBN 27
2183#define FRF_AB_XX_FORCE_SIG1_WIDTH 1
2184#define FRF_AB_XX_FORCE_SIG1_VAL_LBN 26
2185#define FRF_AB_XX_FORCE_SIG1_VAL_WIDTH 1
2186#define FRF_AB_XX_FORCE_SIG0_LBN 25
2187#define FRF_AB_XX_FORCE_SIG0_WIDTH 1
2188#define FRF_AB_XX_FORCE_SIG0_VAL_LBN 24
2189#define FRF_AB_XX_FORCE_SIG0_VAL_WIDTH 1
2190#define FRF_AB_XX_XGXS_LB_EN_LBN 23
2191#define FRF_AB_XX_XGXS_LB_EN_WIDTH 1
2192#define FRF_AB_XX_XGMII_LB_EN_LBN 22
2193#define FRF_AB_XX_XGMII_LB_EN_WIDTH 1
2194#define FRF_AB_XX_MATCH_FAULT_LBN 21
2195#define FRF_AB_XX_MATCH_FAULT_WIDTH 1
2196#define FRF_AB_XX_ALIGN_DONE_LBN 20
2197#define FRF_AB_XX_ALIGN_DONE_WIDTH 1
2198#define FRF_AB_XX_SYNC_STAT3_LBN 19
2199#define FRF_AB_XX_SYNC_STAT3_WIDTH 1
2200#define FRF_AB_XX_SYNC_STAT2_LBN 18
2201#define FRF_AB_XX_SYNC_STAT2_WIDTH 1
2202#define FRF_AB_XX_SYNC_STAT1_LBN 17
2203#define FRF_AB_XX_SYNC_STAT1_WIDTH 1
2204#define FRF_AB_XX_SYNC_STAT0_LBN 16
2205#define FRF_AB_XX_SYNC_STAT0_WIDTH 1
2206#define FRF_AB_XX_COMMA_DET_CH3_LBN 15
2207#define FRF_AB_XX_COMMA_DET_CH3_WIDTH 1
2208#define FRF_AB_XX_COMMA_DET_CH2_LBN 14
2209#define FRF_AB_XX_COMMA_DET_CH2_WIDTH 1
2210#define FRF_AB_XX_COMMA_DET_CH1_LBN 13
2211#define FRF_AB_XX_COMMA_DET_CH1_WIDTH 1
2212#define FRF_AB_XX_COMMA_DET_CH0_LBN 12
2213#define FRF_AB_XX_COMMA_DET_CH0_WIDTH 1
2214#define FRF_AB_XX_CGRP_ALIGN_CH3_LBN 11
2215#define FRF_AB_XX_CGRP_ALIGN_CH3_WIDTH 1
2216#define FRF_AB_XX_CGRP_ALIGN_CH2_LBN 10
2217#define FRF_AB_XX_CGRP_ALIGN_CH2_WIDTH 1
2218#define FRF_AB_XX_CGRP_ALIGN_CH1_LBN 9
2219#define FRF_AB_XX_CGRP_ALIGN_CH1_WIDTH 1
2220#define FRF_AB_XX_CGRP_ALIGN_CH0_LBN 8
2221#define FRF_AB_XX_CGRP_ALIGN_CH0_WIDTH 1
2222#define FRF_AB_XX_CHAR_ERR_CH3_LBN 7
2223#define FRF_AB_XX_CHAR_ERR_CH3_WIDTH 1
2224#define FRF_AB_XX_CHAR_ERR_CH2_LBN 6
2225#define FRF_AB_XX_CHAR_ERR_CH2_WIDTH 1
2226#define FRF_AB_XX_CHAR_ERR_CH1_LBN 5
2227#define FRF_AB_XX_CHAR_ERR_CH1_WIDTH 1
2228#define FRF_AB_XX_CHAR_ERR_CH0_LBN 4
2229#define FRF_AB_XX_CHAR_ERR_CH0_WIDTH 1
2230#define FRF_AB_XX_DISPERR_CH3_LBN 3
2231#define FRF_AB_XX_DISPERR_CH3_WIDTH 1
2232#define FRF_AB_XX_DISPERR_CH2_LBN 2
2233#define FRF_AB_XX_DISPERR_CH2_WIDTH 1
2234#define FRF_AB_XX_DISPERR_CH1_LBN 1
2235#define FRF_AB_XX_DISPERR_CH1_WIDTH 1
2236#define FRF_AB_XX_DISPERR_CH0_LBN 0
2237#define FRF_AB_XX_DISPERR_CH0_WIDTH 1
2238
2239/* RX_DESC_PTR_TBL_KER: Receive descriptor pointer table */
2240#define FR_AA_RX_DESC_PTR_TBL_KER 0x00011800
2241#define FR_AA_RX_DESC_PTR_TBL_KER_STEP 16
2242#define FR_AA_RX_DESC_PTR_TBL_KER_ROWS 4
2243/* RX_DESC_PTR_TBL: Receive descriptor pointer table */
2244#define FR_BZ_RX_DESC_PTR_TBL 0x00f40000
2245#define FR_BZ_RX_DESC_PTR_TBL_STEP 16
2246#define FR_BB_RX_DESC_PTR_TBL_ROWS 4096
2247#define FR_CZ_RX_DESC_PTR_TBL_ROWS 1024
2248#define FRF_CZ_RX_HDR_SPLIT_LBN 90
2249#define FRF_CZ_RX_HDR_SPLIT_WIDTH 1
2250#define FRF_AA_RX_RESET_LBN 89
2251#define FRF_AA_RX_RESET_WIDTH 1
2252#define FRF_AZ_RX_ISCSI_DDIG_EN_LBN 88
2253#define FRF_AZ_RX_ISCSI_DDIG_EN_WIDTH 1
2254#define FRF_AZ_RX_ISCSI_HDIG_EN_LBN 87
2255#define FRF_AZ_RX_ISCSI_HDIG_EN_WIDTH 1
2256#define FRF_AZ_RX_DESC_PREF_ACT_LBN 86
2257#define FRF_AZ_RX_DESC_PREF_ACT_WIDTH 1
2258#define FRF_AZ_RX_DC_HW_RPTR_LBN 80
2259#define FRF_AZ_RX_DC_HW_RPTR_WIDTH 6
2260#define FRF_AZ_RX_DESCQ_HW_RPTR_LBN 68
2261#define FRF_AZ_RX_DESCQ_HW_RPTR_WIDTH 12
2262#define FRF_AZ_RX_DESCQ_SW_WPTR_LBN 56
2263#define FRF_AZ_RX_DESCQ_SW_WPTR_WIDTH 12
2264#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_LBN 36
2265#define FRF_AZ_RX_DESCQ_BUF_BASE_ID_WIDTH 20
2266#define FRF_AZ_RX_DESCQ_EVQ_ID_LBN 24
2267#define FRF_AZ_RX_DESCQ_EVQ_ID_WIDTH 12
2268#define FRF_AZ_RX_DESCQ_OWNER_ID_LBN 10
2269#define FRF_AZ_RX_DESCQ_OWNER_ID_WIDTH 14
2270#define FRF_AZ_RX_DESCQ_LABEL_LBN 5
2271#define FRF_AZ_RX_DESCQ_LABEL_WIDTH 5
2272#define FRF_AZ_RX_DESCQ_SIZE_LBN 3
2273#define FRF_AZ_RX_DESCQ_SIZE_WIDTH 2
2274#define FFE_AZ_RX_DESCQ_SIZE_4K 3
2275#define FFE_AZ_RX_DESCQ_SIZE_2K 2
2276#define FFE_AZ_RX_DESCQ_SIZE_1K 1
2277#define FFE_AZ_RX_DESCQ_SIZE_512 0
2278#define FRF_AZ_RX_DESCQ_TYPE_LBN 2
2279#define FRF_AZ_RX_DESCQ_TYPE_WIDTH 1
2280#define FRF_AZ_RX_DESCQ_JUMBO_LBN 1
2281#define FRF_AZ_RX_DESCQ_JUMBO_WIDTH 1
2282#define FRF_AZ_RX_DESCQ_EN_LBN 0
2283#define FRF_AZ_RX_DESCQ_EN_WIDTH 1
2284
2285/* TX_DESC_PTR_TBL_KER: Transmit descriptor pointer */
2286#define FR_AA_TX_DESC_PTR_TBL_KER 0x00011900
2287#define FR_AA_TX_DESC_PTR_TBL_KER_STEP 16
2288#define FR_AA_TX_DESC_PTR_TBL_KER_ROWS 8
2289/* TX_DESC_PTR_TBL: Transmit descriptor pointer */
2290#define FR_BZ_TX_DESC_PTR_TBL 0x00f50000
2291#define FR_BZ_TX_DESC_PTR_TBL_STEP 16
2292#define FR_BB_TX_DESC_PTR_TBL_ROWS 4096
2293#define FR_CZ_TX_DESC_PTR_TBL_ROWS 1024
2294#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_LBN 94
2295#define FRF_CZ_TX_DPT_Q_MASK_WIDTH_WIDTH 2
2296#define FRF_CZ_TX_DPT_ETH_FILT_EN_LBN 93
2297#define FRF_CZ_TX_DPT_ETH_FILT_EN_WIDTH 1
2298#define FRF_CZ_TX_DPT_IP_FILT_EN_LBN 92
2299#define FRF_CZ_TX_DPT_IP_FILT_EN_WIDTH 1
2300#define FRF_BZ_TX_NON_IP_DROP_DIS_LBN 91
2301#define FRF_BZ_TX_NON_IP_DROP_DIS_WIDTH 1
2302#define FRF_BZ_TX_IP_CHKSM_DIS_LBN 90
2303#define FRF_BZ_TX_IP_CHKSM_DIS_WIDTH 1
2304#define FRF_BZ_TX_TCP_CHKSM_DIS_LBN 89
2305#define FRF_BZ_TX_TCP_CHKSM_DIS_WIDTH 1
2306#define FRF_AZ_TX_DESCQ_EN_LBN 88
2307#define FRF_AZ_TX_DESCQ_EN_WIDTH 1
2308#define FRF_AZ_TX_ISCSI_DDIG_EN_LBN 87
2309#define FRF_AZ_TX_ISCSI_DDIG_EN_WIDTH 1
2310#define FRF_AZ_TX_ISCSI_HDIG_EN_LBN 86
2311#define FRF_AZ_TX_ISCSI_HDIG_EN_WIDTH 1
2312#define FRF_AZ_TX_DC_HW_RPTR_LBN 80
2313#define FRF_AZ_TX_DC_HW_RPTR_WIDTH 6
2314#define FRF_AZ_TX_DESCQ_HW_RPTR_LBN 68
2315#define FRF_AZ_TX_DESCQ_HW_RPTR_WIDTH 12
2316#define FRF_AZ_TX_DESCQ_SW_WPTR_LBN 56
2317#define FRF_AZ_TX_DESCQ_SW_WPTR_WIDTH 12
2318#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_LBN 36
2319#define FRF_AZ_TX_DESCQ_BUF_BASE_ID_WIDTH 20
2320#define FRF_AZ_TX_DESCQ_EVQ_ID_LBN 24
2321#define FRF_AZ_TX_DESCQ_EVQ_ID_WIDTH 12
2322#define FRF_AZ_TX_DESCQ_OWNER_ID_LBN 10
2323#define FRF_AZ_TX_DESCQ_OWNER_ID_WIDTH 14
2324#define FRF_AZ_TX_DESCQ_LABEL_LBN 5
2325#define FRF_AZ_TX_DESCQ_LABEL_WIDTH 5
2326#define FRF_AZ_TX_DESCQ_SIZE_LBN 3
2327#define FRF_AZ_TX_DESCQ_SIZE_WIDTH 2
2328#define FFE_AZ_TX_DESCQ_SIZE_4K 3
2329#define FFE_AZ_TX_DESCQ_SIZE_2K 2
2330#define FFE_AZ_TX_DESCQ_SIZE_1K 1
2331#define FFE_AZ_TX_DESCQ_SIZE_512 0
2332#define FRF_AZ_TX_DESCQ_TYPE_LBN 1
2333#define FRF_AZ_TX_DESCQ_TYPE_WIDTH 2
2334#define FRF_AZ_TX_DESCQ_FLUSH_LBN 0
2335#define FRF_AZ_TX_DESCQ_FLUSH_WIDTH 1
2336
2337/* EVQ_PTR_TBL_KER: Event queue pointer table */
2338#define FR_AA_EVQ_PTR_TBL_KER 0x00011a00
2339#define FR_AA_EVQ_PTR_TBL_KER_STEP 16
2340#define FR_AA_EVQ_PTR_TBL_KER_ROWS 4
2341/* EVQ_PTR_TBL: Event queue pointer table */
2342#define FR_BZ_EVQ_PTR_TBL 0x00f60000
2343#define FR_BZ_EVQ_PTR_TBL_STEP 16
2344#define FR_CZ_EVQ_PTR_TBL_ROWS 1024
2345#define FR_BB_EVQ_PTR_TBL_ROWS 4096
2346#define FRF_BZ_EVQ_RPTR_IGN_LBN 40
2347#define FRF_BZ_EVQ_RPTR_IGN_WIDTH 1
2348#define FRF_AB_EVQ_WKUP_OR_INT_EN_LBN 39
2349#define FRF_AB_EVQ_WKUP_OR_INT_EN_WIDTH 1
2350#define FRF_CZ_EVQ_DOS_PROTECT_EN_LBN 39
2351#define FRF_CZ_EVQ_DOS_PROTECT_EN_WIDTH 1
2352#define FRF_AZ_EVQ_NXT_WPTR_LBN 24
2353#define FRF_AZ_EVQ_NXT_WPTR_WIDTH 15
2354#define FRF_AZ_EVQ_EN_LBN 23
2355#define FRF_AZ_EVQ_EN_WIDTH 1
2356#define FRF_AZ_EVQ_SIZE_LBN 20
2357#define FRF_AZ_EVQ_SIZE_WIDTH 3
2358#define FFE_AZ_EVQ_SIZE_32K 6
2359#define FFE_AZ_EVQ_SIZE_16K 5
2360#define FFE_AZ_EVQ_SIZE_8K 4
2361#define FFE_AZ_EVQ_SIZE_4K 3
2362#define FFE_AZ_EVQ_SIZE_2K 2
2363#define FFE_AZ_EVQ_SIZE_1K 1
2364#define FFE_AZ_EVQ_SIZE_512 0
2365#define FRF_AZ_EVQ_BUF_BASE_ID_LBN 0
2366#define FRF_AZ_EVQ_BUF_BASE_ID_WIDTH 20
2367
2368/* BUF_HALF_TBL_KER: Buffer table in half buffer table mode direct access by driver */
2369#define FR_AA_BUF_HALF_TBL_KER 0x00018000
2370#define FR_AA_BUF_HALF_TBL_KER_STEP 8
2371#define FR_AA_BUF_HALF_TBL_KER_ROWS 4096
2372/* BUF_HALF_TBL: Buffer table in half buffer table mode direct access by driver */
2373#define FR_BZ_BUF_HALF_TBL 0x00800000
2374#define FR_BZ_BUF_HALF_TBL_STEP 8
2375#define FR_CZ_BUF_HALF_TBL_ROWS 147456
2376#define FR_BB_BUF_HALF_TBL_ROWS 524288
2377#define FRF_AZ_BUF_ADR_HBUF_ODD_LBN 44
2378#define FRF_AZ_BUF_ADR_HBUF_ODD_WIDTH 20
2379#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_LBN 32
2380#define FRF_AZ_BUF_OWNER_ID_HBUF_ODD_WIDTH 12
2381#define FRF_AZ_BUF_ADR_HBUF_EVEN_LBN 12
2382#define FRF_AZ_BUF_ADR_HBUF_EVEN_WIDTH 20
2383#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_LBN 0
2384#define FRF_AZ_BUF_OWNER_ID_HBUF_EVEN_WIDTH 12
2385
2386/* BUF_FULL_TBL_KER: Buffer table in full buffer table mode direct access by driver */
2387#define FR_AA_BUF_FULL_TBL_KER 0x00018000
2388#define FR_AA_BUF_FULL_TBL_KER_STEP 8
2389#define FR_AA_BUF_FULL_TBL_KER_ROWS 4096
2390/* BUF_FULL_TBL: Buffer table in full buffer table mode direct access by driver */
2391#define FR_BZ_BUF_FULL_TBL 0x00800000
2392#define FR_BZ_BUF_FULL_TBL_STEP 8
2393#define FR_CZ_BUF_FULL_TBL_ROWS 147456
2394#define FR_BB_BUF_FULL_TBL_ROWS 917504
2395#define FRF_AZ_BUF_FULL_UNUSED_LBN 51
2396#define FRF_AZ_BUF_FULL_UNUSED_WIDTH 13
2397#define FRF_AZ_IP_DAT_BUF_SIZE_LBN 50
2398#define FRF_AZ_IP_DAT_BUF_SIZE_WIDTH 1
2399#define FRF_AZ_BUF_ADR_REGION_LBN 48
2400#define FRF_AZ_BUF_ADR_REGION_WIDTH 2
2401#define FFE_AZ_BUF_ADR_REGN3 3
2402#define FFE_AZ_BUF_ADR_REGN2 2
2403#define FFE_AZ_BUF_ADR_REGN1 1
2404#define FFE_AZ_BUF_ADR_REGN0 0
2405#define FRF_AZ_BUF_ADR_FBUF_LBN 14
2406#define FRF_AZ_BUF_ADR_FBUF_WIDTH 34
2407#define FRF_AZ_BUF_OWNER_ID_FBUF_LBN 0
2408#define FRF_AZ_BUF_OWNER_ID_FBUF_WIDTH 14
2409
2410/* RX_FILTER_TBL0: TCP/IPv4 Receive filter table */
2411#define FR_BZ_RX_FILTER_TBL0 0x00f00000
2412#define FR_BZ_RX_FILTER_TBL0_STEP 32
2413#define FR_BZ_RX_FILTER_TBL0_ROWS 8192
2414/* RX_FILTER_TBL1: TCP/IPv4 Receive filter table */
2415#define FR_BB_RX_FILTER_TBL1 0x00f00010
2416#define FR_BB_RX_FILTER_TBL1_STEP 32
2417#define FR_BB_RX_FILTER_TBL1_ROWS 8192
2418#define FRF_BZ_RSS_EN_LBN 110
2419#define FRF_BZ_RSS_EN_WIDTH 1
2420#define FRF_BZ_SCATTER_EN_LBN 109
2421#define FRF_BZ_SCATTER_EN_WIDTH 1
2422#define FRF_BZ_TCP_UDP_LBN 108
2423#define FRF_BZ_TCP_UDP_WIDTH 1
2424#define FRF_BZ_RXQ_ID_LBN 96
2425#define FRF_BZ_RXQ_ID_WIDTH 12
2426#define FRF_BZ_DEST_IP_LBN 64
2427#define FRF_BZ_DEST_IP_WIDTH 32
2428#define FRF_BZ_DEST_PORT_TCP_LBN 48
2429#define FRF_BZ_DEST_PORT_TCP_WIDTH 16
2430#define FRF_BZ_SRC_IP_LBN 16
2431#define FRF_BZ_SRC_IP_WIDTH 32
2432#define FRF_BZ_SRC_TCP_DEST_UDP_LBN 0
2433#define FRF_BZ_SRC_TCP_DEST_UDP_WIDTH 16
2434
2435/* RX_MAC_FILTER_TBL0: Receive Ethernet filter table */
2436#define FR_CZ_RX_MAC_FILTER_TBL0 0x00f00010
2437#define FR_CZ_RX_MAC_FILTER_TBL0_STEP 32
2438#define FR_CZ_RX_MAC_FILTER_TBL0_ROWS 512
2439#define FRF_CZ_RMFT_RSS_EN_LBN 75
2440#define FRF_CZ_RMFT_RSS_EN_WIDTH 1
2441#define FRF_CZ_RMFT_SCATTER_EN_LBN 74
2442#define FRF_CZ_RMFT_SCATTER_EN_WIDTH 1
2443#define FRF_CZ_RMFT_IP_OVERRIDE_LBN 73
2444#define FRF_CZ_RMFT_IP_OVERRIDE_WIDTH 1
2445#define FRF_CZ_RMFT_RXQ_ID_LBN 61
2446#define FRF_CZ_RMFT_RXQ_ID_WIDTH 12
2447#define FRF_CZ_RMFT_WILDCARD_MATCH_LBN 60
2448#define FRF_CZ_RMFT_WILDCARD_MATCH_WIDTH 1
2449#define FRF_CZ_RMFT_DEST_MAC_LBN 16
2450#define FRF_CZ_RMFT_DEST_MAC_WIDTH 44
2451#define FRF_CZ_RMFT_VLAN_ID_LBN 0
2452#define FRF_CZ_RMFT_VLAN_ID_WIDTH 12
2453
/* TIMER_TBL: Timer table (one row per event queue; B-series and C-series
 * lay out the mode/value fields differently, hence the parallel FRF_BB_/
 * FRF_CZ_ field definitions below). */
#define FR_BZ_TIMER_TBL 0x00f70000
#define FR_BZ_TIMER_TBL_STEP 16
#define FR_CZ_TIMER_TBL_ROWS 1024
#define FR_BB_TIMER_TBL_ROWS 4096
#define FRF_CZ_TIMER_Q_EN_LBN 33
#define FRF_CZ_TIMER_Q_EN_WIDTH 1
#define FRF_CZ_INT_ARMD_LBN 32
#define FRF_CZ_INT_ARMD_WIDTH 1
#define FRF_CZ_INT_PEND_LBN 31
#define FRF_CZ_INT_PEND_WIDTH 1
#define FRF_CZ_HOST_NOTIFY_MODE_LBN 30
#define FRF_CZ_HOST_NOTIFY_MODE_WIDTH 1
#define FRF_CZ_RELOAD_TIMER_VAL_LBN 16
#define FRF_CZ_RELOAD_TIMER_VAL_WIDTH 14
#define FRF_CZ_TIMER_MODE_LBN 14
#define FRF_CZ_TIMER_MODE_WIDTH 2
#define FFE_CZ_TIMER_MODE_INT_HLDOFF 3
#define FFE_CZ_TIMER_MODE_TRIG_START 2
#define FFE_CZ_TIMER_MODE_IMMED_START 1
#define FFE_CZ_TIMER_MODE_DIS 0
#define FRF_BB_TIMER_MODE_LBN 12
#define FRF_BB_TIMER_MODE_WIDTH 2
/* NOTE(review): INT_HLDOFF and TRIG_START both encode as 2 here, whereas
 * the C-series enum above uses distinct values (3 and 2). This may be a
 * transcription error for the B-series encoding — confirm against the
 * Falcon B0 datasheet before relying on TRIG_START on B-series parts. */
#define FFE_BB_TIMER_MODE_INT_HLDOFF 2
#define FFE_BB_TIMER_MODE_TRIG_START 2
#define FFE_BB_TIMER_MODE_IMMED_START 1
#define FFE_BB_TIMER_MODE_DIS 0
#define FRF_CZ_TIMER_VAL_LBN 0
#define FRF_CZ_TIMER_VAL_WIDTH 14
#define FRF_BB_TIMER_VAL_LBN 0
#define FRF_BB_TIMER_VAL_WIDTH 12
2485
2486/* TX_PACE_TBL: Transmit pacing table */
2487#define FR_BZ_TX_PACE_TBL 0x00f80000
2488#define FR_BZ_TX_PACE_TBL_STEP 16
2489#define FR_CZ_TX_PACE_TBL_ROWS 1024
2490#define FR_BB_TX_PACE_TBL_ROWS 4096
2491#define FRF_BZ_TX_PACE_LBN 0
2492#define FRF_BZ_TX_PACE_WIDTH 5
2493
2494/* RX_INDIRECTION_TBL: RX Indirection Table */
2495#define FR_BZ_RX_INDIRECTION_TBL 0x00fb0000
2496#define FR_BZ_RX_INDIRECTION_TBL_STEP 16
2497#define FR_BZ_RX_INDIRECTION_TBL_ROWS 128
2498#define FRF_BZ_IT_QUEUE_LBN 0
2499#define FRF_BZ_IT_QUEUE_WIDTH 6
2500
2501/* TX_FILTER_TBL0: TCP/IPv4 Transmit filter table */
2502#define FR_CZ_TX_FILTER_TBL0 0x00fc0000
2503#define FR_CZ_TX_FILTER_TBL0_STEP 16
2504#define FR_CZ_TX_FILTER_TBL0_ROWS 8192
2505#define FRF_CZ_TIFT_TCP_UDP_LBN 108
2506#define FRF_CZ_TIFT_TCP_UDP_WIDTH 1
2507#define FRF_CZ_TIFT_TXQ_ID_LBN 96
2508#define FRF_CZ_TIFT_TXQ_ID_WIDTH 12
2509#define FRF_CZ_TIFT_DEST_IP_LBN 64
2510#define FRF_CZ_TIFT_DEST_IP_WIDTH 32
2511#define FRF_CZ_TIFT_DEST_PORT_TCP_LBN 48
2512#define FRF_CZ_TIFT_DEST_PORT_TCP_WIDTH 16
2513#define FRF_CZ_TIFT_SRC_IP_LBN 16
2514#define FRF_CZ_TIFT_SRC_IP_WIDTH 32
2515#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_LBN 0
2516#define FRF_CZ_TIFT_SRC_TCP_DEST_UDP_WIDTH 16
2517
2518/* TX_MAC_FILTER_TBL0: Transmit Ethernet filter table */
2519#define FR_CZ_TX_MAC_FILTER_TBL0 0x00fe0000
2520#define FR_CZ_TX_MAC_FILTER_TBL0_STEP 16
2521#define FR_CZ_TX_MAC_FILTER_TBL0_ROWS 512
2522#define FRF_CZ_TMFT_TXQ_ID_LBN 61
2523#define FRF_CZ_TMFT_TXQ_ID_WIDTH 12
2524#define FRF_CZ_TMFT_WILDCARD_MATCH_LBN 60
2525#define FRF_CZ_TMFT_WILDCARD_MATCH_WIDTH 1
2526#define FRF_CZ_TMFT_SRC_MAC_LBN 16
2527#define FRF_CZ_TMFT_SRC_MAC_WIDTH 44
2528#define FRF_CZ_TMFT_VLAN_ID_LBN 0
2529#define FRF_CZ_TMFT_VLAN_ID_WIDTH 12
2530
2531/* MC_TREG_SMEM: MC Shared Memory */
2532#define FR_CZ_MC_TREG_SMEM 0x00ff0000
2533#define FR_CZ_MC_TREG_SMEM_STEP 4
2534#define FR_CZ_MC_TREG_SMEM_ROWS 512
2535#define FRF_CZ_MC_TREG_SMEM_ROW_LBN 0
2536#define FRF_CZ_MC_TREG_SMEM_ROW_WIDTH 32
2537
2538/* MSIX_VECTOR_TABLE: MSIX Vector Table */
2539#define FR_BB_MSIX_VECTOR_TABLE 0x00ff0000
2540#define FR_BZ_MSIX_VECTOR_TABLE_STEP 16
2541#define FR_BB_MSIX_VECTOR_TABLE_ROWS 64
2542/* MSIX_VECTOR_TABLE: MSIX Vector Table */
2543#define FR_CZ_MSIX_VECTOR_TABLE 0x00000000
2544/* FR_BZ_MSIX_VECTOR_TABLE_STEP 16 */
2545#define FR_CZ_MSIX_VECTOR_TABLE_ROWS 1024
2546#define FRF_BZ_MSIX_VECTOR_RESERVED_LBN 97
2547#define FRF_BZ_MSIX_VECTOR_RESERVED_WIDTH 31
2548#define FRF_BZ_MSIX_VECTOR_MASK_LBN 96
2549#define FRF_BZ_MSIX_VECTOR_MASK_WIDTH 1
2550#define FRF_BZ_MSIX_MESSAGE_DATA_LBN 64
2551#define FRF_BZ_MSIX_MESSAGE_DATA_WIDTH 32
2552#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_LBN 32
2553#define FRF_BZ_MSIX_MESSAGE_ADDRESS_HI_WIDTH 32
2554#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_LBN 0
2555#define FRF_BZ_MSIX_MESSAGE_ADDRESS_LO_WIDTH 32
2556
2557/* MSIX_PBA_TABLE: MSIX Pending Bit Array */
2558#define FR_BB_MSIX_PBA_TABLE 0x00ff2000
2559#define FR_BZ_MSIX_PBA_TABLE_STEP 4
2560#define FR_BB_MSIX_PBA_TABLE_ROWS 2
2561/* MSIX_PBA_TABLE: MSIX Pending Bit Array */
2562#define FR_CZ_MSIX_PBA_TABLE 0x00008000
2563/* FR_BZ_MSIX_PBA_TABLE_STEP 4 */
2564#define FR_CZ_MSIX_PBA_TABLE_ROWS 32
2565#define FRF_BZ_MSIX_PBA_PEND_DWORD_LBN 0
2566#define FRF_BZ_MSIX_PBA_PEND_DWORD_WIDTH 32
2567
2568/* SRM_DBG_REG: SRAM debug access */
2569#define FR_BZ_SRM_DBG 0x03000000
2570#define FR_BZ_SRM_DBG_STEP 8
2571#define FR_CZ_SRM_DBG_ROWS 262144
2572#define FR_BB_SRM_DBG_ROWS 2097152
2573#define FRF_BZ_SRM_DBG_LBN 0
2574#define FRF_BZ_SRM_DBG_WIDTH 64
2575
2576/* TB_MSIX_PBA_TABLE: MSIX Pending Bit Array */
2577#define FR_CZ_TB_MSIX_PBA_TABLE 0x00008000
2578#define FR_CZ_TB_MSIX_PBA_TABLE_STEP 4
2579#define FR_CZ_TB_MSIX_PBA_TABLE_ROWS 1024
2580#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_LBN 0
2581#define FRF_CZ_TB_MSIX_PBA_PEND_DWORD_WIDTH 32
2582
2583/* DRIVER_EV */
2584#define FSF_AZ_DRIVER_EV_SUBCODE_LBN 56
2585#define FSF_AZ_DRIVER_EV_SUBCODE_WIDTH 4
2586#define FSE_BZ_TX_DSC_ERROR_EV 15
2587#define FSE_BZ_RX_DSC_ERROR_EV 14
2588#define FSE_AA_RX_RECOVER_EV 11
2589#define FSE_AZ_TIMER_EV 10
2590#define FSE_AZ_TX_PKT_NON_TCP_UDP 9
2591#define FSE_AZ_WAKE_UP_EV 6
2592#define FSE_AZ_SRM_UPD_DONE_EV 5
2593#define FSE_AB_EVQ_NOT_EN_EV 3
2594#define FSE_AZ_EVQ_INIT_DONE_EV 2
2595#define FSE_AZ_RX_DESCQ_FLS_DONE_EV 1
2596#define FSE_AZ_TX_DESCQ_FLS_DONE_EV 0
2597#define FSF_AZ_DRIVER_EV_SUBDATA_LBN 0
2598#define FSF_AZ_DRIVER_EV_SUBDATA_WIDTH 14
2599
2600/* EVENT_ENTRY */
2601#define FSF_AZ_EV_CODE_LBN 60
2602#define FSF_AZ_EV_CODE_WIDTH 4
2603#define FSE_CZ_EV_CODE_MCDI_EV 12
2604#define FSE_CZ_EV_CODE_USER_EV 8
2605#define FSE_AZ_EV_CODE_DRV_GEN_EV 7
2606#define FSE_AZ_EV_CODE_GLOBAL_EV 6
2607#define FSE_AZ_EV_CODE_DRIVER_EV 5
2608#define FSE_AZ_EV_CODE_TX_EV 2
2609#define FSE_AZ_EV_CODE_RX_EV 0
2610#define FSF_AZ_EV_DATA_LBN 0
2611#define FSF_AZ_EV_DATA_WIDTH 60
2612
2613/* GLOBAL_EV */
2614#define FSF_BB_GLB_EV_RX_RECOVERY_LBN 12
2615#define FSF_BB_GLB_EV_RX_RECOVERY_WIDTH 1
2616#define FSF_AA_GLB_EV_RX_RECOVERY_LBN 11
2617#define FSF_AA_GLB_EV_RX_RECOVERY_WIDTH 1
2618#define FSF_BB_GLB_EV_XG_MGT_INTR_LBN 11
2619#define FSF_BB_GLB_EV_XG_MGT_INTR_WIDTH 1
2620#define FSF_AB_GLB_EV_XFP_PHY0_INTR_LBN 10
2621#define FSF_AB_GLB_EV_XFP_PHY0_INTR_WIDTH 1
2622#define FSF_AB_GLB_EV_XG_PHY0_INTR_LBN 9
2623#define FSF_AB_GLB_EV_XG_PHY0_INTR_WIDTH 1
2624#define FSF_AB_GLB_EV_G_PHY0_INTR_LBN 7
2625#define FSF_AB_GLB_EV_G_PHY0_INTR_WIDTH 1
2626
2627/* LEGACY_INT_VEC */
2628#define FSF_AZ_NET_IVEC_FATAL_INT_LBN 64
2629#define FSF_AZ_NET_IVEC_FATAL_INT_WIDTH 1
2630#define FSF_AZ_NET_IVEC_INT_Q_LBN 40
2631#define FSF_AZ_NET_IVEC_INT_Q_WIDTH 4
2632#define FSF_AZ_NET_IVEC_INT_FLAG_LBN 32
2633#define FSF_AZ_NET_IVEC_INT_FLAG_WIDTH 1
2634#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_LBN 1
2635#define FSF_AZ_NET_IVEC_EVQ_FIFO_HF_WIDTH 1
2636#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_LBN 0
2637#define FSF_AZ_NET_IVEC_EVQ_FIFO_AF_WIDTH 1
2638
2639/* MC_XGMAC_FLTR_RULE_DEF */
2640#define FSF_CZ_MC_XFRC_MODE_LBN 416
2641#define FSF_CZ_MC_XFRC_MODE_WIDTH 1
2642#define FSE_CZ_MC_XFRC_MODE_LAYERED 1
2643#define FSE_CZ_MC_XFRC_MODE_SIMPLE 0
2644#define FSF_CZ_MC_XFRC_HASH_LBN 384
2645#define FSF_CZ_MC_XFRC_HASH_WIDTH 32
2646#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_LBN 256
2647#define FSF_CZ_MC_XFRC_LAYER4_BYTE_MASK_WIDTH 128
2648#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_LBN 128
2649#define FSF_CZ_MC_XFRC_LAYER3_BYTE_MASK_WIDTH 128
2650#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_LBN 0
2651#define FSF_CZ_MC_XFRC_LAYER2_OR_SIMPLE_BYTE_MASK_WIDTH 128
2652
2653/* RX_EV */
2654#define FSF_CZ_RX_EV_PKT_NOT_PARSED_LBN 58
2655#define FSF_CZ_RX_EV_PKT_NOT_PARSED_WIDTH 1
2656#define FSF_CZ_RX_EV_IPV6_PKT_LBN 57
2657#define FSF_CZ_RX_EV_IPV6_PKT_WIDTH 1
2658#define FSF_AZ_RX_EV_PKT_OK_LBN 56
2659#define FSF_AZ_RX_EV_PKT_OK_WIDTH 1
2660#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_LBN 55
2661#define FSF_AZ_RX_EV_PAUSE_FRM_ERR_WIDTH 1
2662#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_LBN 54
2663#define FSF_AZ_RX_EV_BUF_OWNER_ID_ERR_WIDTH 1
2664#define FSF_AZ_RX_EV_IP_FRAG_ERR_LBN 53
2665#define FSF_AZ_RX_EV_IP_FRAG_ERR_WIDTH 1
2666#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_LBN 52
2667#define FSF_AZ_RX_EV_IP_HDR_CHKSUM_ERR_WIDTH 1
2668#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_LBN 51
2669#define FSF_AZ_RX_EV_TCP_UDP_CHKSUM_ERR_WIDTH 1
2670#define FSF_AZ_RX_EV_ETH_CRC_ERR_LBN 50
2671#define FSF_AZ_RX_EV_ETH_CRC_ERR_WIDTH 1
2672#define FSF_AZ_RX_EV_FRM_TRUNC_LBN 49
2673#define FSF_AZ_RX_EV_FRM_TRUNC_WIDTH 1
2674#define FSF_AA_RX_EV_DRIB_NIB_LBN 49
2675#define FSF_AA_RX_EV_DRIB_NIB_WIDTH 1
2676#define FSF_AZ_RX_EV_TOBE_DISC_LBN 47
2677#define FSF_AZ_RX_EV_TOBE_DISC_WIDTH 1
2678#define FSF_AZ_RX_EV_PKT_TYPE_LBN 44
2679#define FSF_AZ_RX_EV_PKT_TYPE_WIDTH 3
2680#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_JUMBO 5
2681#define FSE_AZ_RX_EV_PKT_TYPE_VLAN_LLC 4
2682#define FSE_AZ_RX_EV_PKT_TYPE_VLAN 3
2683#define FSE_AZ_RX_EV_PKT_TYPE_JUMBO 2
2684#define FSE_AZ_RX_EV_PKT_TYPE_LLC 1
2685#define FSE_AZ_RX_EV_PKT_TYPE_ETH 0
2686#define FSF_AZ_RX_EV_HDR_TYPE_LBN 42
2687#define FSF_AZ_RX_EV_HDR_TYPE_WIDTH 2
2688#define FSE_AZ_RX_EV_HDR_TYPE_OTHER 3
2689#define FSE_AB_RX_EV_HDR_TYPE_IPV4_OTHER 2
2690#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_OTHER 2
2691#define FSE_AB_RX_EV_HDR_TYPE_IPV4_UDP 1
2692#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_UDP 1
2693#define FSE_AB_RX_EV_HDR_TYPE_IPV4_TCP 0
2694#define FSE_CZ_RX_EV_HDR_TYPE_IPV4V6_TCP 0
2695#define FSF_AZ_RX_EV_DESC_Q_EMPTY_LBN 41
2696#define FSF_AZ_RX_EV_DESC_Q_EMPTY_WIDTH 1
2697#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_LBN 40
2698#define FSF_AZ_RX_EV_MCAST_HASH_MATCH_WIDTH 1
2699#define FSF_AZ_RX_EV_MCAST_PKT_LBN 39
2700#define FSF_AZ_RX_EV_MCAST_PKT_WIDTH 1
2701#define FSF_AA_RX_EV_RECOVERY_FLAG_LBN 37
2702#define FSF_AA_RX_EV_RECOVERY_FLAG_WIDTH 1
2703#define FSF_AZ_RX_EV_Q_LABEL_LBN 32
2704#define FSF_AZ_RX_EV_Q_LABEL_WIDTH 5
2705#define FSF_AZ_RX_EV_JUMBO_CONT_LBN 31
2706#define FSF_AZ_RX_EV_JUMBO_CONT_WIDTH 1
2707#define FSF_AZ_RX_EV_PORT_LBN 30
2708#define FSF_AZ_RX_EV_PORT_WIDTH 1
2709#define FSF_AZ_RX_EV_BYTE_CNT_LBN 16
2710#define FSF_AZ_RX_EV_BYTE_CNT_WIDTH 14
2711#define FSF_AZ_RX_EV_SOP_LBN 15
2712#define FSF_AZ_RX_EV_SOP_WIDTH 1
2713#define FSF_AZ_RX_EV_ISCSI_PKT_OK_LBN 14
2714#define FSF_AZ_RX_EV_ISCSI_PKT_OK_WIDTH 1
2715#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_LBN 13
2716#define FSF_AZ_RX_EV_ISCSI_DDIG_ERR_WIDTH 1
2717#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_LBN 12
2718#define FSF_AZ_RX_EV_ISCSI_HDIG_ERR_WIDTH 1
2719#define FSF_AZ_RX_EV_DESC_PTR_LBN 0
2720#define FSF_AZ_RX_EV_DESC_PTR_WIDTH 12
2721
2722/* RX_KER_DESC */
2723#define FSF_AZ_RX_KER_BUF_SIZE_LBN 48
2724#define FSF_AZ_RX_KER_BUF_SIZE_WIDTH 14
2725#define FSF_AZ_RX_KER_BUF_REGION_LBN 46
2726#define FSF_AZ_RX_KER_BUF_REGION_WIDTH 2
2727#define FSF_AZ_RX_KER_BUF_ADDR_LBN 0
2728#define FSF_AZ_RX_KER_BUF_ADDR_WIDTH 46
2729
2730/* RX_USER_DESC */
2731#define FSF_AZ_RX_USER_2BYTE_OFFSET_LBN 20
2732#define FSF_AZ_RX_USER_2BYTE_OFFSET_WIDTH 12
2733#define FSF_AZ_RX_USER_BUF_ID_LBN 0
2734#define FSF_AZ_RX_USER_BUF_ID_WIDTH 20
2735
2736/* TX_EV */
2737#define FSF_AZ_TX_EV_PKT_ERR_LBN 38
2738#define FSF_AZ_TX_EV_PKT_ERR_WIDTH 1
2739#define FSF_AZ_TX_EV_PKT_TOO_BIG_LBN 37
2740#define FSF_AZ_TX_EV_PKT_TOO_BIG_WIDTH 1
2741#define FSF_AZ_TX_EV_Q_LABEL_LBN 32
2742#define FSF_AZ_TX_EV_Q_LABEL_WIDTH 5
2743#define FSF_AZ_TX_EV_PORT_LBN 16
2744#define FSF_AZ_TX_EV_PORT_WIDTH 1
2745#define FSF_AZ_TX_EV_WQ_FF_FULL_LBN 15
2746#define FSF_AZ_TX_EV_WQ_FF_FULL_WIDTH 1
2747#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_LBN 14
2748#define FSF_AZ_TX_EV_BUF_OWNER_ID_ERR_WIDTH 1
2749#define FSF_AZ_TX_EV_COMP_LBN 12
2750#define FSF_AZ_TX_EV_COMP_WIDTH 1
2751#define FSF_AZ_TX_EV_DESC_PTR_LBN 0
2752#define FSF_AZ_TX_EV_DESC_PTR_WIDTH 12
2753
2754/* TX_KER_DESC */
2755#define FSF_AZ_TX_KER_CONT_LBN 62
2756#define FSF_AZ_TX_KER_CONT_WIDTH 1
2757#define FSF_AZ_TX_KER_BYTE_COUNT_LBN 48
2758#define FSF_AZ_TX_KER_BYTE_COUNT_WIDTH 14
2759#define FSF_AZ_TX_KER_BUF_REGION_LBN 46
2760#define FSF_AZ_TX_KER_BUF_REGION_WIDTH 2
2761#define FSF_AZ_TX_KER_BUF_ADDR_LBN 0
2762#define FSF_AZ_TX_KER_BUF_ADDR_WIDTH 46
2763
2764/* TX_USER_DESC */
2765#define FSF_AZ_TX_USER_SW_EV_EN_LBN 48
2766#define FSF_AZ_TX_USER_SW_EV_EN_WIDTH 1
2767#define FSF_AZ_TX_USER_CONT_LBN 46
2768#define FSF_AZ_TX_USER_CONT_WIDTH 1
2769#define FSF_AZ_TX_USER_BYTE_CNT_LBN 33
2770#define FSF_AZ_TX_USER_BYTE_CNT_WIDTH 13
2771#define FSF_AZ_TX_USER_BUF_ID_LBN 13
2772#define FSF_AZ_TX_USER_BUF_ID_WIDTH 20
2773#define FSF_AZ_TX_USER_BYTE_OFS_LBN 0
2774#define FSF_AZ_TX_USER_BYTE_OFS_WIDTH 13
2775
2776/* USER_EV */
2777#define FSF_CZ_USER_QID_LBN 32
2778#define FSF_CZ_USER_QID_WIDTH 10
2779#define FSF_CZ_USER_EV_REG_VALUE_LBN 0
2780#define FSF_CZ_USER_EV_REG_VALUE_WIDTH 32
2781
2782/**************************************************************************
2783 *
2784 * Falcon B0 PCIe core indirect registers
2785 *
2786 **************************************************************************
2787 */
2788
2789#define FPCR_BB_PCIE_DEVICE_CTRL_STAT 0x68
2790
2791#define FPCR_BB_PCIE_LINK_CTRL_STAT 0x70
2792
2793#define FPCR_BB_ACK_RPL_TIMER 0x700
2794#define FPCRF_BB_ACK_TL_LBN 0
2795#define FPCRF_BB_ACK_TL_WIDTH 16
2796#define FPCRF_BB_RPL_TL_LBN 16
2797#define FPCRF_BB_RPL_TL_WIDTH 16
2798
2799#define FPCR_BB_ACK_FREQ 0x70C
2800#define FPCRF_BB_ACK_FREQ_LBN 0
2801#define FPCRF_BB_ACK_FREQ_WIDTH 7
2802
2803/**************************************************************************
2804 *
2805 * Pseudo-registers and fields
2806 *
2807 **************************************************************************
2808 */
2809
2810/* Interrupt acknowledge work-around register (A0/A1 only) */
2811#define FR_AA_WORK_AROUND_BROKEN_PCI_READS 0x0070
2812
2813/* EE_SPI_HCMD_REG: SPI host command register */
2814/* Values for the EE_SPI_HCMD_SF_SEL register field */
2815#define FFE_AB_SPI_DEVICE_EEPROM 0
2816#define FFE_AB_SPI_DEVICE_FLASH 1
2817
2818/* NIC_STAT_REG: NIC status register */
2819#define FRF_AB_STRAP_10G_LBN 2
2820#define FRF_AB_STRAP_10G_WIDTH 1
2821#define FRF_AA_STRAP_PCIE_LBN 0
2822#define FRF_AA_STRAP_PCIE_WIDTH 1
2823
2824/* FATAL_INTR_REG_KER: Fatal interrupt register for Kernel */
2825#define FRF_AZ_FATAL_INTR_LBN 0
2826#define FRF_AZ_FATAL_INTR_WIDTH 12
2827
2828/* SRM_CFG_REG: SRAM configuration register */
2829/* We treat the number of SRAM banks and bank size as a single field */
2830#define FRF_AZ_SRM_NB_SZ_LBN FRF_AZ_SRM_BANK_SIZE_LBN
2831#define FRF_AZ_SRM_NB_SZ_WIDTH \
2832 (FRF_AZ_SRM_BANK_SIZE_WIDTH + FRF_AZ_SRM_NUM_BANK_WIDTH)
2833#define FFE_AB_SRM_NB1_SZ2M 0
2834#define FFE_AB_SRM_NB1_SZ4M 1
2835#define FFE_AB_SRM_NB1_SZ8M 2
2836#define FFE_AB_SRM_NB_SZ_DEF 3
2837#define FFE_AB_SRM_NB2_SZ4M 4
2838#define FFE_AB_SRM_NB2_SZ8M 5
2839#define FFE_AB_SRM_NB2_SZ16M 6
2840#define FFE_AB_SRM_NB_SZ_RES 7
2841
2842/* RX_DESC_UPD_REGP0: Receive descriptor update register. */
2843/* We write just the last dword of these registers */
2844#define FR_AZ_RX_DESC_UPD_DWORD_P0 \
2845 (BUILD_BUG_ON_ZERO(FR_AA_RX_DESC_UPD_KER != FR_BZ_RX_DESC_UPD_P0) + \
2846 FR_BZ_RX_DESC_UPD_P0 + 3 * 4)
2847#define FRF_AZ_RX_DESC_WPTR_DWORD_LBN (FRF_AZ_RX_DESC_WPTR_LBN - 3 * 32)
2848#define FRF_AZ_RX_DESC_WPTR_DWORD_WIDTH FRF_AZ_RX_DESC_WPTR_WIDTH
2849
2850/* TX_DESC_UPD_REGP0: Transmit descriptor update register. */
2851#define FR_AZ_TX_DESC_UPD_DWORD_P0 \
2852 (BUILD_BUG_ON_ZERO(FR_AA_TX_DESC_UPD_KER != FR_BZ_TX_DESC_UPD_P0) + \
2853 FR_BZ_TX_DESC_UPD_P0 + 3 * 4)
2854#define FRF_AZ_TX_DESC_WPTR_DWORD_LBN (FRF_AZ_TX_DESC_WPTR_LBN - 3 * 32)
2855#define FRF_AZ_TX_DESC_WPTR_DWORD_WIDTH FRF_AZ_TX_DESC_WPTR_WIDTH
2856
2857/* GMF_CFG4_REG: GMAC FIFO configuration register 4 */
2858#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_LBN 12
2859#define FRF_AB_GMF_HSTFLTRFRM_PAUSE_WIDTH 1
2860
2861/* GMF_CFG5_REG: GMAC FIFO configuration register 5 */
2862#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_LBN 12
2863#define FRF_AB_GMF_HSTFLTRFRMDC_PAUSE_WIDTH 1
2864
2865/* XM_TX_PARAM_REG: XGMAC transmit parameter register */
2866#define FRF_AB_XM_MAX_TX_FRM_SIZE_LBN FRF_AB_XM_MAX_TX_FRM_SIZE_LO_LBN
2867#define FRF_AB_XM_MAX_TX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_TX_FRM_SIZE_HI_WIDTH + \
2868 FRF_AB_XM_MAX_TX_FRM_SIZE_LO_WIDTH)
2869
2870/* XM_RX_PARAM_REG: XGMAC receive parameter register */
2871#define FRF_AB_XM_MAX_RX_FRM_SIZE_LBN FRF_AB_XM_MAX_RX_FRM_SIZE_LO_LBN
2872#define FRF_AB_XM_MAX_RX_FRM_SIZE_WIDTH (FRF_AB_XM_MAX_RX_FRM_SIZE_HI_WIDTH + \
2873 FRF_AB_XM_MAX_RX_FRM_SIZE_LO_WIDTH)
2874
2875/* XX_TXDRV_CTL_REG: XAUI SerDes transmit drive control register */
2876/* Default values */
2877#define FFE_AB_XX_TXDRV_DEQ_DEF 0xe /* deq=.6 */
2878#define FFE_AB_XX_TXDRV_DTX_DEF 0x5 /* 1.25 */
2879#define FFE_AB_XX_SD_CTL_DRV_DEF 0 /* 20mA */
2880
2881/* XX_CORE_STAT_REG: XAUI XGXS core status register */
2882/* XGXS all-lanes status fields */
2883#define FRF_AB_XX_SYNC_STAT_LBN FRF_AB_XX_SYNC_STAT0_LBN
2884#define FRF_AB_XX_SYNC_STAT_WIDTH 4
2885#define FRF_AB_XX_COMMA_DET_LBN FRF_AB_XX_COMMA_DET_CH0_LBN
2886#define FRF_AB_XX_COMMA_DET_WIDTH 4
2887#define FRF_AB_XX_CHAR_ERR_LBN FRF_AB_XX_CHAR_ERR_CH0_LBN
2888#define FRF_AB_XX_CHAR_ERR_WIDTH 4
2889#define FRF_AB_XX_DISPERR_LBN FRF_AB_XX_DISPERR_CH0_LBN
2890#define FRF_AB_XX_DISPERR_WIDTH 4
2891#define FFE_AB_XX_STAT_ALL_LANES 0xf
2892#define FRF_AB_XX_FORCE_SIG_LBN FRF_AB_XX_FORCE_SIG0_VAL_LBN
2893#define FRF_AB_XX_FORCE_SIG_WIDTH 8
2894#define FFE_AB_XX_FORCE_SIG_ALL_LANES 0xff
2895
2896/* DRIVER_EV */
2897/* Sub-fields of an RX flush completion event */
2898#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_LBN 12
2899#define FSF_AZ_DRIVER_EV_RX_FLUSH_FAIL_WIDTH 1
2900#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_LBN 0
2901#define FSF_AZ_DRIVER_EV_RX_DESCQ_ID_WIDTH 12
2902
2903/* EVENT_ENTRY */
2904/* Magic number field for event test */
2905#define FSF_AZ_DRV_GEN_EV_MAGIC_LBN 0
2906#define FSF_AZ_DRV_GEN_EV_MAGIC_WIDTH 32
2907
2908/**************************************************************************
2909 *
2910 * Falcon MAC stats
2911 *
2912 **************************************************************************
2913 *
2914 */
2915
2916#define GRxGoodOct_offset 0x0
2917#define GRxGoodOct_WIDTH 48
2918#define GRxBadOct_offset 0x8
2919#define GRxBadOct_WIDTH 48
2920#define GRxMissPkt_offset 0x10
2921#define GRxMissPkt_WIDTH 32
2922#define GRxFalseCRS_offset 0x14
2923#define GRxFalseCRS_WIDTH 32
2924#define GRxPausePkt_offset 0x18
2925#define GRxPausePkt_WIDTH 32
2926#define GRxBadPkt_offset 0x1C
2927#define GRxBadPkt_WIDTH 32
2928#define GRxUcastPkt_offset 0x20
2929#define GRxUcastPkt_WIDTH 32
2930#define GRxMcastPkt_offset 0x24
2931#define GRxMcastPkt_WIDTH 32
2932#define GRxBcastPkt_offset 0x28
2933#define GRxBcastPkt_WIDTH 32
2934#define GRxGoodLt64Pkt_offset 0x2C
2935#define GRxGoodLt64Pkt_WIDTH 32
2936#define GRxBadLt64Pkt_offset 0x30
2937#define GRxBadLt64Pkt_WIDTH 32
2938#define GRx64Pkt_offset 0x34
2939#define GRx64Pkt_WIDTH 32
2940#define GRx65to127Pkt_offset 0x38
2941#define GRx65to127Pkt_WIDTH 32
2942#define GRx128to255Pkt_offset 0x3C
2943#define GRx128to255Pkt_WIDTH 32
2944#define GRx256to511Pkt_offset 0x40
2945#define GRx256to511Pkt_WIDTH 32
2946#define GRx512to1023Pkt_offset 0x44
2947#define GRx512to1023Pkt_WIDTH 32
2948#define GRx1024to15xxPkt_offset 0x48
2949#define GRx1024to15xxPkt_WIDTH 32
2950#define GRx15xxtoJumboPkt_offset 0x4C
2951#define GRx15xxtoJumboPkt_WIDTH 32
2952#define GRxGtJumboPkt_offset 0x50
2953#define GRxGtJumboPkt_WIDTH 32
2954#define GRxFcsErr64to15xxPkt_offset 0x54
2955#define GRxFcsErr64to15xxPkt_WIDTH 32
2956#define GRxFcsErr15xxtoJumboPkt_offset 0x58
2957#define GRxFcsErr15xxtoJumboPkt_WIDTH 32
2958#define GRxFcsErrGtJumboPkt_offset 0x5C
2959#define GRxFcsErrGtJumboPkt_WIDTH 32
2960#define GTxGoodBadOct_offset 0x80
2961#define GTxGoodBadOct_WIDTH 48
2962#define GTxGoodOct_offset 0x88
2963#define GTxGoodOct_WIDTH 48
2964#define GTxSglColPkt_offset 0x90
2965#define GTxSglColPkt_WIDTH 32
2966#define GTxMultColPkt_offset 0x94
2967#define GTxMultColPkt_WIDTH 32
2968#define GTxExColPkt_offset 0x98
2969#define GTxExColPkt_WIDTH 32
2970#define GTxDefPkt_offset 0x9C
2971#define GTxDefPkt_WIDTH 32
2972#define GTxLateCol_offset 0xA0
2973#define GTxLateCol_WIDTH 32
2974#define GTxExDefPkt_offset 0xA4
2975#define GTxExDefPkt_WIDTH 32
2976#define GTxPausePkt_offset 0xA8
2977#define GTxPausePkt_WIDTH 32
2978#define GTxBadPkt_offset 0xAC
2979#define GTxBadPkt_WIDTH 32
2980#define GTxUcastPkt_offset 0xB0
2981#define GTxUcastPkt_WIDTH 32
2982#define GTxMcastPkt_offset 0xB4
2983#define GTxMcastPkt_WIDTH 32
2984#define GTxBcastPkt_offset 0xB8
2985#define GTxBcastPkt_WIDTH 32
2986#define GTxLt64Pkt_offset 0xBC
2987#define GTxLt64Pkt_WIDTH 32
2988#define GTx64Pkt_offset 0xC0
2989#define GTx64Pkt_WIDTH 32
2990#define GTx65to127Pkt_offset 0xC4
2991#define GTx65to127Pkt_WIDTH 32
2992#define GTx128to255Pkt_offset 0xC8
2993#define GTx128to255Pkt_WIDTH 32
2994#define GTx256to511Pkt_offset 0xCC
2995#define GTx256to511Pkt_WIDTH 32
2996#define GTx512to1023Pkt_offset 0xD0
2997#define GTx512to1023Pkt_WIDTH 32
2998#define GTx1024to15xxPkt_offset 0xD4
2999#define GTx1024to15xxPkt_WIDTH 32
3000#define GTx15xxtoJumboPkt_offset 0xD8
3001#define GTx15xxtoJumboPkt_WIDTH 32
3002#define GTxGtJumboPkt_offset 0xDC
3003#define GTxGtJumboPkt_WIDTH 32
3004#define GTxNonTcpUdpPkt_offset 0xE0
3005#define GTxNonTcpUdpPkt_WIDTH 16
3006#define GTxMacSrcErrPkt_offset 0xE4
3007#define GTxMacSrcErrPkt_WIDTH 16
3008#define GTxIpSrcErrPkt_offset 0xE8
3009#define GTxIpSrcErrPkt_WIDTH 16
3010#define GDmaDone_offset 0xEC
3011#define GDmaDone_WIDTH 32
3012
3013#define XgRxOctets_offset 0x0
3014#define XgRxOctets_WIDTH 48
3015#define XgRxOctetsOK_offset 0x8
3016#define XgRxOctetsOK_WIDTH 48
3017#define XgRxPkts_offset 0x10
3018#define XgRxPkts_WIDTH 32
3019#define XgRxPktsOK_offset 0x14
3020#define XgRxPktsOK_WIDTH 32
3021#define XgRxBroadcastPkts_offset 0x18
3022#define XgRxBroadcastPkts_WIDTH 32
3023#define XgRxMulticastPkts_offset 0x1C
3024#define XgRxMulticastPkts_WIDTH 32
3025#define XgRxUnicastPkts_offset 0x20
3026#define XgRxUnicastPkts_WIDTH 32
3027#define XgRxUndersizePkts_offset 0x24
3028#define XgRxUndersizePkts_WIDTH 32
3029#define XgRxOversizePkts_offset 0x28
3030#define XgRxOversizePkts_WIDTH 32
3031#define XgRxJabberPkts_offset 0x2C
3032#define XgRxJabberPkts_WIDTH 32
3033#define XgRxUndersizeFCSerrorPkts_offset 0x30
3034#define XgRxUndersizeFCSerrorPkts_WIDTH 32
3035#define XgRxDropEvents_offset 0x34
3036#define XgRxDropEvents_WIDTH 32
3037#define XgRxFCSerrorPkts_offset 0x38
3038#define XgRxFCSerrorPkts_WIDTH 32
3039#define XgRxAlignError_offset 0x3C
3040#define XgRxAlignError_WIDTH 32
3041#define XgRxSymbolError_offset 0x40
3042#define XgRxSymbolError_WIDTH 32
3043#define XgRxInternalMACError_offset 0x44
3044#define XgRxInternalMACError_WIDTH 32
3045#define XgRxControlPkts_offset 0x48
3046#define XgRxControlPkts_WIDTH 32
3047#define XgRxPausePkts_offset 0x4C
3048#define XgRxPausePkts_WIDTH 32
3049#define XgRxPkts64Octets_offset 0x50
3050#define XgRxPkts64Octets_WIDTH 32
3051#define XgRxPkts65to127Octets_offset 0x54
3052#define XgRxPkts65to127Octets_WIDTH 32
3053#define XgRxPkts128to255Octets_offset 0x58
3054#define XgRxPkts128to255Octets_WIDTH 32
3055#define XgRxPkts256to511Octets_offset 0x5C
3056#define XgRxPkts256to511Octets_WIDTH 32
3057#define XgRxPkts512to1023Octets_offset 0x60
3058#define XgRxPkts512to1023Octets_WIDTH 32
3059#define XgRxPkts1024to15xxOctets_offset 0x64
3060#define XgRxPkts1024to15xxOctets_WIDTH 32
3061#define XgRxPkts15xxtoMaxOctets_offset 0x68
3062#define XgRxPkts15xxtoMaxOctets_WIDTH 32
3063#define XgRxLengthError_offset 0x6C
3064#define XgRxLengthError_WIDTH 32
3065#define XgTxPkts_offset 0x80
3066#define XgTxPkts_WIDTH 32
3067#define XgTxOctets_offset 0x88
3068#define XgTxOctets_WIDTH 48
3069#define XgTxMulticastPkts_offset 0x90
3070#define XgTxMulticastPkts_WIDTH 32
3071#define XgTxBroadcastPkts_offset 0x94
3072#define XgTxBroadcastPkts_WIDTH 32
3073#define XgTxUnicastPkts_offset 0x98
3074#define XgTxUnicastPkts_WIDTH 32
3075#define XgTxControlPkts_offset 0x9C
3076#define XgTxControlPkts_WIDTH 32
3077#define XgTxPausePkts_offset 0xA0
3078#define XgTxPausePkts_WIDTH 32
3079#define XgTxPkts64Octets_offset 0xA4
3080#define XgTxPkts64Octets_WIDTH 32
3081#define XgTxPkts65to127Octets_offset 0xA8
3082#define XgTxPkts65to127Octets_WIDTH 32
3083#define XgTxPkts128to255Octets_offset 0xAC
3084#define XgTxPkts128to255Octets_WIDTH 32
3085#define XgTxPkts256to511Octets_offset 0xB0
3086#define XgTxPkts256to511Octets_WIDTH 32
3087#define XgTxPkts512to1023Octets_offset 0xB4
3088#define XgTxPkts512to1023Octets_WIDTH 32
3089#define XgTxPkts1024to15xxOctets_offset 0xB8
3090#define XgTxPkts1024to15xxOctets_WIDTH 32
3091#define XgTxPkts1519toMaxOctets_offset 0xBC
3092#define XgTxPkts1519toMaxOctets_WIDTH 32
3093#define XgTxUndersizePkts_offset 0xC0
3094#define XgTxUndersizePkts_WIDTH 32
3095#define XgTxOversizePkts_offset 0xC4
3096#define XgTxOversizePkts_WIDTH 32
3097#define XgTxNonTcpUdpPkt_offset 0xC8
3098#define XgTxNonTcpUdpPkt_WIDTH 16
3099#define XgTxMacSrcErrPkt_offset 0xCC
3100#define XgTxMacSrcErrPkt_WIDTH 16
3101#define XgTxIpSrcErrPkt_offset 0xD0
3102#define XgTxIpSrcErrPkt_WIDTH 16
3103#define XgDmaDone_offset 0xD4
3104#define XgDmaDone_WIDTH 32
3105
3106#define FALCON_STATS_NOT_DONE 0x00000000
3107#define FALCON_STATS_DONE 0xffffffff
3108
3109/* Interrupt status register bits */
3110#define FATAL_INT_LBN 64
3111#define FATAL_INT_WIDTH 1
3112#define INT_EVQS_LBN 40
3113#define INT_EVQS_WIDTH 4
3114#define INT_FLAG_LBN 32
3115#define INT_FLAG_WIDTH 1
3116#define EVQ_FIFO_HF_LBN 1
3117#define EVQ_FIFO_HF_WIDTH 1
3118#define EVQ_FIFO_AF_LBN 0
3119#define EVQ_FIFO_AF_WIDTH 1
3120
3121/**************************************************************************
3122 *
3123 * Falcon non-volatile configuration
3124 *
3125 **************************************************************************
3126 */
3127
3128/* Board configuration v2 (v1 is obsolete; later versions are compatible) */
3129struct falcon_nvconfig_board_v2 {
3130 __le16 nports;
3131 u8 port0_phy_addr;
3132 u8 port0_phy_type;
3133 u8 port1_phy_addr;
3134 u8 port1_phy_type;
3135 __le16 asic_sub_revision;
3136 __le16 board_revision;
3137} __packed;
3138
3139/* Board configuration v3 extra information */
3140struct falcon_nvconfig_board_v3 {
3141 __le32 spi_device_type[2];
3142} __packed;
3143
3144/* Bit numbers for spi_device_type */
3145#define SPI_DEV_TYPE_SIZE_LBN 0
3146#define SPI_DEV_TYPE_SIZE_WIDTH 5
3147#define SPI_DEV_TYPE_ADDR_LEN_LBN 6
3148#define SPI_DEV_TYPE_ADDR_LEN_WIDTH 2
3149#define SPI_DEV_TYPE_ERASE_CMD_LBN 8
3150#define SPI_DEV_TYPE_ERASE_CMD_WIDTH 8
3151#define SPI_DEV_TYPE_ERASE_SIZE_LBN 16
3152#define SPI_DEV_TYPE_ERASE_SIZE_WIDTH 5
3153#define SPI_DEV_TYPE_BLOCK_SIZE_LBN 24
3154#define SPI_DEV_TYPE_BLOCK_SIZE_WIDTH 5
3155#define SPI_DEV_TYPE_FIELD(type, field) \
3156 (((type) >> EFX_LOW_BIT(field)) & EFX_MASK32(EFX_WIDTH(field)))
3157
3158#define FALCON_NVCONFIG_OFFSET 0x300
3159
3160#define FALCON_NVCONFIG_BOARD_MAGIC_NUM 0xFA1C
3161struct falcon_nvconfig {
3162 efx_oword_t ee_vpd_cfg_reg; /* 0x300 */
3163 u8 mac_address[2][8]; /* 0x310 */
3164 efx_oword_t pcie_sd_ctl0123_reg; /* 0x320 */
3165 efx_oword_t pcie_sd_ctl45_reg; /* 0x330 */
3166 efx_oword_t pcie_pcs_ctl_stat_reg; /* 0x340 */
3167 efx_oword_t hw_init_reg; /* 0x350 */
3168 efx_oword_t nic_stat_reg; /* 0x360 */
3169 efx_oword_t glb_ctl_reg; /* 0x370 */
3170 efx_oword_t srm_cfg_reg; /* 0x380 */
3171 efx_oword_t spare_reg; /* 0x390 */
3172 __le16 board_magic_num; /* 0x3A0 */
3173 __le16 board_struct_ver;
3174 __le16 board_checksum;
3175 struct falcon_nvconfig_board_v2 board_v2;
3176 efx_oword_t ee_base_page_reg; /* 0x3B0 */
3177 struct falcon_nvconfig_board_v3 board_v3; /* 0x3C0 */
3178} __packed;
3179
3180#endif /* EFX_REGS_H */
diff --git a/drivers/net/sfc/rx.c b/drivers/net/sfc/rx.c
index 98bff5ada09a..a60c7188fdad 100644
--- a/drivers/net/sfc/rx.c
+++ b/drivers/net/sfc/rx.c
@@ -61,7 +61,7 @@
61 * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ? 61 * rx_alloc_method = (rx_alloc_level > RX_ALLOC_LEVEL_LRO ?
62 * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB) 62 * RX_ALLOC_METHOD_PAGE : RX_ALLOC_METHOD_SKB)
63 */ 63 */
64static int rx_alloc_method = RX_ALLOC_METHOD_PAGE; 64static int rx_alloc_method = RX_ALLOC_METHOD_AUTO;
65 65
66#define RX_ALLOC_LEVEL_LRO 0x2000 66#define RX_ALLOC_LEVEL_LRO 0x2000
67#define RX_ALLOC_LEVEL_MAX 0x3000 67#define RX_ALLOC_LEVEL_MAX 0x3000
@@ -293,8 +293,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
293 * fill anyway. 293 * fill anyway.
294 */ 294 */
295 fill_level = (rx_queue->added_count - rx_queue->removed_count); 295 fill_level = (rx_queue->added_count - rx_queue->removed_count);
296 EFX_BUG_ON_PARANOID(fill_level > 296 EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
297 rx_queue->efx->type->rxd_ring_mask + 1);
298 297
299 /* Don't fill if we don't need to */ 298 /* Don't fill if we don't need to */
300 if (fill_level >= rx_queue->fast_fill_trigger) 299 if (fill_level >= rx_queue->fast_fill_trigger)
@@ -316,8 +315,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
316 retry: 315 retry:
317 /* Recalculate current fill level now that we have the lock */ 316 /* Recalculate current fill level now that we have the lock */
318 fill_level = (rx_queue->added_count - rx_queue->removed_count); 317 fill_level = (rx_queue->added_count - rx_queue->removed_count);
319 EFX_BUG_ON_PARANOID(fill_level > 318 EFX_BUG_ON_PARANOID(fill_level > EFX_RXQ_SIZE);
320 rx_queue->efx->type->rxd_ring_mask + 1);
321 space = rx_queue->fast_fill_limit - fill_level; 319 space = rx_queue->fast_fill_limit - fill_level;
322 if (space < EFX_RX_BATCH) 320 if (space < EFX_RX_BATCH)
323 goto out_unlock; 321 goto out_unlock;
@@ -329,8 +327,7 @@ static int __efx_fast_push_rx_descriptors(struct efx_rx_queue *rx_queue,
329 327
330 do { 328 do {
331 for (i = 0; i < EFX_RX_BATCH; ++i) { 329 for (i = 0; i < EFX_RX_BATCH; ++i) {
332 index = (rx_queue->added_count & 330 index = rx_queue->added_count & EFX_RXQ_MASK;
333 rx_queue->efx->type->rxd_ring_mask);
334 rx_buf = efx_rx_buffer(rx_queue, index); 331 rx_buf = efx_rx_buffer(rx_queue, index);
335 rc = efx_init_rx_buffer(rx_queue, rx_buf); 332 rc = efx_init_rx_buffer(rx_queue, rx_buf);
336 if (unlikely(rc)) 333 if (unlikely(rc))
@@ -448,6 +445,7 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
448 bool checksummed) 445 bool checksummed)
449{ 446{
450 struct napi_struct *napi = &channel->napi_str; 447 struct napi_struct *napi = &channel->napi_str;
448 gro_result_t gro_result;
451 449
452 /* Pass the skb/page into the LRO engine */ 450 /* Pass the skb/page into the LRO engine */
453 if (rx_buf->page) { 451 if (rx_buf->page) {
@@ -455,6 +453,7 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
455 453
456 if (!skb) { 454 if (!skb) {
457 put_page(rx_buf->page); 455 put_page(rx_buf->page);
456 gro_result = GRO_DROP;
458 goto out; 457 goto out;
459 } 458 }
460 459
@@ -470,7 +469,7 @@ static void efx_rx_packet_lro(struct efx_channel *channel,
470 skb->ip_summed = 469 skb->ip_summed =
471 checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE; 470 checksummed ? CHECKSUM_UNNECESSARY : CHECKSUM_NONE;
472 471
473 napi_gro_frags(napi); 472 gro_result = napi_gro_frags(napi);
474 473
475out: 474out:
476 EFX_BUG_ON_PARANOID(rx_buf->skb); 475 EFX_BUG_ON_PARANOID(rx_buf->skb);
@@ -479,9 +478,16 @@ out:
479 EFX_BUG_ON_PARANOID(!rx_buf->skb); 478 EFX_BUG_ON_PARANOID(!rx_buf->skb);
480 EFX_BUG_ON_PARANOID(!checksummed); 479 EFX_BUG_ON_PARANOID(!checksummed);
481 480
482 napi_gro_receive(napi, rx_buf->skb); 481 gro_result = napi_gro_receive(napi, rx_buf->skb);
483 rx_buf->skb = NULL; 482 rx_buf->skb = NULL;
484 } 483 }
484
485 if (gro_result == GRO_NORMAL) {
486 channel->rx_alloc_level += RX_ALLOC_FACTOR_SKB;
487 } else if (gro_result != GRO_DROP) {
488 channel->rx_alloc_level += RX_ALLOC_FACTOR_LRO;
489 channel->irq_mod_score += 2;
490 }
485} 491}
486 492
487void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index, 493void efx_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index,
@@ -632,7 +638,7 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
632 EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue); 638 EFX_LOG(efx, "creating RX queue %d\n", rx_queue->queue);
633 639
634 /* Allocate RX buffers */ 640 /* Allocate RX buffers */
635 rxq_size = (efx->type->rxd_ring_mask + 1) * sizeof(*rx_queue->buffer); 641 rxq_size = EFX_RXQ_SIZE * sizeof(*rx_queue->buffer);
636 rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL); 642 rx_queue->buffer = kzalloc(rxq_size, GFP_KERNEL);
637 if (!rx_queue->buffer) 643 if (!rx_queue->buffer)
638 return -ENOMEM; 644 return -ENOMEM;
@@ -647,7 +653,6 @@ int efx_probe_rx_queue(struct efx_rx_queue *rx_queue)
647 653
648void efx_init_rx_queue(struct efx_rx_queue *rx_queue) 654void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
649{ 655{
650 struct efx_nic *efx = rx_queue->efx;
651 unsigned int max_fill, trigger, limit; 656 unsigned int max_fill, trigger, limit;
652 657
653 EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue); 658 EFX_LOG(rx_queue->efx, "initialising RX queue %d\n", rx_queue->queue);
@@ -660,7 +665,7 @@ void efx_init_rx_queue(struct efx_rx_queue *rx_queue)
660 rx_queue->min_overfill = -1U; 665 rx_queue->min_overfill = -1U;
661 666
662 /* Initialise limit fields */ 667 /* Initialise limit fields */
663 max_fill = efx->type->rxd_ring_mask + 1 - EFX_RXD_HEAD_ROOM; 668 max_fill = EFX_RXQ_SIZE - EFX_RXD_HEAD_ROOM;
664 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U; 669 trigger = max_fill * min(rx_refill_threshold, 100U) / 100U;
665 limit = max_fill * min(rx_refill_limit, 100U) / 100U; 670 limit = max_fill * min(rx_refill_limit, 100U) / 100U;
666 671
@@ -683,7 +688,7 @@ void efx_fini_rx_queue(struct efx_rx_queue *rx_queue)
683 688
684 /* Release RX buffers NB start at index 0 not current HW ptr */ 689 /* Release RX buffers NB start at index 0 not current HW ptr */
685 if (rx_queue->buffer) { 690 if (rx_queue->buffer) {
686 for (i = 0; i <= rx_queue->efx->type->rxd_ring_mask; i++) { 691 for (i = 0; i <= EFX_RXQ_MASK; i++) {
687 rx_buf = efx_rx_buffer(rx_queue, i); 692 rx_buf = efx_rx_buffer(rx_queue, i);
688 efx_fini_rx_buffer(rx_queue, rx_buf); 693 efx_fini_rx_buffer(rx_queue, rx_buf);
689 } 694 }
diff --git a/drivers/net/sfc/selftest.c b/drivers/net/sfc/selftest.c
index 817c7efc11e0..7a9386f97c42 100644
--- a/drivers/net/sfc/selftest.c
+++ b/drivers/net/sfc/selftest.c
@@ -24,10 +24,9 @@
24#include "efx.h" 24#include "efx.h"
25#include "falcon.h" 25#include "falcon.h"
26#include "selftest.h" 26#include "selftest.h"
27#include "boards.h"
28#include "workarounds.h" 27#include "workarounds.h"
29#include "spi.h" 28#include "spi.h"
30#include "falcon_io.h" 29#include "io.h"
31#include "mdio_10g.h" 30#include "mdio_10g.h"
32 31
33/* 32/*
@@ -527,7 +526,7 @@ efx_test_loopback(struct efx_tx_queue *tx_queue,
527 526
528 for (i = 0; i < 3; i++) { 527 for (i = 0; i < 3; i++) {
529 /* Determine how many packets to send */ 528 /* Determine how many packets to send */
530 state->packet_count = (efx->type->txd_ring_mask + 1) / 3; 529 state->packet_count = EFX_TXQ_SIZE / 3;
531 state->packet_count = min(1 << (i << 2), state->packet_count); 530 state->packet_count = min(1 << (i << 2), state->packet_count);
532 state->skbs = kzalloc(sizeof(state->skbs[0]) * 531 state->skbs = kzalloc(sizeof(state->skbs[0]) *
533 state->packet_count, GFP_KERNEL); 532 state->packet_count, GFP_KERNEL);
diff --git a/drivers/net/sfc/tenxpress.c b/drivers/net/sfc/tenxpress.c
index f4d509015f75..390b27b5ace9 100644
--- a/drivers/net/sfc/tenxpress.c
+++ b/drivers/net/sfc/tenxpress.c
@@ -14,8 +14,7 @@
14#include "mdio_10g.h" 14#include "mdio_10g.h"
15#include "falcon.h" 15#include "falcon.h"
16#include "phy.h" 16#include "phy.h"
17#include "falcon_hwdefs.h" 17#include "regs.h"
18#include "boards.h"
19#include "workarounds.h" 18#include "workarounds.h"
20#include "selftest.h" 19#include "selftest.h"
21 20
@@ -301,6 +300,7 @@ static int tenxpress_init(struct efx_nic *efx)
301static int tenxpress_phy_init(struct efx_nic *efx) 300static int tenxpress_phy_init(struct efx_nic *efx)
302{ 301{
303 struct tenxpress_phy_data *phy_data; 302 struct tenxpress_phy_data *phy_data;
303 u16 old_adv, adv;
304 int rc = 0; 304 int rc = 0;
305 305
306 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL); 306 phy_data = kzalloc(sizeof(*phy_data), GFP_KERNEL);
@@ -333,6 +333,15 @@ static int tenxpress_phy_init(struct efx_nic *efx)
333 if (rc < 0) 333 if (rc < 0)
334 goto fail; 334 goto fail;
335 335
336 /* Set pause advertising */
337 old_adv = efx_mdio_read(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE);
338 adv = ((old_adv & ~(ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM)) |
339 mii_advertise_flowctrl(efx->wanted_fc));
340 if (adv != old_adv) {
341 efx_mdio_write(efx, MDIO_MMD_AN, MDIO_AN_ADVERTISE, adv);
342 mdio45_nway_restart(&efx->mdio);
343 }
344
336 if (efx->phy_type == PHY_TYPE_SFT9001B) { 345 if (efx->phy_type == PHY_TYPE_SFT9001B) {
337 rc = device_create_file(&efx->pci_dev->dev, 346 rc = device_create_file(&efx->pci_dev->dev,
338 &dev_attr_phy_short_reach); 347 &dev_attr_phy_short_reach);
@@ -742,6 +751,7 @@ tenxpress_get_settings(struct efx_nic *efx, struct ethtool_cmd *ecmd)
742 751
743 mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa); 752 mdio45_ethtool_gset_npage(&efx->mdio, ecmd, adv, lpa);
744 753
754 ecmd->supported |= SUPPORTED_Pause | SUPPORTED_Asym_Pause;
745 if (efx->phy_type != PHY_TYPE_SFX7101) { 755 if (efx->phy_type != PHY_TYPE_SFX7101) {
746 ecmd->supported |= (SUPPORTED_100baseT_Full | 756 ecmd->supported |= (SUPPORTED_100baseT_Full |
747 SUPPORTED_1000baseT_Full); 757 SUPPORTED_1000baseT_Full);
diff --git a/drivers/net/sfc/tx.c b/drivers/net/sfc/tx.c
index 489c4de31447..303919a34df6 100644
--- a/drivers/net/sfc/tx.c
+++ b/drivers/net/sfc/tx.c
@@ -26,8 +26,7 @@
26 * The tx_queue descriptor ring fill-level must fall below this value 26 * The tx_queue descriptor ring fill-level must fall below this value
27 * before we restart the netif queue 27 * before we restart the netif queue
28 */ 28 */
29#define EFX_NETDEV_TX_THRESHOLD(_tx_queue) \ 29#define EFX_TXQ_THRESHOLD (EFX_TXQ_MASK / 2u)
30 (_tx_queue->efx->type->txd_ring_mask / 2u)
31 30
32/* We want to be able to nest calls to netif_stop_queue(), since each 31/* We want to be able to nest calls to netif_stop_queue(), since each
33 * channel can have an individual stop on the queue. 32 * channel can have an individual stop on the queue.
@@ -125,6 +124,24 @@ static void efx_tsoh_free(struct efx_tx_queue *tx_queue,
125} 124}
126 125
127 126
127static inline unsigned
128efx_max_tx_len(struct efx_nic *efx, dma_addr_t dma_addr)
129{
130 /* Depending on the NIC revision, we can use descriptor
131 * lengths up to 8K or 8K-1. However, since PCI Express
132 * devices must split read requests at 4K boundaries, there is
133 * little benefit from using descriptors that cross those
134 * boundaries and we keep things simple by not doing so.
135 */
136 unsigned len = (~dma_addr & 0xfff) + 1;
137
138 /* Work around hardware bug for unaligned buffers. */
139 if (EFX_WORKAROUND_5391(efx) && (dma_addr & 0xf))
140 len = min_t(unsigned, len, 512 - (dma_addr & 0xf));
141
142 return len;
143}
144
128/* 145/*
129 * Add a socket buffer to a TX queue 146 * Add a socket buffer to a TX queue
130 * 147 *
@@ -147,7 +164,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
147 skb_frag_t *fragment; 164 skb_frag_t *fragment;
148 struct page *page; 165 struct page *page;
149 int page_offset; 166 int page_offset;
150 unsigned int len, unmap_len = 0, fill_level, insert_ptr, misalign; 167 unsigned int len, unmap_len = 0, fill_level, insert_ptr;
151 dma_addr_t dma_addr, unmap_addr = 0; 168 dma_addr_t dma_addr, unmap_addr = 0;
152 unsigned int dma_len; 169 unsigned int dma_len;
153 bool unmap_single; 170 bool unmap_single;
@@ -171,7 +188,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
171 } 188 }
172 189
173 fill_level = tx_queue->insert_count - tx_queue->old_read_count; 190 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
174 q_space = efx->type->txd_ring_mask - 1 - fill_level; 191 q_space = EFX_TXQ_MASK - 1 - fill_level;
175 192
176 /* Map for DMA. Use pci_map_single rather than pci_map_page 193 /* Map for DMA. Use pci_map_single rather than pci_map_page
177 * since this is more efficient on machines with sparse 194 * since this is more efficient on machines with sparse
@@ -208,16 +225,14 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
208 &tx_queue->read_count; 225 &tx_queue->read_count;
209 fill_level = (tx_queue->insert_count 226 fill_level = (tx_queue->insert_count
210 - tx_queue->old_read_count); 227 - tx_queue->old_read_count);
211 q_space = (efx->type->txd_ring_mask - 1 - 228 q_space = EFX_TXQ_MASK - 1 - fill_level;
212 fill_level);
213 if (unlikely(q_space-- <= 0)) 229 if (unlikely(q_space-- <= 0))
214 goto stop; 230 goto stop;
215 smp_mb(); 231 smp_mb();
216 --tx_queue->stopped; 232 --tx_queue->stopped;
217 } 233 }
218 234
219 insert_ptr = (tx_queue->insert_count & 235 insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
220 efx->type->txd_ring_mask);
221 buffer = &tx_queue->buffer[insert_ptr]; 236 buffer = &tx_queue->buffer[insert_ptr];
222 efx_tsoh_free(tx_queue, buffer); 237 efx_tsoh_free(tx_queue, buffer);
223 EFX_BUG_ON_PARANOID(buffer->tsoh); 238 EFX_BUG_ON_PARANOID(buffer->tsoh);
@@ -226,14 +241,10 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
226 EFX_BUG_ON_PARANOID(!buffer->continuation); 241 EFX_BUG_ON_PARANOID(!buffer->continuation);
227 EFX_BUG_ON_PARANOID(buffer->unmap_len); 242 EFX_BUG_ON_PARANOID(buffer->unmap_len);
228 243
229 dma_len = (((~dma_addr) & efx->type->tx_dma_mask) + 1); 244 dma_len = efx_max_tx_len(efx, dma_addr);
230 if (likely(dma_len > len)) 245 if (likely(dma_len >= len))
231 dma_len = len; 246 dma_len = len;
232 247
233 misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
234 if (misalign && dma_len + misalign > 512)
235 dma_len = 512 - misalign;
236
237 /* Fill out per descriptor fields */ 248 /* Fill out per descriptor fields */
238 buffer->len = dma_len; 249 buffer->len = dma_len;
239 buffer->dma_addr = dma_addr; 250 buffer->dma_addr = dma_addr;
@@ -289,7 +300,7 @@ static netdev_tx_t efx_enqueue_skb(struct efx_tx_queue *tx_queue,
289 /* Work backwards until we hit the original insert pointer value */ 300 /* Work backwards until we hit the original insert pointer value */
290 while (tx_queue->insert_count != tx_queue->write_count) { 301 while (tx_queue->insert_count != tx_queue->write_count) {
291 --tx_queue->insert_count; 302 --tx_queue->insert_count;
292 insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask; 303 insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
293 buffer = &tx_queue->buffer[insert_ptr]; 304 buffer = &tx_queue->buffer[insert_ptr];
294 efx_dequeue_buffer(tx_queue, buffer); 305 efx_dequeue_buffer(tx_queue, buffer);
295 buffer->len = 0; 306 buffer->len = 0;
@@ -318,10 +329,9 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
318{ 329{
319 struct efx_nic *efx = tx_queue->efx; 330 struct efx_nic *efx = tx_queue->efx;
320 unsigned int stop_index, read_ptr; 331 unsigned int stop_index, read_ptr;
321 unsigned int mask = tx_queue->efx->type->txd_ring_mask;
322 332
323 stop_index = (index + 1) & mask; 333 stop_index = (index + 1) & EFX_TXQ_MASK;
324 read_ptr = tx_queue->read_count & mask; 334 read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
325 335
326 while (read_ptr != stop_index) { 336 while (read_ptr != stop_index) {
327 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr]; 337 struct efx_tx_buffer *buffer = &tx_queue->buffer[read_ptr];
@@ -338,7 +348,7 @@ static void efx_dequeue_buffers(struct efx_tx_queue *tx_queue,
338 buffer->len = 0; 348 buffer->len = 0;
339 349
340 ++tx_queue->read_count; 350 ++tx_queue->read_count;
341 read_ptr = tx_queue->read_count & mask; 351 read_ptr = tx_queue->read_count & EFX_TXQ_MASK;
342 } 352 }
343} 353}
344 354
@@ -391,7 +401,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
391 unsigned fill_level; 401 unsigned fill_level;
392 struct efx_nic *efx = tx_queue->efx; 402 struct efx_nic *efx = tx_queue->efx;
393 403
394 EFX_BUG_ON_PARANOID(index > efx->type->txd_ring_mask); 404 EFX_BUG_ON_PARANOID(index > EFX_TXQ_MASK);
395 405
396 efx_dequeue_buffers(tx_queue, index); 406 efx_dequeue_buffers(tx_queue, index);
397 407
@@ -401,7 +411,7 @@ void efx_xmit_done(struct efx_tx_queue *tx_queue, unsigned int index)
401 smp_mb(); 411 smp_mb();
402 if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) { 412 if (unlikely(tx_queue->stopped) && likely(efx->port_enabled)) {
403 fill_level = tx_queue->insert_count - tx_queue->read_count; 413 fill_level = tx_queue->insert_count - tx_queue->read_count;
404 if (fill_level < EFX_NETDEV_TX_THRESHOLD(tx_queue)) { 414 if (fill_level < EFX_TXQ_THRESHOLD) {
405 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx)); 415 EFX_BUG_ON_PARANOID(!efx_dev_registered(efx));
406 416
407 /* Do this under netif_tx_lock(), to avoid racing 417 /* Do this under netif_tx_lock(), to avoid racing
@@ -425,11 +435,11 @@ int efx_probe_tx_queue(struct efx_tx_queue *tx_queue)
425 EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue); 435 EFX_LOG(efx, "creating TX queue %d\n", tx_queue->queue);
426 436
427 /* Allocate software ring */ 437 /* Allocate software ring */
428 txq_size = (efx->type->txd_ring_mask + 1) * sizeof(*tx_queue->buffer); 438 txq_size = EFX_TXQ_SIZE * sizeof(*tx_queue->buffer);
429 tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL); 439 tx_queue->buffer = kzalloc(txq_size, GFP_KERNEL);
430 if (!tx_queue->buffer) 440 if (!tx_queue->buffer)
431 return -ENOMEM; 441 return -ENOMEM;
432 for (i = 0; i <= efx->type->txd_ring_mask; ++i) 442 for (i = 0; i <= EFX_TXQ_MASK; ++i)
433 tx_queue->buffer[i].continuation = true; 443 tx_queue->buffer[i].continuation = true;
434 444
435 /* Allocate hardware ring */ 445 /* Allocate hardware ring */
@@ -468,8 +478,7 @@ void efx_release_tx_buffers(struct efx_tx_queue *tx_queue)
468 478
469 /* Free any buffers left in the ring */ 479 /* Free any buffers left in the ring */
470 while (tx_queue->read_count != tx_queue->write_count) { 480 while (tx_queue->read_count != tx_queue->write_count) {
471 buffer = &tx_queue->buffer[tx_queue->read_count & 481 buffer = &tx_queue->buffer[tx_queue->read_count & EFX_TXQ_MASK];
472 tx_queue->efx->type->txd_ring_mask];
473 efx_dequeue_buffer(tx_queue, buffer); 482 efx_dequeue_buffer(tx_queue, buffer);
474 buffer->continuation = true; 483 buffer->continuation = true;
475 buffer->len = 0; 484 buffer->len = 0;
@@ -708,14 +717,14 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
708{ 717{
709 struct efx_tx_buffer *buffer; 718 struct efx_tx_buffer *buffer;
710 struct efx_nic *efx = tx_queue->efx; 719 struct efx_nic *efx = tx_queue->efx;
711 unsigned dma_len, fill_level, insert_ptr, misalign; 720 unsigned dma_len, fill_level, insert_ptr;
712 int q_space; 721 int q_space;
713 722
714 EFX_BUG_ON_PARANOID(len <= 0); 723 EFX_BUG_ON_PARANOID(len <= 0);
715 724
716 fill_level = tx_queue->insert_count - tx_queue->old_read_count; 725 fill_level = tx_queue->insert_count - tx_queue->old_read_count;
717 /* -1 as there is no way to represent all descriptors used */ 726 /* -1 as there is no way to represent all descriptors used */
718 q_space = efx->type->txd_ring_mask - 1 - fill_level; 727 q_space = EFX_TXQ_MASK - 1 - fill_level;
719 728
720 while (1) { 729 while (1) {
721 if (unlikely(q_space-- <= 0)) { 730 if (unlikely(q_space-- <= 0)) {
@@ -731,7 +740,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
731 *(volatile unsigned *)&tx_queue->read_count; 740 *(volatile unsigned *)&tx_queue->read_count;
732 fill_level = (tx_queue->insert_count 741 fill_level = (tx_queue->insert_count
733 - tx_queue->old_read_count); 742 - tx_queue->old_read_count);
734 q_space = efx->type->txd_ring_mask - 1 - fill_level; 743 q_space = EFX_TXQ_MASK - 1 - fill_level;
735 if (unlikely(q_space-- <= 0)) { 744 if (unlikely(q_space-- <= 0)) {
736 *final_buffer = NULL; 745 *final_buffer = NULL;
737 return 1; 746 return 1;
@@ -740,13 +749,13 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
740 --tx_queue->stopped; 749 --tx_queue->stopped;
741 } 750 }
742 751
743 insert_ptr = tx_queue->insert_count & efx->type->txd_ring_mask; 752 insert_ptr = tx_queue->insert_count & EFX_TXQ_MASK;
744 buffer = &tx_queue->buffer[insert_ptr]; 753 buffer = &tx_queue->buffer[insert_ptr];
745 ++tx_queue->insert_count; 754 ++tx_queue->insert_count;
746 755
747 EFX_BUG_ON_PARANOID(tx_queue->insert_count - 756 EFX_BUG_ON_PARANOID(tx_queue->insert_count -
748 tx_queue->read_count > 757 tx_queue->read_count >
749 efx->type->txd_ring_mask); 758 EFX_TXQ_MASK);
750 759
751 efx_tsoh_free(tx_queue, buffer); 760 efx_tsoh_free(tx_queue, buffer);
752 EFX_BUG_ON_PARANOID(buffer->len); 761 EFX_BUG_ON_PARANOID(buffer->len);
@@ -757,12 +766,7 @@ static int efx_tx_queue_insert(struct efx_tx_queue *tx_queue,
757 766
758 buffer->dma_addr = dma_addr; 767 buffer->dma_addr = dma_addr;
759 768
760 /* Ensure we do not cross a boundary unsupported by H/W */ 769 dma_len = efx_max_tx_len(efx, dma_addr);
761 dma_len = (~dma_addr & efx->type->tx_dma_mask) + 1;
762
763 misalign = (unsigned)dma_addr & efx->type->bug5391_mask;
764 if (misalign && dma_len + misalign > 512)
765 dma_len = 512 - misalign;
766 770
767 /* If there is enough space to send then do so */ 771 /* If there is enough space to send then do so */
768 if (dma_len >= len) 772 if (dma_len >= len)
@@ -792,8 +796,7 @@ static void efx_tso_put_header(struct efx_tx_queue *tx_queue,
792{ 796{
793 struct efx_tx_buffer *buffer; 797 struct efx_tx_buffer *buffer;
794 798
795 buffer = &tx_queue->buffer[tx_queue->insert_count & 799 buffer = &tx_queue->buffer[tx_queue->insert_count & EFX_TXQ_MASK];
796 tx_queue->efx->type->txd_ring_mask];
797 efx_tsoh_free(tx_queue, buffer); 800 efx_tsoh_free(tx_queue, buffer);
798 EFX_BUG_ON_PARANOID(buffer->len); 801 EFX_BUG_ON_PARANOID(buffer->len);
799 EFX_BUG_ON_PARANOID(buffer->unmap_len); 802 EFX_BUG_ON_PARANOID(buffer->unmap_len);
@@ -818,7 +821,7 @@ static void efx_enqueue_unwind(struct efx_tx_queue *tx_queue)
818 while (tx_queue->insert_count != tx_queue->write_count) { 821 while (tx_queue->insert_count != tx_queue->write_count) {
819 --tx_queue->insert_count; 822 --tx_queue->insert_count;
820 buffer = &tx_queue->buffer[tx_queue->insert_count & 823 buffer = &tx_queue->buffer[tx_queue->insert_count &
821 tx_queue->efx->type->txd_ring_mask]; 824 EFX_TXQ_MASK];
822 efx_tsoh_free(tx_queue, buffer); 825 efx_tsoh_free(tx_queue, buffer);
823 EFX_BUG_ON_PARANOID(buffer->skb); 826 EFX_BUG_ON_PARANOID(buffer->skb);
824 buffer->len = 0; 827 buffer->len = 0;
@@ -1135,7 +1138,7 @@ static void efx_fini_tso(struct efx_tx_queue *tx_queue)
1135 unsigned i; 1138 unsigned i;
1136 1139
1137 if (tx_queue->buffer) { 1140 if (tx_queue->buffer) {
1138 for (i = 0; i <= tx_queue->efx->type->txd_ring_mask; ++i) 1141 for (i = 0; i <= EFX_TXQ_MASK; ++i)
1139 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]); 1142 efx_tsoh_free(tx_queue, &tx_queue->buffer[i]);
1140 } 1143 }
1141 1144
diff --git a/drivers/net/sfc/workarounds.h b/drivers/net/sfc/workarounds.h
index c821c15445a0..325029949488 100644
--- a/drivers/net/sfc/workarounds.h
+++ b/drivers/net/sfc/workarounds.h
@@ -41,6 +41,8 @@
41 41
42/* Spurious parity errors in TSORT buffers */ 42/* Spurious parity errors in TSORT buffers */
43#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A 43#define EFX_WORKAROUND_5129 EFX_WORKAROUND_FALCON_A
44/* Unaligned read request >512 bytes after aligning may break TSORT */
45#define EFX_WORKAROUND_5391 EFX_WORKAROUND_FALCON_A
44/* iSCSI parsing errors */ 46/* iSCSI parsing errors */
45#define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A 47#define EFX_WORKAROUND_5583 EFX_WORKAROUND_FALCON_A
46/* RX events go missing */ 48/* RX events go missing */
diff --git a/drivers/net/sgiseeq.c b/drivers/net/sgiseeq.c
index f4dfd1f679a9..6b364a6c6c60 100644
--- a/drivers/net/sgiseeq.c
+++ b/drivers/net/sgiseeq.c
@@ -365,11 +365,10 @@ static inline void sgiseeq_rx(struct net_device *dev, struct sgiseeq_private *sp
365 } 365 }
366 skb_reserve(newskb, 2); 366 skb_reserve(newskb, 2);
367 } else { 367 } else {
368 skb = netdev_alloc_skb(dev, len + 2); 368 skb = netdev_alloc_skb_ip_align(dev, len);
369 if (skb) { 369 if (skb)
370 skb_reserve(skb, 2);
371 skb_copy_to_linear_data(skb, rd->skb->data, len); 370 skb_copy_to_linear_data(skb, rd->skb->data, len);
372 } 371
373 newskb = rd->skb; 372 newskb = rd->skb;
374 } 373 }
375memory_squeeze: 374memory_squeeze:
diff --git a/drivers/net/sh_eth.c b/drivers/net/sh_eth.c
index 528b912a4b0d..5783f50d18e9 100644
--- a/drivers/net/sh_eth.c
+++ b/drivers/net/sh_eth.c
@@ -30,6 +30,7 @@
30#include <linux/phy.h> 30#include <linux/phy.h>
31#include <linux/cache.h> 31#include <linux/cache.h>
32#include <linux/io.h> 32#include <linux/io.h>
33#include <linux/pm_runtime.h>
33#include <asm/cacheflush.h> 34#include <asm/cacheflush.h>
34 35
35#include "sh_eth.h" 36#include "sh_eth.h"
@@ -299,16 +300,20 @@ static void update_mac_address(struct net_device *ndev)
299 * When you want use this device, you must set MAC address in bootloader. 300 * When you want use this device, you must set MAC address in bootloader.
300 * 301 *
301 */ 302 */
302static void read_mac_address(struct net_device *ndev) 303static void read_mac_address(struct net_device *ndev, unsigned char *mac)
303{ 304{
304 u32 ioaddr = ndev->base_addr; 305 u32 ioaddr = ndev->base_addr;
305 306
306 ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24); 307 if (mac[0] || mac[1] || mac[2] || mac[3] || mac[4] || mac[5]) {
307 ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF; 308 memcpy(ndev->dev_addr, mac, 6);
308 ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF; 309 } else {
309 ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF); 310 ndev->dev_addr[0] = (ctrl_inl(ioaddr + MAHR) >> 24);
310 ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF; 311 ndev->dev_addr[1] = (ctrl_inl(ioaddr + MAHR) >> 16) & 0xFF;
311 ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF); 312 ndev->dev_addr[2] = (ctrl_inl(ioaddr + MAHR) >> 8) & 0xFF;
313 ndev->dev_addr[3] = (ctrl_inl(ioaddr + MAHR) & 0xFF);
314 ndev->dev_addr[4] = (ctrl_inl(ioaddr + MALR) >> 8) & 0xFF;
315 ndev->dev_addr[5] = (ctrl_inl(ioaddr + MALR) & 0xFF);
316 }
312} 317}
313 318
314struct bb_info { 319struct bb_info {
@@ -1009,6 +1014,8 @@ static int sh_eth_open(struct net_device *ndev)
1009 int ret = 0; 1014 int ret = 0;
1010 struct sh_eth_private *mdp = netdev_priv(ndev); 1015 struct sh_eth_private *mdp = netdev_priv(ndev);
1011 1016
1017 pm_runtime_get_sync(&mdp->pdev->dev);
1018
1012 ret = request_irq(ndev->irq, &sh_eth_interrupt, 1019 ret = request_irq(ndev->irq, &sh_eth_interrupt,
1013#if defined(CONFIG_CPU_SUBTYPE_SH7763) || defined(CONFIG_CPU_SUBTYPE_SH7764) 1020#if defined(CONFIG_CPU_SUBTYPE_SH7763) || defined(CONFIG_CPU_SUBTYPE_SH7764)
1014 IRQF_SHARED, 1021 IRQF_SHARED,
@@ -1045,6 +1052,7 @@ static int sh_eth_open(struct net_device *ndev)
1045 1052
1046out_free_irq: 1053out_free_irq:
1047 free_irq(ndev->irq, ndev); 1054 free_irq(ndev->irq, ndev);
1055 pm_runtime_put_sync(&mdp->pdev->dev);
1048 return ret; 1056 return ret;
1049} 1057}
1050 1058
@@ -1176,6 +1184,8 @@ static int sh_eth_close(struct net_device *ndev)
1176 ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE; 1184 ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
1177 dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma); 1185 dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);
1178 1186
1187 pm_runtime_put_sync(&mdp->pdev->dev);
1188
1179 return 0; 1189 return 0;
1180} 1190}
1181 1191
@@ -1184,6 +1194,8 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
1184 struct sh_eth_private *mdp = netdev_priv(ndev); 1194 struct sh_eth_private *mdp = netdev_priv(ndev);
1185 u32 ioaddr = ndev->base_addr; 1195 u32 ioaddr = ndev->base_addr;
1186 1196
1197 pm_runtime_get_sync(&mdp->pdev->dev);
1198
1187 mdp->stats.tx_dropped += ctrl_inl(ioaddr + TROCR); 1199 mdp->stats.tx_dropped += ctrl_inl(ioaddr + TROCR);
1188 ctrl_outl(0, ioaddr + TROCR); /* (write clear) */ 1200 ctrl_outl(0, ioaddr + TROCR); /* (write clear) */
1189 mdp->stats.collisions += ctrl_inl(ioaddr + CDCR); 1201 mdp->stats.collisions += ctrl_inl(ioaddr + CDCR);
@@ -1199,6 +1211,8 @@ static struct net_device_stats *sh_eth_get_stats(struct net_device *ndev)
1199 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR); 1211 mdp->stats.tx_carrier_errors += ctrl_inl(ioaddr + CNDCR);
1200 ctrl_outl(0, ioaddr + CNDCR); /* (write clear) */ 1212 ctrl_outl(0, ioaddr + CNDCR); /* (write clear) */
1201#endif 1213#endif
1214 pm_runtime_put_sync(&mdp->pdev->dev);
1215
1202 return &mdp->stats; 1216 return &mdp->stats;
1203} 1217}
1204 1218
@@ -1407,6 +1421,9 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1407 1421
1408 mdp = netdev_priv(ndev); 1422 mdp = netdev_priv(ndev);
1409 spin_lock_init(&mdp->lock); 1423 spin_lock_init(&mdp->lock);
1424 mdp->pdev = pdev;
1425 pm_runtime_enable(&pdev->dev);
1426 pm_runtime_resume(&pdev->dev);
1410 1427
1411 pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data); 1428 pd = (struct sh_eth_plat_data *)(pdev->dev.platform_data);
1412 /* get PHY ID */ 1429 /* get PHY ID */
@@ -1428,7 +1445,7 @@ static int sh_eth_drv_probe(struct platform_device *pdev)
1428 mdp->post_fw = POST_FW >> (devno << 1); 1445 mdp->post_fw = POST_FW >> (devno << 1);
1429 1446
1430 /* read and set MAC address */ 1447 /* read and set MAC address */
1431 read_mac_address(ndev); 1448 read_mac_address(ndev, pd->mac_addr);
1432 1449
1433 /* First device only init */ 1450 /* First device only init */
1434 if (!devno) { 1451 if (!devno) {
@@ -1482,18 +1499,37 @@ static int sh_eth_drv_remove(struct platform_device *pdev)
1482 sh_mdio_release(ndev); 1499 sh_mdio_release(ndev);
1483 unregister_netdev(ndev); 1500 unregister_netdev(ndev);
1484 flush_scheduled_work(); 1501 flush_scheduled_work();
1485 1502 pm_runtime_disable(&pdev->dev);
1486 free_netdev(ndev); 1503 free_netdev(ndev);
1487 platform_set_drvdata(pdev, NULL); 1504 platform_set_drvdata(pdev, NULL);
1488 1505
1489 return 0; 1506 return 0;
1490} 1507}
1491 1508
1509static int sh_eth_runtime_nop(struct device *dev)
1510{
1511 /*
1512 * Runtime PM callback shared between ->runtime_suspend()
1513 * and ->runtime_resume(). Simply returns success.
1514 *
1515 * This driver re-initializes all registers after
1516 * pm_runtime_get_sync() anyway so there is no need
1517 * to save and restore registers here.
1518 */
1519 return 0;
1520}
1521
1522static struct dev_pm_ops sh_eth_dev_pm_ops = {
1523 .runtime_suspend = sh_eth_runtime_nop,
1524 .runtime_resume = sh_eth_runtime_nop,
1525};
1526
1492static struct platform_driver sh_eth_driver = { 1527static struct platform_driver sh_eth_driver = {
1493 .probe = sh_eth_drv_probe, 1528 .probe = sh_eth_drv_probe,
1494 .remove = sh_eth_drv_remove, 1529 .remove = sh_eth_drv_remove,
1495 .driver = { 1530 .driver = {
1496 .name = CARDNAME, 1531 .name = CARDNAME,
1532 .pm = &sh_eth_dev_pm_ops,
1497 }, 1533 },
1498}; 1534};
1499 1535
diff --git a/drivers/net/sh_eth.h b/drivers/net/sh_eth.h
index ba151f86ae7b..8b47763958f2 100644
--- a/drivers/net/sh_eth.h
+++ b/drivers/net/sh_eth.h
@@ -703,6 +703,7 @@ struct sh_eth_cpu_data {
703}; 703};
704 704
705struct sh_eth_private { 705struct sh_eth_private {
706 struct platform_device *pdev;
706 struct sh_eth_cpu_data *cd; 707 struct sh_eth_cpu_data *cd;
707 dma_addr_t rx_desc_dma; 708 dma_addr_t rx_desc_dma;
708 dma_addr_t tx_desc_dma; 709 dma_addr_t tx_desc_dma;
diff --git a/drivers/net/sis190.c b/drivers/net/sis190.c
index 7cc9898f4e00..31233b4c44a0 100644
--- a/drivers/net/sis190.c
+++ b/drivers/net/sis190.c
@@ -536,13 +536,12 @@ static bool sis190_try_rx_copy(struct sis190_private *tp,
536 if (pkt_size >= rx_copybreak) 536 if (pkt_size >= rx_copybreak)
537 goto out; 537 goto out;
538 538
539 skb = netdev_alloc_skb(tp->dev, pkt_size + 2); 539 skb = netdev_alloc_skb_ip_align(tp->dev, pkt_size);
540 if (!skb) 540 if (!skb)
541 goto out; 541 goto out;
542 542
543 pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz, 543 pci_dma_sync_single_for_cpu(tp->pci_dev, addr, tp->rx_buf_sz,
544 PCI_DMA_FROMDEVICE); 544 PCI_DMA_FROMDEVICE);
545 skb_reserve(skb, 2);
546 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size); 545 skb_copy_to_linear_data(skb, sk_buff[0]->data, pkt_size);
547 *sk_buff = skb; 546 *sk_buff = skb;
548 done = true; 547 done = true;
diff --git a/drivers/net/skge.c b/drivers/net/skge.c
index 01f6811f1324..be28ebb3811c 100644
--- a/drivers/net/skge.c
+++ b/drivers/net/skge.c
@@ -3070,11 +3070,10 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
3070 goto error; 3070 goto error;
3071 3071
3072 if (len < RX_COPY_THRESHOLD) { 3072 if (len < RX_COPY_THRESHOLD) {
3073 skb = netdev_alloc_skb(dev, len + 2); 3073 skb = netdev_alloc_skb_ip_align(dev, len);
3074 if (!skb) 3074 if (!skb)
3075 goto resubmit; 3075 goto resubmit;
3076 3076
3077 skb_reserve(skb, 2);
3078 pci_dma_sync_single_for_cpu(skge->hw->pdev, 3077 pci_dma_sync_single_for_cpu(skge->hw->pdev,
3079 pci_unmap_addr(e, mapaddr), 3078 pci_unmap_addr(e, mapaddr),
3080 len, PCI_DMA_FROMDEVICE); 3079 len, PCI_DMA_FROMDEVICE);
@@ -3085,11 +3084,11 @@ static struct sk_buff *skge_rx_get(struct net_device *dev,
3085 skge_rx_reuse(e, skge->rx_buf_size); 3084 skge_rx_reuse(e, skge->rx_buf_size);
3086 } else { 3085 } else {
3087 struct sk_buff *nskb; 3086 struct sk_buff *nskb;
3088 nskb = netdev_alloc_skb(dev, skge->rx_buf_size + NET_IP_ALIGN); 3087
3088 nskb = netdev_alloc_skb_ip_align(dev, skge->rx_buf_size);
3089 if (!nskb) 3089 if (!nskb)
3090 goto resubmit; 3090 goto resubmit;
3091 3091
3092 skb_reserve(nskb, NET_IP_ALIGN);
3093 pci_unmap_single(skge->hw->pdev, 3092 pci_unmap_single(skge->hw->pdev,
3094 pci_unmap_addr(e, mapaddr), 3093 pci_unmap_addr(e, mapaddr),
3095 pci_unmap_len(e, maplen), 3094 pci_unmap_len(e, maplen),
diff --git a/drivers/net/sky2.c b/drivers/net/sky2.c
index 6a10d7ba5877..a3d99913f184 100644
--- a/drivers/net/sky2.c
+++ b/drivers/net/sky2.c
@@ -50,7 +50,7 @@
50#include "sky2.h" 50#include "sky2.h"
51 51
52#define DRV_NAME "sky2" 52#define DRV_NAME "sky2"
53#define DRV_VERSION "1.25" 53#define DRV_VERSION "1.26"
54#define PFX DRV_NAME " " 54#define PFX DRV_NAME " "
55 55
56/* 56/*
@@ -102,6 +102,7 @@ MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
102static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = { 102static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
103 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */ 103 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9000) }, /* SK-9Sxx */
104 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */ 104 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E00) }, /* SK-9Exx */
105 { PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, 0x9E01) }, /* SK-9E21M */
105 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */ 106 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4b00) }, /* DGE-560T */
106 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */ 107 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4001) }, /* DGE-550SX */
107 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) }, /* DGE-560SX */ 108 { PCI_DEVICE(PCI_VENDOR_ID_DLINK, 0x4B02) }, /* DGE-560SX */
@@ -139,6 +140,7 @@ static DEFINE_PCI_DEVICE_TABLE(sky2_id_table) = {
139 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */ 140 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x436D) }, /* 88E8055 */
140 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */ 141 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4370) }, /* 88E8075 */
141 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */ 142 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4380) }, /* 88E8057 */
143 { PCI_DEVICE(PCI_VENDOR_ID_MARVELL, 0x4381) }, /* 88E8059 */
142 { 0 } 144 { 0 }
143}; 145};
144 146
@@ -602,6 +604,16 @@ static void sky2_phy_init(struct sky2_hw *hw, unsigned port)
602 /* apply workaround for integrated resistors calibration */ 604 /* apply workaround for integrated resistors calibration */
603 gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17); 605 gm_phy_write(hw, port, PHY_MARV_PAGE_ADDR, 17);
604 gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60); 606 gm_phy_write(hw, port, PHY_MARV_PAGE_DATA, 0x3f60);
607 } else if (hw->chip_id == CHIP_ID_YUKON_OPT && hw->chip_rev == 0) {
608 /* apply fixes in PHY AFE */
609 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0x00ff);
610
611 /* apply RDAC termination workaround */
612 gm_phy_write(hw, port, 24, 0x2800);
613 gm_phy_write(hw, port, 23, 0x2001);
614
615 /* set page register back to 0 */
616 gm_phy_write(hw, port, PHY_MARV_EXT_ADR, 0);
605 } else if (hw->chip_id != CHIP_ID_YUKON_EX && 617 } else if (hw->chip_id != CHIP_ID_YUKON_EX &&
606 hw->chip_id < CHIP_ID_YUKON_SUPR) { 618 hw->chip_id < CHIP_ID_YUKON_SUPR) {
607 /* no effect on Yukon-XL */ 619 /* no effect on Yukon-XL */
@@ -786,8 +798,7 @@ static void sky2_set_tx_stfwd(struct sky2_hw *hw, unsigned port)
786 798
787 if ( (hw->chip_id == CHIP_ID_YUKON_EX && 799 if ( (hw->chip_id == CHIP_ID_YUKON_EX &&
788 hw->chip_rev != CHIP_REV_YU_EX_A0) || 800 hw->chip_rev != CHIP_REV_YU_EX_A0) ||
789 hw->chip_id == CHIP_ID_YUKON_FE_P || 801 hw->chip_id >= CHIP_ID_YUKON_FE_P) {
790 hw->chip_id == CHIP_ID_YUKON_SUPR) {
791 /* Yukon-Extreme B0 and further Extreme devices */ 802 /* Yukon-Extreme B0 and further Extreme devices */
792 /* enable Store & Forward mode for TX */ 803 /* enable Store & Forward mode for TX */
793 804
@@ -925,8 +936,14 @@ static void sky2_mac_init(struct sky2_hw *hw, unsigned port)
925 936
926 /* On chips without ram buffer, pause is controled by MAC level */ 937 /* On chips without ram buffer, pause is controled by MAC level */
927 if (!(hw->flags & SKY2_HW_RAM_BUFFER)) { 938 if (!(hw->flags & SKY2_HW_RAM_BUFFER)) {
928 sky2_write8(hw, SK_REG(port, RX_GMF_LP_THR), 768/8); 939 /* Pause threshold is scaled by 8 in bytes */
929 sky2_write8(hw, SK_REG(port, RX_GMF_UP_THR), 1024/8); 940 if (hw->chip_id == CHIP_ID_YUKON_FE_P
941 && hw->chip_rev == CHIP_REV_YU_FE2_A0)
942 reg = 1568 / 8;
943 else
944 reg = 1024 / 8;
945 sky2_write16(hw, SK_REG(port, RX_GMF_UP_THR), reg);
946 sky2_write16(hw, SK_REG(port, RX_GMF_LP_THR), 768 / 8);
930 947
931 sky2_set_tx_stfwd(hw, port); 948 sky2_set_tx_stfwd(hw, port);
932 } 949 }
@@ -1397,6 +1414,31 @@ static int sky2_rx_start(struct sky2_port *sky2)
1397 1414
1398 /* Tell chip about available buffers */ 1415 /* Tell chip about available buffers */
1399 sky2_rx_update(sky2, rxq); 1416 sky2_rx_update(sky2, rxq);
1417
1418 if (hw->chip_id == CHIP_ID_YUKON_EX ||
1419 hw->chip_id == CHIP_ID_YUKON_SUPR) {
1420 /*
1421 * Disable flushing of non ASF packets;
1422 * must be done after initializing the BMUs;
1423 * drivers without ASF support should do this too, otherwise
1424 * it may happen that they cannot run on ASF devices;
1425 * remember that the MAC FIFO isn't reset during initialization.
1426 */
1427 sky2_write32(hw, SK_REG(sky2->port, RX_GMF_CTRL_T), RX_MACSEC_FLUSH_OFF);
1428 }
1429
1430 if (hw->chip_id >= CHIP_ID_YUKON_SUPR) {
1431 /* Enable RX Home Address & Routing Header checksum fix */
1432 sky2_write16(hw, SK_REG(sky2->port, RX_GMF_FL_CTRL),
1433 RX_IPV6_SA_MOB_ENA | RX_IPV6_DA_MOB_ENA);
1434
1435 /* Enable TX Home Address & Routing Header checksum fix */
1436 sky2_write32(hw, Q_ADDR(txqaddr[sky2->port], Q_TEST),
1437 TBMU_TEST_HOME_ADD_FIX_EN | TBMU_TEST_ROUTING_ADD_FIX_EN);
1438 }
1439
1440
1441
1400 return 0; 1442 return 0;
1401nomem: 1443nomem:
1402 sky2_rx_clean(sky2); 1444 sky2_rx_clean(sky2);
@@ -2096,6 +2138,25 @@ out:
2096 spin_unlock(&sky2->phy_lock); 2138 spin_unlock(&sky2->phy_lock);
2097} 2139}
2098 2140
2141/* Special quick link interrupt (Yukon-2 Optima only) */
2142static void sky2_qlink_intr(struct sky2_hw *hw)
2143{
2144 struct sky2_port *sky2 = netdev_priv(hw->dev[0]);
2145 u32 imask;
2146 u16 phy;
2147
2148 /* disable irq */
2149 imask = sky2_read32(hw, B0_IMSK);
2150 imask &= ~Y2_IS_PHY_QLNK;
2151 sky2_write32(hw, B0_IMSK, imask);
2152
2153 /* reset PHY Link Detect */
2154 phy = sky2_pci_read16(hw, PSM_CONFIG_REG4);
2155 sky2_pci_write16(hw, PSM_CONFIG_REG4, phy | 1);
2156
2157 sky2_link_up(sky2);
2158}
2159
2099/* Transmit timeout is only called if we are running, carrier is up 2160/* Transmit timeout is only called if we are running, carrier is up
2100 * and tx queue is full (stopped). 2161 * and tx queue is full (stopped).
2101 */ 2162 */
@@ -2191,9 +2252,8 @@ static struct sk_buff *receive_copy(struct sky2_port *sky2,
2191{ 2252{
2192 struct sk_buff *skb; 2253 struct sk_buff *skb;
2193 2254
2194 skb = netdev_alloc_skb(sky2->netdev, length + 2); 2255 skb = netdev_alloc_skb_ip_align(sky2->netdev, length);
2195 if (likely(skb)) { 2256 if (likely(skb)) {
2196 skb_reserve(skb, 2);
2197 pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr, 2257 pci_dma_sync_single_for_cpu(sky2->hw->pdev, re->data_addr,
2198 length, PCI_DMA_FROMDEVICE); 2258 length, PCI_DMA_FROMDEVICE);
2199 skb_copy_from_linear_data(re->skb, skb->data, length); 2259 skb_copy_from_linear_data(re->skb, skb->data, length);
@@ -2766,6 +2826,9 @@ static int sky2_poll(struct napi_struct *napi, int work_limit)
2766 if (status & Y2_IS_IRQ_PHY2) 2826 if (status & Y2_IS_IRQ_PHY2)
2767 sky2_phy_intr(hw, 1); 2827 sky2_phy_intr(hw, 1);
2768 2828
2829 if (status & Y2_IS_PHY_QLNK)
2830 sky2_qlink_intr(hw);
2831
2769 while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) { 2832 while ((idx = sky2_read16(hw, STAT_PUT_IDX)) != hw->st_idx) {
2770 work_done += sky2_status_intr(hw, work_limit - work_done, idx); 2833 work_done += sky2_status_intr(hw, work_limit - work_done, idx);
2771 2834
@@ -2815,6 +2878,7 @@ static u32 sky2_mhz(const struct sky2_hw *hw)
2815 case CHIP_ID_YUKON_EX: 2878 case CHIP_ID_YUKON_EX:
2816 case CHIP_ID_YUKON_SUPR: 2879 case CHIP_ID_YUKON_SUPR:
2817 case CHIP_ID_YUKON_UL_2: 2880 case CHIP_ID_YUKON_UL_2:
2881 case CHIP_ID_YUKON_OPT:
2818 return 125; 2882 return 125;
2819 2883
2820 case CHIP_ID_YUKON_FE: 2884 case CHIP_ID_YUKON_FE:
@@ -2904,6 +2968,7 @@ static int __devinit sky2_init(struct sky2_hw *hw)
2904 break; 2968 break;
2905 2969
2906 case CHIP_ID_YUKON_UL_2: 2970 case CHIP_ID_YUKON_UL_2:
2971 case CHIP_ID_YUKON_OPT:
2907 hw->flags = SKY2_HW_GIGABIT 2972 hw->flags = SKY2_HW_GIGABIT
2908 | SKY2_HW_ADV_POWER_CTL; 2973 | SKY2_HW_ADV_POWER_CTL;
2909 break; 2974 break;
@@ -2986,6 +3051,52 @@ static void sky2_reset(struct sky2_hw *hw)
2986 sky2_write16(hw, SK_REG(i, GMAC_CTRL), 3051 sky2_write16(hw, SK_REG(i, GMAC_CTRL),
2987 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON 3052 GMC_BYP_MACSECRX_ON | GMC_BYP_MACSECTX_ON
2988 | GMC_BYP_RETR_ON); 3053 | GMC_BYP_RETR_ON);
3054
3055 }
3056
3057 if (hw->chip_id == CHIP_ID_YUKON_SUPR && hw->chip_rev > CHIP_REV_YU_SU_B0) {
3058 /* enable MACSec clock gating */
3059 sky2_pci_write32(hw, PCI_DEV_REG3, P_CLK_MACSEC_DIS);
3060 }
3061
3062 if (hw->chip_id == CHIP_ID_YUKON_OPT) {
3063 u16 reg;
3064 u32 msk;
3065
3066 if (hw->chip_rev == 0) {
3067 /* disable PCI-E PHY power down (set PHY reg 0x80, bit 7 */
3068 sky2_write32(hw, Y2_PEX_PHY_DATA, (0x80UL << 16) | (1 << 7));
3069
3070 /* set PHY Link Detect Timer to 1.1 second (11x 100ms) */
3071 reg = 10;
3072 } else {
3073 /* set PHY Link Detect Timer to 0.4 second (4x 100ms) */
3074 reg = 3;
3075 }
3076
3077 reg <<= PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE;
3078
3079 /* reset PHY Link Detect */
3080 sky2_pci_write16(hw, PSM_CONFIG_REG4,
3081 reg | PSM_CONFIG_REG4_RST_PHY_LINK_DETECT);
3082 sky2_pci_write16(hw, PSM_CONFIG_REG4, reg);
3083
3084
3085 /* enable PHY Quick Link */
3086 msk = sky2_read32(hw, B0_IMSK);
3087 msk |= Y2_IS_PHY_QLNK;
3088 sky2_write32(hw, B0_IMSK, msk);
3089
3090 /* check if PSMv2 was running before */
3091 reg = sky2_pci_read16(hw, PSM_CONFIG_REG3);
3092 if (reg & PCI_EXP_LNKCTL_ASPMC) {
3093 int cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
3094 /* restore the PCIe Link Control register */
3095 sky2_pci_write16(hw, cap + PCI_EXP_LNKCTL, reg);
3096 }
3097
3098 /* re-enable PEX PM in PEX PHY debug reg. 8 (clear bit 12) */
3099 sky2_write32(hw, Y2_PEX_PHY_DATA, PEX_DB_ACCESS | (0x08UL << 16));
2989 } 3100 }
2990 3101
2991 /* Clear I2C IRQ noise */ 3102 /* Clear I2C IRQ noise */
@@ -4406,9 +4517,11 @@ static const char *sky2_name(u8 chipid, char *buf, int sz)
4406 "FE+", /* 0xb8 */ 4517 "FE+", /* 0xb8 */
4407 "Supreme", /* 0xb9 */ 4518 "Supreme", /* 0xb9 */
4408 "UL 2", /* 0xba */ 4519 "UL 2", /* 0xba */
4520 "Unknown", /* 0xbb */
4521 "Optima", /* 0xbc */
4409 }; 4522 };
4410 4523
4411 if (chipid >= CHIP_ID_YUKON_XL && chipid < CHIP_ID_YUKON_UL_2) 4524 if (chipid >= CHIP_ID_YUKON_XL && chipid < CHIP_ID_YUKON_OPT)
4412 strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz); 4525 strncpy(buf, name[chipid - CHIP_ID_YUKON_XL], sz);
4413 else 4526 else
4414 snprintf(buf, sz, "(chip %#x)", chipid); 4527 snprintf(buf, sz, "(chip %#x)", chipid);
diff --git a/drivers/net/sky2.h b/drivers/net/sky2.h
index ed54129698b4..365d79c7d834 100644
--- a/drivers/net/sky2.h
+++ b/drivers/net/sky2.h
@@ -16,6 +16,13 @@ enum {
16 PCI_DEV_REG5 = 0x88, 16 PCI_DEV_REG5 = 0x88,
17 PCI_CFG_REG_0 = 0x90, 17 PCI_CFG_REG_0 = 0x90,
18 PCI_CFG_REG_1 = 0x94, 18 PCI_CFG_REG_1 = 0x94,
19
20 PSM_CONFIG_REG0 = 0x98,
21 PSM_CONFIG_REG1 = 0x9C,
22 PSM_CONFIG_REG2 = 0x160,
23 PSM_CONFIG_REG3 = 0x164,
24 PSM_CONFIG_REG4 = 0x168,
25
19}; 26};
20 27
21/* Yukon-2 */ 28/* Yukon-2 */
@@ -48,6 +55,37 @@ enum pci_dev_reg_2 {
48 PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */ 55 PCI_USEDATA64 = 1<<0, /* Use 64Bit Data bus ext */
49}; 56};
50 57
58/* PCI_OUR_REG_3 32 bit Our Register 3 (Yukon-ECU only) */
59enum pci_dev_reg_3 {
60 P_CLK_ASF_REGS_DIS = 1<<18,/* Disable Clock ASF (Yukon-Ext.) */
61 P_CLK_COR_REGS_D0_DIS = 1<<17,/* Disable Clock Core Regs D0 */
62 P_CLK_MACSEC_DIS = 1<<17,/* Disable Clock MACSec (Yukon-Ext.) */
63 P_CLK_PCI_REGS_D0_DIS = 1<<16,/* Disable Clock PCI Regs D0 */
64 P_CLK_COR_YTB_ARB_DIS = 1<<15,/* Disable Clock YTB Arbiter */
65 P_CLK_MAC_LNK1_D3_DIS = 1<<14,/* Disable Clock MAC Link1 D3 */
66 P_CLK_COR_LNK1_D0_DIS = 1<<13,/* Disable Clock Core Link1 D0 */
67 P_CLK_MAC_LNK1_D0_DIS = 1<<12,/* Disable Clock MAC Link1 D0 */
68 P_CLK_COR_LNK1_D3_DIS = 1<<11,/* Disable Clock Core Link1 D3 */
69 P_CLK_PCI_MST_ARB_DIS = 1<<10,/* Disable Clock PCI Master Arb. */
70 P_CLK_COR_REGS_D3_DIS = 1<<9, /* Disable Clock Core Regs D3 */
71 P_CLK_PCI_REGS_D3_DIS = 1<<8, /* Disable Clock PCI Regs D3 */
72 P_CLK_REF_LNK1_GM_DIS = 1<<7, /* Disable Clock Ref. Link1 GMAC */
73 P_CLK_COR_LNK1_GM_DIS = 1<<6, /* Disable Clock Core Link1 GMAC */
74 P_CLK_PCI_COMMON_DIS = 1<<5, /* Disable Clock PCI Common */
75 P_CLK_COR_COMMON_DIS = 1<<4, /* Disable Clock Core Common */
76 P_CLK_PCI_LNK1_BMU_DIS = 1<<3, /* Disable Clock PCI Link1 BMU */
77 P_CLK_COR_LNK1_BMU_DIS = 1<<2, /* Disable Clock Core Link1 BMU */
78 P_CLK_PCI_LNK1_BIU_DIS = 1<<1, /* Disable Clock PCI Link1 BIU */
79 P_CLK_COR_LNK1_BIU_DIS = 1<<0, /* Disable Clock Core Link1 BIU */
80 PCIE_OUR3_WOL_D3_COLD_SET = P_CLK_ASF_REGS_DIS |
81 P_CLK_COR_REGS_D0_DIS |
82 P_CLK_COR_LNK1_D0_DIS |
83 P_CLK_MAC_LNK1_D0_DIS |
84 P_CLK_PCI_MST_ARB_DIS |
85 P_CLK_COR_COMMON_DIS |
86 P_CLK_COR_LNK1_BMU_DIS,
87};
88
51/* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */ 89/* PCI_OUR_REG_4 32 bit Our Register 4 (Yukon-ECU only) */
52enum pci_dev_reg_4 { 90enum pci_dev_reg_4 {
53 /* (Link Training & Status State Machine) */ 91 /* (Link Training & Status State Machine) */
@@ -114,7 +152,7 @@ enum pci_dev_reg_5 {
114 P_GAT_PCIE_RX_EL_IDLE, 152 P_GAT_PCIE_RX_EL_IDLE,
115}; 153};
116 154
117#/* PCI_CFG_REG_1 32 bit Config Register 1 (Yukon-Ext only) */ 155/* PCI_CFG_REG_1 32 bit Config Register 1 (Yukon-Ext only) */
118enum pci_cfg_reg1 { 156enum pci_cfg_reg1 {
119 P_CF1_DIS_REL_EVT_RST = 1<<24, /* Dis. Rel. Event during PCIE reset */ 157 P_CF1_DIS_REL_EVT_RST = 1<<24, /* Dis. Rel. Event during PCIE reset */
120 /* Bit 23..21: Release Clock on Event */ 158 /* Bit 23..21: Release Clock on Event */
@@ -145,6 +183,72 @@ enum pci_cfg_reg1 {
145 P_CF1_ENA_TXBMU_WR_IDLE, 183 P_CF1_ENA_TXBMU_WR_IDLE,
146}; 184};
147 185
186/* Yukon-Optima */
187enum {
188 PSM_CONFIG_REG1_AC_PRESENT_STATUS = 1<<31, /* AC Present Status */
189
190 PSM_CONFIG_REG1_PTP_CLK_SEL = 1<<29, /* PTP Clock Select */
191 PSM_CONFIG_REG1_PTP_MODE = 1<<28, /* PTP Mode */
192
193 PSM_CONFIG_REG1_MUX_PHY_LINK = 1<<27, /* PHY Energy Detect Event */
194
195 PSM_CONFIG_REG1_EN_PIN63_AC_PRESENT = 1<<26, /* Enable LED_DUPLEX for ac_present */
196 PSM_CONFIG_REG1_EN_PCIE_TIMER = 1<<25, /* Enable PCIe Timer */
197 PSM_CONFIG_REG1_EN_SPU_TIMER = 1<<24, /* Enable SPU Timer */
198 PSM_CONFIG_REG1_POLARITY_AC_PRESENT = 1<<23, /* AC Present Polarity */
199
200 PSM_CONFIG_REG1_EN_AC_PRESENT = 1<<21, /* Enable AC Present */
201
202 PSM_CONFIG_REG1_EN_GPHY_INT_PSM = 1<<20, /* Enable GPHY INT for PSM */
203 PSM_CONFIG_REG1_DIS_PSM_TIMER = 1<<19, /* Disable PSM Timer */
204};
205
206/* Yukon-Supreme */
207enum {
208 PSM_CONFIG_REG1_GPHY_ENERGY_STS = 1<<31, /* GPHY Energy Detect Status */
209
210 PSM_CONFIG_REG1_UART_MODE_MSK = 3<<29, /* UART_Mode */
211 PSM_CONFIG_REG1_CLK_RUN_ASF = 1<<28, /* Enable Clock Free Running for ASF Subsystem */
212 PSM_CONFIG_REG1_UART_CLK_DISABLE= 1<<27, /* Disable UART clock */
213 PSM_CONFIG_REG1_VAUX_ONE = 1<<26, /* Tie internal Vaux to 1'b1 */
214 PSM_CONFIG_REG1_UART_FC_RI_VAL = 1<<25, /* Default value for UART_RI_n */
215 PSM_CONFIG_REG1_UART_FC_DCD_VAL = 1<<24, /* Default value for UART_DCD_n */
216 PSM_CONFIG_REG1_UART_FC_DSR_VAL = 1<<23, /* Default value for UART_DSR_n */
217 PSM_CONFIG_REG1_UART_FC_CTS_VAL = 1<<22, /* Default value for UART_CTS_n */
218 PSM_CONFIG_REG1_LATCH_VAUX = 1<<21, /* Enable Latch current Vaux_avlbl */
219 PSM_CONFIG_REG1_FORCE_TESTMODE_INPUT= 1<<20, /* Force Testmode pin as input PAD */
220 PSM_CONFIG_REG1_UART_RST = 1<<19, /* UART_RST */
221 PSM_CONFIG_REG1_PSM_PCIE_L1_POL = 1<<18, /* PCIE L1 Event Polarity for PSM */
222 PSM_CONFIG_REG1_TIMER_STAT = 1<<17, /* PSM Timer Status */
223 PSM_CONFIG_REG1_GPHY_INT = 1<<16, /* GPHY INT Status */
224 PSM_CONFIG_REG1_FORCE_TESTMODE_ZERO= 1<<15, /* Force internal Testmode as 1'b0 */
225 PSM_CONFIG_REG1_EN_INT_ASPM_CLKREQ = 1<<14, /* ENABLE INT for CLKRUN on ASPM and CLKREQ */
226 PSM_CONFIG_REG1_EN_SND_TASK_ASPM_CLKREQ = 1<<13, /* ENABLE Snd_task for CLKRUN on ASPM and CLKREQ */
227 PSM_CONFIG_REG1_DIS_CLK_GATE_SND_TASK = 1<<12, /* Disable CLK_GATE control snd_task */
228 PSM_CONFIG_REG1_DIS_FF_CHIAN_SND_INTA = 1<<11, /* Disable flip-flop chain for sndmsg_inta */
229
230 PSM_CONFIG_REG1_DIS_LOADER = 1<<9, /* Disable Loader SM after PSM Goes back to IDLE */
231 PSM_CONFIG_REG1_DO_PWDN = 1<<8, /* Do Power Down, Start PSM Scheme */
232 PSM_CONFIG_REG1_DIS_PIG = 1<<7, /* Disable Plug-in-Go SM after PSM Goes back to IDLE */
233 PSM_CONFIG_REG1_DIS_PERST = 1<<6, /* Disable Internal PCIe Reset after PSM Goes back to IDLE */
234 PSM_CONFIG_REG1_EN_REG18_PD = 1<<5, /* Enable REG18 Power Down for PSM */
235 PSM_CONFIG_REG1_EN_PSM_LOAD = 1<<4, /* Disable EEPROM Loader after PSM Goes back to IDLE */
236 PSM_CONFIG_REG1_EN_PSM_HOT_RST = 1<<3, /* Enable PCIe Hot Reset for PSM */
237 PSM_CONFIG_REG1_EN_PSM_PERST = 1<<2, /* Enable PCIe Reset Event for PSM */
238 PSM_CONFIG_REG1_EN_PSM_PCIE_L1 = 1<<1, /* Enable PCIe L1 Event for PSM */
239 PSM_CONFIG_REG1_EN_PSM = 1<<0, /* Enable PSM Scheme */
240};
241
242/* PSM_CONFIG_REG4 0x0168 PSM Config Register 4 */
243enum {
244 /* PHY Link Detect Timer */
245 PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_MSK = 0xf<<4,
246 PSM_CONFIG_REG4_TIMER_PHY_LINK_DETECT_BASE = 4,
247
248 PSM_CONFIG_REG4_DEBUG_TIMER = 1<<1, /* Debug Timer */
249 PSM_CONFIG_REG4_RST_PHY_LINK_DETECT = 1<<0, /* Reset GPHY Link Detect */
250};
251
148 252
149#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \ 253#define PCI_STATUS_ERROR_BITS (PCI_STATUS_DETECTED_PARITY | \
150 PCI_STATUS_SIG_SYSTEM_ERROR | \ 254 PCI_STATUS_SIG_SYSTEM_ERROR | \
@@ -197,6 +301,9 @@ enum csr_regs {
197 B2_I2C_IRQ = 0x0168, 301 B2_I2C_IRQ = 0x0168,
198 B2_I2C_SW = 0x016c, 302 B2_I2C_SW = 0x016c,
199 303
304 Y2_PEX_PHY_DATA = 0x0170,
305 Y2_PEX_PHY_ADDR = 0x0172,
306
200 B3_RAM_ADDR = 0x0180, 307 B3_RAM_ADDR = 0x0180,
201 B3_RAM_DATA_LO = 0x0184, 308 B3_RAM_DATA_LO = 0x0184,
202 B3_RAM_DATA_HI = 0x0188, 309 B3_RAM_DATA_HI = 0x0188,
@@ -317,6 +424,10 @@ enum {
317 Y2_IS_CHK_TXS2 = 1<<9, /* Descriptor error TXS 2 */ 424 Y2_IS_CHK_TXS2 = 1<<9, /* Descriptor error TXS 2 */
318 Y2_IS_CHK_TXA2 = 1<<8, /* Descriptor error TXA 2 */ 425 Y2_IS_CHK_TXA2 = 1<<8, /* Descriptor error TXA 2 */
319 426
427 Y2_IS_PSM_ACK = 1<<7, /* PSM Acknowledge (Yukon-Optima only) */
428 Y2_IS_PTP_TIST = 1<<6, /* PTP Time Stamp (Yukon-Optima only) */
429 Y2_IS_PHY_QLNK = 1<<5, /* PHY Quick Link (Yukon-Optima only) */
430
320 Y2_IS_IRQ_PHY1 = 1<<4, /* Interrupt from PHY 1 */ 431 Y2_IS_IRQ_PHY1 = 1<<4, /* Interrupt from PHY 1 */
321 Y2_IS_IRQ_MAC1 = 1<<3, /* Interrupt from MAC 1 */ 432 Y2_IS_IRQ_MAC1 = 1<<3, /* Interrupt from MAC 1 */
322 Y2_IS_CHK_RX1 = 1<<2, /* Descriptor error Rx 1 */ 433 Y2_IS_CHK_RX1 = 1<<2, /* Descriptor error Rx 1 */
@@ -435,6 +546,7 @@ enum {
435 CHIP_ID_YUKON_FE_P = 0xb8, /* YUKON-2 FE+ */ 546 CHIP_ID_YUKON_FE_P = 0xb8, /* YUKON-2 FE+ */
436 CHIP_ID_YUKON_SUPR = 0xb9, /* YUKON-2 Supreme */ 547 CHIP_ID_YUKON_SUPR = 0xb9, /* YUKON-2 Supreme */
437 CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */ 548 CHIP_ID_YUKON_UL_2 = 0xba, /* YUKON-2 Ultra 2 */
549 CHIP_ID_YUKON_OPT = 0xbc, /* YUKON-2 Optima */
438}; 550};
439enum yukon_ec_rev { 551enum yukon_ec_rev {
440 CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */ 552 CHIP_REV_YU_EC_A1 = 0, /* Chip Rev. for Yukon-EC A1/A0 */
@@ -459,6 +571,8 @@ enum yukon_ex_rev {
459}; 571};
460enum yukon_supr_rev { 572enum yukon_supr_rev {
461 CHIP_REV_YU_SU_A0 = 0, 573 CHIP_REV_YU_SU_A0 = 0,
574 CHIP_REV_YU_SU_B0 = 1,
575 CHIP_REV_YU_SU_B1 = 3,
462}; 576};
463 577
464 578
@@ -513,6 +627,12 @@ enum {
513 TIM_T_STEP = 1<<0, /* Test step */ 627 TIM_T_STEP = 1<<0, /* Test step */
514}; 628};
515 629
630/* Y2_PEX_PHY_ADDR/DATA PEX PHY address and data reg (Yukon-2 only) */
631enum {
632 PEX_RD_ACCESS = 1<<31, /* Access Mode Read = 1, Write = 0 */
633 PEX_DB_ACCESS = 1<<30, /* Access to debug register */
634};
635
516/* B3_RAM_ADDR 32 bit RAM Address, to read or write */ 636/* B3_RAM_ADDR 32 bit RAM Address, to read or write */
517 /* Bit 31..19: reserved */ 637 /* Bit 31..19: reserved */
518#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 0: RAM Address Range */ 638#define RAM_ADR_RAN 0x0007ffffL /* Bit 18.. 0: RAM Address Range */
@@ -688,10 +808,11 @@ enum {
688 RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. */ 808 RX_GMF_AF_THR = 0x0c44,/* 32 bit Rx GMAC FIFO Almost Full Thresh. */
689 RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */ 809 RX_GMF_CTRL_T = 0x0c48,/* 32 bit Rx GMAC FIFO Control/Test */
690 RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */ 810 RX_GMF_FL_MSK = 0x0c4c,/* 32 bit Rx GMAC FIFO Flush Mask */
691 RX_GMF_FL_THR = 0x0c50,/* 32 bit Rx GMAC FIFO Flush Threshold */ 811 RX_GMF_FL_THR = 0x0c50,/* 16 bit Rx GMAC FIFO Flush Threshold */
812 RX_GMF_FL_CTRL = 0x0c52,/* 16 bit Rx GMAC FIFO Flush Control */
692 RX_GMF_TR_THR = 0x0c54,/* 32 bit Rx Truncation Threshold (Yukon-2) */ 813 RX_GMF_TR_THR = 0x0c54,/* 32 bit Rx Truncation Threshold (Yukon-2) */
693 RX_GMF_UP_THR = 0x0c58,/* 8 bit Rx Upper Pause Thr (Yukon-EC_U) */ 814 RX_GMF_UP_THR = 0x0c58,/* 16 bit Rx Upper Pause Thr (Yukon-EC_U) */
694 RX_GMF_LP_THR = 0x0c5a,/* 8 bit Rx Lower Pause Thr (Yukon-EC_U) */ 815 RX_GMF_LP_THR = 0x0c5a,/* 16 bit Rx Lower Pause Thr (Yukon-EC_U) */
695 RX_GMF_VLAN = 0x0c5c,/* 32 bit Rx VLAN Type Register (Yukon-2) */ 816 RX_GMF_VLAN = 0x0c5c,/* 32 bit Rx VLAN Type Register (Yukon-2) */
696 RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */ 817 RX_GMF_WP = 0x0c60,/* 32 bit Rx GMAC FIFO Write Pointer */
697 818
@@ -754,6 +875,42 @@ enum {
754 BMU_TX_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment length mismatch */ 875 BMU_TX_CLR_IRQ_TCP = 1<<11, /* Clear IRQ on TCP segment length mismatch */
755}; 876};
756 877
878/* TBMU_TEST 0x06B8 Transmit BMU Test Register */
879enum {
880 TBMU_TEST_BMU_TX_CHK_AUTO_OFF = 1<<31, /* BMU Tx Checksum Auto Calculation Disable */
881 TBMU_TEST_BMU_TX_CHK_AUTO_ON = 1<<30, /* BMU Tx Checksum Auto Calculation Enable */
882 TBMU_TEST_HOME_ADD_PAD_FIX1_EN = 1<<29, /* Home Address Paddiing FIX1 Enable */
883 TBMU_TEST_HOME_ADD_PAD_FIX1_DIS = 1<<28, /* Home Address Paddiing FIX1 Disable */
884 TBMU_TEST_ROUTING_ADD_FIX_EN = 1<<27, /* Routing Address Fix Enable */
885 TBMU_TEST_ROUTING_ADD_FIX_DIS = 1<<26, /* Routing Address Fix Disable */
886 TBMU_TEST_HOME_ADD_FIX_EN = 1<<25, /* Home address checksum fix enable */
887 TBMU_TEST_HOME_ADD_FIX_DIS = 1<<24, /* Home address checksum fix disable */
888
889 TBMU_TEST_TEST_RSPTR_ON = 1<<22, /* Testmode Shadow Read Ptr On */
890 TBMU_TEST_TEST_RSPTR_OFF = 1<<21, /* Testmode Shadow Read Ptr Off */
891 TBMU_TEST_TESTSTEP_RSPTR = 1<<20, /* Teststep Shadow Read Ptr */
892
893 TBMU_TEST_TEST_RPTR_ON = 1<<18, /* Testmode Read Ptr On */
894 TBMU_TEST_TEST_RPTR_OFF = 1<<17, /* Testmode Read Ptr Off */
895 TBMU_TEST_TESTSTEP_RPTR = 1<<16, /* Teststep Read Ptr */
896
897 TBMU_TEST_TEST_WSPTR_ON = 1<<14, /* Testmode Shadow Write Ptr On */
898 TBMU_TEST_TEST_WSPTR_OFF = 1<<13, /* Testmode Shadow Write Ptr Off */
899 TBMU_TEST_TESTSTEP_WSPTR = 1<<12, /* Teststep Shadow Write Ptr */
900
901 TBMU_TEST_TEST_WPTR_ON = 1<<10, /* Testmode Write Ptr On */
902 TBMU_TEST_TEST_WPTR_OFF = 1<<9, /* Testmode Write Ptr Off */
903 TBMU_TEST_TESTSTEP_WPTR = 1<<8, /* Teststep Write Ptr */
904
905 TBMU_TEST_TEST_REQ_NB_ON = 1<<6, /* Testmode Req Nbytes/Addr On */
906 TBMU_TEST_TEST_REQ_NB_OFF = 1<<5, /* Testmode Req Nbytes/Addr Off */
907 TBMU_TEST_TESTSTEP_REQ_NB = 1<<4, /* Teststep Req Nbytes/Addr */
908
909 TBMU_TEST_TEST_DONE_IDX_ON = 1<<2, /* Testmode Done Index On */
910 TBMU_TEST_TEST_DONE_IDX_OFF = 1<<1, /* Testmode Done Index Off */
911 TBMU_TEST_TESTSTEP_DONE_IDX = 1<<0, /* Teststep Done Index */
912};
913
757/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/ 914/* Queue Prefetch Unit Offsets, use Y2_QADDR() to address (Yukon-2 only)*/
758/* PREF_UNIT_CTRL 32 bit Prefetch Control register */ 915/* PREF_UNIT_CTRL 32 bit Prefetch Control register */
759enum { 916enum {
@@ -1674,6 +1831,12 @@ enum {
1674 1831
1675/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */ 1832/* RX_GMF_CTRL_T 32 bit Rx GMAC FIFO Control/Test */
1676enum { 1833enum {
1834 RX_GCLKMAC_ENA = 1<<31, /* RX MAC Clock Gating Enable */
1835 RX_GCLKMAC_OFF = 1<<30,
1836
1837 RX_STFW_DIS = 1<<29, /* RX Store and Forward Enable */
1838 RX_STFW_ENA = 1<<28,
1839
1677 RX_TRUNC_ON = 1<<27, /* enable packet truncation */ 1840 RX_TRUNC_ON = 1<<27, /* enable packet truncation */
1678 RX_TRUNC_OFF = 1<<26, /* disable packet truncation */ 1841 RX_TRUNC_OFF = 1<<26, /* disable packet truncation */
1679 RX_VLAN_STRIP_ON = 1<<25, /* enable VLAN stripping */ 1842 RX_VLAN_STRIP_ON = 1<<25, /* enable VLAN stripping */
@@ -1711,6 +1874,20 @@ enum {
1711 GMF_RX_CTRL_DEF = GMF_OPER_ON | GMF_RX_F_FL_ON, 1874 GMF_RX_CTRL_DEF = GMF_OPER_ON | GMF_RX_F_FL_ON,
1712}; 1875};
1713 1876
1877/* RX_GMF_FL_CTRL 16 bit Rx GMAC FIFO Flush Control (Yukon-Supreme) */
1878enum {
1879 RX_IPV6_SA_MOB_ENA = 1<<9, /* IPv6 SA Mobility Support Enable */
1880 RX_IPV6_SA_MOB_DIS = 1<<8, /* IPv6 SA Mobility Support Disable */
1881 RX_IPV6_DA_MOB_ENA = 1<<7, /* IPv6 DA Mobility Support Enable */
1882 RX_IPV6_DA_MOB_DIS = 1<<6, /* IPv6 DA Mobility Support Disable */
1883 RX_PTR_SYNCDLY_ENA = 1<<5, /* Pointers Delay Synch Enable */
1884 RX_PTR_SYNCDLY_DIS = 1<<4, /* Pointers Delay Synch Disable */
1885 RX_ASF_NEWFLAG_ENA = 1<<3, /* RX ASF Flag New Logic Enable */
1886 RX_ASF_NEWFLAG_DIS = 1<<2, /* RX ASF Flag New Logic Disable */
1887 RX_FLSH_MISSPKT_ENA = 1<<1, /* RX Flush Miss-Packet Enable */
1888 RX_FLSH_MISSPKT_DIS = 1<<0, /* RX Flush Miss-Packet Disable */
1889};
1890
1714/* TX_GMF_EA 32 bit Tx GMAC FIFO End Address */ 1891/* TX_GMF_EA 32 bit Tx GMAC FIFO End Address */
1715enum { 1892enum {
1716 TX_DYN_WM_ENA = 3, /* Yukon-FE+ specific */ 1893 TX_DYN_WM_ENA = 3, /* Yukon-FE+ specific */
diff --git a/drivers/net/slip.c b/drivers/net/slip.c
index e17c535a577e..ccfe45924fd9 100644
--- a/drivers/net/slip.c
+++ b/drivers/net/slip.c
@@ -79,6 +79,7 @@
79#include <linux/rtnetlink.h> 79#include <linux/rtnetlink.h>
80#include <linux/if_arp.h> 80#include <linux/if_arp.h>
81#include <linux/if_slip.h> 81#include <linux/if_slip.h>
82#include <linux/compat.h>
82#include <linux/delay.h> 83#include <linux/delay.h>
83#include <linux/init.h> 84#include <linux/init.h>
84#include "slip.h" 85#include "slip.h"
@@ -1168,6 +1169,27 @@ static int slip_ioctl(struct tty_struct *tty, struct file *file,
1168 } 1169 }
1169} 1170}
1170 1171
1172#ifdef CONFIG_COMPAT
1173static long slip_compat_ioctl(struct tty_struct *tty, struct file *file,
1174 unsigned int cmd, unsigned long arg)
1175{
1176 switch (cmd) {
1177 case SIOCGIFNAME:
1178 case SIOCGIFENCAP:
1179 case SIOCSIFENCAP:
1180 case SIOCSIFHWADDR:
1181 case SIOCSKEEPALIVE:
1182 case SIOCGKEEPALIVE:
1183 case SIOCSOUTFILL:
1184 case SIOCGOUTFILL:
1185 return slip_ioctl(tty, file, cmd,
1186 (unsigned long)compat_ptr(arg));
1187 }
1188
1189 return -ENOIOCTLCMD;
1190}
1191#endif
1192
1171/* VSV changes start here */ 1193/* VSV changes start here */
1172#ifdef CONFIG_SLIP_SMART 1194#ifdef CONFIG_SLIP_SMART
1173/* function do_ioctl called from net/core/dev.c 1195/* function do_ioctl called from net/core/dev.c
@@ -1260,6 +1282,9 @@ static struct tty_ldisc_ops sl_ldisc = {
1260 .close = slip_close, 1282 .close = slip_close,
1261 .hangup = slip_hangup, 1283 .hangup = slip_hangup,
1262 .ioctl = slip_ioctl, 1284 .ioctl = slip_ioctl,
1285#ifdef CONFIG_COMPAT
1286 .compat_ioctl = slip_compat_ioctl,
1287#endif
1263 .receive_buf = slip_receive_buf, 1288 .receive_buf = slip_receive_buf,
1264 .write_wakeup = slip_write_wakeup, 1289 .write_wakeup = slip_write_wakeup,
1265}; 1290};
diff --git a/drivers/net/smsc911x.c b/drivers/net/smsc911x.c
index f9cdcbcb77d4..7f01e60d5172 100644
--- a/drivers/net/smsc911x.c
+++ b/drivers/net/smsc911x.c
@@ -2071,6 +2071,9 @@ static int __devinit smsc911x_drv_probe(struct platform_device *pdev)
2071 if (is_valid_ether_addr(dev->dev_addr)) { 2071 if (is_valid_ether_addr(dev->dev_addr)) {
2072 smsc911x_set_hw_mac_address(pdata, dev->dev_addr); 2072 smsc911x_set_hw_mac_address(pdata, dev->dev_addr);
2073 SMSC_TRACE(PROBE, "MAC Address is specified by configuration"); 2073 SMSC_TRACE(PROBE, "MAC Address is specified by configuration");
2074 } else if (is_valid_ether_addr(pdata->config.mac)) {
2075 memcpy(dev->dev_addr, pdata->config.mac, 6);
2076 SMSC_TRACE(PROBE, "MAC Address specified by platform data");
2074 } else { 2077 } else {
2075 /* Try reading mac address from device. if EEPROM is present 2078 /* Try reading mac address from device. if EEPROM is present
2076 * it will already have been set */ 2079 * it will already have been set */
diff --git a/drivers/net/spider_net.c b/drivers/net/spider_net.c
index 90e663f4515c..782910cf220f 100644
--- a/drivers/net/spider_net.c
+++ b/drivers/net/spider_net.c
@@ -57,6 +57,7 @@ MODULE_AUTHOR("Utz Bacher <utz.bacher@de.ibm.com> and Jens Osterkamp " \
57MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver"); 57MODULE_DESCRIPTION("Spider Southbridge Gigabit Ethernet driver");
58MODULE_LICENSE("GPL"); 58MODULE_LICENSE("GPL");
59MODULE_VERSION(VERSION); 59MODULE_VERSION(VERSION);
60MODULE_FIRMWARE(SPIDER_NET_FIRMWARE_NAME);
60 61
61static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT; 62static int rx_descriptors = SPIDER_NET_RX_DESCRIPTORS_DEFAULT;
62static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT; 63static int tx_descriptors = SPIDER_NET_TX_DESCRIPTORS_DEFAULT;
diff --git a/drivers/net/sungem.c b/drivers/net/sungem.c
index 305ec3d783db..d6f4faf5bbcb 100644
--- a/drivers/net/sungem.c
+++ b/drivers/net/sungem.c
@@ -1033,10 +1033,8 @@ static netdev_tx_t gem_start_xmit(struct sk_buff *skb,
1033 (csum_stuff_off << 21)); 1033 (csum_stuff_off << 21));
1034 } 1034 }
1035 1035
1036 local_irq_save(flags); 1036 if (!spin_trylock_irqsave(&gp->tx_lock, flags)) {
1037 if (!spin_trylock(&gp->tx_lock)) {
1038 /* Tell upper layer to requeue */ 1037 /* Tell upper layer to requeue */
1039 local_irq_restore(flags);
1040 return NETDEV_TX_LOCKED; 1038 return NETDEV_TX_LOCKED;
1041 } 1039 }
1042 /* We raced with gem_do_stop() */ 1040 /* We raced with gem_do_stop() */
diff --git a/drivers/net/tc35815.c b/drivers/net/tc35815.c
index d1298e5b72c5..6572e8a54520 100644
--- a/drivers/net/tc35815.c
+++ b/drivers/net/tc35815.c
@@ -22,11 +22,7 @@
22 * All Rights Reserved. 22 * All Rights Reserved.
23 */ 23 */
24 24
25#ifdef TC35815_NAPI 25#define DRV_VERSION "1.39"
26#define DRV_VERSION "1.38-NAPI"
27#else
28#define DRV_VERSION "1.38"
29#endif
30static const char *version = "tc35815.c:v" DRV_VERSION "\n"; 26static const char *version = "tc35815.c:v" DRV_VERSION "\n";
31#define MODNAME "tc35815" 27#define MODNAME "tc35815"
32 28
@@ -54,13 +50,6 @@ static const char *version = "tc35815.c:v" DRV_VERSION "\n";
54#include <asm/io.h> 50#include <asm/io.h>
55#include <asm/byteorder.h> 51#include <asm/byteorder.h>
56 52
57/* First, a few definitions that the brave might change. */
58
59#define GATHER_TXINT /* On-Demand Tx Interrupt */
60#define WORKAROUND_LOSTCAR
61#define WORKAROUND_100HALF_PROMISC
62/* #define TC35815_USE_PACKEDBUFFER */
63
64enum tc35815_chiptype { 53enum tc35815_chiptype {
65 TC35815CF = 0, 54 TC35815CF = 0,
66 TC35815_NWU, 55 TC35815_NWU,
@@ -330,17 +319,10 @@ struct BDesc {
330 319
331 320
332/* Some useful constants. */ 321/* Some useful constants. */
333#undef NO_CHECK_CARRIER /* Does not check No-Carrier with TP */
334 322
335#ifdef NO_CHECK_CARRIER 323#define TX_CTL_CMD (Tx_EnTxPar | Tx_EnLateColl | \
336#define TX_CTL_CMD (Tx_EnComp | Tx_EnTxPar | Tx_EnLateColl | \
337 Tx_EnExColl | Tx_EnExDefer | Tx_EnUnder | \
338 Tx_En) /* maybe 0x7b01 */
339#else
340#define TX_CTL_CMD (Tx_EnComp | Tx_EnTxPar | Tx_EnLateColl | \
341 Tx_EnExColl | Tx_EnLCarr | Tx_EnExDefer | Tx_EnUnder | \ 324 Tx_EnExColl | Tx_EnLCarr | Tx_EnExDefer | Tx_EnUnder | \
342 Tx_En) /* maybe 0x7b01 */ 325 Tx_En) /* maybe 0x7b01 */
343#endif
344/* Do not use Rx_StripCRC -- it causes trouble on BLEx/FDAEx condition */ 326/* Do not use Rx_StripCRC -- it causes trouble on BLEx/FDAEx condition */
345#define RX_CTL_CMD (Rx_EnGood | Rx_EnRxPar | Rx_EnLongErr | Rx_EnOver \ 327#define RX_CTL_CMD (Rx_EnGood | Rx_EnRxPar | Rx_EnLongErr | Rx_EnOver \
346 | Rx_EnCRCErr | Rx_EnAlign | Rx_RxEn) /* maybe 0x6f01 */ 328 | Rx_EnCRCErr | Rx_EnAlign | Rx_RxEn) /* maybe 0x6f01 */
@@ -361,13 +343,6 @@ struct BDesc {
361#define TX_THRESHOLD_KEEP_LIMIT 10 343#define TX_THRESHOLD_KEEP_LIMIT 10
362 344
363/* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */ 345/* 16 + RX_BUF_NUM * 8 + RX_FD_NUM * 16 + TX_FD_NUM * 32 <= PAGE_SIZE*FD_PAGE_NUM */
364#ifdef TC35815_USE_PACKEDBUFFER
365#define FD_PAGE_NUM 2
366#define RX_BUF_NUM 8 /* >= 2 */
367#define RX_FD_NUM 250 /* >= 32 */
368#define TX_FD_NUM 128
369#define RX_BUF_SIZE PAGE_SIZE
370#else /* TC35815_USE_PACKEDBUFFER */
371#define FD_PAGE_NUM 4 346#define FD_PAGE_NUM 4
372#define RX_BUF_NUM 128 /* < 256 */ 347#define RX_BUF_NUM 128 /* < 256 */
373#define RX_FD_NUM 256 /* >= 32 */ 348#define RX_FD_NUM 256 /* >= 32 */
@@ -381,7 +356,6 @@ struct BDesc {
381#define RX_BUF_SIZE \ 356#define RX_BUF_SIZE \
382 L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN + NET_IP_ALIGN) 357 L1_CACHE_ALIGN(ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN + NET_IP_ALIGN)
383#endif 358#endif
384#endif /* TC35815_USE_PACKEDBUFFER */
385#define RX_FD_RESERVE (2 / 2) /* max 2 BD per RxFD */ 359#define RX_FD_RESERVE (2 / 2) /* max 2 BD per RxFD */
386#define NAPI_WEIGHT 16 360#define NAPI_WEIGHT 16
387 361
@@ -439,11 +413,7 @@ struct tc35815_local {
439 /* 413 /*
440 * Transmitting: Batch Mode. 414 * Transmitting: Batch Mode.
441 * 1 BD in 1 TxFD. 415 * 1 BD in 1 TxFD.
442 * Receiving: Packing Mode. (TC35815_USE_PACKEDBUFFER) 416 * Receiving: Non-Packing Mode.
443 * 1 circular FD for Free Buffer List.
444 * RX_BUF_NUM BD in Free Buffer FD.
445 * One Free Buffer BD has PAGE_SIZE data buffer.
446 * Or Non-Packing Mode.
447 * 1 circular FD for Free Buffer List. 417 * 1 circular FD for Free Buffer List.
448 * RX_BUF_NUM BD in Free Buffer FD. 418 * RX_BUF_NUM BD in Free Buffer FD.
449 * One Free Buffer BD has ETH_FRAME_LEN data buffer. 419 * One Free Buffer BD has ETH_FRAME_LEN data buffer.
@@ -457,21 +427,11 @@ struct tc35815_local {
457 struct RxFD *rfd_limit; 427 struct RxFD *rfd_limit;
458 struct RxFD *rfd_cur; 428 struct RxFD *rfd_cur;
459 struct FrFD *fbl_ptr; 429 struct FrFD *fbl_ptr;
460#ifdef TC35815_USE_PACKEDBUFFER
461 unsigned char fbl_curid;
462 void *data_buf[RX_BUF_NUM]; /* packing */
463 dma_addr_t data_buf_dma[RX_BUF_NUM];
464 struct {
465 struct sk_buff *skb;
466 dma_addr_t skb_dma;
467 } tx_skbs[TX_FD_NUM];
468#else
469 unsigned int fbl_count; 430 unsigned int fbl_count;
470 struct { 431 struct {
471 struct sk_buff *skb; 432 struct sk_buff *skb;
472 dma_addr_t skb_dma; 433 dma_addr_t skb_dma;
473 } tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM]; 434 } tx_skbs[TX_FD_NUM], rx_skbs[RX_BUF_NUM];
474#endif
475 u32 msg_enable; 435 u32 msg_enable;
476 enum tc35815_chiptype chiptype; 436 enum tc35815_chiptype chiptype;
477}; 437};
@@ -486,51 +446,6 @@ static inline void *fd_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus)
486 return (void *)((u8 *)lp->fd_buf + (bus - lp->fd_buf_dma)); 446 return (void *)((u8 *)lp->fd_buf + (bus - lp->fd_buf_dma));
487} 447}
488#endif 448#endif
489#ifdef TC35815_USE_PACKEDBUFFER
490static inline void *rxbuf_bus_to_virt(struct tc35815_local *lp, dma_addr_t bus)
491{
492 int i;
493 for (i = 0; i < RX_BUF_NUM; i++) {
494 if (bus >= lp->data_buf_dma[i] &&
495 bus < lp->data_buf_dma[i] + PAGE_SIZE)
496 return (void *)((u8 *)lp->data_buf[i] +
497 (bus - lp->data_buf_dma[i]));
498 }
499 return NULL;
500}
501
502#define TC35815_DMA_SYNC_ONDEMAND
503static void *alloc_rxbuf_page(struct pci_dev *hwdev, dma_addr_t *dma_handle)
504{
505#ifdef TC35815_DMA_SYNC_ONDEMAND
506 void *buf;
507 /* pci_map + pci_dma_sync will be more effective than
508 * pci_alloc_consistent on some archs. */
509 buf = (void *)__get_free_page(GFP_ATOMIC);
510 if (!buf)
511 return NULL;
512 *dma_handle = pci_map_single(hwdev, buf, PAGE_SIZE,
513 PCI_DMA_FROMDEVICE);
514 if (pci_dma_mapping_error(hwdev, *dma_handle)) {
515 free_page((unsigned long)buf);
516 return NULL;
517 }
518 return buf;
519#else
520 return pci_alloc_consistent(hwdev, PAGE_SIZE, dma_handle);
521#endif
522}
523
524static void free_rxbuf_page(struct pci_dev *hwdev, void *buf, dma_addr_t dma_handle)
525{
526#ifdef TC35815_DMA_SYNC_ONDEMAND
527 pci_unmap_single(hwdev, dma_handle, PAGE_SIZE, PCI_DMA_FROMDEVICE);
528 free_page((unsigned long)buf);
529#else
530 pci_free_consistent(hwdev, PAGE_SIZE, buf, dma_handle);
531#endif
532}
533#else /* TC35815_USE_PACKEDBUFFER */
534static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev, 449static struct sk_buff *alloc_rxbuf_skb(struct net_device *dev,
535 struct pci_dev *hwdev, 450 struct pci_dev *hwdev,
536 dma_addr_t *dma_handle) 451 dma_addr_t *dma_handle)
@@ -555,19 +470,14 @@ static void free_rxbuf_skb(struct pci_dev *hwdev, struct sk_buff *skb, dma_addr_
555 PCI_DMA_FROMDEVICE); 470 PCI_DMA_FROMDEVICE);
556 dev_kfree_skb_any(skb); 471 dev_kfree_skb_any(skb);
557} 472}
558#endif /* TC35815_USE_PACKEDBUFFER */
559 473
560/* Index to functions, as function prototypes. */ 474/* Index to functions, as function prototypes. */
561 475
562static int tc35815_open(struct net_device *dev); 476static int tc35815_open(struct net_device *dev);
563static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev); 477static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev);
564static irqreturn_t tc35815_interrupt(int irq, void *dev_id); 478static irqreturn_t tc35815_interrupt(int irq, void *dev_id);
565#ifdef TC35815_NAPI
566static int tc35815_rx(struct net_device *dev, int limit); 479static int tc35815_rx(struct net_device *dev, int limit);
567static int tc35815_poll(struct napi_struct *napi, int budget); 480static int tc35815_poll(struct napi_struct *napi, int budget);
568#else
569static void tc35815_rx(struct net_device *dev);
570#endif
571static void tc35815_txdone(struct net_device *dev); 481static void tc35815_txdone(struct net_device *dev);
572static int tc35815_close(struct net_device *dev); 482static int tc35815_close(struct net_device *dev);
573static struct net_device_stats *tc35815_get_stats(struct net_device *dev); 483static struct net_device_stats *tc35815_get_stats(struct net_device *dev);
@@ -654,8 +564,6 @@ static void tc_handle_link_change(struct net_device *dev)
654 * TX4939 PCFG.SPEEDn bit will be changed on 564 * TX4939 PCFG.SPEEDn bit will be changed on
655 * NETDEV_CHANGE event. 565 * NETDEV_CHANGE event.
656 */ 566 */
657
658#if !defined(NO_CHECK_CARRIER) && defined(WORKAROUND_LOSTCAR)
659 /* 567 /*
660 * WORKAROUND: enable LostCrS only if half duplex 568 * WORKAROUND: enable LostCrS only if half duplex
661 * operation. 569 * operation.
@@ -665,7 +573,6 @@ static void tc_handle_link_change(struct net_device *dev)
665 lp->chiptype != TC35815_TX4939) 573 lp->chiptype != TC35815_TX4939)
666 tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr, 574 tc_writel(tc_readl(&tr->Tx_Ctl) | Tx_EnLCarr,
667 &tr->Tx_Ctl); 575 &tr->Tx_Ctl);
668#endif
669 576
670 lp->speed = phydev->speed; 577 lp->speed = phydev->speed;
671 lp->duplex = phydev->duplex; 578 lp->duplex = phydev->duplex;
@@ -674,11 +581,9 @@ static void tc_handle_link_change(struct net_device *dev)
674 581
675 if (phydev->link != lp->link) { 582 if (phydev->link != lp->link) {
676 if (phydev->link) { 583 if (phydev->link) {
677#ifdef WORKAROUND_100HALF_PROMISC
678 /* delayed promiscuous enabling */ 584 /* delayed promiscuous enabling */
679 if (dev->flags & IFF_PROMISC) 585 if (dev->flags & IFF_PROMISC)
680 tc35815_set_multicast_list(dev); 586 tc35815_set_multicast_list(dev);
681#endif
682 } else { 587 } else {
683 lp->speed = 0; 588 lp->speed = 0;
684 lp->duplex = -1; 589 lp->duplex = -1;
@@ -923,9 +828,7 @@ static int __devinit tc35815_init_one(struct pci_dev *pdev,
923 dev->netdev_ops = &tc35815_netdev_ops; 828 dev->netdev_ops = &tc35815_netdev_ops;
924 dev->ethtool_ops = &tc35815_ethtool_ops; 829 dev->ethtool_ops = &tc35815_ethtool_ops;
925 dev->watchdog_timeo = TC35815_TX_TIMEOUT; 830 dev->watchdog_timeo = TC35815_TX_TIMEOUT;
926#ifdef TC35815_NAPI
927 netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT); 831 netif_napi_add(dev, &lp->napi, tc35815_poll, NAPI_WEIGHT);
928#endif
929 832
930 dev->irq = pdev->irq; 833 dev->irq = pdev->irq;
931 dev->base_addr = (unsigned long)ioaddr; 834 dev->base_addr = (unsigned long)ioaddr;
@@ -1007,25 +910,6 @@ tc35815_init_queues(struct net_device *dev)
1007 if (!lp->fd_buf) 910 if (!lp->fd_buf)
1008 return -ENOMEM; 911 return -ENOMEM;
1009 for (i = 0; i < RX_BUF_NUM; i++) { 912 for (i = 0; i < RX_BUF_NUM; i++) {
1010#ifdef TC35815_USE_PACKEDBUFFER
1011 lp->data_buf[i] =
1012 alloc_rxbuf_page(lp->pci_dev,
1013 &lp->data_buf_dma[i]);
1014 if (!lp->data_buf[i]) {
1015 while (--i >= 0) {
1016 free_rxbuf_page(lp->pci_dev,
1017 lp->data_buf[i],
1018 lp->data_buf_dma[i]);
1019 lp->data_buf[i] = NULL;
1020 }
1021 pci_free_consistent(lp->pci_dev,
1022 PAGE_SIZE * FD_PAGE_NUM,
1023 lp->fd_buf,
1024 lp->fd_buf_dma);
1025 lp->fd_buf = NULL;
1026 return -ENOMEM;
1027 }
1028#else
1029 lp->rx_skbs[i].skb = 913 lp->rx_skbs[i].skb =
1030 alloc_rxbuf_skb(dev, lp->pci_dev, 914 alloc_rxbuf_skb(dev, lp->pci_dev,
1031 &lp->rx_skbs[i].skb_dma); 915 &lp->rx_skbs[i].skb_dma);
@@ -1043,15 +927,9 @@ tc35815_init_queues(struct net_device *dev)
1043 lp->fd_buf = NULL; 927 lp->fd_buf = NULL;
1044 return -ENOMEM; 928 return -ENOMEM;
1045 } 929 }
1046#endif
1047 } 930 }
1048 printk(KERN_DEBUG "%s: FD buf %p DataBuf", 931 printk(KERN_DEBUG "%s: FD buf %p DataBuf",
1049 dev->name, lp->fd_buf); 932 dev->name, lp->fd_buf);
1050#ifdef TC35815_USE_PACKEDBUFFER
1051 printk(" DataBuf");
1052 for (i = 0; i < RX_BUF_NUM; i++)
1053 printk(" %p", lp->data_buf[i]);
1054#endif
1055 printk("\n"); 933 printk("\n");
1056 } else { 934 } else {
1057 for (i = 0; i < FD_PAGE_NUM; i++) 935 for (i = 0; i < FD_PAGE_NUM; i++)
@@ -1084,7 +962,6 @@ tc35815_init_queues(struct net_device *dev)
1084 lp->fbl_ptr = (struct FrFD *)fd_addr; 962 lp->fbl_ptr = (struct FrFD *)fd_addr;
1085 lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, lp->fbl_ptr)); 963 lp->fbl_ptr->fd.FDNext = cpu_to_le32(fd_virt_to_bus(lp, lp->fbl_ptr));
1086 lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD); 964 lp->fbl_ptr->fd.FDCtl = cpu_to_le32(RX_BUF_NUM | FD_CownsFD);
1087#ifndef TC35815_USE_PACKEDBUFFER
1088 /* 965 /*
1089 * move all allocated skbs to head of rx_skbs[] array. 966 * move all allocated skbs to head of rx_skbs[] array.
1090 * fbl_count mighe not be RX_BUF_NUM if alloc_rxbuf_skb() in 967 * fbl_count mighe not be RX_BUF_NUM if alloc_rxbuf_skb() in
@@ -1102,11 +979,7 @@ tc35815_init_queues(struct net_device *dev)
1102 lp->fbl_count++; 979 lp->fbl_count++;
1103 } 980 }
1104 } 981 }
1105#endif
1106 for (i = 0; i < RX_BUF_NUM; i++) { 982 for (i = 0; i < RX_BUF_NUM; i++) {
1107#ifdef TC35815_USE_PACKEDBUFFER
1108 lp->fbl_ptr->bd[i].BuffData = cpu_to_le32(lp->data_buf_dma[i]);
1109#else
1110 if (i >= lp->fbl_count) { 983 if (i >= lp->fbl_count) {
1111 lp->fbl_ptr->bd[i].BuffData = 0; 984 lp->fbl_ptr->bd[i].BuffData = 0;
1112 lp->fbl_ptr->bd[i].BDCtl = 0; 985 lp->fbl_ptr->bd[i].BDCtl = 0;
@@ -1114,15 +987,11 @@ tc35815_init_queues(struct net_device *dev)
1114 } 987 }
1115 lp->fbl_ptr->bd[i].BuffData = 988 lp->fbl_ptr->bd[i].BuffData =
1116 cpu_to_le32(lp->rx_skbs[i].skb_dma); 989 cpu_to_le32(lp->rx_skbs[i].skb_dma);
1117#endif
1118 /* BDID is index of FrFD.bd[] */ 990 /* BDID is index of FrFD.bd[] */
1119 lp->fbl_ptr->bd[i].BDCtl = 991 lp->fbl_ptr->bd[i].BDCtl =
1120 cpu_to_le32(BD_CownsBD | (i << BD_RxBDID_SHIFT) | 992 cpu_to_le32(BD_CownsBD | (i << BD_RxBDID_SHIFT) |
1121 RX_BUF_SIZE); 993 RX_BUF_SIZE);
1122 } 994 }
1123#ifdef TC35815_USE_PACKEDBUFFER
1124 lp->fbl_curid = 0;
1125#endif
1126 995
1127 printk(KERN_DEBUG "%s: TxFD %p RxFD %p FrFD %p\n", 996 printk(KERN_DEBUG "%s: TxFD %p RxFD %p FrFD %p\n",
1128 dev->name, lp->tfd_base, lp->rfd_base, lp->fbl_ptr); 997 dev->name, lp->tfd_base, lp->rfd_base, lp->fbl_ptr);
@@ -1196,19 +1065,11 @@ tc35815_free_queues(struct net_device *dev)
1196 lp->fbl_ptr = NULL; 1065 lp->fbl_ptr = NULL;
1197 1066
1198 for (i = 0; i < RX_BUF_NUM; i++) { 1067 for (i = 0; i < RX_BUF_NUM; i++) {
1199#ifdef TC35815_USE_PACKEDBUFFER
1200 if (lp->data_buf[i]) {
1201 free_rxbuf_page(lp->pci_dev,
1202 lp->data_buf[i], lp->data_buf_dma[i]);
1203 lp->data_buf[i] = NULL;
1204 }
1205#else
1206 if (lp->rx_skbs[i].skb) { 1068 if (lp->rx_skbs[i].skb) {
1207 free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb, 1069 free_rxbuf_skb(lp->pci_dev, lp->rx_skbs[i].skb,
1208 lp->rx_skbs[i].skb_dma); 1070 lp->rx_skbs[i].skb_dma);
1209 lp->rx_skbs[i].skb = NULL; 1071 lp->rx_skbs[i].skb = NULL;
1210 } 1072 }
1211#endif
1212 } 1073 }
1213 if (lp->fd_buf) { 1074 if (lp->fd_buf) {
1214 pci_free_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM, 1075 pci_free_consistent(lp->pci_dev, PAGE_SIZE * FD_PAGE_NUM,
@@ -1254,7 +1115,7 @@ dump_rxfd(struct RxFD *fd)
1254 return bd_count; 1115 return bd_count;
1255} 1116}
1256 1117
1257#if defined(DEBUG) || defined(TC35815_USE_PACKEDBUFFER) 1118#ifdef DEBUG
1258static void 1119static void
1259dump_frfd(struct FrFD *fd) 1120dump_frfd(struct FrFD *fd)
1260{ 1121{
@@ -1271,9 +1132,7 @@ dump_frfd(struct FrFD *fd)
1271 le32_to_cpu(fd->bd[i].BDCtl)); 1132 le32_to_cpu(fd->bd[i].BDCtl));
1272 printk("\n"); 1133 printk("\n");
1273} 1134}
1274#endif
1275 1135
1276#ifdef DEBUG
1277static void 1136static void
1278panic_queues(struct net_device *dev) 1137panic_queues(struct net_device *dev)
1279{ 1138{
@@ -1400,9 +1259,7 @@ tc35815_open(struct net_device *dev)
1400 return -EAGAIN; 1259 return -EAGAIN;
1401 } 1260 }
1402 1261
1403#ifdef TC35815_NAPI
1404 napi_enable(&lp->napi); 1262 napi_enable(&lp->napi);
1405#endif
1406 1263
1407 /* Reset the hardware here. Don't forget to set the station address. */ 1264 /* Reset the hardware here. Don't forget to set the station address. */
1408 spin_lock_irq(&lp->lock); 1265 spin_lock_irq(&lp->lock);
@@ -1478,9 +1335,7 @@ static int tc35815_send_packet(struct sk_buff *skb, struct net_device *dev)
1478 (struct tc35815_regs __iomem *)dev->base_addr; 1335 (struct tc35815_regs __iomem *)dev->base_addr;
1479 /* Start DMA Transmitter. */ 1336 /* Start DMA Transmitter. */
1480 txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL); 1337 txfd->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
1481#ifdef GATHER_TXINT
1482 txfd->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx); 1338 txfd->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
1483#endif
1484 if (netif_msg_tx_queued(lp)) { 1339 if (netif_msg_tx_queued(lp)) {
1485 printk("%s: starting TxFD.\n", dev->name); 1340 printk("%s: starting TxFD.\n", dev->name);
1486 dump_txfd(txfd); 1341 dump_txfd(txfd);
@@ -1536,11 +1391,7 @@ static void tc35815_fatal_error_interrupt(struct net_device *dev, u32 status)
1536 tc35815_schedule_restart(dev); 1391 tc35815_schedule_restart(dev);
1537} 1392}
1538 1393
1539#ifdef TC35815_NAPI
1540static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit) 1394static int tc35815_do_interrupt(struct net_device *dev, u32 status, int limit)
1541#else
1542static int tc35815_do_interrupt(struct net_device *dev, u32 status)
1543#endif
1544{ 1395{
1545 struct tc35815_local *lp = netdev_priv(dev); 1396 struct tc35815_local *lp = netdev_priv(dev);
1546 int ret = -1; 1397 int ret = -1;
@@ -1579,12 +1430,7 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status)
1579 /* normal notification */ 1430 /* normal notification */
1580 if (status & Int_IntMacRx) { 1431 if (status & Int_IntMacRx) {
1581 /* Got a packet(s). */ 1432 /* Got a packet(s). */
1582#ifdef TC35815_NAPI
1583 ret = tc35815_rx(dev, limit); 1433 ret = tc35815_rx(dev, limit);
1584#else
1585 tc35815_rx(dev);
1586 ret = 0;
1587#endif
1588 lp->lstats.rx_ints++; 1434 lp->lstats.rx_ints++;
1589 } 1435 }
1590 if (status & Int_IntMacTx) { 1436 if (status & Int_IntMacTx) {
@@ -1592,7 +1438,8 @@ static int tc35815_do_interrupt(struct net_device *dev, u32 status)
1592 lp->lstats.tx_ints++; 1438 lp->lstats.tx_ints++;
1593 tc35815_txdone(dev); 1439 tc35815_txdone(dev);
1594 netif_wake_queue(dev); 1440 netif_wake_queue(dev);
1595 ret = 0; 1441 if (ret < 0)
1442 ret = 0;
1596 } 1443 }
1597 return ret; 1444 return ret;
1598} 1445}
@@ -1607,7 +1454,6 @@ static irqreturn_t tc35815_interrupt(int irq, void *dev_id)
1607 struct tc35815_local *lp = netdev_priv(dev); 1454 struct tc35815_local *lp = netdev_priv(dev);
1608 struct tc35815_regs __iomem *tr = 1455 struct tc35815_regs __iomem *tr =
1609 (struct tc35815_regs __iomem *)dev->base_addr; 1456 (struct tc35815_regs __iomem *)dev->base_addr;
1610#ifdef TC35815_NAPI
1611 u32 dmactl = tc_readl(&tr->DMA_Ctl); 1457 u32 dmactl = tc_readl(&tr->DMA_Ctl);
1612 1458
1613 if (!(dmactl & DMA_IntMask)) { 1459 if (!(dmactl & DMA_IntMask)) {
@@ -1624,22 +1470,6 @@ static irqreturn_t tc35815_interrupt(int irq, void *dev_id)
1624 return IRQ_HANDLED; 1470 return IRQ_HANDLED;
1625 } 1471 }
1626 return IRQ_NONE; 1472 return IRQ_NONE;
1627#else
1628 int handled;
1629 u32 status;
1630
1631 spin_lock(&lp->lock);
1632 status = tc_readl(&tr->Int_Src);
1633 /* BLEx, FDAEx will be cleared later */
1634 tc_writel(status & ~(Int_BLEx | Int_FDAEx),
1635 &tr->Int_Src); /* write to clear */
1636 handled = tc35815_do_interrupt(dev, status);
1637 if (status & (Int_BLEx | Int_FDAEx))
1638 tc_writel(status & (Int_BLEx | Int_FDAEx), &tr->Int_Src);
1639 (void)tc_readl(&tr->Int_Src); /* flush */
1640 spin_unlock(&lp->lock);
1641 return IRQ_RETVAL(handled >= 0);
1642#endif /* TC35815_NAPI */
1643} 1473}
1644 1474
1645#ifdef CONFIG_NET_POLL_CONTROLLER 1475#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -1652,20 +1482,13 @@ static void tc35815_poll_controller(struct net_device *dev)
1652#endif 1482#endif
1653 1483
1654/* We have a good packet(s), get it/them out of the buffers. */ 1484/* We have a good packet(s), get it/them out of the buffers. */
1655#ifdef TC35815_NAPI
1656static int 1485static int
1657tc35815_rx(struct net_device *dev, int limit) 1486tc35815_rx(struct net_device *dev, int limit)
1658#else
1659static void
1660tc35815_rx(struct net_device *dev)
1661#endif
1662{ 1487{
1663 struct tc35815_local *lp = netdev_priv(dev); 1488 struct tc35815_local *lp = netdev_priv(dev);
1664 unsigned int fdctl; 1489 unsigned int fdctl;
1665 int i; 1490 int i;
1666#ifdef TC35815_NAPI
1667 int received = 0; 1491 int received = 0;
1668#endif
1669 1492
1670 while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) { 1493 while (!((fdctl = le32_to_cpu(lp->rfd_cur->fd.FDCtl)) & FD_CownsFD)) {
1671 int status = le32_to_cpu(lp->rfd_cur->fd.FDStat); 1494 int status = le32_to_cpu(lp->rfd_cur->fd.FDStat);
@@ -1684,52 +1507,9 @@ tc35815_rx(struct net_device *dev)
1684 struct sk_buff *skb; 1507 struct sk_buff *skb;
1685 unsigned char *data; 1508 unsigned char *data;
1686 int cur_bd; 1509 int cur_bd;
1687#ifdef TC35815_USE_PACKEDBUFFER
1688 int offset;
1689#endif
1690 1510
1691#ifdef TC35815_NAPI
1692 if (--limit < 0) 1511 if (--limit < 0)
1693 break; 1512 break;
1694#endif
1695#ifdef TC35815_USE_PACKEDBUFFER
1696 BUG_ON(bd_count > 2);
1697 skb = dev_alloc_skb(pkt_len + NET_IP_ALIGN);
1698 if (skb == NULL) {
1699 printk(KERN_NOTICE "%s: Memory squeeze, dropping packet.\n",
1700 dev->name);
1701 dev->stats.rx_dropped++;
1702 break;
1703 }
1704 skb_reserve(skb, NET_IP_ALIGN);
1705
1706 data = skb_put(skb, pkt_len);
1707
1708 /* copy from receive buffer */
1709 cur_bd = 0;
1710 offset = 0;
1711 while (offset < pkt_len && cur_bd < bd_count) {
1712 int len = le32_to_cpu(lp->rfd_cur->bd[cur_bd].BDCtl) &
1713 BD_BuffLength_MASK;
1714 dma_addr_t dma = le32_to_cpu(lp->rfd_cur->bd[cur_bd].BuffData);
1715 void *rxbuf = rxbuf_bus_to_virt(lp, dma);
1716 if (offset + len > pkt_len)
1717 len = pkt_len - offset;
1718#ifdef TC35815_DMA_SYNC_ONDEMAND
1719 pci_dma_sync_single_for_cpu(lp->pci_dev,
1720 dma, len,
1721 PCI_DMA_FROMDEVICE);
1722#endif
1723 memcpy(data + offset, rxbuf, len);
1724#ifdef TC35815_DMA_SYNC_ONDEMAND
1725 pci_dma_sync_single_for_device(lp->pci_dev,
1726 dma, len,
1727 PCI_DMA_FROMDEVICE);
1728#endif
1729 offset += len;
1730 cur_bd++;
1731 }
1732#else /* TC35815_USE_PACKEDBUFFER */
1733 BUG_ON(bd_count > 1); 1513 BUG_ON(bd_count > 1);
1734 cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl) 1514 cur_bd = (le32_to_cpu(lp->rfd_cur->bd[0].BDCtl)
1735 & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT; 1515 & BD_RxBDID_MASK) >> BD_RxBDID_SHIFT;
@@ -1757,16 +1537,11 @@ tc35815_rx(struct net_device *dev)
1757 memmove(skb->data, skb->data - NET_IP_ALIGN, 1537 memmove(skb->data, skb->data - NET_IP_ALIGN,
1758 pkt_len); 1538 pkt_len);
1759 data = skb_put(skb, pkt_len); 1539 data = skb_put(skb, pkt_len);
1760#endif /* TC35815_USE_PACKEDBUFFER */
1761 if (netif_msg_pktdata(lp)) 1540 if (netif_msg_pktdata(lp))
1762 print_eth(data); 1541 print_eth(data);
1763 skb->protocol = eth_type_trans(skb, dev); 1542 skb->protocol = eth_type_trans(skb, dev);
1764#ifdef TC35815_NAPI
1765 netif_receive_skb(skb); 1543 netif_receive_skb(skb);
1766 received++; 1544 received++;
1767#else
1768 netif_rx(skb);
1769#endif
1770 dev->stats.rx_packets++; 1545 dev->stats.rx_packets++;
1771 dev->stats.rx_bytes += pkt_len; 1546 dev->stats.rx_bytes += pkt_len;
1772 } else { 1547 } else {
@@ -1803,19 +1578,11 @@ tc35815_rx(struct net_device *dev)
1803 BUG_ON(id >= RX_BUF_NUM); 1578 BUG_ON(id >= RX_BUF_NUM);
1804#endif 1579#endif
1805 /* free old buffers */ 1580 /* free old buffers */
1806#ifdef TC35815_USE_PACKEDBUFFER
1807 while (lp->fbl_curid != id)
1808#else
1809 lp->fbl_count--; 1581 lp->fbl_count--;
1810 while (lp->fbl_count < RX_BUF_NUM) 1582 while (lp->fbl_count < RX_BUF_NUM)
1811#endif
1812 { 1583 {
1813#ifdef TC35815_USE_PACKEDBUFFER
1814 unsigned char curid = lp->fbl_curid;
1815#else
1816 unsigned char curid = 1584 unsigned char curid =
1817 (id + 1 + lp->fbl_count) % RX_BUF_NUM; 1585 (id + 1 + lp->fbl_count) % RX_BUF_NUM;
1818#endif
1819 struct BDesc *bd = &lp->fbl_ptr->bd[curid]; 1586 struct BDesc *bd = &lp->fbl_ptr->bd[curid];
1820#ifdef DEBUG 1587#ifdef DEBUG
1821 bdctl = le32_to_cpu(bd->BDCtl); 1588 bdctl = le32_to_cpu(bd->BDCtl);
@@ -1826,7 +1593,6 @@ tc35815_rx(struct net_device *dev)
1826 } 1593 }
1827#endif 1594#endif
1828 /* pass BD to controller */ 1595 /* pass BD to controller */
1829#ifndef TC35815_USE_PACKEDBUFFER
1830 if (!lp->rx_skbs[curid].skb) { 1596 if (!lp->rx_skbs[curid].skb) {
1831 lp->rx_skbs[curid].skb = 1597 lp->rx_skbs[curid].skb =
1832 alloc_rxbuf_skb(dev, 1598 alloc_rxbuf_skb(dev,
@@ -1836,21 +1602,11 @@ tc35815_rx(struct net_device *dev)
1836 break; /* try on next reception */ 1602 break; /* try on next reception */
1837 bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma); 1603 bd->BuffData = cpu_to_le32(lp->rx_skbs[curid].skb_dma);
1838 } 1604 }
1839#endif /* TC35815_USE_PACKEDBUFFER */
1840 /* Note: BDLength was modified by chip. */ 1605 /* Note: BDLength was modified by chip. */
1841 bd->BDCtl = cpu_to_le32(BD_CownsBD | 1606 bd->BDCtl = cpu_to_le32(BD_CownsBD |
1842 (curid << BD_RxBDID_SHIFT) | 1607 (curid << BD_RxBDID_SHIFT) |
1843 RX_BUF_SIZE); 1608 RX_BUF_SIZE);
1844#ifdef TC35815_USE_PACKEDBUFFER
1845 lp->fbl_curid = (curid + 1) % RX_BUF_NUM;
1846 if (netif_msg_rx_status(lp)) {
1847 printk("%s: Entering new FBD %d\n",
1848 dev->name, lp->fbl_curid);
1849 dump_frfd(lp->fbl_ptr);
1850 }
1851#else
1852 lp->fbl_count++; 1609 lp->fbl_count++;
1853#endif
1854 } 1610 }
1855 } 1611 }
1856 1612
@@ -1882,12 +1638,9 @@ tc35815_rx(struct net_device *dev)
1882#endif 1638#endif
1883 } 1639 }
1884 1640
1885#ifdef TC35815_NAPI
1886 return received; 1641 return received;
1887#endif
1888} 1642}
1889 1643
1890#ifdef TC35815_NAPI
1891static int tc35815_poll(struct napi_struct *napi, int budget) 1644static int tc35815_poll(struct napi_struct *napi, int budget)
1892{ 1645{
1893 struct tc35815_local *lp = container_of(napi, struct tc35815_local, napi); 1646 struct tc35815_local *lp = container_of(napi, struct tc35815_local, napi);
@@ -1924,13 +1677,8 @@ static int tc35815_poll(struct napi_struct *napi, int budget)
1924 } 1677 }
1925 return received; 1678 return received;
1926} 1679}
1927#endif
1928 1680
1929#ifdef NO_CHECK_CARRIER
1930#define TX_STA_ERR (Tx_ExColl|Tx_Under|Tx_Defer|Tx_LateColl|Tx_TxPar|Tx_SQErr)
1931#else
1932#define TX_STA_ERR (Tx_ExColl|Tx_Under|Tx_Defer|Tx_NCarr|Tx_LateColl|Tx_TxPar|Tx_SQErr) 1681#define TX_STA_ERR (Tx_ExColl|Tx_Under|Tx_Defer|Tx_NCarr|Tx_LateColl|Tx_TxPar|Tx_SQErr)
1933#endif
1934 1682
1935static void 1683static void
1936tc35815_check_tx_stat(struct net_device *dev, int status) 1684tc35815_check_tx_stat(struct net_device *dev, int status)
@@ -1944,16 +1692,12 @@ tc35815_check_tx_stat(struct net_device *dev, int status)
1944 if (status & Tx_TxColl_MASK) 1692 if (status & Tx_TxColl_MASK)
1945 dev->stats.collisions += status & Tx_TxColl_MASK; 1693 dev->stats.collisions += status & Tx_TxColl_MASK;
1946 1694
1947#ifndef NO_CHECK_CARRIER
1948 /* TX4939 does not have NCarr */ 1695 /* TX4939 does not have NCarr */
1949 if (lp->chiptype == TC35815_TX4939) 1696 if (lp->chiptype == TC35815_TX4939)
1950 status &= ~Tx_NCarr; 1697 status &= ~Tx_NCarr;
1951#ifdef WORKAROUND_LOSTCAR
1952 /* WORKAROUND: ignore LostCrS in full duplex operation */ 1698 /* WORKAROUND: ignore LostCrS in full duplex operation */
1953 if (!lp->link || lp->duplex == DUPLEX_FULL) 1699 if (!lp->link || lp->duplex == DUPLEX_FULL)
1954 status &= ~Tx_NCarr; 1700 status &= ~Tx_NCarr;
1955#endif
1956#endif
1957 1701
1958 if (!(status & TX_STA_ERR)) { 1702 if (!(status & TX_STA_ERR)) {
1959 /* no error. */ 1703 /* no error. */
@@ -1983,12 +1727,10 @@ tc35815_check_tx_stat(struct net_device *dev, int status)
1983 dev->stats.tx_fifo_errors++; 1727 dev->stats.tx_fifo_errors++;
1984 msg = "Excessive Deferral."; 1728 msg = "Excessive Deferral.";
1985 } 1729 }
1986#ifndef NO_CHECK_CARRIER
1987 if (status & Tx_NCarr) { 1730 if (status & Tx_NCarr) {
1988 dev->stats.tx_carrier_errors++; 1731 dev->stats.tx_carrier_errors++;
1989 msg = "Lost Carrier Sense."; 1732 msg = "Lost Carrier Sense.";
1990 } 1733 }
1991#endif
1992 if (status & Tx_LateColl) { 1734 if (status & Tx_LateColl) {
1993 dev->stats.tx_aborted_errors++; 1735 dev->stats.tx_aborted_errors++;
1994 msg = "Late Collision."; 1736 msg = "Late Collision.";
@@ -2044,11 +1786,7 @@ tc35815_txdone(struct net_device *dev)
2044 pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE); 1786 pci_unmap_single(lp->pci_dev, lp->tx_skbs[lp->tfd_end].skb_dma, skb->len, PCI_DMA_TODEVICE);
2045 lp->tx_skbs[lp->tfd_end].skb = NULL; 1787 lp->tx_skbs[lp->tfd_end].skb = NULL;
2046 lp->tx_skbs[lp->tfd_end].skb_dma = 0; 1788 lp->tx_skbs[lp->tfd_end].skb_dma = 0;
2047#ifdef TC35815_NAPI
2048 dev_kfree_skb_any(skb); 1789 dev_kfree_skb_any(skb);
2049#else
2050 dev_kfree_skb_irq(skb);
2051#endif
2052 } 1790 }
2053 txfd->fd.FDSystem = cpu_to_le32(0xffffffff); 1791 txfd->fd.FDSystem = cpu_to_le32(0xffffffff);
2054 1792
@@ -2083,9 +1821,7 @@ tc35815_txdone(struct net_device *dev)
2083 1821
2084 /* start DMA Transmitter again */ 1822 /* start DMA Transmitter again */
2085 txhead->fd.FDNext |= cpu_to_le32(FD_Next_EOL); 1823 txhead->fd.FDNext |= cpu_to_le32(FD_Next_EOL);
2086#ifdef GATHER_TXINT
2087 txhead->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx); 1824 txhead->fd.FDCtl |= cpu_to_le32(FD_FrmOpt_IntTx);
2088#endif
2089 if (netif_msg_tx_queued(lp)) { 1825 if (netif_msg_tx_queued(lp)) {
2090 printk("%s: start TxFD on queue.\n", 1826 printk("%s: start TxFD on queue.\n",
2091 dev->name); 1827 dev->name);
@@ -2112,9 +1848,7 @@ tc35815_close(struct net_device *dev)
2112 struct tc35815_local *lp = netdev_priv(dev); 1848 struct tc35815_local *lp = netdev_priv(dev);
2113 1849
2114 netif_stop_queue(dev); 1850 netif_stop_queue(dev);
2115#ifdef TC35815_NAPI
2116 napi_disable(&lp->napi); 1851 napi_disable(&lp->napi);
2117#endif
2118 if (lp->phy_dev) 1852 if (lp->phy_dev)
2119 phy_stop(lp->phy_dev); 1853 phy_stop(lp->phy_dev);
2120 cancel_work_sync(&lp->restart_work); 1854 cancel_work_sync(&lp->restart_work);
@@ -2198,14 +1932,12 @@ tc35815_set_multicast_list(struct net_device *dev)
2198 (struct tc35815_regs __iomem *)dev->base_addr; 1932 (struct tc35815_regs __iomem *)dev->base_addr;
2199 1933
2200 if (dev->flags & IFF_PROMISC) { 1934 if (dev->flags & IFF_PROMISC) {
2201#ifdef WORKAROUND_100HALF_PROMISC
2202 /* With some (all?) 100MHalf HUB, controller will hang 1935 /* With some (all?) 100MHalf HUB, controller will hang
2203 * if we enabled promiscuous mode before linkup... */ 1936 * if we enabled promiscuous mode before linkup... */
2204 struct tc35815_local *lp = netdev_priv(dev); 1937 struct tc35815_local *lp = netdev_priv(dev);
2205 1938
2206 if (!lp->link) 1939 if (!lp->link)
2207 return; 1940 return;
2208#endif
2209 /* Enable promiscuous mode */ 1941 /* Enable promiscuous mode */
2210 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl); 1942 tc_writel(CAM_CompEn | CAM_BroadAcc | CAM_GroupAcc | CAM_StationAcc, &tr->CAM_Ctl);
2211 } else if ((dev->flags & IFF_ALLMULTI) || 1943 } else if ((dev->flags & IFF_ALLMULTI) ||
@@ -2392,9 +2124,6 @@ static void tc35815_chip_init(struct net_device *dev)
2392 tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl); 2124 tc_writel(DMA_BURST_SIZE | DMA_RxAlign_2, &tr->DMA_Ctl);
2393 else 2125 else
2394 tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl); 2126 tc_writel(DMA_BURST_SIZE, &tr->DMA_Ctl);
2395#ifdef TC35815_USE_PACKEDBUFFER
2396 tc_writel(RxFrag_EnPack | ETH_ZLEN, &tr->RxFragSize); /* Packing */
2397#endif
2398 tc_writel(0, &tr->TxPollCtr); /* Batch mode */ 2127 tc_writel(0, &tr->TxPollCtr); /* Batch mode */
2399 tc_writel(TX_THRESHOLD, &tr->TxThrsh); 2128 tc_writel(TX_THRESHOLD, &tr->TxThrsh);
2400 tc_writel(INT_EN_CMD, &tr->Int_En); 2129 tc_writel(INT_EN_CMD, &tr->Int_En);
@@ -2412,19 +2141,12 @@ static void tc35815_chip_init(struct net_device *dev)
2412 tc_writel(RX_CTL_CMD, &tr->Rx_Ctl); /* start MAC receiver */ 2141 tc_writel(RX_CTL_CMD, &tr->Rx_Ctl); /* start MAC receiver */
2413 2142
2414 /* start MAC transmitter */ 2143 /* start MAC transmitter */
2415#ifndef NO_CHECK_CARRIER
2416 /* TX4939 does not have EnLCarr */ 2144 /* TX4939 does not have EnLCarr */
2417 if (lp->chiptype == TC35815_TX4939) 2145 if (lp->chiptype == TC35815_TX4939)
2418 txctl &= ~Tx_EnLCarr; 2146 txctl &= ~Tx_EnLCarr;
2419#ifdef WORKAROUND_LOSTCAR
2420 /* WORKAROUND: ignore LostCrS in full duplex operation */ 2147 /* WORKAROUND: ignore LostCrS in full duplex operation */
2421 if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL) 2148 if (!lp->phy_dev || !lp->link || lp->duplex == DUPLEX_FULL)
2422 txctl &= ~Tx_EnLCarr; 2149 txctl &= ~Tx_EnLCarr;
2423#endif
2424#endif /* !NO_CHECK_CARRIER */
2425#ifdef GATHER_TXINT
2426 txctl &= ~Tx_EnComp; /* disable global tx completion int. */
2427#endif
2428 tc_writel(txctl, &tr->Tx_Ctl); 2150 tc_writel(txctl, &tr->Tx_Ctl);
2429} 2151}
2430 2152
diff --git a/drivers/net/tehuti.c b/drivers/net/tehuti.c
index ec9dfb251f30..492bff68bf2d 100644
--- a/drivers/net/tehuti.c
+++ b/drivers/net/tehuti.c
@@ -1878,7 +1878,7 @@ static void bdx_tx_push_desc_safe(struct bdx_priv *priv, void *data, int size)
1878 udelay(50); /* give hw a chance to clean fifo */ 1878 udelay(50); /* give hw a chance to clean fifo */
1879 continue; 1879 continue;
1880 } 1880 }
1881 avail = MIN(avail, size); 1881 avail = min(avail, size);
1882 DBG("about to push %d bytes starting %p size %d\n", avail, 1882 DBG("about to push %d bytes starting %p size %d\n", avail,
1883 data, size); 1883 data, size);
1884 bdx_tx_push_desc(priv, data, avail); 1884 bdx_tx_push_desc(priv, data, avail);
@@ -2105,12 +2105,6 @@ err_pci:
2105} 2105}
2106 2106
2107/****************** Ethtool interface *********************/ 2107/****************** Ethtool interface *********************/
2108/* get strings for tests */
2109static const char
2110 bdx_test_names[][ETH_GSTRING_LEN] = {
2111 "No tests defined"
2112};
2113
2114/* get strings for statistics counters */ 2108/* get strings for statistics counters */
2115static const char 2109static const char
2116 bdx_stat_names[][ETH_GSTRING_LEN] = { 2110 bdx_stat_names[][ETH_GSTRING_LEN] = {
@@ -2380,9 +2374,6 @@ bdx_set_ringparam(struct net_device *netdev, struct ethtool_ringparam *ring)
2380static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data) 2374static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2381{ 2375{
2382 switch (stringset) { 2376 switch (stringset) {
2383 case ETH_SS_TEST:
2384 memcpy(data, *bdx_test_names, sizeof(bdx_test_names));
2385 break;
2386 case ETH_SS_STATS: 2377 case ETH_SS_STATS:
2387 memcpy(data, *bdx_stat_names, sizeof(bdx_stat_names)); 2378 memcpy(data, *bdx_stat_names, sizeof(bdx_stat_names));
2388 break; 2379 break;
@@ -2390,15 +2381,21 @@ static void bdx_get_strings(struct net_device *netdev, u32 stringset, u8 *data)
2390} 2381}
2391 2382
2392/* 2383/*
2393 * bdx_get_stats_count - return number of 64bit statistics counters 2384 * bdx_get_sset_count - return number of statistics or tests
2394 * @netdev 2385 * @netdev
2395 */ 2386 */
2396static int bdx_get_stats_count(struct net_device *netdev) 2387static int bdx_get_sset_count(struct net_device *netdev, int stringset)
2397{ 2388{
2398 struct bdx_priv *priv = netdev_priv(netdev); 2389 struct bdx_priv *priv = netdev_priv(netdev);
2399 BDX_ASSERT(ARRAY_SIZE(bdx_stat_names) 2390
2400 != sizeof(struct bdx_stats) / sizeof(u64)); 2391 switch (stringset) {
2401 return ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0); 2392 case ETH_SS_STATS:
2393 BDX_ASSERT(ARRAY_SIZE(bdx_stat_names)
2394 != sizeof(struct bdx_stats) / sizeof(u64));
2395 return ((priv->stats_flag) ? ARRAY_SIZE(bdx_stat_names) : 0);
2396 default:
2397 return -EINVAL;
2398 }
2402} 2399}
2403 2400
2404/* 2401/*
@@ -2441,7 +2438,7 @@ static void bdx_ethtool_ops(struct net_device *netdev)
2441 .get_sg = ethtool_op_get_sg, 2438 .get_sg = ethtool_op_get_sg,
2442 .get_tso = ethtool_op_get_tso, 2439 .get_tso = ethtool_op_get_tso,
2443 .get_strings = bdx_get_strings, 2440 .get_strings = bdx_get_strings,
2444 .get_stats_count = bdx_get_stats_count, 2441 .get_sset_count = bdx_get_sset_count,
2445 .get_ethtool_stats = bdx_get_ethtool_stats, 2442 .get_ethtool_stats = bdx_get_ethtool_stats,
2446 }; 2443 };
2447 2444
diff --git a/drivers/net/tehuti.h b/drivers/net/tehuti.h
index 4fc875e5dcdd..124141909e42 100644
--- a/drivers/net/tehuti.h
+++ b/drivers/net/tehuti.h
@@ -76,8 +76,6 @@
76#define FIFO_SIZE 4096 76#define FIFO_SIZE 4096
77#define FIFO_EXTRA_SPACE 1024 77#define FIFO_EXTRA_SPACE 1024
78 78
79#define MIN(x, y) ((x) < (y) ? (x) : (y))
80
81#if BITS_PER_LONG == 64 79#if BITS_PER_LONG == 64
82# define H32_64(x) (u32) ((u64)(x) >> 32) 80# define H32_64(x) (u32) ((u64)(x) >> 32)
83# define L32_64(x) (u32) ((u64)(x) & 0xffffffff) 81# define L32_64(x) (u32) ((u64)(x) & 0xffffffff)
diff --git a/drivers/net/tg3.c b/drivers/net/tg3.c
index ba5d3fe753b6..6e6db955b4a9 100644
--- a/drivers/net/tg3.c
+++ b/drivers/net/tg3.c
@@ -68,8 +68,8 @@
68 68
69#define DRV_MODULE_NAME "tg3" 69#define DRV_MODULE_NAME "tg3"
70#define PFX DRV_MODULE_NAME ": " 70#define PFX DRV_MODULE_NAME ": "
71#define DRV_MODULE_VERSION "3.102" 71#define DRV_MODULE_VERSION "3.104"
72#define DRV_MODULE_RELDATE "September 1, 2009" 72#define DRV_MODULE_RELDATE "November 13, 2009"
73 73
74#define TG3_DEF_MAC_MODE 0 74#define TG3_DEF_MAC_MODE 0
75#define TG3_DEF_RX_MODE 0 75#define TG3_DEF_RX_MODE 0
@@ -137,6 +137,12 @@
137#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ) 137#define TG3_RX_STD_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
138#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ) 138#define TG3_RX_JMB_MAP_SZ TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)
139 139
140#define TG3_RX_STD_BUFF_RING_SIZE \
141 (sizeof(struct ring_info) * TG3_RX_RING_SIZE)
142
143#define TG3_RX_JMB_BUFF_RING_SIZE \
144 (sizeof(struct ring_info) * TG3_RX_JUMBO_RING_SIZE)
145
140/* minimum number of free TX descriptors required to wake up TX process */ 146/* minimum number of free TX descriptors required to wake up TX process */
141#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4) 147#define TG3_TX_WAKEUP_THRESH(tnapi) ((tnapi)->tx_pending / 4)
142 148
@@ -235,6 +241,9 @@ static struct pci_device_id tg3_pci_tbl[] = {
235 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)}, 241 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
236 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)}, 242 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
237 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)}, 243 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
244 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
245 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
246 {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5724)},
238 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)}, 247 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
239 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)}, 248 {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
240 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)}, 249 {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
@@ -396,7 +405,7 @@ static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
396 TG3_64BIT_REG_LOW, val); 405 TG3_64BIT_REG_LOW, val);
397 return; 406 return;
398 } 407 }
399 if (off == (MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW)) { 408 if (off == TG3_RX_STD_PROD_IDX_REG) {
400 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX + 409 pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
401 TG3_64BIT_REG_LOW, val); 410 TG3_64BIT_REG_LOW, val);
402 return; 411 return;
@@ -937,9 +946,10 @@ static void tg3_mdio_config_5785(struct tg3 *tp)
937 u32 val; 946 u32 val;
938 struct phy_device *phydev; 947 struct phy_device *phydev;
939 948
940 phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 949 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
941 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { 950 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
942 case TG3_PHY_ID_BCM50610: 951 case TG3_PHY_ID_BCM50610:
952 case TG3_PHY_ID_BCM50610M:
943 val = MAC_PHYCFG2_50610_LED_MODES; 953 val = MAC_PHYCFG2_50610_LED_MODES;
944 break; 954 break;
945 case TG3_PHY_ID_BCMAC131: 955 case TG3_PHY_ID_BCMAC131:
@@ -1031,7 +1041,7 @@ static void tg3_mdio_start(struct tg3 *tp)
1031 if (is_serdes) 1041 if (is_serdes)
1032 tp->phy_addr += 7; 1042 tp->phy_addr += 7;
1033 } else 1043 } else
1034 tp->phy_addr = PHY_ADDR; 1044 tp->phy_addr = TG3_PHY_MII_ADDR;
1035 1045
1036 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) && 1046 if ((tp->tg3_flags3 & TG3_FLG3_MDIOBUS_INITED) &&
1037 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) 1047 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
@@ -1062,7 +1072,7 @@ static int tg3_mdio_init(struct tg3 *tp)
1062 tp->mdio_bus->read = &tg3_mdio_read; 1072 tp->mdio_bus->read = &tg3_mdio_read;
1063 tp->mdio_bus->write = &tg3_mdio_write; 1073 tp->mdio_bus->write = &tg3_mdio_write;
1064 tp->mdio_bus->reset = &tg3_mdio_reset; 1074 tp->mdio_bus->reset = &tg3_mdio_reset;
1065 tp->mdio_bus->phy_mask = ~(1 << PHY_ADDR); 1075 tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
1066 tp->mdio_bus->irq = &tp->mdio_irq[0]; 1076 tp->mdio_bus->irq = &tp->mdio_irq[0];
1067 1077
1068 for (i = 0; i < PHY_MAX_ADDR; i++) 1078 for (i = 0; i < PHY_MAX_ADDR; i++)
@@ -1084,7 +1094,7 @@ static int tg3_mdio_init(struct tg3 *tp)
1084 return i; 1094 return i;
1085 } 1095 }
1086 1096
1087 phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 1097 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1088 1098
1089 if (!phydev || !phydev->drv) { 1099 if (!phydev || !phydev->drv) {
1090 printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name); 1100 printk(KERN_WARNING "%s: No PHY devices\n", tp->dev->name);
@@ -1096,8 +1106,14 @@ static int tg3_mdio_init(struct tg3 *tp)
1096 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) { 1106 switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1097 case TG3_PHY_ID_BCM57780: 1107 case TG3_PHY_ID_BCM57780:
1098 phydev->interface = PHY_INTERFACE_MODE_GMII; 1108 phydev->interface = PHY_INTERFACE_MODE_GMII;
1109 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1099 break; 1110 break;
1100 case TG3_PHY_ID_BCM50610: 1111 case TG3_PHY_ID_BCM50610:
1112 case TG3_PHY_ID_BCM50610M:
1113 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1114 PHY_BRCM_RX_REFCLK_UNUSED |
1115 PHY_BRCM_DIS_TXCRXC_NOENRGY |
1116 PHY_BRCM_AUTO_PWRDWN_ENABLE;
1101 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE) 1117 if (tp->tg3_flags3 & TG3_FLG3_RGMII_STD_IBND_DISABLE)
1102 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE; 1118 phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1103 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN) 1119 if (tp->tg3_flags3 & TG3_FLG3_RGMII_EXT_IBND_RX_EN)
@@ -1111,6 +1127,7 @@ static int tg3_mdio_init(struct tg3 *tp)
1111 case TG3_PHY_ID_RTL8201E: 1127 case TG3_PHY_ID_RTL8201E:
1112 case TG3_PHY_ID_BCMAC131: 1128 case TG3_PHY_ID_BCMAC131:
1113 phydev->interface = PHY_INTERFACE_MODE_MII; 1129 phydev->interface = PHY_INTERFACE_MODE_MII;
1130 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1114 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET; 1131 tp->tg3_flags3 |= TG3_FLG3_PHY_IS_FET;
1115 break; 1132 break;
1116 } 1133 }
@@ -1311,7 +1328,7 @@ static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1311 u32 old_tx_mode = tp->tx_mode; 1328 u32 old_tx_mode = tp->tx_mode;
1312 1329
1313 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) 1330 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB)
1314 autoneg = tp->mdio_bus->phy_map[PHY_ADDR]->autoneg; 1331 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1315 else 1332 else
1316 autoneg = tp->link_config.autoneg; 1333 autoneg = tp->link_config.autoneg;
1317 1334
@@ -1348,7 +1365,7 @@ static void tg3_adjust_link(struct net_device *dev)
1348 u8 oldflowctrl, linkmesg = 0; 1365 u8 oldflowctrl, linkmesg = 0;
1349 u32 mac_mode, lcl_adv, rmt_adv; 1366 u32 mac_mode, lcl_adv, rmt_adv;
1350 struct tg3 *tp = netdev_priv(dev); 1367 struct tg3 *tp = netdev_priv(dev);
1351 struct phy_device *phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 1368 struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1352 1369
1353 spin_lock_bh(&tp->lock); 1370 spin_lock_bh(&tp->lock);
1354 1371
@@ -1363,8 +1380,11 @@ static void tg3_adjust_link(struct net_device *dev)
1363 1380
1364 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10) 1381 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1365 mac_mode |= MAC_MODE_PORT_MODE_MII; 1382 mac_mode |= MAC_MODE_PORT_MODE_MII;
1366 else 1383 else if (phydev->speed == SPEED_1000 ||
1384 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1367 mac_mode |= MAC_MODE_PORT_MODE_GMII; 1385 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1386 else
1387 mac_mode |= MAC_MODE_PORT_MODE_MII;
1368 1388
1369 if (phydev->duplex == DUPLEX_HALF) 1389 if (phydev->duplex == DUPLEX_HALF)
1370 mac_mode |= MAC_MODE_HALF_DUPLEX; 1390 mac_mode |= MAC_MODE_HALF_DUPLEX;
@@ -1434,7 +1454,7 @@ static int tg3_phy_init(struct tg3 *tp)
1434 /* Bring the PHY back to a known state. */ 1454 /* Bring the PHY back to a known state. */
1435 tg3_bmcr_reset(tp); 1455 tg3_bmcr_reset(tp);
1436 1456
1437 phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 1457 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1438 1458
1439 /* Attach the MAC to the PHY. */ 1459 /* Attach the MAC to the PHY. */
1440 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link, 1460 phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
@@ -1461,7 +1481,7 @@ static int tg3_phy_init(struct tg3 *tp)
1461 SUPPORTED_Asym_Pause); 1481 SUPPORTED_Asym_Pause);
1462 break; 1482 break;
1463 default: 1483 default:
1464 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]); 1484 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1465 return -EINVAL; 1485 return -EINVAL;
1466 } 1486 }
1467 1487
@@ -1479,7 +1499,7 @@ static void tg3_phy_start(struct tg3 *tp)
1479 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 1499 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1480 return; 1500 return;
1481 1501
1482 phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 1502 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1483 1503
1484 if (tp->link_config.phy_is_low_power) { 1504 if (tp->link_config.phy_is_low_power) {
1485 tp->link_config.phy_is_low_power = 0; 1505 tp->link_config.phy_is_low_power = 0;
@@ -1499,13 +1519,13 @@ static void tg3_phy_stop(struct tg3 *tp)
1499 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 1519 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
1500 return; 1520 return;
1501 1521
1502 phy_stop(tp->mdio_bus->phy_map[PHY_ADDR]); 1522 phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1503} 1523}
1504 1524
1505static void tg3_phy_fini(struct tg3 *tp) 1525static void tg3_phy_fini(struct tg3 *tp)
1506{ 1526{
1507 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) { 1527 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
1508 phy_disconnect(tp->mdio_bus->phy_map[PHY_ADDR]); 1528 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1509 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED; 1529 tp->tg3_flags3 &= ~TG3_FLG3_PHY_CONNECTED;
1510 } 1530 }
1511} 1531}
@@ -2149,6 +2169,26 @@ static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2149 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ); 2169 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2150 udelay(40); 2170 udelay(40);
2151 return; 2171 return;
2172 } else if (tp->tg3_flags3 & TG3_FLG3_PHY_IS_FET) {
2173 u32 phytest;
2174 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2175 u32 phy;
2176
2177 tg3_writephy(tp, MII_ADVERTISE, 0);
2178 tg3_writephy(tp, MII_BMCR,
2179 BMCR_ANENABLE | BMCR_ANRESTART);
2180
2181 tg3_writephy(tp, MII_TG3_FET_TEST,
2182 phytest | MII_TG3_FET_SHADOW_EN);
2183 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2184 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2185 tg3_writephy(tp,
2186 MII_TG3_FET_SHDW_AUXMODE4,
2187 phy);
2188 }
2189 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2190 }
2191 return;
2152 } else if (do_low_power) { 2192 } else if (do_low_power) {
2153 tg3_writephy(tp, MII_TG3_EXT_CTRL, 2193 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2154 MII_TG3_EXT_CTRL_FORCE_LED_OFF); 2194 MII_TG3_EXT_CTRL_FORCE_LED_OFF);
@@ -2218,7 +2258,7 @@ static void tg3_nvram_unlock(struct tg3 *tp)
2218static void tg3_enable_nvram_access(struct tg3 *tp) 2258static void tg3_enable_nvram_access(struct tg3 *tp)
2219{ 2259{
2220 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 2260 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2221 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) { 2261 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2222 u32 nvaccess = tr32(NVRAM_ACCESS); 2262 u32 nvaccess = tr32(NVRAM_ACCESS);
2223 2263
2224 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE); 2264 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
@@ -2229,7 +2269,7 @@ static void tg3_enable_nvram_access(struct tg3 *tp)
2229static void tg3_disable_nvram_access(struct tg3 *tp) 2269static void tg3_disable_nvram_access(struct tg3 *tp)
2230{ 2270{
2231 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 2271 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
2232 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) { 2272 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM)) {
2233 u32 nvaccess = tr32(NVRAM_ACCESS); 2273 u32 nvaccess = tr32(NVRAM_ACCESS);
2234 2274
2235 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE); 2275 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
@@ -2474,7 +2514,7 @@ static int tg3_set_power_state(struct tg3 *tp, pci_power_t state)
2474 struct phy_device *phydev; 2514 struct phy_device *phydev;
2475 u32 phyid, advertising; 2515 u32 phyid, advertising;
2476 2516
2477 phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 2517 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
2478 2518
2479 tp->link_config.phy_is_low_power = 1; 2519 tp->link_config.phy_is_low_power = 1;
2480 2520
@@ -3243,15 +3283,6 @@ relink:
3243 pci_write_config_word(tp->pdev, 3283 pci_write_config_word(tp->pdev,
3244 tp->pcie_cap + PCI_EXP_LNKCTL, 3284 tp->pcie_cap + PCI_EXP_LNKCTL,
3245 newlnkctl); 3285 newlnkctl);
3246 } else if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) {
3247 u32 newreg, oldreg = tr32(TG3_PCIE_LNKCTL);
3248 if (tp->link_config.active_speed == SPEED_100 ||
3249 tp->link_config.active_speed == SPEED_10)
3250 newreg = oldreg & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
3251 else
3252 newreg = oldreg | TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
3253 if (newreg != oldreg)
3254 tw32(TG3_PCIE_LNKCTL, newreg);
3255 } 3286 }
3256 3287
3257 if (current_link_up != netif_carrier_ok(tp->dev)) { 3288 if (current_link_up != netif_carrier_ok(tp->dev)) {
@@ -4375,6 +4406,17 @@ static void tg3_tx(struct tg3_napi *tnapi)
4375 } 4406 }
4376} 4407}
4377 4408
4409static void tg3_rx_skb_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
4410{
4411 if (!ri->skb)
4412 return;
4413
4414 pci_unmap_single(tp->pdev, pci_unmap_addr(ri, mapping),
4415 map_sz, PCI_DMA_FROMDEVICE);
4416 dev_kfree_skb_any(ri->skb);
4417 ri->skb = NULL;
4418}
4419
4378/* Returns size of skb allocated or < 0 on error. 4420/* Returns size of skb allocated or < 0 on error.
4379 * 4421 *
4380 * We only need to fill in the address because the other members 4422 * We only need to fill in the address because the other members
@@ -4386,16 +4428,14 @@ static void tg3_tx(struct tg3_napi *tnapi)
4386 * buffers the cpu only reads the last cacheline of the RX descriptor 4428 * buffers the cpu only reads the last cacheline of the RX descriptor
4387 * (to fetch the error flags, vlan tag, checksum, and opaque cookie). 4429 * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
4388 */ 4430 */
4389static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key, 4431static int tg3_alloc_rx_skb(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
4390 int src_idx, u32 dest_idx_unmasked) 4432 u32 opaque_key, u32 dest_idx_unmasked)
4391{ 4433{
4392 struct tg3 *tp = tnapi->tp;
4393 struct tg3_rx_buffer_desc *desc; 4434 struct tg3_rx_buffer_desc *desc;
4394 struct ring_info *map, *src_map; 4435 struct ring_info *map, *src_map;
4395 struct sk_buff *skb; 4436 struct sk_buff *skb;
4396 dma_addr_t mapping; 4437 dma_addr_t mapping;
4397 int skb_size, dest_idx; 4438 int skb_size, dest_idx;
4398 struct tg3_rx_prodring_set *tpr = &tp->prodring[0];
4399 4439
4400 src_map = NULL; 4440 src_map = NULL;
4401 switch (opaque_key) { 4441 switch (opaque_key) {
@@ -4403,8 +4443,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
4403 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; 4443 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4404 desc = &tpr->rx_std[dest_idx]; 4444 desc = &tpr->rx_std[dest_idx];
4405 map = &tpr->rx_std_buffers[dest_idx]; 4445 map = &tpr->rx_std_buffers[dest_idx];
4406 if (src_idx >= 0)
4407 src_map = &tpr->rx_std_buffers[src_idx];
4408 skb_size = tp->rx_pkt_map_sz; 4446 skb_size = tp->rx_pkt_map_sz;
4409 break; 4447 break;
4410 4448
@@ -4412,8 +4450,6 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
4412 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; 4450 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4413 desc = &tpr->rx_jmb[dest_idx].std; 4451 desc = &tpr->rx_jmb[dest_idx].std;
4414 map = &tpr->rx_jmb_buffers[dest_idx]; 4452 map = &tpr->rx_jmb_buffers[dest_idx];
4415 if (src_idx >= 0)
4416 src_map = &tpr->rx_jmb_buffers[src_idx];
4417 skb_size = TG3_RX_JMB_MAP_SZ; 4453 skb_size = TG3_RX_JMB_MAP_SZ;
4418 break; 4454 break;
4419 4455
@@ -4435,13 +4471,14 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
4435 4471
4436 mapping = pci_map_single(tp->pdev, skb->data, skb_size, 4472 mapping = pci_map_single(tp->pdev, skb->data, skb_size,
4437 PCI_DMA_FROMDEVICE); 4473 PCI_DMA_FROMDEVICE);
4474 if (pci_dma_mapping_error(tp->pdev, mapping)) {
4475 dev_kfree_skb(skb);
4476 return -EIO;
4477 }
4438 4478
4439 map->skb = skb; 4479 map->skb = skb;
4440 pci_unmap_addr_set(map, mapping, mapping); 4480 pci_unmap_addr_set(map, mapping, mapping);
4441 4481
4442 if (src_map != NULL)
4443 src_map->skb = NULL;
4444
4445 desc->addr_hi = ((u64)mapping >> 32); 4482 desc->addr_hi = ((u64)mapping >> 32);
4446 desc->addr_lo = ((u64)mapping & 0xffffffff); 4483 desc->addr_lo = ((u64)mapping & 0xffffffff);
4447 4484
@@ -4452,30 +4489,32 @@ static int tg3_alloc_rx_skb(struct tg3_napi *tnapi, u32 opaque_key,
4452 * members of the RX descriptor are invariant. See notes above 4489 * members of the RX descriptor are invariant. See notes above
4453 * tg3_alloc_rx_skb for full details. 4490 * tg3_alloc_rx_skb for full details.
4454 */ 4491 */
4455static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key, 4492static void tg3_recycle_rx(struct tg3_napi *tnapi,
4456 int src_idx, u32 dest_idx_unmasked) 4493 struct tg3_rx_prodring_set *dpr,
4494 u32 opaque_key, int src_idx,
4495 u32 dest_idx_unmasked)
4457{ 4496{
4458 struct tg3 *tp = tnapi->tp; 4497 struct tg3 *tp = tnapi->tp;
4459 struct tg3_rx_buffer_desc *src_desc, *dest_desc; 4498 struct tg3_rx_buffer_desc *src_desc, *dest_desc;
4460 struct ring_info *src_map, *dest_map; 4499 struct ring_info *src_map, *dest_map;
4461 int dest_idx; 4500 int dest_idx;
4462 struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; 4501 struct tg3_rx_prodring_set *spr = &tp->prodring[0];
4463 4502
4464 switch (opaque_key) { 4503 switch (opaque_key) {
4465 case RXD_OPAQUE_RING_STD: 4504 case RXD_OPAQUE_RING_STD:
4466 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE; 4505 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
4467 dest_desc = &tpr->rx_std[dest_idx]; 4506 dest_desc = &dpr->rx_std[dest_idx];
4468 dest_map = &tpr->rx_std_buffers[dest_idx]; 4507 dest_map = &dpr->rx_std_buffers[dest_idx];
4469 src_desc = &tpr->rx_std[src_idx]; 4508 src_desc = &spr->rx_std[src_idx];
4470 src_map = &tpr->rx_std_buffers[src_idx]; 4509 src_map = &spr->rx_std_buffers[src_idx];
4471 break; 4510 break;
4472 4511
4473 case RXD_OPAQUE_RING_JUMBO: 4512 case RXD_OPAQUE_RING_JUMBO:
4474 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE; 4513 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
4475 dest_desc = &tpr->rx_jmb[dest_idx].std; 4514 dest_desc = &dpr->rx_jmb[dest_idx].std;
4476 dest_map = &tpr->rx_jmb_buffers[dest_idx]; 4515 dest_map = &dpr->rx_jmb_buffers[dest_idx];
4477 src_desc = &tpr->rx_jmb[src_idx].std; 4516 src_desc = &spr->rx_jmb[src_idx].std;
4478 src_map = &tpr->rx_jmb_buffers[src_idx]; 4517 src_map = &spr->rx_jmb_buffers[src_idx];
4479 break; 4518 break;
4480 4519
4481 default: 4520 default:
@@ -4487,7 +4526,6 @@ static void tg3_recycle_rx(struct tg3_napi *tnapi, u32 opaque_key,
4487 pci_unmap_addr(src_map, mapping)); 4526 pci_unmap_addr(src_map, mapping));
4488 dest_desc->addr_hi = src_desc->addr_hi; 4527 dest_desc->addr_hi = src_desc->addr_hi;
4489 dest_desc->addr_lo = src_desc->addr_lo; 4528 dest_desc->addr_lo = src_desc->addr_lo;
4490
4491 src_map->skb = NULL; 4529 src_map->skb = NULL;
4492} 4530}
4493 4531
@@ -4519,10 +4557,11 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4519{ 4557{
4520 struct tg3 *tp = tnapi->tp; 4558 struct tg3 *tp = tnapi->tp;
4521 u32 work_mask, rx_std_posted = 0; 4559 u32 work_mask, rx_std_posted = 0;
4560 u32 std_prod_idx, jmb_prod_idx;
4522 u32 sw_idx = tnapi->rx_rcb_ptr; 4561 u32 sw_idx = tnapi->rx_rcb_ptr;
4523 u16 hw_idx; 4562 u16 hw_idx;
4524 int received; 4563 int received;
4525 struct tg3_rx_prodring_set *tpr = &tp->prodring[0]; 4564 struct tg3_rx_prodring_set *tpr = tnapi->prodring;
4526 4565
4527 hw_idx = *(tnapi->rx_rcb_prod_idx); 4566 hw_idx = *(tnapi->rx_rcb_prod_idx);
4528 /* 4567 /*
@@ -4532,7 +4571,10 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4532 rmb(); 4571 rmb();
4533 work_mask = 0; 4572 work_mask = 0;
4534 received = 0; 4573 received = 0;
4574 std_prod_idx = tpr->rx_std_prod_idx;
4575 jmb_prod_idx = tpr->rx_jmb_prod_idx;
4535 while (sw_idx != hw_idx && budget > 0) { 4576 while (sw_idx != hw_idx && budget > 0) {
4577 struct ring_info *ri;
4536 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx]; 4578 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
4537 unsigned int len; 4579 unsigned int len;
4538 struct sk_buff *skb; 4580 struct sk_buff *skb;
@@ -4542,16 +4584,16 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4542 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK; 4584 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
4543 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK; 4585 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
4544 if (opaque_key == RXD_OPAQUE_RING_STD) { 4586 if (opaque_key == RXD_OPAQUE_RING_STD) {
4545 struct ring_info *ri = &tpr->rx_std_buffers[desc_idx]; 4587 ri = &tp->prodring[0].rx_std_buffers[desc_idx];
4546 dma_addr = pci_unmap_addr(ri, mapping); 4588 dma_addr = pci_unmap_addr(ri, mapping);
4547 skb = ri->skb; 4589 skb = ri->skb;
4548 post_ptr = &tpr->rx_std_ptr; 4590 post_ptr = &std_prod_idx;
4549 rx_std_posted++; 4591 rx_std_posted++;
4550 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) { 4592 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
4551 struct ring_info *ri = &tpr->rx_jmb_buffers[desc_idx]; 4593 ri = &tp->prodring[0].rx_jmb_buffers[desc_idx];
4552 dma_addr = pci_unmap_addr(ri, mapping); 4594 dma_addr = pci_unmap_addr(ri, mapping);
4553 skb = ri->skb; 4595 skb = ri->skb;
4554 post_ptr = &tpr->rx_jmb_ptr; 4596 post_ptr = &jmb_prod_idx;
4555 } else 4597 } else
4556 goto next_pkt_nopost; 4598 goto next_pkt_nopost;
4557 4599
@@ -4560,7 +4602,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4560 if ((desc->err_vlan & RXD_ERR_MASK) != 0 && 4602 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
4561 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) { 4603 (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
4562 drop_it: 4604 drop_it:
4563 tg3_recycle_rx(tnapi, opaque_key, 4605 tg3_recycle_rx(tnapi, tpr, opaque_key,
4564 desc_idx, *post_ptr); 4606 desc_idx, *post_ptr);
4565 drop_it_no_recycle: 4607 drop_it_no_recycle:
4566 /* Other statistics kept track of by card. */ 4608 /* Other statistics kept track of by card. */
@@ -4580,11 +4622,13 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4580 ) { 4622 ) {
4581 int skb_size; 4623 int skb_size;
4582 4624
4583 skb_size = tg3_alloc_rx_skb(tnapi, opaque_key, 4625 skb_size = tg3_alloc_rx_skb(tp, tpr, opaque_key,
4584 desc_idx, *post_ptr); 4626 *post_ptr);
4585 if (skb_size < 0) 4627 if (skb_size < 0)
4586 goto drop_it; 4628 goto drop_it;
4587 4629
4630 ri->skb = NULL;
4631
4588 pci_unmap_single(tp->pdev, dma_addr, skb_size, 4632 pci_unmap_single(tp->pdev, dma_addr, skb_size,
4589 PCI_DMA_FROMDEVICE); 4633 PCI_DMA_FROMDEVICE);
4590 4634
@@ -4592,7 +4636,7 @@ static int tg3_rx(struct tg3_napi *tnapi, int budget)
4592 } else { 4636 } else {
4593 struct sk_buff *copy_skb; 4637 struct sk_buff *copy_skb;
4594 4638
4595 tg3_recycle_rx(tnapi, opaque_key, 4639 tg3_recycle_rx(tnapi, tpr, opaque_key,
4596 desc_idx, *post_ptr); 4640 desc_idx, *post_ptr);
4597 4641
4598 copy_skb = netdev_alloc_skb(tp->dev, 4642 copy_skb = netdev_alloc_skb(tp->dev,
@@ -4643,9 +4687,7 @@ next_pkt:
4643 4687
4644 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) { 4688 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
4645 u32 idx = *post_ptr % TG3_RX_RING_SIZE; 4689 u32 idx = *post_ptr % TG3_RX_RING_SIZE;
4646 4690 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, idx);
4647 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX +
4648 TG3_64BIT_REG_LOW, idx);
4649 work_mask &= ~RXD_OPAQUE_RING_STD; 4691 work_mask &= ~RXD_OPAQUE_RING_STD;
4650 rx_std_posted = 0; 4692 rx_std_posted = 0;
4651 } 4693 }
@@ -4665,33 +4707,45 @@ next_pkt_nopost:
4665 tw32_rx_mbox(tnapi->consmbox, sw_idx); 4707 tw32_rx_mbox(tnapi->consmbox, sw_idx);
4666 4708
4667 /* Refill RX ring(s). */ 4709 /* Refill RX ring(s). */
4668 if (work_mask & RXD_OPAQUE_RING_STD) { 4710 if (!(tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) || tnapi == &tp->napi[1]) {
4669 sw_idx = tpr->rx_std_ptr % TG3_RX_RING_SIZE; 4711 if (work_mask & RXD_OPAQUE_RING_STD) {
4670 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, 4712 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4671 sw_idx); 4713 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
4672 } 4714 tpr->rx_std_prod_idx);
4673 if (work_mask & RXD_OPAQUE_RING_JUMBO) { 4715 }
4674 sw_idx = tpr->rx_jmb_ptr % TG3_RX_JUMBO_RING_SIZE; 4716 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
4675 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, 4717 tpr->rx_jmb_prod_idx = jmb_prod_idx %
4676 sw_idx); 4718 TG3_RX_JUMBO_RING_SIZE;
4719 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
4720 tpr->rx_jmb_prod_idx);
4721 }
4722 mmiowb();
4723 } else if (work_mask) {
4724 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
4725 * updated before the producer indices can be updated.
4726 */
4727 smp_wmb();
4728
4729 tpr->rx_std_prod_idx = std_prod_idx % TG3_RX_RING_SIZE;
4730 tpr->rx_jmb_prod_idx = jmb_prod_idx % TG3_RX_JUMBO_RING_SIZE;
4731
4732 napi_schedule(&tp->napi[1].napi);
4677 } 4733 }
4678 mmiowb();
4679 4734
4680 return received; 4735 return received;
4681} 4736}
4682 4737
4683static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget) 4738static void tg3_poll_link(struct tg3 *tp)
4684{ 4739{
4685 struct tg3 *tp = tnapi->tp;
4686 struct tg3_hw_status *sblk = tnapi->hw_status;
4687
4688 /* handle link change and other phy events */ 4740 /* handle link change and other phy events */
4689 if (!(tp->tg3_flags & 4741 if (!(tp->tg3_flags &
4690 (TG3_FLAG_USE_LINKCHG_REG | 4742 (TG3_FLAG_USE_LINKCHG_REG |
4691 TG3_FLAG_POLL_SERDES))) { 4743 TG3_FLAG_POLL_SERDES))) {
4744 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
4745
4692 if (sblk->status & SD_STATUS_LINK_CHG) { 4746 if (sblk->status & SD_STATUS_LINK_CHG) {
4693 sblk->status = SD_STATUS_UPDATED | 4747 sblk->status = SD_STATUS_UPDATED |
4694 (sblk->status & ~SD_STATUS_LINK_CHG); 4748 (sblk->status & ~SD_STATUS_LINK_CHG);
4695 spin_lock(&tp->lock); 4749 spin_lock(&tp->lock);
4696 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 4750 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
4697 tw32_f(MAC_STATUS, 4751 tw32_f(MAC_STATUS,
@@ -4705,6 +4759,98 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4705 spin_unlock(&tp->lock); 4759 spin_unlock(&tp->lock);
4706 } 4760 }
4707 } 4761 }
4762}
4763
4764static void tg3_rx_prodring_xfer(struct tg3 *tp,
4765 struct tg3_rx_prodring_set *dpr,
4766 struct tg3_rx_prodring_set *spr)
4767{
4768 u32 si, di, cpycnt, src_prod_idx;
4769 int i;
4770
4771 while (1) {
4772 src_prod_idx = spr->rx_std_prod_idx;
4773
4774 /* Make sure updates to the rx_std_buffers[] entries and the
4775 * standard producer index are seen in the correct order.
4776 */
4777 smp_rmb();
4778
4779 if (spr->rx_std_cons_idx == src_prod_idx)
4780 break;
4781
4782 if (spr->rx_std_cons_idx < src_prod_idx)
4783 cpycnt = src_prod_idx - spr->rx_std_cons_idx;
4784 else
4785 cpycnt = TG3_RX_RING_SIZE - spr->rx_std_cons_idx;
4786
4787 cpycnt = min(cpycnt, TG3_RX_RING_SIZE - dpr->rx_std_prod_idx);
4788
4789 si = spr->rx_std_cons_idx;
4790 di = dpr->rx_std_prod_idx;
4791
4792 memcpy(&dpr->rx_std_buffers[di],
4793 &spr->rx_std_buffers[si],
4794 cpycnt * sizeof(struct ring_info));
4795
4796 for (i = 0; i < cpycnt; i++, di++, si++) {
4797 struct tg3_rx_buffer_desc *sbd, *dbd;
4798 sbd = &spr->rx_std[si];
4799 dbd = &dpr->rx_std[di];
4800 dbd->addr_hi = sbd->addr_hi;
4801 dbd->addr_lo = sbd->addr_lo;
4802 }
4803
4804 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) %
4805 TG3_RX_RING_SIZE;
4806 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) %
4807 TG3_RX_RING_SIZE;
4808 }
4809
4810 while (1) {
4811 src_prod_idx = spr->rx_jmb_prod_idx;
4812
4813 /* Make sure updates to the rx_jmb_buffers[] entries and
4814 * the jumbo producer index are seen in the correct order.
4815 */
4816 smp_rmb();
4817
4818 if (spr->rx_jmb_cons_idx == src_prod_idx)
4819 break;
4820
4821 if (spr->rx_jmb_cons_idx < src_prod_idx)
4822 cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
4823 else
4824 cpycnt = TG3_RX_JUMBO_RING_SIZE - spr->rx_jmb_cons_idx;
4825
4826 cpycnt = min(cpycnt,
4827 TG3_RX_JUMBO_RING_SIZE - dpr->rx_jmb_prod_idx);
4828
4829 si = spr->rx_jmb_cons_idx;
4830 di = dpr->rx_jmb_prod_idx;
4831
4832 memcpy(&dpr->rx_jmb_buffers[di],
4833 &spr->rx_jmb_buffers[si],
4834 cpycnt * sizeof(struct ring_info));
4835
4836 for (i = 0; i < cpycnt; i++, di++, si++) {
4837 struct tg3_rx_buffer_desc *sbd, *dbd;
4838 sbd = &spr->rx_jmb[si].std;
4839 dbd = &dpr->rx_jmb[di].std;
4840 dbd->addr_hi = sbd->addr_hi;
4841 dbd->addr_lo = sbd->addr_lo;
4842 }
4843
4844 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) %
4845 TG3_RX_JUMBO_RING_SIZE;
4846 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) %
4847 TG3_RX_JUMBO_RING_SIZE;
4848 }
4849}
4850
4851static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4852{
4853 struct tg3 *tp = tnapi->tp;
4708 4854
4709 /* run TX completion thread */ 4855 /* run TX completion thread */
4710 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) { 4856 if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
@@ -4720,6 +4866,74 @@ static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
4720 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr) 4866 if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
4721 work_done += tg3_rx(tnapi, budget - work_done); 4867 work_done += tg3_rx(tnapi, budget - work_done);
4722 4868
4869 if ((tp->tg3_flags3 & TG3_FLG3_ENABLE_RSS) && tnapi == &tp->napi[1]) {
4870 int i;
4871 u32 std_prod_idx = tp->prodring[0].rx_std_prod_idx;
4872 u32 jmb_prod_idx = tp->prodring[0].rx_jmb_prod_idx;
4873
4874 for (i = 2; i < tp->irq_cnt; i++)
4875 tg3_rx_prodring_xfer(tp, tnapi->prodring,
4876 tp->napi[i].prodring);
4877
4878 wmb();
4879
4880 if (std_prod_idx != tp->prodring[0].rx_std_prod_idx) {
4881 u32 mbox = TG3_RX_STD_PROD_IDX_REG;
4882 tw32_rx_mbox(mbox, tp->prodring[0].rx_std_prod_idx);
4883 }
4884
4885 if (jmb_prod_idx != tp->prodring[0].rx_jmb_prod_idx) {
4886 u32 mbox = TG3_RX_JMB_PROD_IDX_REG;
4887 tw32_rx_mbox(mbox, tp->prodring[0].rx_jmb_prod_idx);
4888 }
4889
4890 mmiowb();
4891 }
4892
4893 return work_done;
4894}
4895
4896static int tg3_poll_msix(struct napi_struct *napi, int budget)
4897{
4898 struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
4899 struct tg3 *tp = tnapi->tp;
4900 int work_done = 0;
4901 struct tg3_hw_status *sblk = tnapi->hw_status;
4902
4903 while (1) {
4904 work_done = tg3_poll_work(tnapi, work_done, budget);
4905
4906 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
4907 goto tx_recovery;
4908
4909 if (unlikely(work_done >= budget))
4910 break;
4911
4912 /* tp->last_tag is used in tg3_restart_ints() below
4913 * to tell the hw how much work has been processed,
4914 * so we must read it before checking for more work.
4915 */
4916 tnapi->last_tag = sblk->status_tag;
4917 tnapi->last_irq_tag = tnapi->last_tag;
4918 rmb();
4919
4920 /* check for RX/TX work to do */
4921 if (sblk->idx[0].tx_consumer == tnapi->tx_cons &&
4922 *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr) {
4923 napi_complete(napi);
4924 /* Reenable interrupts. */
4925 tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
4926 mmiowb();
4927 break;
4928 }
4929 }
4930
4931 return work_done;
4932
4933tx_recovery:
4934 /* work_done is guaranteed to be less than budget. */
4935 napi_complete(napi);
4936 schedule_work(&tp->reset_task);
4723 return work_done; 4937 return work_done;
4724} 4938}
4725 4939
@@ -4731,6 +4945,8 @@ static int tg3_poll(struct napi_struct *napi, int budget)
4731 struct tg3_hw_status *sblk = tnapi->hw_status; 4945 struct tg3_hw_status *sblk = tnapi->hw_status;
4732 4946
4733 while (1) { 4947 while (1) {
4948 tg3_poll_link(tp);
4949
4734 work_done = tg3_poll_work(tnapi, work_done, budget); 4950 work_done = tg3_poll_work(tnapi, work_done, budget);
4735 4951
4736 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING)) 4952 if (unlikely(tp->tg3_flags & TG3_FLAG_TX_RECOVERY_PENDING))
@@ -5093,11 +5309,11 @@ static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
5093static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32); 5309static void tg3_set_txd(struct tg3_napi *, int, dma_addr_t, int, u32, u32);
5094 5310
5095/* Workaround 4GB and 40-bit hardware DMA bugs. */ 5311/* Workaround 4GB and 40-bit hardware DMA bugs. */
5096static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb, 5312static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
5097 u32 last_plus_one, u32 *start, 5313 struct sk_buff *skb, u32 last_plus_one,
5098 u32 base_flags, u32 mss) 5314 u32 *start, u32 base_flags, u32 mss)
5099{ 5315{
5100 struct tg3_napi *tnapi = &tp->napi[0]; 5316 struct tg3 *tp = tnapi->tp;
5101 struct sk_buff *new_skb; 5317 struct sk_buff *new_skb;
5102 dma_addr_t new_addr = 0; 5318 dma_addr_t new_addr = 0;
5103 u32 entry = *start; 5319 u32 entry = *start;
@@ -5124,7 +5340,8 @@ static int tigon3_dma_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
5124 /* Make sure new skb does not cross any 4G boundaries. 5340 /* Make sure new skb does not cross any 4G boundaries.
5125 * Drop the packet if it does. 5341 * Drop the packet if it does.
5126 */ 5342 */
5127 if (ret || tg3_4g_overflow_test(new_addr, new_skb->len)) { 5343 if (ret || ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5344 tg3_4g_overflow_test(new_addr, new_skb->len))) {
5128 if (!ret) 5345 if (!ret)
5129 skb_dma_unmap(&tp->pdev->dev, new_skb, 5346 skb_dma_unmap(&tp->pdev->dev, new_skb,
5130 DMA_TO_DEVICE); 5347 DMA_TO_DEVICE);
@@ -5179,7 +5396,7 @@ static void tg3_set_txd(struct tg3_napi *tnapi, int entry,
5179} 5396}
5180 5397
5181/* hard_start_xmit for devices that don't have any bugs and 5398/* hard_start_xmit for devices that don't have any bugs and
5182 * support TG3_FLG2_HW_TSO_2 only. 5399 * support TG3_FLG2_HW_TSO_2 and TG3_FLG2_HW_TSO_3 only.
5183 */ 5400 */
5184static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, 5401static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5185 struct net_device *dev) 5402 struct net_device *dev)
@@ -5238,7 +5455,7 @@ static netdev_tx_t tg3_start_xmit(struct sk_buff *skb,
5238 hdrlen = ip_tcp_len + tcp_opt_len; 5455 hdrlen = ip_tcp_len + tcp_opt_len;
5239 } 5456 }
5240 5457
5241 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 5458 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5242 mss |= (hdrlen & 0xc) << 12; 5459 mss |= (hdrlen & 0xc) << 12;
5243 if (hdrlen & 0x10) 5460 if (hdrlen & 0x10)
5244 base_flags |= 0x00000010; 5461 base_flags |= 0x00000010;
@@ -5365,9 +5582,13 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5365 struct skb_shared_info *sp; 5582 struct skb_shared_info *sp;
5366 int would_hit_hwbug; 5583 int would_hit_hwbug;
5367 dma_addr_t mapping; 5584 dma_addr_t mapping;
5368 struct tg3_napi *tnapi = &tp->napi[0]; 5585 struct tg3_napi *tnapi;
5586 struct netdev_queue *txq;
5369 5587
5370 len = skb_headlen(skb); 5588 txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
5589 tnapi = &tp->napi[skb_get_queue_mapping(skb)];
5590 if (tp->tg3_flags2 & TG3_FLG2_USING_MSIX)
5591 tnapi++;
5371 5592
5372 /* We are running in BH disabled context with netif_tx_lock 5593 /* We are running in BH disabled context with netif_tx_lock
5373 * and TX reclaim runs via tp->napi.poll inside of a software 5594 * and TX reclaim runs via tp->napi.poll inside of a software
@@ -5375,8 +5596,8 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5375 * no IRQ context deadlocks to worry about either. Rejoice! 5596 * no IRQ context deadlocks to worry about either. Rejoice!
5376 */ 5597 */
5377 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) { 5598 if (unlikely(tg3_tx_avail(tnapi) <= (skb_shinfo(skb)->nr_frags + 1))) {
5378 if (!netif_queue_stopped(dev)) { 5599 if (!netif_tx_queue_stopped(txq)) {
5379 netif_stop_queue(dev); 5600 netif_tx_stop_queue(txq);
5380 5601
5381 /* This is a hard error, log it. */ 5602 /* This is a hard error, log it. */
5382 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when " 5603 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when "
@@ -5389,10 +5610,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5389 base_flags = 0; 5610 base_flags = 0;
5390 if (skb->ip_summed == CHECKSUM_PARTIAL) 5611 if (skb->ip_summed == CHECKSUM_PARTIAL)
5391 base_flags |= TXD_FLAG_TCPUDP_CSUM; 5612 base_flags |= TXD_FLAG_TCPUDP_CSUM;
5392 mss = 0; 5613
5393 if ((mss = skb_shinfo(skb)->gso_size) != 0) { 5614 if ((mss = skb_shinfo(skb)->gso_size) != 0) {
5394 struct iphdr *iph; 5615 struct iphdr *iph;
5395 int tcp_opt_len, ip_tcp_len, hdr_len; 5616 u32 tcp_opt_len, ip_tcp_len, hdr_len;
5396 5617
5397 if (skb_header_cloned(skb) && 5618 if (skb_header_cloned(skb) &&
5398 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) { 5619 pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
@@ -5423,8 +5644,15 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5423 IPPROTO_TCP, 5644 IPPROTO_TCP,
5424 0); 5645 0);
5425 5646
5426 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) || 5647 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) {
5427 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) { 5648 mss |= (hdr_len & 0xc) << 12;
5649 if (hdr_len & 0x10)
5650 base_flags |= 0x00000010;
5651 base_flags |= (hdr_len & 0x3e0) << 5;
5652 } else if (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)
5653 mss |= hdr_len << 9;
5654 else if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_1) ||
5655 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
5428 if (tcp_opt_len || iph->ihl > 5) { 5656 if (tcp_opt_len || iph->ihl > 5) {
5429 int tsflags; 5657 int tsflags;
5430 5658
@@ -5446,6 +5674,10 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5446 (vlan_tx_tag_get(skb) << 16)); 5674 (vlan_tx_tag_get(skb) << 16));
5447#endif 5675#endif
5448 5676
5677 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 &&
5678 !mss && skb->len > ETH_DATA_LEN)
5679 base_flags |= TXD_FLAG_JMB_PKT;
5680
5449 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) { 5681 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
5450 dev_kfree_skb(skb); 5682 dev_kfree_skb(skb);
5451 goto out_unlock; 5683 goto out_unlock;
@@ -5459,9 +5691,20 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5459 5691
5460 would_hit_hwbug = 0; 5692 would_hit_hwbug = 0;
5461 5693
5462 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG) 5694 len = skb_headlen(skb);
5695
5696 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) && len <= 8)
5463 would_hit_hwbug = 1; 5697 would_hit_hwbug = 1;
5464 else if (tg3_4g_overflow_test(mapping, len)) 5698
5699 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5700 tg3_4g_overflow_test(mapping, len))
5701 would_hit_hwbug = 1;
5702
5703 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5704 tg3_40bit_overflow_test(tp, mapping, len))
5705 would_hit_hwbug = 1;
5706
5707 if (tp->tg3_flags3 & TG3_FLG3_5701_DMA_BUG)
5465 would_hit_hwbug = 1; 5708 would_hit_hwbug = 1;
5466 5709
5467 tg3_set_txd(tnapi, entry, mapping, len, base_flags, 5710 tg3_set_txd(tnapi, entry, mapping, len, base_flags,
@@ -5482,10 +5725,16 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5482 5725
5483 tnapi->tx_buffers[entry].skb = NULL; 5726 tnapi->tx_buffers[entry].skb = NULL;
5484 5727
5485 if (tg3_4g_overflow_test(mapping, len)) 5728 if ((tp->tg3_flags3 & TG3_FLG3_SHORT_DMA_BUG) &&
5729 len <= 8)
5486 would_hit_hwbug = 1; 5730 would_hit_hwbug = 1;
5487 5731
5488 if (tg3_40bit_overflow_test(tp, mapping, len)) 5732 if ((tp->tg3_flags3 & TG3_FLG3_4G_DMA_BNDRY_BUG) &&
5733 tg3_4g_overflow_test(mapping, len))
5734 would_hit_hwbug = 1;
5735
5736 if ((tp->tg3_flags3 & TG3_FLG3_40BIT_DMA_LIMIT_BUG) &&
5737 tg3_40bit_overflow_test(tp, mapping, len))
5489 would_hit_hwbug = 1; 5738 would_hit_hwbug = 1;
5490 5739
5491 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 5740 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
@@ -5509,7 +5758,7 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5509 /* If the workaround fails due to memory/mapping 5758 /* If the workaround fails due to memory/mapping
5510 * failure, silently drop this packet. 5759 * failure, silently drop this packet.
5511 */ 5760 */
5512 if (tigon3_dma_hwbug_workaround(tp, skb, last_plus_one, 5761 if (tigon3_dma_hwbug_workaround(tnapi, skb, last_plus_one,
5513 &start, base_flags, mss)) 5762 &start, base_flags, mss))
5514 goto out_unlock; 5763 goto out_unlock;
5515 5764
@@ -5517,13 +5766,13 @@ static netdev_tx_t tg3_start_xmit_dma_bug(struct sk_buff *skb,
5517 } 5766 }
5518 5767
5519 /* Packets are ready, update Tx producer idx local and on card. */ 5768 /* Packets are ready, update Tx producer idx local and on card. */
5520 tw32_tx_mbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, entry); 5769 tw32_tx_mbox(tnapi->prodmbox, entry);
5521 5770
5522 tnapi->tx_prod = entry; 5771 tnapi->tx_prod = entry;
5523 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) { 5772 if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
5524 netif_stop_queue(dev); 5773 netif_tx_stop_queue(txq);
5525 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)) 5774 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
5526 netif_wake_queue(tp->dev); 5775 netif_tx_wake_queue(txq);
5527 } 5776 }
5528 5777
5529out_unlock: 5778out_unlock:
@@ -5594,36 +5843,33 @@ static void tg3_rx_prodring_free(struct tg3 *tp,
5594 struct tg3_rx_prodring_set *tpr) 5843 struct tg3_rx_prodring_set *tpr)
5595{ 5844{
5596 int i; 5845 int i;
5597 struct ring_info *rxp;
5598
5599 for (i = 0; i < TG3_RX_RING_SIZE; i++) {
5600 rxp = &tpr->rx_std_buffers[i];
5601 5846
5602 if (rxp->skb == NULL) 5847 if (tpr != &tp->prodring[0]) {
5603 continue; 5848 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
5849 i = (i + 1) % TG3_RX_RING_SIZE)
5850 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
5851 tp->rx_pkt_map_sz);
5852
5853 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5854 for (i = tpr->rx_jmb_cons_idx;
5855 i != tpr->rx_jmb_prod_idx;
5856 i = (i + 1) % TG3_RX_JUMBO_RING_SIZE) {
5857 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
5858 TG3_RX_JMB_MAP_SZ);
5859 }
5860 }
5604 5861
5605 pci_unmap_single(tp->pdev, 5862 return;
5606 pci_unmap_addr(rxp, mapping),
5607 tp->rx_pkt_map_sz,
5608 PCI_DMA_FROMDEVICE);
5609 dev_kfree_skb_any(rxp->skb);
5610 rxp->skb = NULL;
5611 } 5863 }
5612 5864
5613 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 5865 for (i = 0; i < TG3_RX_RING_SIZE; i++)
5614 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) { 5866 tg3_rx_skb_free(tp, &tpr->rx_std_buffers[i],
5615 rxp = &tpr->rx_jmb_buffers[i]; 5867 tp->rx_pkt_map_sz);
5616 5868
5617 if (rxp->skb == NULL) 5869 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5618 continue; 5870 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++)
5619 5871 tg3_rx_skb_free(tp, &tpr->rx_jmb_buffers[i],
5620 pci_unmap_single(tp->pdev, 5872 TG3_RX_JMB_MAP_SZ);
5621 pci_unmap_addr(rxp, mapping),
5622 TG3_RX_JMB_MAP_SZ,
5623 PCI_DMA_FROMDEVICE);
5624 dev_kfree_skb_any(rxp->skb);
5625 rxp->skb = NULL;
5626 }
5627 } 5873 }
5628} 5874}
5629 5875
@@ -5638,7 +5884,19 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
5638 struct tg3_rx_prodring_set *tpr) 5884 struct tg3_rx_prodring_set *tpr)
5639{ 5885{
5640 u32 i, rx_pkt_dma_sz; 5886 u32 i, rx_pkt_dma_sz;
5641 struct tg3_napi *tnapi = &tp->napi[0]; 5887
5888 tpr->rx_std_cons_idx = 0;
5889 tpr->rx_std_prod_idx = 0;
5890 tpr->rx_jmb_cons_idx = 0;
5891 tpr->rx_jmb_prod_idx = 0;
5892
5893 if (tpr != &tp->prodring[0]) {
5894 memset(&tpr->rx_std_buffers[0], 0, TG3_RX_STD_BUFF_RING_SIZE);
5895 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE)
5896 memset(&tpr->rx_jmb_buffers[0], 0,
5897 TG3_RX_JMB_BUFF_RING_SIZE);
5898 goto done;
5899 }
5642 5900
5643 /* Zero out all descriptors. */ 5901 /* Zero out all descriptors. */
5644 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES); 5902 memset(tpr->rx_std, 0, TG3_RX_RING_BYTES);
@@ -5665,7 +5923,7 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
5665 5923
5666 /* Now allocate fresh SKBs for each rx ring. */ 5924 /* Now allocate fresh SKBs for each rx ring. */
5667 for (i = 0; i < tp->rx_pending; i++) { 5925 for (i = 0; i < tp->rx_pending; i++) {
5668 if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_STD, -1, i) < 0) { 5926 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
5669 printk(KERN_WARNING PFX 5927 printk(KERN_WARNING PFX
5670 "%s: Using a smaller RX standard ring, " 5928 "%s: Using a smaller RX standard ring, "
5671 "only %d out of %d buffers were allocated " 5929 "only %d out of %d buffers were allocated "
@@ -5696,8 +5954,8 @@ static int tg3_rx_prodring_alloc(struct tg3 *tp,
5696 } 5954 }
5697 5955
5698 for (i = 0; i < tp->rx_jumbo_pending; i++) { 5956 for (i = 0; i < tp->rx_jumbo_pending; i++) {
5699 if (tg3_alloc_rx_skb(tnapi, RXD_OPAQUE_RING_JUMBO, 5957 if (tg3_alloc_rx_skb(tp, tpr, RXD_OPAQUE_RING_JUMBO,
5700 -1, i) < 0) { 5958 i) < 0) {
5701 printk(KERN_WARNING PFX 5959 printk(KERN_WARNING PFX
5702 "%s: Using a smaller RX jumbo ring, " 5960 "%s: Using a smaller RX jumbo ring, "
5703 "only %d out of %d buffers were " 5961 "only %d out of %d buffers were "
@@ -5741,8 +5999,7 @@ static void tg3_rx_prodring_fini(struct tg3 *tp,
5741static int tg3_rx_prodring_init(struct tg3 *tp, 5999static int tg3_rx_prodring_init(struct tg3 *tp,
5742 struct tg3_rx_prodring_set *tpr) 6000 struct tg3_rx_prodring_set *tpr)
5743{ 6001{
5744 tpr->rx_std_buffers = kzalloc(sizeof(struct ring_info) * 6002 tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE, GFP_KERNEL);
5745 TG3_RX_RING_SIZE, GFP_KERNEL);
5746 if (!tpr->rx_std_buffers) 6003 if (!tpr->rx_std_buffers)
5747 return -ENOMEM; 6004 return -ENOMEM;
5748 6005
@@ -5752,8 +6009,7 @@ static int tg3_rx_prodring_init(struct tg3 *tp,
5752 goto err_out; 6009 goto err_out;
5753 6010
5754 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) { 6011 if (tp->tg3_flags & TG3_FLAG_JUMBO_CAPABLE) {
5755 tpr->rx_jmb_buffers = kzalloc(sizeof(struct ring_info) * 6012 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE,
5756 TG3_RX_JUMBO_RING_SIZE,
5757 GFP_KERNEL); 6013 GFP_KERNEL);
5758 if (!tpr->rx_jmb_buffers) 6014 if (!tpr->rx_jmb_buffers)
5759 goto err_out; 6015 goto err_out;
@@ -5809,9 +6065,10 @@ static void tg3_free_rings(struct tg3 *tp)
5809 6065
5810 dev_kfree_skb_any(skb); 6066 dev_kfree_skb_any(skb);
5811 } 6067 }
5812 }
5813 6068
5814 tg3_rx_prodring_free(tp, &tp->prodring[0]); 6069 if (tp->irq_cnt == 1 || j != tp->irq_cnt - 1)
6070 tg3_rx_prodring_free(tp, &tp->prodring[j]);
6071 }
5815} 6072}
5816 6073
5817/* Initialize tx/rx rings for packet processing. 6074/* Initialize tx/rx rings for packet processing.
@@ -5845,9 +6102,13 @@ static int tg3_init_rings(struct tg3 *tp)
5845 tnapi->rx_rcb_ptr = 0; 6102 tnapi->rx_rcb_ptr = 0;
5846 if (tnapi->rx_rcb) 6103 if (tnapi->rx_rcb)
5847 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp)); 6104 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
6105
6106 if ((tp->irq_cnt == 1 || i != tp->irq_cnt - 1) &&
6107 tg3_rx_prodring_alloc(tp, &tp->prodring[i]))
6108 return -ENOMEM;
5848 } 6109 }
5849 6110
5850 return tg3_rx_prodring_alloc(tp, &tp->prodring[0]); 6111 return 0;
5851} 6112}
5852 6113
5853/* 6114/*
@@ -5891,7 +6152,8 @@ static void tg3_free_consistent(struct tg3 *tp)
5891 tp->hw_stats = NULL; 6152 tp->hw_stats = NULL;
5892 } 6153 }
5893 6154
5894 tg3_rx_prodring_fini(tp, &tp->prodring[0]); 6155 for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++)
6156 tg3_rx_prodring_fini(tp, &tp->prodring[i]);
5895} 6157}
5896 6158
5897/* 6159/*
@@ -5902,8 +6164,10 @@ static int tg3_alloc_consistent(struct tg3 *tp)
5902{ 6164{
5903 int i; 6165 int i;
5904 6166
5905 if (tg3_rx_prodring_init(tp, &tp->prodring[0])) 6167 for (i = 0; i < (tp->irq_cnt == 1 ? 1 : tp->irq_cnt - 1); i++) {
5906 return -ENOMEM; 6168 if (tg3_rx_prodring_init(tp, &tp->prodring[i]))
6169 goto err_out;
6170 }
5907 6171
5908 tp->hw_stats = pci_alloc_consistent(tp->pdev, 6172 tp->hw_stats = pci_alloc_consistent(tp->pdev,
5909 sizeof(struct tg3_hw_stats), 6173 sizeof(struct tg3_hw_stats),
@@ -5947,6 +6211,11 @@ static int tg3_alloc_consistent(struct tg3 *tp)
5947 break; 6211 break;
5948 } 6212 }
5949 6213
6214 if (tp->irq_cnt == 1)
6215 tnapi->prodring = &tp->prodring[0];
6216 else if (i)
6217 tnapi->prodring = &tp->prodring[i - 1];
6218
5950 /* 6219 /*
5951 * If multivector RSS is enabled, vector 0 does not handle 6220 * If multivector RSS is enabled, vector 0 does not handle
5952 * rx or tx interrupts. Don't allocate any resources for it. 6221 * rx or tx interrupts. Don't allocate any resources for it.
@@ -6580,6 +6849,30 @@ static int tg3_chip_reset(struct tg3 *tp)
6580 6849
6581 tg3_mdio_start(tp); 6850 tg3_mdio_start(tp);
6582 6851
6852 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
6853 u8 phy_addr;
6854
6855 phy_addr = tp->phy_addr;
6856 tp->phy_addr = TG3_PHY_PCIE_ADDR;
6857
6858 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
6859 TG3_PCIEPHY_TXB_BLK << TG3_PCIEPHY_BLOCK_SHIFT);
6860 val = TG3_PCIEPHY_TX0CTRL1_TXOCM | TG3_PCIEPHY_TX0CTRL1_RDCTL |
6861 TG3_PCIEPHY_TX0CTRL1_TXCMV | TG3_PCIEPHY_TX0CTRL1_TKSEL |
6862 TG3_PCIEPHY_TX0CTRL1_NB_EN;
6863 tg3_writephy(tp, TG3_PCIEPHY_TX0CTRL1, val);
6864 udelay(10);
6865
6866 tg3_writephy(tp, TG3_PCIEPHY_BLOCK_ADDR,
6867 TG3_PCIEPHY_XGXS_BLK1 << TG3_PCIEPHY_BLOCK_SHIFT);
6868 val = TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN |
6869 TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN;
6870 tg3_writephy(tp, TG3_PCIEPHY_PWRMGMT4, val);
6871 udelay(10);
6872
6873 tp->phy_addr = phy_addr;
6874 }
6875
6583 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) && 6876 if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
6584 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 && 6877 tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
6585 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 && 6878 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
@@ -7162,15 +7455,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7162 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS); 7455 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
7163 7456
7164 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR); 7457 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
7165 }
7166 7458
7167 if (tp->tg3_flags3 & TG3_FLG3_TOGGLE_10_100_L1PLLPD) { 7459 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7168 val = tr32(TG3_PCIE_LNKCTL); 7460 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7169 if (tp->tg3_flags3 & TG3_FLG3_CLKREQ_BUG)
7170 val |= TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
7171 else
7172 val &= ~TG3_PCIE_LNKCTL_L1_PLL_PD_DIS;
7173 tw32(TG3_PCIE_LNKCTL, val);
7174 } 7461 }
7175 7462
7176 /* This works around an issue with Athlon chipsets on 7463 /* This works around an issue with Athlon chipsets on
@@ -7217,9 +7504,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7217 if (err) 7504 if (err)
7218 return err; 7505 return err;
7219 7506
7220 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 && 7507 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
7221 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761 && 7508 val = tr32(TG3PCI_DMA_RW_CTRL) &
7222 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717) { 7509 ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
7510 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
7511 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
7512 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
7223 /* This value is determined during the probe time DMA 7513 /* This value is determined during the probe time DMA
7224 * engine test, tg3_test_dma. 7514 * engine test, tg3_test_dma.
7225 */ 7515 */
@@ -7342,8 +7632,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7342 ((u64) tpr->rx_std_mapping >> 32)); 7632 ((u64) tpr->rx_std_mapping >> 32));
7343 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW, 7633 tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
7344 ((u64) tpr->rx_std_mapping & 0xffffffff)); 7634 ((u64) tpr->rx_std_mapping & 0xffffffff));
7345 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR, 7635 if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS))
7346 NIC_SRAM_RX_BUFFER_DESC); 7636 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
7637 NIC_SRAM_RX_BUFFER_DESC);
7347 7638
7348 /* Disable the mini ring */ 7639 /* Disable the mini ring */
7349 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) 7640 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
@@ -7366,8 +7657,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7366 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 7657 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7367 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) | 7658 (RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT) |
7368 BDINFO_FLAGS_USE_EXT_RECV); 7659 BDINFO_FLAGS_USE_EXT_RECV);
7369 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR, 7660 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
7370 NIC_SRAM_RX_JUMBO_BUFFER_DESC); 7661 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
7662 NIC_SRAM_RX_JUMBO_BUFFER_DESC);
7371 } else { 7663 } else {
7372 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS, 7664 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
7373 BDINFO_FLAGS_DISABLED); 7665 BDINFO_FLAGS_DISABLED);
@@ -7383,14 +7675,12 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7383 7675
7384 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val); 7676 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
7385 7677
7386 tpr->rx_std_ptr = tp->rx_pending; 7678 tpr->rx_std_prod_idx = tp->rx_pending;
7387 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW, 7679 tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
7388 tpr->rx_std_ptr);
7389 7680
7390 tpr->rx_jmb_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ? 7681 tpr->rx_jmb_prod_idx = (tp->tg3_flags & TG3_FLAG_JUMBO_RING_ENABLE) ?
7391 tp->rx_jumbo_pending : 0; 7682 tp->rx_jumbo_pending : 0;
7392 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW, 7683 tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
7393 tpr->rx_jmb_ptr);
7394 7684
7395 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 7685 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
7396 tw32(STD_REPLENISH_LWM, 32); 7686 tw32(STD_REPLENISH_LWM, 32);
@@ -7453,7 +7743,8 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7453 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) 7743 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
7454 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN; 7744 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
7455 7745
7456 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 7746 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
7747 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
7457 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 7748 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
7458 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN; 7749 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
7459 7750
@@ -7602,6 +7893,9 @@ static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
7602 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS) 7893 if (tp->tg3_flags3 & TG3_FLG3_5755_PLUS)
7603 val |= WDMAC_MODE_STATUS_TAG_FIX; 7894 val |= WDMAC_MODE_STATUS_TAG_FIX;
7604 7895
7896 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
7897 val |= WDMAC_MODE_BURST_ALL_DATA;
7898
7605 tw32_f(WDMAC_MODE, val); 7899 tw32_f(WDMAC_MODE, val);
7606 udelay(40); 7900 udelay(40);
7607 7901
@@ -9240,9 +9534,11 @@ static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9240 struct tg3 *tp = netdev_priv(dev); 9534 struct tg3 *tp = netdev_priv(dev);
9241 9535
9242 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 9536 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9537 struct phy_device *phydev;
9243 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 9538 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9244 return -EAGAIN; 9539 return -EAGAIN;
9245 return phy_ethtool_gset(tp->mdio_bus->phy_map[PHY_ADDR], cmd); 9540 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9541 return phy_ethtool_gset(phydev, cmd);
9246 } 9542 }
9247 9543
9248 cmd->supported = (SUPPORTED_Autoneg); 9544 cmd->supported = (SUPPORTED_Autoneg);
@@ -9281,9 +9577,11 @@ static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
9281 struct tg3 *tp = netdev_priv(dev); 9577 struct tg3 *tp = netdev_priv(dev);
9282 9578
9283 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 9579 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9580 struct phy_device *phydev;
9284 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 9581 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9285 return -EAGAIN; 9582 return -EAGAIN;
9286 return phy_ethtool_sset(tp->mdio_bus->phy_map[PHY_ADDR], cmd); 9583 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9584 return phy_ethtool_sset(phydev, cmd);
9287 } 9585 }
9288 9586
9289 if (cmd->autoneg != AUTONEG_ENABLE && 9587 if (cmd->autoneg != AUTONEG_ENABLE &&
@@ -9436,15 +9734,16 @@ static int tg3_set_tso(struct net_device *dev, u32 value)
9436 return 0; 9734 return 0;
9437 } 9735 }
9438 if ((dev->features & NETIF_F_IPV6_CSUM) && 9736 if ((dev->features & NETIF_F_IPV6_CSUM) &&
9439 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) { 9737 ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
9738 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3))) {
9440 if (value) { 9739 if (value) {
9441 dev->features |= NETIF_F_TSO6; 9740 dev->features |= NETIF_F_TSO6;
9442 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 9741 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
9742 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
9443 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 9743 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
9444 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || 9744 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
9445 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 9745 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
9446 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 9746 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
9447 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
9448 dev->features |= NETIF_F_TSO_ECN; 9747 dev->features |= NETIF_F_TSO_ECN;
9449 } else 9748 } else
9450 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN); 9749 dev->features &= ~(NETIF_F_TSO6 | NETIF_F_TSO_ECN);
@@ -9466,7 +9765,7 @@ static int tg3_nway_reset(struct net_device *dev)
9466 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 9765 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
9467 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 9766 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
9468 return -EAGAIN; 9767 return -EAGAIN;
9469 r = phy_start_aneg(tp->mdio_bus->phy_map[PHY_ADDR]); 9768 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
9470 } else { 9769 } else {
9471 u32 bmcr; 9770 u32 bmcr;
9472 9771
@@ -9585,7 +9884,7 @@ static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam
9585 u32 newadv; 9884 u32 newadv;
9586 struct phy_device *phydev; 9885 struct phy_device *phydev;
9587 9886
9588 phydev = tp->mdio_bus->phy_map[PHY_ADDR]; 9887 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
9589 9888
9590 if (epause->rx_pause) { 9889 if (epause->rx_pause) {
9591 if (epause->tx_pause) 9890 if (epause->tx_pause)
@@ -10338,7 +10637,10 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10338 for (i = 14; i < tx_len; i++) 10637 for (i = 14; i < tx_len; i++)
10339 tx_data[i] = (u8) (i & 0xff); 10638 tx_data[i] = (u8) (i & 0xff);
10340 10639
10341 map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE); 10640 if (skb_dma_map(&tp->pdev->dev, skb, DMA_TO_DEVICE)) {
10641 dev_kfree_skb(skb);
10642 return -EIO;
10643 }
10342 10644
10343 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 10645 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10344 rnapi->coal_now); 10646 rnapi->coal_now);
@@ -10349,7 +10651,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10349 10651
10350 num_pkts = 0; 10652 num_pkts = 0;
10351 10653
10352 tg3_set_txd(tnapi, tnapi->tx_prod, map, tx_len, 0, 1); 10654 tg3_set_txd(tnapi, tnapi->tx_prod,
10655 skb_shinfo(skb)->dma_head, tx_len, 0, 1);
10353 10656
10354 tnapi->tx_prod++; 10657 tnapi->tx_prod++;
10355 num_pkts++; 10658 num_pkts++;
@@ -10359,8 +10662,8 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10359 10662
10360 udelay(10); 10663 udelay(10);
10361 10664
10362 /* 250 usec to allow enough time on some 10/100 Mbps devices. */ 10665 /* 350 usec to allow enough time on some 10/100 Mbps devices. */
10363 for (i = 0; i < 25; i++) { 10666 for (i = 0; i < 35; i++) {
10364 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE | 10667 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
10365 coal_now); 10668 coal_now);
10366 10669
@@ -10373,7 +10676,7 @@ static int tg3_run_loopback(struct tg3 *tp, int loopback_mode)
10373 break; 10676 break;
10374 } 10677 }
10375 10678
10376 pci_unmap_single(tp->pdev, map, tx_len, PCI_DMA_TODEVICE); 10679 skb_dma_unmap(&tp->pdev->dev, skb, DMA_TO_DEVICE);
10377 dev_kfree_skb(skb); 10680 dev_kfree_skb(skb);
10378 10681
10379 if (tx_idx != tnapi->tx_prod) 10682 if (tx_idx != tnapi->tx_prod)
@@ -10565,9 +10868,11 @@ static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
10565 int err; 10868 int err;
10566 10869
10567 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) { 10870 if (tp->tg3_flags3 & TG3_FLG3_USE_PHYLIB) {
10871 struct phy_device *phydev;
10568 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED)) 10872 if (!(tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED))
10569 return -EAGAIN; 10873 return -EAGAIN;
10570 return phy_mii_ioctl(tp->mdio_bus->phy_map[PHY_ADDR], data, cmd); 10874 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10875 return phy_mii_ioctl(phydev, data, cmd);
10571 } 10876 }
10572 10877
10573 switch(cmd) { 10878 switch(cmd) {
@@ -10887,7 +11192,7 @@ static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
10887 11192
10888 /* NVRAM protection for TPM */ 11193 /* NVRAM protection for TPM */
10889 if (nvcfg1 & (1 << 27)) 11194 if (nvcfg1 & (1 << 27))
10890 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; 11195 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
10891 11196
10892 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) { 11197 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
10893 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ: 11198 case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
@@ -10928,7 +11233,7 @@ static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
10928 11233
10929 /* NVRAM protection for TPM */ 11234 /* NVRAM protection for TPM */
10930 if (nvcfg1 & (1 << 27)) { 11235 if (nvcfg1 & (1 << 27)) {
10931 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; 11236 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
10932 protect = 1; 11237 protect = 1;
10933 } 11238 }
10934 11239
@@ -11022,7 +11327,7 @@ static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
11022 11327
11023 /* NVRAM protection for TPM */ 11328 /* NVRAM protection for TPM */
11024 if (nvcfg1 & (1 << 27)) { 11329 if (nvcfg1 & (1 << 27)) {
11025 tp->tg3_flags2 |= TG3_FLG2_PROTECTED_NVRAM; 11330 tp->tg3_flags3 |= TG3_FLG3_PROTECTED_NVRAM;
11026 protect = 1; 11331 protect = 1;
11027 } 11332 }
11028 11333
@@ -11524,7 +11829,7 @@ static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
11524 11829
11525 tg3_enable_nvram_access(tp); 11830 tg3_enable_nvram_access(tp);
11526 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) && 11831 if ((tp->tg3_flags2 & TG3_FLG2_5750_PLUS) &&
11527 !(tp->tg3_flags2 & TG3_FLG2_PROTECTED_NVRAM)) 11832 !(tp->tg3_flags3 & TG3_FLG3_PROTECTED_NVRAM))
11528 tw32(NVRAM_WRITE1, 0x406); 11833 tw32(NVRAM_WRITE1, 0x406);
11529 11834
11530 grc_mode = tr32(GRC_MODE); 11835 grc_mode = tr32(GRC_MODE);
@@ -12400,10 +12705,9 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12400 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) { 12705 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
12401 u32 prod_id_asic_rev; 12706 u32 prod_id_asic_rev;
12402 12707
12403 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717C || 12708 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
12404 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717S || 12709 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
12405 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718C || 12710 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5724)
12406 tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718S)
12407 pci_read_config_dword(tp->pdev, 12711 pci_read_config_dword(tp->pdev,
12408 TG3PCI_GEN2_PRODID_ASICREV, 12712 TG3PCI_GEN2_PRODID_ASICREV,
12409 &prod_id_asic_rev); 12713 &prod_id_asic_rev);
@@ -12586,6 +12890,29 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12586 tp->dev->features |= NETIF_F_IPV6_CSUM; 12890 tp->dev->features |= NETIF_F_IPV6_CSUM;
12587 } 12891 }
12588 12892
12893 /* Determine TSO capabilities */
12894 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
12895 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_3;
12896 else if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12897 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12898 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12899 else if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12900 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12901 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
12902 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12903 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12904 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12905 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
12906 tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
12907 tp->tg3_flags2 |= TG3_FLG2_TSO_BUG;
12908 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
12909 tp->fw_needed = FIRMWARE_TG3TSO5;
12910 else
12911 tp->fw_needed = FIRMWARE_TG3TSO;
12912 }
12913
12914 tp->irq_max = 1;
12915
12589 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) { 12916 if (tp->tg3_flags2 & TG3_FLG2_5750_PLUS) {
12590 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI; 12917 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSI;
12591 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX || 12918 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
@@ -12597,25 +12924,22 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12597 12924
12598 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 12925 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) ||
12599 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) { 12926 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
12600 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_2;
12601 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI; 12927 tp->tg3_flags2 |= TG3_FLG2_1SHOT_MSI;
12602 } else {
12603 tp->tg3_flags2 |= TG3_FLG2_HW_TSO_1 | TG3_FLG2_TSO_BUG;
12604 if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
12605 ASIC_REV_5750 &&
12606 tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
12607 tp->tg3_flags2 &= ~TG3_FLG2_TSO_BUG;
12608 } 12928 }
12609 }
12610 12929
12611 tp->irq_max = 1; 12930 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
12931 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX;
12932 tp->irq_max = TG3_IRQ_MAX_VECS;
12933 }
12934 }
12612 12935
12613#ifdef TG3_NAPI 12936 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
12614 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) { 12937 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12615 tp->tg3_flags |= TG3_FLAG_SUPPORT_MSIX; 12938 tp->tg3_flags3 |= TG3_FLG3_SHORT_DMA_BUG;
12616 tp->irq_max = TG3_IRQ_MAX_VECS; 12939 else if (!(tp->tg3_flags3 & TG3_FLG3_5755_PLUS)) {
12940 tp->tg3_flags3 |= TG3_FLG3_4G_DMA_BNDRY_BUG;
12941 tp->tg3_flags3 |= TG3_FLG3_40BIT_DMA_LIMIT_BUG;
12617 } 12942 }
12618#endif
12619 12943
12620 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) || 12944 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ||
12621 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) || 12945 (tp->tg3_flags2 & TG3_FLG2_5780_CLASS) ||
@@ -12926,11 +13250,6 @@ static int __devinit tg3_get_invariants(struct tg3 *tp)
12926 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) 13250 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
12927 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB; 13251 tp->tg3_flags3 |= TG3_FLG3_USE_PHYLIB;
12928 13252
12929 if ((tp->pci_chip_rev_id == CHIPREV_ID_57780_A1 &&
12930 tr32(RCVLPC_STATS_ENABLE) & RCVLPC_STATSENAB_ASF_FIX) ||
12931 tp->pci_chip_rev_id == CHIPREV_ID_57780_A0)
12932 tp->tg3_flags3 |= TG3_FLG3_TOGGLE_10_100_L1PLLPD;
12933
12934 err = tg3_mdio_init(tp); 13253 err = tg3_mdio_init(tp);
12935 if (err) 13254 if (err)
12936 return err; 13255 return err;
@@ -13220,6 +13539,11 @@ static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
13220#endif 13539#endif
13221#endif 13540#endif
13222 13541
13542 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13543 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
13544 goto out;
13545 }
13546
13223 if (!goal) 13547 if (!goal)
13224 goto out; 13548 goto out;
13225 13549
@@ -13414,7 +13738,7 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
13414{ 13738{
13415 dma_addr_t buf_dma; 13739 dma_addr_t buf_dma;
13416 u32 *buf, saved_dma_rwctrl; 13740 u32 *buf, saved_dma_rwctrl;
13417 int ret; 13741 int ret = 0;
13418 13742
13419 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma); 13743 buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
13420 if (!buf) { 13744 if (!buf) {
@@ -13427,6 +13751,9 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
13427 13751
13428 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl); 13752 tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
13429 13753
13754 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
13755 goto out;
13756
13430 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) { 13757 if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
13431 /* DMA read watermark not used on PCIE */ 13758 /* DMA read watermark not used on PCIE */
13432 tp->dma_rwctrl |= 0x00180000; 13759 tp->dma_rwctrl |= 0x00180000;
@@ -13499,7 +13826,6 @@ static int __devinit tg3_test_dma(struct tg3 *tp)
13499 tg3_switch_clocks(tp); 13826 tg3_switch_clocks(tp);
13500#endif 13827#endif
13501 13828
13502 ret = 0;
13503 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 && 13829 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13504 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) 13830 GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
13505 goto out; 13831 goto out;
@@ -13678,6 +14004,7 @@ static char * __devinit tg3_phy_string(struct tg3 *tp)
13678 case PHY_ID_BCM5756: return "5722/5756"; 14004 case PHY_ID_BCM5756: return "5722/5756";
13679 case PHY_ID_BCM5906: return "5906"; 14005 case PHY_ID_BCM5906: return "5906";
13680 case PHY_ID_BCM5761: return "5761"; 14006 case PHY_ID_BCM5761: return "5761";
14007 case PHY_ID_BCM5717: return "5717";
13681 case PHY_ID_BCM8002: return "8002/serdes"; 14008 case PHY_ID_BCM8002: return "8002/serdes";
13682 case 0: return "serdes"; 14009 case 0: return "serdes";
13683 default: return "unknown"; 14010 default: return "unknown";
@@ -13919,51 +14246,6 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13919 tp->rx_pending = TG3_DEF_RX_RING_PENDING; 14246 tp->rx_pending = TG3_DEF_RX_RING_PENDING;
13920 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING; 14247 tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
13921 14248
13922 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
13923 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
13924 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
13925 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
13926 struct tg3_napi *tnapi = &tp->napi[i];
13927
13928 tnapi->tp = tp;
13929 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
13930
13931 tnapi->int_mbox = intmbx;
13932 if (i < 4)
13933 intmbx += 0x8;
13934 else
13935 intmbx += 0x4;
13936
13937 tnapi->consmbox = rcvmbx;
13938 tnapi->prodmbox = sndmbx;
13939
13940 if (i)
13941 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
13942 else
13943 tnapi->coal_now = HOSTCC_MODE_NOW;
13944
13945 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
13946 break;
13947
13948 /*
13949 * If we support MSIX, we'll be using RSS. If we're using
13950 * RSS, the first vector only handles link interrupts and the
13951 * remaining vectors handle rx and tx interrupts. Reuse the
13952 * mailbox values for the next iteration. The values we setup
13953 * above are still useful for the single vectored mode.
13954 */
13955 if (!i)
13956 continue;
13957
13958 rcvmbx += 0x8;
13959
13960 if (sndmbx & 0x4)
13961 sndmbx -= 0x4;
13962 else
13963 sndmbx += 0xc;
13964 }
13965
13966 netif_napi_add(dev, &tp->napi[0].napi, tg3_poll, 64);
13967 dev->ethtool_ops = &tg3_ethtool_ops; 14249 dev->ethtool_ops = &tg3_ethtool_ops;
13968 dev->watchdog_timeo = TG3_TX_TIMEOUT; 14250 dev->watchdog_timeo = TG3_TX_TIMEOUT;
13969 dev->irq = pdev->irq; 14251 dev->irq = pdev->irq;
@@ -13975,8 +14257,8 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
13975 goto err_out_iounmap; 14257 goto err_out_iounmap;
13976 } 14258 }
13977 14259
13978 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) || 14260 if ((tp->tg3_flags3 & TG3_FLG3_5755_PLUS) &&
13979 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) 14261 tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
13980 dev->netdev_ops = &tg3_netdev_ops; 14262 dev->netdev_ops = &tg3_netdev_ops;
13981 else 14263 else
13982 dev->netdev_ops = &tg3_netdev_ops_dma_bug; 14264 dev->netdev_ops = &tg3_netdev_ops_dma_bug;
@@ -14023,46 +14305,39 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14023 14305
14024 tg3_init_bufmgr_config(tp); 14306 tg3_init_bufmgr_config(tp);
14025 14307
14026 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) 14308 /* Selectively allow TSO based on operating conditions */
14027 tp->fw_needed = FIRMWARE_TG3; 14309 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
14028 14310 (tp->fw_needed && !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)))
14029 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
14030 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE; 14311 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
14031 } 14312 else {
14032 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 || 14313 tp->tg3_flags2 &= ~(TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG);
14033 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 || 14314 tp->fw_needed = NULL;
14034 tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
14035 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
14036 (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
14037 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
14038 } else {
14039 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE | TG3_FLG2_TSO_BUG;
14040 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14041 tp->fw_needed = FIRMWARE_TG3TSO5;
14042 else
14043 tp->fw_needed = FIRMWARE_TG3TSO;
14044 } 14315 }
14045 14316
14317 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14318 tp->fw_needed = FIRMWARE_TG3;
14319
14046 /* TSO is on by default on chips that support hardware TSO. 14320 /* TSO is on by default on chips that support hardware TSO.
14047 * Firmware TSO on older chips gives lower performance, so it 14321 * Firmware TSO on older chips gives lower performance, so it
14048 * is off by default, but can be enabled using ethtool. 14322 * is off by default, but can be enabled using ethtool.
14049 */ 14323 */
14050 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) { 14324 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) &&
14051 if (dev->features & NETIF_F_IP_CSUM) 14325 (dev->features & NETIF_F_IP_CSUM))
14052 dev->features |= NETIF_F_TSO; 14326 dev->features |= NETIF_F_TSO;
14053 if ((dev->features & NETIF_F_IPV6_CSUM) && 14327
14054 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_2)) 14328 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_2) ||
14329 (tp->tg3_flags2 & TG3_FLG2_HW_TSO_3)) {
14330 if (dev->features & NETIF_F_IPV6_CSUM)
14055 dev->features |= NETIF_F_TSO6; 14331 dev->features |= NETIF_F_TSO6;
14056 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 || 14332 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO_3) ||
14333 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14057 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 && 14334 (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14058 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) || 14335 GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
14059 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 || 14336 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14060 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 || 14337 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14061 GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
14062 dev->features |= NETIF_F_TSO_ECN; 14338 dev->features |= NETIF_F_TSO_ECN;
14063 } 14339 }
14064 14340
14065
14066 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 && 14341 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
14067 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) && 14342 !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
14068 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) { 14343 !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
@@ -14113,6 +14388,53 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14113 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG; 14388 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
14114 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX; 14389 tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
14115 14390
14391 intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
14392 rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
14393 sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
14394 for (i = 0; i < TG3_IRQ_MAX_VECS; i++) {
14395 struct tg3_napi *tnapi = &tp->napi[i];
14396
14397 tnapi->tp = tp;
14398 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
14399
14400 tnapi->int_mbox = intmbx;
14401 if (i < 4)
14402 intmbx += 0x8;
14403 else
14404 intmbx += 0x4;
14405
14406 tnapi->consmbox = rcvmbx;
14407 tnapi->prodmbox = sndmbx;
14408
14409 if (i) {
14410 tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
14411 netif_napi_add(dev, &tnapi->napi, tg3_poll_msix, 64);
14412 } else {
14413 tnapi->coal_now = HOSTCC_MODE_NOW;
14414 netif_napi_add(dev, &tnapi->napi, tg3_poll, 64);
14415 }
14416
14417 if (!(tp->tg3_flags & TG3_FLAG_SUPPORT_MSIX))
14418 break;
14419
14420 /*
14421 * If we support MSIX, we'll be using RSS. If we're using
14422 * RSS, the first vector only handles link interrupts and the
14423 * remaining vectors handle rx and tx interrupts. Reuse the
14424 * mailbox values for the next iteration. The values we setup
14425 * above are still useful for the single vectored mode.
14426 */
14427 if (!i)
14428 continue;
14429
14430 rcvmbx += 0x8;
14431
14432 if (sndmbx & 0x4)
14433 sndmbx -= 0x4;
14434 else
14435 sndmbx += 0xc;
14436 }
14437
14116 tg3_init_coal(tp); 14438 tg3_init_coal(tp);
14117 14439
14118 pci_set_drvdata(pdev, dev); 14440 pci_set_drvdata(pdev, dev);
@@ -14131,13 +14453,14 @@ static int __devinit tg3_init_one(struct pci_dev *pdev,
14131 tg3_bus_string(tp, str), 14453 tg3_bus_string(tp, str),
14132 dev->dev_addr); 14454 dev->dev_addr);
14133 14455
14134 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) 14456 if (tp->tg3_flags3 & TG3_FLG3_PHY_CONNECTED) {
14457 struct phy_device *phydev;
14458 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
14135 printk(KERN_INFO 14459 printk(KERN_INFO
14136 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n", 14460 "%s: attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
14137 tp->dev->name, 14461 tp->dev->name, phydev->drv->name,
14138 tp->mdio_bus->phy_map[PHY_ADDR]->drv->name, 14462 dev_name(&phydev->dev));
14139 dev_name(&tp->mdio_bus->phy_map[PHY_ADDR]->dev)); 14463 } else
14140 else
14141 printk(KERN_INFO 14464 printk(KERN_INFO
14142 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n", 14465 "%s: attached PHY is %s (%s Ethernet) (WireSpeed[%d])\n",
14143 tp->dev->name, tg3_phy_string(tp), 14466 tp->dev->name, tg3_phy_string(tp),
diff --git a/drivers/net/tg3.h b/drivers/net/tg3.h
index bab7940158e6..453a34fb72b9 100644
--- a/drivers/net/tg3.h
+++ b/drivers/net/tg3.h
@@ -46,10 +46,9 @@
46#define TG3PCI_DEVICE_TIGON3_57788 0x1691 46#define TG3PCI_DEVICE_TIGON3_57788 0x1691
47#define TG3PCI_DEVICE_TIGON3_5785_G 0x1699 /* GPHY */ 47#define TG3PCI_DEVICE_TIGON3_5785_G 0x1699 /* GPHY */
48#define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */ 48#define TG3PCI_DEVICE_TIGON3_5785_F 0x16a0 /* 10/100 only */
49#define TG3PCI_DEVICE_TIGON3_5717C 0x1655 49#define TG3PCI_DEVICE_TIGON3_5717 0x1655
50#define TG3PCI_DEVICE_TIGON3_5717S 0x1656 50#define TG3PCI_DEVICE_TIGON3_5718 0x1656
51#define TG3PCI_DEVICE_TIGON3_5718C 0x1665 51#define TG3PCI_DEVICE_TIGON3_5724 0x165c
52#define TG3PCI_DEVICE_TIGON3_5718S 0x1666
53/* 0x04 --> 0x64 unused */ 52/* 0x04 --> 0x64 unused */
54#define TG3PCI_MSI_DATA 0x00000064 53#define TG3PCI_MSI_DATA 0x00000064
55/* 0x66 --> 0x68 unused */ 54/* 0x66 --> 0x68 unused */
@@ -103,6 +102,7 @@
103#define CHIPREV_ID_5906_A1 0xc001 102#define CHIPREV_ID_5906_A1 0xc001
104#define CHIPREV_ID_57780_A0 0x57780000 103#define CHIPREV_ID_57780_A0 0x57780000
105#define CHIPREV_ID_57780_A1 0x57780001 104#define CHIPREV_ID_57780_A1 0x57780001
105#define CHIPREV_ID_5717_A0 0x05717000
106#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12) 106#define GET_ASIC_REV(CHIP_REV_ID) ((CHIP_REV_ID) >> 12)
107#define ASIC_REV_5700 0x07 107#define ASIC_REV_5700 0x07
108#define ASIC_REV_5701 0x00 108#define ASIC_REV_5701 0x00
@@ -141,8 +141,7 @@
141#define METAL_REV_B1 0x01 141#define METAL_REV_B1 0x01
142#define METAL_REV_B2 0x02 142#define METAL_REV_B2 0x02
143#define TG3PCI_DMA_RW_CTRL 0x0000006c 143#define TG3PCI_DMA_RW_CTRL 0x0000006c
144#define DMA_RWCTRL_MIN_DMA 0x000000ff 144#define DMA_RWCTRL_DIS_CACHE_ALIGNMENT 0x00000001
145#define DMA_RWCTRL_MIN_DMA_SHIFT 0
146#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700 145#define DMA_RWCTRL_READ_BNDRY_MASK 0x00000700
147#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000 146#define DMA_RWCTRL_READ_BNDRY_DISAB 0x00000000
148#define DMA_RWCTRL_READ_BNDRY_16 0x00000100 147#define DMA_RWCTRL_READ_BNDRY_16 0x00000100
@@ -242,7 +241,11 @@
242#define MAILBOX_GENERAL_7 0x00000258 /* 64-bit */ 241#define MAILBOX_GENERAL_7 0x00000258 /* 64-bit */
243#define MAILBOX_RELOAD_STAT 0x00000260 /* 64-bit */ 242#define MAILBOX_RELOAD_STAT 0x00000260 /* 64-bit */
244#define MAILBOX_RCV_STD_PROD_IDX 0x00000268 /* 64-bit */ 243#define MAILBOX_RCV_STD_PROD_IDX 0x00000268 /* 64-bit */
244#define TG3_RX_STD_PROD_IDX_REG (MAILBOX_RCV_STD_PROD_IDX + \
245 TG3_64BIT_REG_LOW)
245#define MAILBOX_RCV_JUMBO_PROD_IDX 0x00000270 /* 64-bit */ 246#define MAILBOX_RCV_JUMBO_PROD_IDX 0x00000270 /* 64-bit */
247#define TG3_RX_JMB_PROD_IDX_REG (MAILBOX_RCV_JUMBO_PROD_IDX + \
248 TG3_64BIT_REG_LOW)
246#define MAILBOX_RCV_MINI_PROD_IDX 0x00000278 /* 64-bit */ 249#define MAILBOX_RCV_MINI_PROD_IDX 0x00000278 /* 64-bit */
247#define MAILBOX_RCVRET_CON_IDX_0 0x00000280 /* 64-bit */ 250#define MAILBOX_RCVRET_CON_IDX_0 0x00000280 /* 64-bit */
248#define MAILBOX_RCVRET_CON_IDX_1 0x00000288 /* 64-bit */ 251#define MAILBOX_RCVRET_CON_IDX_1 0x00000288 /* 64-bit */
@@ -1264,8 +1267,9 @@
1264#define WDMAC_MODE_FIFOURUN_ENAB 0x00000080 1267#define WDMAC_MODE_FIFOURUN_ENAB 0x00000080
1265#define WDMAC_MODE_FIFOOREAD_ENAB 0x00000100 1268#define WDMAC_MODE_FIFOOREAD_ENAB 0x00000100
1266#define WDMAC_MODE_LNGREAD_ENAB 0x00000200 1269#define WDMAC_MODE_LNGREAD_ENAB 0x00000200
1267#define WDMAC_MODE_RX_ACCEL 0x00000400 1270#define WDMAC_MODE_RX_ACCEL 0x00000400
1268#define WDMAC_MODE_STATUS_TAG_FIX 0x20000000 1271#define WDMAC_MODE_STATUS_TAG_FIX 0x20000000
1272#define WDMAC_MODE_BURST_ALL_DATA 0xc0000000
1269#define WDMAC_STATUS 0x00004c04 1273#define WDMAC_STATUS 0x00004c04
1270#define WDMAC_STATUS_TGTABORT 0x00000004 1274#define WDMAC_STATUS_TGTABORT 0x00000004
1271#define WDMAC_STATUS_MSTABORT 0x00000008 1275#define WDMAC_STATUS_MSTABORT 0x00000008
@@ -1953,10 +1957,34 @@
1953#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000 1957#define NIC_SRAM_MBUF_POOL_BASE5705 0x00010000
1954#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000 1958#define NIC_SRAM_MBUF_POOL_SIZE5705 0x0000e000
1955 1959
1960
1956/* Currently this is fixed. */ 1961/* Currently this is fixed. */
1957#define PHY_ADDR 0x01 1962#define TG3_PHY_PCIE_ADDR 0x00
1963#define TG3_PHY_MII_ADDR 0x01
1964
1965
1966/*** Tigon3 specific PHY PCIE registers. ***/
1967
1968#define TG3_PCIEPHY_BLOCK_ADDR 0x1f
1969#define TG3_PCIEPHY_XGXS_BLK1 0x0801
1970#define TG3_PCIEPHY_TXB_BLK 0x0861
1971#define TG3_PCIEPHY_BLOCK_SHIFT 4
1958 1972
1959/* Tigon3 specific PHY MII registers. */ 1973/* TG3_PCIEPHY_TXB_BLK */
1974#define TG3_PCIEPHY_TX0CTRL1 0x15
1975#define TG3_PCIEPHY_TX0CTRL1_TXOCM 0x0003
1976#define TG3_PCIEPHY_TX0CTRL1_RDCTL 0x0008
1977#define TG3_PCIEPHY_TX0CTRL1_TXCMV 0x0030
1978#define TG3_PCIEPHY_TX0CTRL1_TKSEL 0x0040
1979#define TG3_PCIEPHY_TX0CTRL1_NB_EN 0x0400
1980
1981/* TG3_PCIEPHY_XGXS_BLK1 */
1982#define TG3_PCIEPHY_PWRMGMT4 0x1a
1983#define TG3_PCIEPHY_PWRMGMT4_L1PLLPD_EN 0x0038
1984#define TG3_PCIEPHY_PWRMGMT4_LOWPWR_EN 0x4000
1985
1986
1987/*** Tigon3 specific PHY MII registers. ***/
1960#define TG3_BMCR_SPEED1000 0x0040 1988#define TG3_BMCR_SPEED1000 0x0040
1961 1989
1962#define MII_TG3_CTRL 0x09 /* 1000-baseT control register */ 1990#define MII_TG3_CTRL 0x09 /* 1000-baseT control register */
@@ -2055,6 +2083,9 @@
2055#define MII_TG3_FET_SHDW_MISCCTRL 0x10 2083#define MII_TG3_FET_SHDW_MISCCTRL 0x10
2056#define MII_TG3_FET_SHDW_MISCCTRL_MDIX 0x4000 2084#define MII_TG3_FET_SHDW_MISCCTRL_MDIX 0x4000
2057 2085
2086#define MII_TG3_FET_SHDW_AUXMODE4 0x1a
2087#define MII_TG3_FET_SHDW_AUXMODE4_SBPD 0x0008
2088
2058#define MII_TG3_FET_SHDW_AUXSTAT2 0x1b 2089#define MII_TG3_FET_SHDW_AUXSTAT2 0x1b
2059#define MII_TG3_FET_SHDW_AUXSTAT2_APD 0x0020 2090#define MII_TG3_FET_SHDW_AUXSTAT2_APD 0x0020
2060 2091
@@ -2542,8 +2573,10 @@ struct tg3_ethtool_stats {
2542}; 2573};
2543 2574
2544struct tg3_rx_prodring_set { 2575struct tg3_rx_prodring_set {
2545 u32 rx_std_ptr; 2576 u32 rx_std_prod_idx;
2546 u32 rx_jmb_ptr; 2577 u32 rx_std_cons_idx;
2578 u32 rx_jmb_prod_idx;
2579 u32 rx_jmb_cons_idx;
2547 struct tg3_rx_buffer_desc *rx_std; 2580 struct tg3_rx_buffer_desc *rx_std;
2548 struct tg3_ext_rx_buffer_desc *rx_jmb; 2581 struct tg3_ext_rx_buffer_desc *rx_jmb;
2549 struct ring_info *rx_std_buffers; 2582 struct ring_info *rx_std_buffers;
@@ -2571,6 +2604,7 @@ struct tg3_napi {
2571 u32 consmbox; 2604 u32 consmbox;
2572 u32 rx_rcb_ptr; 2605 u32 rx_rcb_ptr;
2573 u16 *rx_rcb_prod_idx; 2606 u16 *rx_rcb_prod_idx;
2607 struct tg3_rx_prodring_set *prodring;
2574 2608
2575 struct tg3_rx_buffer_desc *rx_rcb; 2609 struct tg3_rx_buffer_desc *rx_rcb;
2576 struct tg3_tx_buffer_desc *tx_ring; 2610 struct tg3_tx_buffer_desc *tx_ring;
@@ -2654,7 +2688,7 @@ struct tg3 {
2654 struct vlan_group *vlgrp; 2688 struct vlan_group *vlgrp;
2655#endif 2689#endif
2656 2690
2657 struct tg3_rx_prodring_set prodring[1]; 2691 struct tg3_rx_prodring_set prodring[TG3_IRQ_MAX_VECS - 1];
2658 2692
2659 2693
2660 /* begin "everything else" cacheline(s) section */ 2694 /* begin "everything else" cacheline(s) section */
@@ -2725,7 +2759,7 @@ struct tg3 {
2725#define TG3_FLG2_SERDES_PREEMPHASIS 0x00020000 2759#define TG3_FLG2_SERDES_PREEMPHASIS 0x00020000
2726#define TG3_FLG2_5705_PLUS 0x00040000 2760#define TG3_FLG2_5705_PLUS 0x00040000
2727#define TG3_FLG2_5750_PLUS 0x00080000 2761#define TG3_FLG2_5750_PLUS 0x00080000
2728#define TG3_FLG2_PROTECTED_NVRAM 0x00100000 2762#define TG3_FLG2_HW_TSO_3 0x00100000
2729#define TG3_FLG2_USING_MSI 0x00200000 2763#define TG3_FLG2_USING_MSI 0x00200000
2730#define TG3_FLG2_USING_MSIX 0x00400000 2764#define TG3_FLG2_USING_MSIX 0x00400000
2731#define TG3_FLG2_USING_MSI_OR_MSIX (TG3_FLG2_USING_MSI | \ 2765#define TG3_FLG2_USING_MSI_OR_MSIX (TG3_FLG2_USING_MSI | \
@@ -2737,7 +2771,9 @@ struct tg3 {
2737#define TG3_FLG2_ICH_WORKAROUND 0x02000000 2771#define TG3_FLG2_ICH_WORKAROUND 0x02000000
2738#define TG3_FLG2_5780_CLASS 0x04000000 2772#define TG3_FLG2_5780_CLASS 0x04000000
2739#define TG3_FLG2_HW_TSO_2 0x08000000 2773#define TG3_FLG2_HW_TSO_2 0x08000000
2740#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | TG3_FLG2_HW_TSO_2) 2774#define TG3_FLG2_HW_TSO (TG3_FLG2_HW_TSO_1 | \
2775 TG3_FLG2_HW_TSO_2 | \
2776 TG3_FLG2_HW_TSO_3)
2741#define TG3_FLG2_1SHOT_MSI 0x10000000 2777#define TG3_FLG2_1SHOT_MSI 0x10000000
2742#define TG3_FLG2_PHY_JITTER_BUG 0x20000000 2778#define TG3_FLG2_PHY_JITTER_BUG 0x20000000
2743#define TG3_FLG2_NO_FWARE_REPORTED 0x40000000 2779#define TG3_FLG2_NO_FWARE_REPORTED 0x40000000
@@ -2745,6 +2781,7 @@ struct tg3 {
2745 u32 tg3_flags3; 2781 u32 tg3_flags3;
2746#define TG3_FLG3_NO_NVRAM_ADDR_TRANS 0x00000001 2782#define TG3_FLG3_NO_NVRAM_ADDR_TRANS 0x00000001
2747#define TG3_FLG3_ENABLE_APE 0x00000002 2783#define TG3_FLG3_ENABLE_APE 0x00000002
2784#define TG3_FLG3_PROTECTED_NVRAM 0x00000004
2748#define TG3_FLG3_5701_DMA_BUG 0x00000008 2785#define TG3_FLG3_5701_DMA_BUG 0x00000008
2749#define TG3_FLG3_USE_PHYLIB 0x00000010 2786#define TG3_FLG3_USE_PHYLIB 0x00000010
2750#define TG3_FLG3_MDIOBUS_INITED 0x00000020 2787#define TG3_FLG3_MDIOBUS_INITED 0x00000020
@@ -2756,9 +2793,11 @@ struct tg3 {
2756#define TG3_FLG3_PHY_ENABLE_APD 0x00001000 2793#define TG3_FLG3_PHY_ENABLE_APD 0x00001000
2757#define TG3_FLG3_5755_PLUS 0x00002000 2794#define TG3_FLG3_5755_PLUS 0x00002000
2758#define TG3_FLG3_NO_NVRAM 0x00004000 2795#define TG3_FLG3_NO_NVRAM 0x00004000
2759#define TG3_FLG3_TOGGLE_10_100_L1PLLPD 0x00008000
2760#define TG3_FLG3_PHY_IS_FET 0x00010000 2796#define TG3_FLG3_PHY_IS_FET 0x00010000
2761#define TG3_FLG3_ENABLE_RSS 0x00020000 2797#define TG3_FLG3_ENABLE_RSS 0x00020000
2798#define TG3_FLG3_4G_DMA_BNDRY_BUG 0x00080000
2799#define TG3_FLG3_40BIT_DMA_LIMIT_BUG 0x00100000
2800#define TG3_FLG3_SHORT_DMA_BUG 0x00200000
2762 2801
2763 struct timer_list timer; 2802 struct timer_list timer;
2764 u16 timer_counter; 2803 u16 timer_counter;
@@ -2825,6 +2864,7 @@ struct tg3 {
2825#define PHY_ID_BCM5756 0xbc050ed0 2864#define PHY_ID_BCM5756 0xbc050ed0
2826#define PHY_ID_BCM5784 0xbc050fa0 2865#define PHY_ID_BCM5784 0xbc050fa0
2827#define PHY_ID_BCM5761 0xbc050fd0 2866#define PHY_ID_BCM5761 0xbc050fd0
2867#define PHY_ID_BCM5717 0x5c0d8a00
2828#define PHY_ID_BCM5906 0xdc00ac40 2868#define PHY_ID_BCM5906 0xdc00ac40
2829#define PHY_ID_BCM8002 0x60010140 2869#define PHY_ID_BCM8002 0x60010140
2830#define PHY_ID_INVALID 0xffffffff 2870#define PHY_ID_INVALID 0xffffffff
@@ -2834,6 +2874,7 @@ struct tg3 {
2834#define PHY_REV_BCM5401_C0 0x6 2874#define PHY_REV_BCM5401_C0 0x6
2835#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */ 2875#define PHY_REV_BCM5411_X0 0x1 /* Found on Netgear GA302T */
2836#define TG3_PHY_ID_BCM50610 0x143bd60 2876#define TG3_PHY_ID_BCM50610 0x143bd60
2877#define TG3_PHY_ID_BCM50610M 0x143bd70
2837#define TG3_PHY_ID_BCMAC131 0x143bc70 2878#define TG3_PHY_ID_BCMAC131 0x143bc70
2838#define TG3_PHY_ID_RTL8211C 0x001cc910 2879#define TG3_PHY_ID_RTL8211C 0x001cc910
2839#define TG3_PHY_ID_RTL8201E 0x00008200 2880#define TG3_PHY_ID_RTL8201E 0x00008200
@@ -2865,7 +2906,7 @@ struct tg3 {
2865 (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \ 2906 (X) == PHY_ID_BCM5780 || (X) == PHY_ID_BCM5787 || \
2866 (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM5756 || \ 2907 (X) == PHY_ID_BCM5755 || (X) == PHY_ID_BCM5756 || \
2867 (X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM5761 || \ 2908 (X) == PHY_ID_BCM5906 || (X) == PHY_ID_BCM5761 || \
2868 (X) == PHY_ID_BCM8002) 2909 (X) == PHY_ID_BCM5717 || (X) == PHY_ID_BCM8002)
2869 2910
2870 struct tg3_hw_stats *hw_stats; 2911 struct tg3_hw_stats *hw_stats;
2871 dma_addr_t stats_mapping; 2912 dma_addr_t stats_mapping;
diff --git a/drivers/net/tlan.c b/drivers/net/tlan.c
index 3d31b47332bb..16f23f84920b 100644
--- a/drivers/net/tlan.c
+++ b/drivers/net/tlan.c
@@ -1549,7 +1549,8 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
1549 if (tmpCStat & TLAN_CSTAT_EOC) 1549 if (tmpCStat & TLAN_CSTAT_EOC)
1550 eoc = 1; 1550 eoc = 1;
1551 1551
1552 new_skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 ); 1552 new_skb = netdev_alloc_skb_ip_align(dev,
1553 TLAN_MAX_FRAME_SIZE + 5);
1553 if ( !new_skb ) 1554 if ( !new_skb )
1554 goto drop_and_reuse; 1555 goto drop_and_reuse;
1555 1556
@@ -1563,7 +1564,6 @@ static u32 TLan_HandleRxEOF( struct net_device *dev, u16 host_int )
1563 skb->protocol = eth_type_trans( skb, dev ); 1564 skb->protocol = eth_type_trans( skb, dev );
1564 netif_rx( skb ); 1565 netif_rx( skb );
1565 1566
1566 skb_reserve( new_skb, NET_IP_ALIGN );
1567 head_list->buffer[0].address = pci_map_single(priv->pciDev, 1567 head_list->buffer[0].address = pci_map_single(priv->pciDev,
1568 new_skb->data, 1568 new_skb->data,
1569 TLAN_MAX_FRAME_SIZE, 1569 TLAN_MAX_FRAME_SIZE,
@@ -1967,13 +1967,12 @@ static void TLan_ResetLists( struct net_device *dev )
1967 list->cStat = TLAN_CSTAT_READY; 1967 list->cStat = TLAN_CSTAT_READY;
1968 list->frameSize = TLAN_MAX_FRAME_SIZE; 1968 list->frameSize = TLAN_MAX_FRAME_SIZE;
1969 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER; 1969 list->buffer[0].count = TLAN_MAX_FRAME_SIZE | TLAN_LAST_BUFFER;
1970 skb = netdev_alloc_skb(dev, TLAN_MAX_FRAME_SIZE + 7 ); 1970 skb = netdev_alloc_skb_ip_align(dev, TLAN_MAX_FRAME_SIZE + 5);
1971 if ( !skb ) { 1971 if ( !skb ) {
1972 pr_err("TLAN: out of memory for received data.\n" ); 1972 pr_err("TLAN: out of memory for received data.\n" );
1973 break; 1973 break;
1974 } 1974 }
1975 1975
1976 skb_reserve( skb, NET_IP_ALIGN );
1977 list->buffer[0].address = pci_map_single(priv->pciDev, 1976 list->buffer[0].address = pci_map_single(priv->pciDev,
1978 skb->data, 1977 skb->data,
1979 TLAN_MAX_FRAME_SIZE, 1978 TLAN_MAX_FRAME_SIZE,
diff --git a/drivers/net/tokenring/tms380tr.c b/drivers/net/tokenring/tms380tr.c
index a7b6888829b5..fa152144aacf 100644
--- a/drivers/net/tokenring/tms380tr.c
+++ b/drivers/net/tokenring/tms380tr.c
@@ -1364,6 +1364,8 @@ static int tms380tr_reset_adapter(struct net_device *dev)
1364 return (-1); 1364 return (-1);
1365} 1365}
1366 1366
1367MODULE_FIRMWARE("tms380tr.bin");
1368
1367/* 1369/*
1368 * Starts bring up diagnostics of token ring adapter and evaluates 1370 * Starts bring up diagnostics of token ring adapter and evaluates
1369 * diagnostic results. 1371 * diagnostic results.
diff --git a/drivers/net/tsi108_eth.c b/drivers/net/tsi108_eth.c
index 7030bd5e9848..a69c4a48bab9 100644
--- a/drivers/net/tsi108_eth.c
+++ b/drivers/net/tsi108_eth.c
@@ -802,13 +802,11 @@ static int tsi108_refill_rx(struct net_device *dev, int budget)
802 int rx = data->rxhead; 802 int rx = data->rxhead;
803 struct sk_buff *skb; 803 struct sk_buff *skb;
804 804
805 data->rxskbs[rx] = skb = netdev_alloc_skb(dev, 805 skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
806 TSI108_RXBUF_SIZE + 2); 806 data->rxskbs[rx] = skb;
807 if (!skb) 807 if (!skb)
808 break; 808 break;
809 809
810 skb_reserve(skb, 2); /* Align the data on a 4-byte boundary. */
811
812 data->rxring[rx].buf0 = dma_map_single(NULL, skb->data, 810 data->rxring[rx].buf0 = dma_map_single(NULL, skb->data,
813 TSI108_RX_SKB_SIZE, 811 TSI108_RX_SKB_SIZE,
814 DMA_FROM_DEVICE); 812 DMA_FROM_DEVICE);
@@ -1356,7 +1354,7 @@ static int tsi108_open(struct net_device *dev)
1356 for (i = 0; i < TSI108_RXRING_LEN; i++) { 1354 for (i = 0; i < TSI108_RXRING_LEN; i++) {
1357 struct sk_buff *skb; 1355 struct sk_buff *skb;
1358 1356
1359 skb = netdev_alloc_skb(dev, TSI108_RXBUF_SIZE + NET_IP_ALIGN); 1357 skb = netdev_alloc_skb_ip_align(dev, TSI108_RXBUF_SIZE);
1360 if (!skb) { 1358 if (!skb) {
1361 /* Bah. No memory for now, but maybe we'll get 1359 /* Bah. No memory for now, but maybe we'll get
1362 * some more later. 1360 * some more later.
@@ -1370,8 +1368,6 @@ static int tsi108_open(struct net_device *dev)
1370 } 1368 }
1371 1369
1372 data->rxskbs[i] = skb; 1370 data->rxskbs[i] = skb;
1373 /* Align the payload on a 4-byte boundary */
1374 skb_reserve(skb, 2);
1375 data->rxskbs[i] = skb; 1371 data->rxskbs[i] = skb;
1376 data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data); 1372 data->rxring[i].buf0 = virt_to_phys(data->rxskbs[i]->data);
1377 data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT; 1373 data->rxring[i].misc = TSI108_RX_OWN | TSI108_RX_INT;
diff --git a/drivers/net/tun.c b/drivers/net/tun.c
index 4fdfa2ae5418..01e99f22210e 100644
--- a/drivers/net/tun.c
+++ b/drivers/net/tun.c
@@ -44,7 +44,6 @@
44#include <linux/kernel.h> 44#include <linux/kernel.h>
45#include <linux/major.h> 45#include <linux/major.h>
46#include <linux/slab.h> 46#include <linux/slab.h>
47#include <linux/smp_lock.h>
48#include <linux/poll.h> 47#include <linux/poll.h>
49#include <linux/fcntl.h> 48#include <linux/fcntl.h>
50#include <linux/init.h> 49#include <linux/init.h>
@@ -54,6 +53,7 @@
54#include <linux/miscdevice.h> 53#include <linux/miscdevice.h>
55#include <linux/ethtool.h> 54#include <linux/ethtool.h>
56#include <linux/rtnetlink.h> 55#include <linux/rtnetlink.h>
56#include <linux/compat.h>
57#include <linux/if.h> 57#include <linux/if.h>
58#include <linux/if_arp.h> 58#include <linux/if_arp.h>
59#include <linux/if_ether.h> 59#include <linux/if_ether.h>
@@ -1110,8 +1110,8 @@ static int set_offload(struct net_device *dev, unsigned long arg)
1110 return 0; 1110 return 0;
1111} 1111}
1112 1112
1113static long tun_chr_ioctl(struct file *file, unsigned int cmd, 1113static long __tun_chr_ioctl(struct file *file, unsigned int cmd,
1114 unsigned long arg) 1114 unsigned long arg, int ifreq_len)
1115{ 1115{
1116 struct tun_file *tfile = file->private_data; 1116 struct tun_file *tfile = file->private_data;
1117 struct tun_struct *tun; 1117 struct tun_struct *tun;
@@ -1121,7 +1121,7 @@ static long tun_chr_ioctl(struct file *file, unsigned int cmd,
1121 int ret; 1121 int ret;
1122 1122
1123 if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89) 1123 if (cmd == TUNSETIFF || _IOC_TYPE(cmd) == 0x89)
1124 if (copy_from_user(&ifr, argp, sizeof ifr)) 1124 if (copy_from_user(&ifr, argp, ifreq_len))
1125 return -EFAULT; 1125 return -EFAULT;
1126 1126
1127 if (cmd == TUNGETFEATURES) { 1127 if (cmd == TUNGETFEATURES) {
@@ -1144,7 +1144,7 @@ static long tun_chr_ioctl(struct file *file, unsigned int cmd,
1144 if (ret) 1144 if (ret)
1145 goto unlock; 1145 goto unlock;
1146 1146
1147 if (copy_to_user(argp, &ifr, sizeof(ifr))) 1147 if (copy_to_user(argp, &ifr, ifreq_len))
1148 ret = -EFAULT; 1148 ret = -EFAULT;
1149 goto unlock; 1149 goto unlock;
1150 } 1150 }
@@ -1162,7 +1162,7 @@ static long tun_chr_ioctl(struct file *file, unsigned int cmd,
1162 if (ret) 1162 if (ret)
1163 break; 1163 break;
1164 1164
1165 if (copy_to_user(argp, &ifr, sizeof(ifr))) 1165 if (copy_to_user(argp, &ifr, ifreq_len))
1166 ret = -EFAULT; 1166 ret = -EFAULT;
1167 break; 1167 break;
1168 1168
@@ -1236,7 +1236,7 @@ static long tun_chr_ioctl(struct file *file, unsigned int cmd,
1236 /* Get hw addres */ 1236 /* Get hw addres */
1237 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN); 1237 memcpy(ifr.ifr_hwaddr.sa_data, tun->dev->dev_addr, ETH_ALEN);
1238 ifr.ifr_hwaddr.sa_family = tun->dev->type; 1238 ifr.ifr_hwaddr.sa_family = tun->dev->type;
1239 if (copy_to_user(argp, &ifr, sizeof ifr)) 1239 if (copy_to_user(argp, &ifr, ifreq_len))
1240 ret = -EFAULT; 1240 ret = -EFAULT;
1241 break; 1241 break;
1242 1242
@@ -1275,6 +1275,41 @@ unlock:
1275 return ret; 1275 return ret;
1276} 1276}
1277 1277
1278static long tun_chr_ioctl(struct file *file,
1279 unsigned int cmd, unsigned long arg)
1280{
1281 return __tun_chr_ioctl(file, cmd, arg, sizeof (struct ifreq));
1282}
1283
1284#ifdef CONFIG_COMPAT
1285static long tun_chr_compat_ioctl(struct file *file,
1286 unsigned int cmd, unsigned long arg)
1287{
1288 switch (cmd) {
1289 case TUNSETIFF:
1290 case TUNGETIFF:
1291 case TUNSETTXFILTER:
1292 case TUNGETSNDBUF:
1293 case TUNSETSNDBUF:
1294 case SIOCGIFHWADDR:
1295 case SIOCSIFHWADDR:
1296 arg = (unsigned long)compat_ptr(arg);
1297 break;
1298 default:
1299 arg = (compat_ulong_t)arg;
1300 break;
1301 }
1302
1303 /*
1304 * compat_ifreq is shorter than ifreq, so we must not access beyond
1305 * the end of that structure. All fields that are used in this
1306 * driver are compatible though, we don't need to convert the
1307 * contents.
1308 */
1309 return __tun_chr_ioctl(file, cmd, arg, sizeof(struct compat_ifreq));
1310}
1311#endif /* CONFIG_COMPAT */
1312
1278static int tun_chr_fasync(int fd, struct file *file, int on) 1313static int tun_chr_fasync(int fd, struct file *file, int on)
1279{ 1314{
1280 struct tun_struct *tun = tun_get(file); 1315 struct tun_struct *tun = tun_get(file);
@@ -1285,7 +1320,6 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
1285 1320
1286 DBG(KERN_INFO "%s: tun_chr_fasync %d\n", tun->dev->name, on); 1321 DBG(KERN_INFO "%s: tun_chr_fasync %d\n", tun->dev->name, on);
1287 1322
1288 lock_kernel();
1289 if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0) 1323 if ((ret = fasync_helper(fd, file, on, &tun->fasync)) < 0)
1290 goto out; 1324 goto out;
1291 1325
@@ -1298,7 +1332,6 @@ static int tun_chr_fasync(int fd, struct file *file, int on)
1298 tun->flags &= ~TUN_FASYNC; 1332 tun->flags &= ~TUN_FASYNC;
1299 ret = 0; 1333 ret = 0;
1300out: 1334out:
1301 unlock_kernel();
1302 tun_put(tun); 1335 tun_put(tun);
1303 return ret; 1336 return ret;
1304} 1337}
@@ -1306,7 +1339,7 @@ out:
1306static int tun_chr_open(struct inode *inode, struct file * file) 1339static int tun_chr_open(struct inode *inode, struct file * file)
1307{ 1340{
1308 struct tun_file *tfile; 1341 struct tun_file *tfile;
1309 cycle_kernel_lock(); 1342
1310 DBG1(KERN_INFO "tunX: tun_chr_open\n"); 1343 DBG1(KERN_INFO "tunX: tun_chr_open\n");
1311 1344
1312 tfile = kmalloc(sizeof(*tfile), GFP_KERNEL); 1345 tfile = kmalloc(sizeof(*tfile), GFP_KERNEL);
@@ -1359,7 +1392,10 @@ static const struct file_operations tun_fops = {
1359 .write = do_sync_write, 1392 .write = do_sync_write,
1360 .aio_write = tun_chr_aio_write, 1393 .aio_write = tun_chr_aio_write,
1361 .poll = tun_chr_poll, 1394 .poll = tun_chr_poll,
1362 .unlocked_ioctl = tun_chr_ioctl, 1395 .unlocked_ioctl = tun_chr_ioctl,
1396#ifdef CONFIG_COMPAT
1397 .compat_ioctl = tun_chr_compat_ioctl,
1398#endif
1363 .open = tun_chr_open, 1399 .open = tun_chr_open,
1364 .release = tun_chr_close, 1400 .release = tun_chr_close,
1365 .fasync = tun_chr_fasync 1401 .fasync = tun_chr_fasync
diff --git a/drivers/net/usb/asix.c b/drivers/net/usb/asix.c
index 6ce7f775bb74..1bef39a60a62 100644
--- a/drivers/net/usb/asix.c
+++ b/drivers/net/usb/asix.c
@@ -1327,7 +1327,7 @@ static const struct driver_info ax8817x_info = {
1327 .status = asix_status, 1327 .status = asix_status,
1328 .link_reset = ax88172_link_reset, 1328 .link_reset = ax88172_link_reset,
1329 .reset = ax88172_link_reset, 1329 .reset = ax88172_link_reset,
1330 .flags = FLAG_ETHER, 1330 .flags = FLAG_ETHER | FLAG_LINK_INTR,
1331 .data = 0x00130103, 1331 .data = 0x00130103,
1332}; 1332};
1333 1333
@@ -1337,7 +1337,7 @@ static const struct driver_info dlink_dub_e100_info = {
1337 .status = asix_status, 1337 .status = asix_status,
1338 .link_reset = ax88172_link_reset, 1338 .link_reset = ax88172_link_reset,
1339 .reset = ax88172_link_reset, 1339 .reset = ax88172_link_reset,
1340 .flags = FLAG_ETHER, 1340 .flags = FLAG_ETHER | FLAG_LINK_INTR,
1341 .data = 0x009f9d9f, 1341 .data = 0x009f9d9f,
1342}; 1342};
1343 1343
@@ -1347,7 +1347,7 @@ static const struct driver_info netgear_fa120_info = {
1347 .status = asix_status, 1347 .status = asix_status,
1348 .link_reset = ax88172_link_reset, 1348 .link_reset = ax88172_link_reset,
1349 .reset = ax88172_link_reset, 1349 .reset = ax88172_link_reset,
1350 .flags = FLAG_ETHER, 1350 .flags = FLAG_ETHER | FLAG_LINK_INTR,
1351 .data = 0x00130103, 1351 .data = 0x00130103,
1352}; 1352};
1353 1353
@@ -1357,7 +1357,7 @@ static const struct driver_info hawking_uf200_info = {
1357 .status = asix_status, 1357 .status = asix_status,
1358 .link_reset = ax88172_link_reset, 1358 .link_reset = ax88172_link_reset,
1359 .reset = ax88172_link_reset, 1359 .reset = ax88172_link_reset,
1360 .flags = FLAG_ETHER, 1360 .flags = FLAG_ETHER | FLAG_LINK_INTR,
1361 .data = 0x001f1d1f, 1361 .data = 0x001f1d1f,
1362}; 1362};
1363 1363
@@ -1367,7 +1367,7 @@ static const struct driver_info ax88772_info = {
1367 .status = asix_status, 1367 .status = asix_status,
1368 .link_reset = ax88772_link_reset, 1368 .link_reset = ax88772_link_reset,
1369 .reset = ax88772_link_reset, 1369 .reset = ax88772_link_reset,
1370 .flags = FLAG_ETHER | FLAG_FRAMING_AX, 1370 .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
1371 .rx_fixup = asix_rx_fixup, 1371 .rx_fixup = asix_rx_fixup,
1372 .tx_fixup = asix_tx_fixup, 1372 .tx_fixup = asix_tx_fixup,
1373}; 1373};
@@ -1378,7 +1378,7 @@ static const struct driver_info ax88178_info = {
1378 .status = asix_status, 1378 .status = asix_status,
1379 .link_reset = ax88178_link_reset, 1379 .link_reset = ax88178_link_reset,
1380 .reset = ax88178_link_reset, 1380 .reset = ax88178_link_reset,
1381 .flags = FLAG_ETHER | FLAG_FRAMING_AX, 1381 .flags = FLAG_ETHER | FLAG_FRAMING_AX | FLAG_LINK_INTR,
1382 .rx_fixup = asix_rx_fixup, 1382 .rx_fixup = asix_rx_fixup,
1383 .tx_fixup = asix_tx_fixup, 1383 .tx_fixup = asix_tx_fixup,
1384}; 1384};
diff --git a/drivers/net/usb/cdc_ether.c b/drivers/net/usb/cdc_ether.c
index 21e1ba160008..7ec24c9b2535 100644
--- a/drivers/net/usb/cdc_ether.c
+++ b/drivers/net/usb/cdc_ether.c
@@ -413,13 +413,21 @@ static int cdc_bind(struct usbnet *dev, struct usb_interface *intf)
413 413
414static const struct driver_info cdc_info = { 414static const struct driver_info cdc_info = {
415 .description = "CDC Ethernet Device", 415 .description = "CDC Ethernet Device",
416 .flags = FLAG_ETHER, 416 .flags = FLAG_ETHER | FLAG_LINK_INTR,
417 // .check_connect = cdc_check_connect, 417 // .check_connect = cdc_check_connect,
418 .bind = cdc_bind, 418 .bind = cdc_bind,
419 .unbind = usbnet_cdc_unbind, 419 .unbind = usbnet_cdc_unbind,
420 .status = cdc_status, 420 .status = cdc_status,
421}; 421};
422 422
423static const struct driver_info mbm_info = {
424 .description = "Mobile Broadband Network Device",
425 .flags = FLAG_WWAN,
426 .bind = cdc_bind,
427 .unbind = usbnet_cdc_unbind,
428 .status = cdc_status,
429};
430
423/*-------------------------------------------------------------------------*/ 431/*-------------------------------------------------------------------------*/
424 432
425 433
@@ -532,72 +540,72 @@ static const struct usb_device_id products [] = {
532 /* Ericsson F3507g */ 540 /* Ericsson F3507g */
533 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM, 541 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1900, USB_CLASS_COMM,
534 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 542 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
535 .driver_info = (unsigned long) &cdc_info, 543 .driver_info = (unsigned long) &mbm_info,
536}, { 544}, {
537 /* Ericsson F3507g ver. 2 */ 545 /* Ericsson F3507g ver. 2 */
538 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1902, USB_CLASS_COMM, 546 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1902, USB_CLASS_COMM,
539 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 547 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
540 .driver_info = (unsigned long) &cdc_info, 548 .driver_info = (unsigned long) &mbm_info,
541}, { 549}, {
542 /* Ericsson F3607gw */ 550 /* Ericsson F3607gw */
543 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1904, USB_CLASS_COMM, 551 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1904, USB_CLASS_COMM,
544 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 552 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
545 .driver_info = (unsigned long) &cdc_info, 553 .driver_info = (unsigned long) &mbm_info,
546}, { 554}, {
547 /* Ericsson F3607gw ver 2 */ 555 /* Ericsson F3607gw ver 2 */
548 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1905, USB_CLASS_COMM, 556 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1905, USB_CLASS_COMM,
549 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 557 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
550 .driver_info = (unsigned long) &cdc_info, 558 .driver_info = (unsigned long) &mbm_info,
551}, { 559}, {
552 /* Ericsson F3607gw ver 3 */ 560 /* Ericsson F3607gw ver 3 */
553 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM, 561 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1906, USB_CLASS_COMM,
554 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 562 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
555 .driver_info = (unsigned long) &cdc_info, 563 .driver_info = (unsigned long) &mbm_info,
556}, { 564}, {
557 /* Ericsson F3307 */ 565 /* Ericsson F3307 */
558 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190a, USB_CLASS_COMM, 566 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x190a, USB_CLASS_COMM,
559 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 567 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
560 .driver_info = (unsigned long) &cdc_info, 568 .driver_info = (unsigned long) &mbm_info,
561}, { 569}, {
562 /* Ericsson F3307 ver 2 */ 570 /* Ericsson F3307 ver 2 */
563 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1909, USB_CLASS_COMM, 571 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1909, USB_CLASS_COMM,
564 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 572 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
565 .driver_info = (unsigned long) &cdc_info, 573 .driver_info = (unsigned long) &mbm_info,
566}, { 574}, {
567 /* Ericsson C3607w */ 575 /* Ericsson C3607w */
568 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1049, USB_CLASS_COMM, 576 USB_DEVICE_AND_INTERFACE_INFO(0x0bdb, 0x1049, USB_CLASS_COMM,
569 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 577 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
570 .driver_info = (unsigned long) &cdc_info, 578 .driver_info = (unsigned long) &mbm_info,
571}, { 579}, {
572 /* Toshiba F3507g */ 580 /* Toshiba F3507g */
573 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM, 581 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130b, USB_CLASS_COMM,
574 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 582 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
575 .driver_info = (unsigned long) &cdc_info, 583 .driver_info = (unsigned long) &mbm_info,
576}, { 584}, {
577 /* Toshiba F3607gw */ 585 /* Toshiba F3607gw */
578 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130c, USB_CLASS_COMM, 586 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x130c, USB_CLASS_COMM,
579 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 587 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
580 .driver_info = (unsigned long) &cdc_info, 588 .driver_info = (unsigned long) &mbm_info,
581}, { 589}, {
582 /* Toshiba F3607gw ver 2 */ 590 /* Toshiba F3607gw ver 2 */
583 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x1311, USB_CLASS_COMM, 591 USB_DEVICE_AND_INTERFACE_INFO(0x0930, 0x1311, USB_CLASS_COMM,
584 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 592 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
585 .driver_info = (unsigned long) &cdc_info, 593 .driver_info = (unsigned long) &mbm_info,
586}, { 594}, {
587 /* Dell F3507g */ 595 /* Dell F3507g */
588 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM, 596 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8147, USB_CLASS_COMM,
589 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 597 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
590 .driver_info = (unsigned long) &cdc_info, 598 .driver_info = (unsigned long) &mbm_info,
591}, { 599}, {
592 /* Dell F3607gw */ 600 /* Dell F3607gw */
593 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8183, USB_CLASS_COMM, 601 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8183, USB_CLASS_COMM,
594 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 602 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
595 .driver_info = (unsigned long) &cdc_info, 603 .driver_info = (unsigned long) &mbm_info,
596}, { 604}, {
597 /* Dell F3607gw ver 2 */ 605 /* Dell F3607gw ver 2 */
598 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8184, USB_CLASS_COMM, 606 USB_DEVICE_AND_INTERFACE_INFO(0x413c, 0x8184, USB_CLASS_COMM,
599 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE), 607 USB_CDC_SUBCLASS_MDLM, USB_CDC_PROTO_NONE),
600 .driver_info = (unsigned long) &cdc_info, 608 .driver_info = (unsigned long) &mbm_info,
601}, 609},
602 { }, // END 610 { }, // END
603}; 611};
diff --git a/drivers/net/usb/dm9601.c b/drivers/net/usb/dm9601.c
index a2b30a10064f..3d406f9b2f29 100644
--- a/drivers/net/usb/dm9601.c
+++ b/drivers/net/usb/dm9601.c
@@ -611,7 +611,7 @@ static int dm9601_link_reset(struct usbnet *dev)
611 611
612static const struct driver_info dm9601_info = { 612static const struct driver_info dm9601_info = {
613 .description = "Davicom DM9601 USB Ethernet", 613 .description = "Davicom DM9601 USB Ethernet",
614 .flags = FLAG_ETHER, 614 .flags = FLAG_ETHER | FLAG_LINK_INTR,
615 .bind = dm9601_bind, 615 .bind = dm9601_bind,
616 .rx_fixup = dm9601_rx_fixup, 616 .rx_fixup = dm9601_rx_fixup,
617 .tx_fixup = dm9601_tx_fixup, 617 .tx_fixup = dm9601_tx_fixup,
diff --git a/drivers/net/usb/kaweth.c b/drivers/net/usb/kaweth.c
index e391ef969c28..3b80e8d2d621 100644
--- a/drivers/net/usb/kaweth.c
+++ b/drivers/net/usb/kaweth.c
@@ -471,16 +471,7 @@ static int kaweth_reset(struct kaweth_device *kaweth)
471 int result; 471 int result;
472 472
473 dbg("kaweth_reset(%p)", kaweth); 473 dbg("kaweth_reset(%p)", kaweth);
474 result = kaweth_control(kaweth, 474 result = usb_reset_configuration(kaweth->dev);
475 usb_sndctrlpipe(kaweth->dev, 0),
476 USB_REQ_SET_CONFIGURATION,
477 0,
478 kaweth->dev->config[0].desc.bConfigurationValue,
479 0,
480 NULL,
481 0,
482 KAWETH_CONTROL_TIMEOUT);
483
484 mdelay(10); 475 mdelay(10);
485 476
486 dbg("kaweth_reset() returns %d.",result); 477 dbg("kaweth_reset() returns %d.",result);
diff --git a/drivers/net/usb/usbnet.c b/drivers/net/usb/usbnet.c
index ca5ca5ae061d..04f3f289e87c 100644
--- a/drivers/net/usb/usbnet.c
+++ b/drivers/net/usb/usbnet.c
@@ -1210,6 +1210,14 @@ static const struct net_device_ops usbnet_netdev_ops = {
1210 1210
1211// precondition: never called in_interrupt 1211// precondition: never called in_interrupt
1212 1212
1213static struct device_type wlan_type = {
1214 .name = "wlan",
1215};
1216
1217static struct device_type wwan_type = {
1218 .name = "wwan",
1219};
1220
1213int 1221int
1214usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod) 1222usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1215{ 1223{
@@ -1295,6 +1303,9 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1295 /* WLAN devices should always be named "wlan%d" */ 1303 /* WLAN devices should always be named "wlan%d" */
1296 if ((dev->driver_info->flags & FLAG_WLAN) != 0) 1304 if ((dev->driver_info->flags & FLAG_WLAN) != 0)
1297 strcpy(net->name, "wlan%d"); 1305 strcpy(net->name, "wlan%d");
1306 /* WWAN devices should always be named "wwan%d" */
1307 if ((dev->driver_info->flags & FLAG_WWAN) != 0)
1308 strcpy(net->name, "wwan%d");
1298 1309
1299 /* maybe the remote can't receive an Ethernet MTU */ 1310 /* maybe the remote can't receive an Ethernet MTU */
1300 if (net->mtu > (dev->hard_mtu - net->hard_header_len)) 1311 if (net->mtu > (dev->hard_mtu - net->hard_header_len))
@@ -1322,6 +1333,12 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1322 dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1); 1333 dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);
1323 1334
1324 SET_NETDEV_DEV(net, &udev->dev); 1335 SET_NETDEV_DEV(net, &udev->dev);
1336
1337 if ((dev->driver_info->flags & FLAG_WLAN) != 0)
1338 SET_NETDEV_DEVTYPE(net, &wlan_type);
1339 if ((dev->driver_info->flags & FLAG_WWAN) != 0)
1340 SET_NETDEV_DEVTYPE(net, &wwan_type);
1341
1325 status = register_netdev (net); 1342 status = register_netdev (net);
1326 if (status) 1343 if (status)
1327 goto out3; 1344 goto out3;
@@ -1335,9 +1352,11 @@ usbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
1335 // ok, it's ready to go. 1352 // ok, it's ready to go.
1336 usb_set_intfdata (udev, dev); 1353 usb_set_intfdata (udev, dev);
1337 1354
1338 // start as if the link is up
1339 netif_device_attach (net); 1355 netif_device_attach (net);
1340 1356
1357 if (dev->driver_info->flags & FLAG_LINK_INTR)
1358 netif_carrier_off(net);
1359
1341 return 0; 1360 return 0;
1342 1361
1343out3: 1362out3:
diff --git a/drivers/net/veth.c b/drivers/net/veth.c
index ade5b344f75d..2d657f2314cb 100644
--- a/drivers/net/veth.c
+++ b/drivers/net/veth.c
@@ -340,7 +340,7 @@ static int veth_validate(struct nlattr *tb[], struct nlattr *data[])
340 340
341static struct rtnl_link_ops veth_link_ops; 341static struct rtnl_link_ops veth_link_ops;
342 342
343static int veth_newlink(struct net_device *dev, 343static int veth_newlink(struct net *src_net, struct net_device *dev,
344 struct nlattr *tb[], struct nlattr *data[]) 344 struct nlattr *tb[], struct nlattr *data[])
345{ 345{
346 int err; 346 int err;
@@ -348,6 +348,7 @@ static int veth_newlink(struct net_device *dev,
348 struct veth_priv *priv; 348 struct veth_priv *priv;
349 char ifname[IFNAMSIZ]; 349 char ifname[IFNAMSIZ];
350 struct nlattr *peer_tb[IFLA_MAX + 1], **tbp; 350 struct nlattr *peer_tb[IFLA_MAX + 1], **tbp;
351 struct net *net;
351 352
352 /* 353 /*
353 * create and register peer first 354 * create and register peer first
@@ -380,14 +381,22 @@ static int veth_newlink(struct net_device *dev,
380 else 381 else
381 snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d"); 382 snprintf(ifname, IFNAMSIZ, DRV_NAME "%%d");
382 383
383 peer = rtnl_create_link(dev_net(dev), ifname, &veth_link_ops, tbp); 384 net = rtnl_link_get_net(src_net, tbp);
384 if (IS_ERR(peer)) 385 if (IS_ERR(net))
386 return PTR_ERR(net);
387
388 peer = rtnl_create_link(src_net, net, ifname, &veth_link_ops, tbp);
389 if (IS_ERR(peer)) {
390 put_net(net);
385 return PTR_ERR(peer); 391 return PTR_ERR(peer);
392 }
386 393
387 if (tbp[IFLA_ADDRESS] == NULL) 394 if (tbp[IFLA_ADDRESS] == NULL)
388 random_ether_addr(peer->dev_addr); 395 random_ether_addr(peer->dev_addr);
389 396
390 err = register_netdevice(peer); 397 err = register_netdevice(peer);
398 put_net(net);
399 net = NULL;
391 if (err < 0) 400 if (err < 0)
392 goto err_register_peer; 401 goto err_register_peer;
393 402
@@ -442,7 +451,7 @@ err_register_peer:
442 return err; 451 return err;
443} 452}
444 453
445static void veth_dellink(struct net_device *dev) 454static void veth_dellink(struct net_device *dev, struct list_head *head)
446{ 455{
447 struct veth_priv *priv; 456 struct veth_priv *priv;
448 struct net_device *peer; 457 struct net_device *peer;
@@ -450,8 +459,8 @@ static void veth_dellink(struct net_device *dev)
450 priv = netdev_priv(dev); 459 priv = netdev_priv(dev);
451 peer = priv->peer; 460 peer = priv->peer;
452 461
453 unregister_netdevice(dev); 462 unregister_netdevice_queue(dev, head);
454 unregister_netdevice(peer); 463 unregister_netdevice_queue(peer, head);
455} 464}
456 465
457static const struct nla_policy veth_policy[VETH_INFO_MAX + 1]; 466static const struct nla_policy veth_policy[VETH_INFO_MAX + 1];
diff --git a/drivers/net/via-rhine.c b/drivers/net/via-rhine.c
index 1fd70583be44..4535e89dfff1 100644
--- a/drivers/net/via-rhine.c
+++ b/drivers/net/via-rhine.c
@@ -1484,15 +1484,15 @@ static int rhine_rx(struct net_device *dev, int limit)
1484 } 1484 }
1485 } 1485 }
1486 } else { 1486 } else {
1487 struct sk_buff *skb; 1487 struct sk_buff *skb = NULL;
1488 /* Length should omit the CRC */ 1488 /* Length should omit the CRC */
1489 int pkt_len = data_size - 4; 1489 int pkt_len = data_size - 4;
1490 1490
1491 /* Check if the packet is long enough to accept without 1491 /* Check if the packet is long enough to accept without
1492 copying to a minimally-sized skbuff. */ 1492 copying to a minimally-sized skbuff. */
1493 if (pkt_len < rx_copybreak && 1493 if (pkt_len < rx_copybreak)
1494 (skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN)) != NULL) { 1494 skb = netdev_alloc_skb_ip_align(dev, pkt_len);
1495 skb_reserve(skb, NET_IP_ALIGN); /* 16 byte align the IP header */ 1495 if (skb) {
1496 pci_dma_sync_single_for_cpu(rp->pdev, 1496 pci_dma_sync_single_for_cpu(rp->pdev,
1497 rp->rx_skbuff_dma[entry], 1497 rp->rx_skbuff_dma[entry],
1498 rp->rx_buf_sz, 1498 rp->rx_buf_sz,
diff --git a/drivers/net/via-velocity.c b/drivers/net/via-velocity.c
index e04e5bee005c..158f411bd555 100644
--- a/drivers/net/via-velocity.c
+++ b/drivers/net/via-velocity.c
@@ -364,11 +364,6 @@ static int rx_copybreak = 200;
364module_param(rx_copybreak, int, 0644); 364module_param(rx_copybreak, int, 0644);
365MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames"); 365MODULE_PARM_DESC(rx_copybreak, "Copy breakpoint for copy-only-tiny-frames");
366 366
367#ifdef CONFIG_PM
368static DEFINE_SPINLOCK(velocity_dev_list_lock);
369static LIST_HEAD(velocity_dev_list);
370#endif
371
372/* 367/*
373 * Internal board variants. At the moment we have only one 368 * Internal board variants. At the moment we have only one
374 */ 369 */
@@ -417,14 +412,6 @@ static void __devexit velocity_remove1(struct pci_dev *pdev)
417 struct net_device *dev = pci_get_drvdata(pdev); 412 struct net_device *dev = pci_get_drvdata(pdev);
418 struct velocity_info *vptr = netdev_priv(dev); 413 struct velocity_info *vptr = netdev_priv(dev);
419 414
420#ifdef CONFIG_PM
421 unsigned long flags;
422
423 spin_lock_irqsave(&velocity_dev_list_lock, flags);
424 if (!list_empty(&velocity_dev_list))
425 list_del(&vptr->list);
426 spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
427#endif
428 unregister_netdev(dev); 415 unregister_netdev(dev);
429 iounmap(vptr->mac_regs); 416 iounmap(vptr->mac_regs);
430 pci_release_regions(pdev); 417 pci_release_regions(pdev);
@@ -1949,10 +1936,9 @@ static int velocity_rx_copy(struct sk_buff **rx_skb, int pkt_size,
1949 if (pkt_size < rx_copybreak) { 1936 if (pkt_size < rx_copybreak) {
1950 struct sk_buff *new_skb; 1937 struct sk_buff *new_skb;
1951 1938
1952 new_skb = netdev_alloc_skb(vptr->dev, pkt_size + 2); 1939 new_skb = netdev_alloc_skb_ip_align(vptr->dev, pkt_size);
1953 if (new_skb) { 1940 if (new_skb) {
1954 new_skb->ip_summed = rx_skb[0]->ip_summed; 1941 new_skb->ip_summed = rx_skb[0]->ip_summed;
1955 skb_reserve(new_skb, 2);
1956 skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size); 1942 skb_copy_from_linear_data(*rx_skb, new_skb->data, pkt_size);
1957 *rx_skb = new_skb; 1943 *rx_skb = new_skb;
1958 ret = 0; 1944 ret = 0;
@@ -2578,7 +2564,6 @@ static void __devinit velocity_init_info(struct pci_dev *pdev,
2578 vptr->tx.numq = info->txqueue; 2564 vptr->tx.numq = info->txqueue;
2579 vptr->multicast_limit = MCAM_SIZE; 2565 vptr->multicast_limit = MCAM_SIZE;
2580 spin_lock_init(&vptr->lock); 2566 spin_lock_init(&vptr->lock);
2581 INIT_LIST_HEAD(&vptr->list);
2582} 2567}
2583 2568
2584/** 2569/**
@@ -2777,15 +2762,6 @@ static int __devinit velocity_found1(struct pci_dev *pdev, const struct pci_devi
2777 /* and leave the chip powered down */ 2762 /* and leave the chip powered down */
2778 2763
2779 pci_set_power_state(pdev, PCI_D3hot); 2764 pci_set_power_state(pdev, PCI_D3hot);
2780#ifdef CONFIG_PM
2781 {
2782 unsigned long flags;
2783
2784 spin_lock_irqsave(&velocity_dev_list_lock, flags);
2785 list_add(&vptr->list, &velocity_dev_list);
2786 spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
2787 }
2788#endif
2789 velocity_nics++; 2765 velocity_nics++;
2790out: 2766out:
2791 return ret; 2767 return ret;
@@ -3241,20 +3217,10 @@ static int velocity_netdev_event(struct notifier_block *nb, unsigned long notifi
3241{ 3217{
3242 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr; 3218 struct in_ifaddr *ifa = (struct in_ifaddr *) ptr;
3243 struct net_device *dev = ifa->ifa_dev->dev; 3219 struct net_device *dev = ifa->ifa_dev->dev;
3244 struct velocity_info *vptr;
3245 unsigned long flags;
3246 3220
3247 if (dev_net(dev) != &init_net) 3221 if (dev_net(dev) == &init_net &&
3248 return NOTIFY_DONE; 3222 dev->netdev_ops == &velocity_netdev_ops)
3249 3223 velocity_get_ip(netdev_priv(dev));
3250 spin_lock_irqsave(&velocity_dev_list_lock, flags);
3251 list_for_each_entry(vptr, &velocity_dev_list, list) {
3252 if (vptr->dev == dev) {
3253 velocity_get_ip(vptr);
3254 break;
3255 }
3256 }
3257 spin_unlock_irqrestore(&velocity_dev_list_lock, flags);
3258 3224
3259 return NOTIFY_DONE; 3225 return NOTIFY_DONE;
3260} 3226}
diff --git a/drivers/net/via-velocity.h b/drivers/net/via-velocity.h
index 2f00c13ab502..ce894ffa7c91 100644
--- a/drivers/net/via-velocity.h
+++ b/drivers/net/via-velocity.h
@@ -1499,8 +1499,6 @@ struct velocity_opt {
1499#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx]) 1499#define GET_RD_BY_IDX(vptr, idx) (vptr->rd_ring[idx])
1500 1500
1501struct velocity_info { 1501struct velocity_info {
1502 struct list_head list;
1503
1504 struct pci_dev *pdev; 1502 struct pci_dev *pdev;
1505 struct net_device *dev; 1503 struct net_device *dev;
1506 1504
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index bb8b52d0d1ce..22a8ca5d67d5 100644
--- a/drivers/net/virtio_net.c
+++ b/drivers/net/virtio_net.c
@@ -283,13 +283,12 @@ static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
283 do { 283 do {
284 struct skb_vnet_hdr *hdr; 284 struct skb_vnet_hdr *hdr;
285 285
286 skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN); 286 skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
287 if (unlikely(!skb)) { 287 if (unlikely(!skb)) {
288 oom = true; 288 oom = true;
289 break; 289 break;
290 } 290 }
291 291
292 skb_reserve(skb, NET_IP_ALIGN);
293 skb_put(skb, MAX_PACKET_LEN); 292 skb_put(skb, MAX_PACKET_LEN);
294 293
295 hdr = skb_vnet_hdr(skb); 294 hdr = skb_vnet_hdr(skb);
@@ -344,14 +343,12 @@ static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
344 do { 343 do {
345 skb_frag_t *f; 344 skb_frag_t *f;
346 345
347 skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN); 346 skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
348 if (unlikely(!skb)) { 347 if (unlikely(!skb)) {
349 oom = true; 348 oom = true;
350 break; 349 break;
351 } 350 }
352 351
353 skb_reserve(skb, NET_IP_ALIGN);
354
355 f = &skb_shinfo(skb)->frags[0]; 352 f = &skb_shinfo(skb)->frags[0];
356 f->page = get_a_page(vi, gfp); 353 f->page = get_a_page(vi, gfp);
357 if (!f->page) { 354 if (!f->page) {
diff --git a/drivers/net/vmxnet3/vmxnet3_int.h b/drivers/net/vmxnet3/vmxnet3_int.h
index 3c0d70d58111..445081686d5d 100644
--- a/drivers/net/vmxnet3/vmxnet3_int.h
+++ b/drivers/net/vmxnet3/vmxnet3_int.h
@@ -27,16 +27,11 @@
27#ifndef _VMXNET3_INT_H 27#ifndef _VMXNET3_INT_H
28#define _VMXNET3_INT_H 28#define _VMXNET3_INT_H
29 29
30#include <linux/types.h>
31#include <linux/ethtool.h> 30#include <linux/ethtool.h>
32#include <linux/delay.h> 31#include <linux/delay.h>
33#include <linux/device.h>
34#include <linux/netdevice.h> 32#include <linux/netdevice.h>
35#include <linux/pci.h> 33#include <linux/pci.h>
36#include <linux/ethtool.h>
37#include <linux/compiler.h> 34#include <linux/compiler.h>
38#include <linux/module.h>
39#include <linux/moduleparam.h>
40#include <linux/slab.h> 35#include <linux/slab.h>
41#include <linux/spinlock.h> 36#include <linux/spinlock.h>
42#include <linux/ioport.h> 37#include <linux/ioport.h>
diff --git a/drivers/net/vxge/vxge-config.c b/drivers/net/vxge/vxge-config.c
index 9e94c4b0fb18..32a75fa935ed 100644
--- a/drivers/net/vxge/vxge-config.c
+++ b/drivers/net/vxge/vxge-config.c
@@ -356,10 +356,8 @@ __vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
356 356
357 switch (host_type) { 357 switch (host_type) {
358 case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION: 358 case VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION:
359 if (func_id == 0) { 359 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
360 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM | 360 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
361 VXGE_HW_DEVICE_ACCESS_RIGHT_SRPCIM;
362 }
363 break; 361 break;
364 case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION: 362 case VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION:
365 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM | 363 access_rights |= VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM |
@@ -382,6 +380,22 @@ __vxge_hw_device_access_rights_get(u32 host_type, u32 func_id)
382 return access_rights; 380 return access_rights;
383} 381}
384/* 382/*
383 * __vxge_hw_device_is_privilaged
384 * This routine checks if the device function is privilaged or not
385 */
386
387enum vxge_hw_status
388__vxge_hw_device_is_privilaged(u32 host_type, u32 func_id)
389{
390 if (__vxge_hw_device_access_rights_get(host_type,
391 func_id) &
392 VXGE_HW_DEVICE_ACCESS_RIGHT_MRPCIM)
393 return VXGE_HW_OK;
394 else
395 return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
396}
397
398/*
385 * __vxge_hw_device_host_info_get 399 * __vxge_hw_device_host_info_get
386 * This routine returns the host type assignments 400 * This routine returns the host type assignments
387 */ 401 */
@@ -446,220 +460,6 @@ __vxge_hw_verify_pci_e_info(struct __vxge_hw_device *hldev)
446 return VXGE_HW_OK; 460 return VXGE_HW_OK;
447} 461}
448 462
449enum vxge_hw_status
450__vxge_hw_device_is_privilaged(struct __vxge_hw_device *hldev)
451{
452 if ((hldev->host_type == VXGE_HW_NO_MR_NO_SR_NORMAL_FUNCTION ||
453 hldev->host_type == VXGE_HW_MR_NO_SR_VH0_BASE_FUNCTION ||
454 hldev->host_type == VXGE_HW_NO_MR_SR_VH0_FUNCTION0) &&
455 (hldev->func_id == 0))
456 return VXGE_HW_OK;
457 else
458 return VXGE_HW_ERR_PRIVILAGED_OPEARATION;
459}
460
461/*
462 * vxge_hw_wrr_rebalance - Rebalance the RX_WRR and KDFC_WRR calandars.
463 * Rebalance the RX_WRR and KDFC_WRR calandars.
464 */
465static enum
466vxge_hw_status vxge_hw_wrr_rebalance(struct __vxge_hw_device *hldev)
467{
468 u64 val64;
469 u32 wrr_states[VXGE_HW_WEIGHTED_RR_SERVICE_STATES];
470 u32 i, j, how_often = 1;
471 enum vxge_hw_status status = VXGE_HW_OK;
472
473 status = __vxge_hw_device_is_privilaged(hldev);
474 if (status != VXGE_HW_OK)
475 goto exit;
476
477 /* Reset the priorities assigned to the WRR arbitration
478 phases for the receive traffic */
479 for (i = 0; i < VXGE_HW_WRR_RING_COUNT; i++)
480 writeq(0, ((&hldev->mrpcim_reg->rx_w_round_robin_0) + i));
481
482 /* Reset the transmit FIFO servicing calendar for FIFOs */
483 for (i = 0; i < VXGE_HW_WRR_FIFO_COUNT; i++) {
484 writeq(0, ((&hldev->mrpcim_reg->kdfc_w_round_robin_0) + i));
485 writeq(0, ((&hldev->mrpcim_reg->kdfc_w_round_robin_20) + i));
486 }
487
488 /* Assign WRR priority 0 for all FIFOs */
489 for (i = 1; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
490 writeq(VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(0),
491 ((&hldev->mrpcim_reg->kdfc_fifo_0_ctrl) + i));
492
493 writeq(VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(0),
494 ((&hldev->mrpcim_reg->kdfc_fifo_17_ctrl) + i));
495 }
496
497 /* Reset to service non-offload doorbells */
498 writeq(0, &hldev->mrpcim_reg->kdfc_entry_type_sel_0);
499 writeq(0, &hldev->mrpcim_reg->kdfc_entry_type_sel_1);
500
501 /* Set priority 0 to all receive queues */
502 writeq(0, &hldev->mrpcim_reg->rx_queue_priority_0);
503 writeq(0, &hldev->mrpcim_reg->rx_queue_priority_1);
504 writeq(0, &hldev->mrpcim_reg->rx_queue_priority_2);
505
506 /* Initialize all the slots as unused */
507 for (i = 0; i < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; i++)
508 wrr_states[i] = -1;
509
510 /* Prepare the Fifo service states */
511 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
512
513 if (!hldev->config.vp_config[i].min_bandwidth)
514 continue;
515
516 how_often = VXGE_HW_VPATH_BANDWIDTH_MAX /
517 hldev->config.vp_config[i].min_bandwidth;
518 if (how_often) {
519
520 for (j = 0; j < VXGE_HW_WRR_FIFO_SERVICE_STATES;) {
521 if (wrr_states[j] == -1) {
522 wrr_states[j] = i;
523 /* Make sure each fifo is serviced
524 * atleast once */
525 if (i == j)
526 j += VXGE_HW_MAX_VIRTUAL_PATHS;
527 else
528 j += how_often;
529 } else
530 j++;
531 }
532 }
533 }
534
535 /* Fill the unused slots with 0 */
536 for (j = 0; j < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; j++) {
537 if (wrr_states[j] == -1)
538 wrr_states[j] = 0;
539 }
540
541 /* Assign WRR priority number for FIFOs */
542 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
543 writeq(VXGE_HW_KDFC_FIFO_0_CTRL_WRR_NUMBER(i),
544 ((&hldev->mrpcim_reg->kdfc_fifo_0_ctrl) + i));
545
546 writeq(VXGE_HW_KDFC_FIFO_17_CTRL_WRR_NUMBER(i),
547 ((&hldev->mrpcim_reg->kdfc_fifo_17_ctrl) + i));
548 }
549
550 /* Modify the servicing algorithm applied to the 3 types of doorbells.
551 i.e, none-offload, message and offload */
552 writeq(VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_0(0) |
553 VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_1(0) |
554 VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_2(0) |
555 VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_3(0) |
556 VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_4(1) |
557 VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_5(0) |
558 VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_6(0) |
559 VXGE_HW_KDFC_ENTRY_TYPE_SEL_0_NUMBER_7(0),
560 &hldev->mrpcim_reg->kdfc_entry_type_sel_0);
561
562 writeq(VXGE_HW_KDFC_ENTRY_TYPE_SEL_1_NUMBER_8(1),
563 &hldev->mrpcim_reg->kdfc_entry_type_sel_1);
564
565 for (i = 0, j = 0; i < VXGE_HW_WRR_FIFO_COUNT; i++) {
566
567 val64 = VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_0(wrr_states[j++]);
568 val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_1(wrr_states[j++]);
569 val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_2(wrr_states[j++]);
570 val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_3(wrr_states[j++]);
571 val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_4(wrr_states[j++]);
572 val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_5(wrr_states[j++]);
573 val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_6(wrr_states[j++]);
574 val64 |= VXGE_HW_KDFC_W_ROUND_ROBIN_0_NUMBER_7(wrr_states[j++]);
575
576 writeq(val64, (&hldev->mrpcim_reg->kdfc_w_round_robin_0 + i));
577 writeq(val64, (&hldev->mrpcim_reg->kdfc_w_round_robin_20 + i));
578 }
579
580 /* Set up the priorities assigned to receive queues */
581 writeq(VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_0(0) |
582 VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_1(1) |
583 VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_2(2) |
584 VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_3(3) |
585 VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_4(4) |
586 VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_5(5) |
587 VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_6(6) |
588 VXGE_HW_RX_QUEUE_PRIORITY_0_RX_Q_NUMBER_7(7),
589 &hldev->mrpcim_reg->rx_queue_priority_0);
590
591 writeq(VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_8(8) |
592 VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_9(9) |
593 VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_10(10) |
594 VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_11(11) |
595 VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_12(12) |
596 VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_13(13) |
597 VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_14(14) |
598 VXGE_HW_RX_QUEUE_PRIORITY_1_RX_Q_NUMBER_15(15),
599 &hldev->mrpcim_reg->rx_queue_priority_1);
600
601 writeq(VXGE_HW_RX_QUEUE_PRIORITY_2_RX_Q_NUMBER_16(16),
602 &hldev->mrpcim_reg->rx_queue_priority_2);
603
604 /* Initialize all the slots as unused */
605 for (i = 0; i < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; i++)
606 wrr_states[i] = -1;
607
608 /* Prepare the Ring service states */
609 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
610
611 if (!hldev->config.vp_config[i].min_bandwidth)
612 continue;
613
614 how_often = VXGE_HW_VPATH_BANDWIDTH_MAX /
615 hldev->config.vp_config[i].min_bandwidth;
616
617 if (how_often) {
618 for (j = 0; j < VXGE_HW_WRR_RING_SERVICE_STATES;) {
619 if (wrr_states[j] == -1) {
620 wrr_states[j] = i;
621 /* Make sure each ring is
622 * serviced atleast once */
623 if (i == j)
624 j += VXGE_HW_MAX_VIRTUAL_PATHS;
625 else
626 j += how_often;
627 } else
628 j++;
629 }
630 }
631 }
632
633 /* Fill the unused slots with 0 */
634 for (j = 0; j < VXGE_HW_WEIGHTED_RR_SERVICE_STATES; j++) {
635 if (wrr_states[j] == -1)
636 wrr_states[j] = 0;
637 }
638
639 for (i = 0, j = 0; i < VXGE_HW_WRR_RING_COUNT; i++) {
640 val64 = VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_0(
641 wrr_states[j++]);
642 val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_1(
643 wrr_states[j++]);
644 val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_2(
645 wrr_states[j++]);
646 val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_3(
647 wrr_states[j++]);
648 val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_4(
649 wrr_states[j++]);
650 val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_5(
651 wrr_states[j++]);
652 val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_6(
653 wrr_states[j++]);
654 val64 |= VXGE_HW_RX_W_ROUND_ROBIN_0_RX_W_PRIORITY_SS_7(
655 wrr_states[j++]);
656
657 writeq(val64, ((&hldev->mrpcim_reg->rx_w_round_robin_0) + i));
658 }
659exit:
660 return status;
661}
662
663/* 463/*
664 * __vxge_hw_device_initialize 464 * __vxge_hw_device_initialize
665 * Initialize Titan-V hardware. 465 * Initialize Titan-V hardware.
@@ -668,14 +468,14 @@ enum vxge_hw_status __vxge_hw_device_initialize(struct __vxge_hw_device *hldev)
668{ 468{
669 enum vxge_hw_status status = VXGE_HW_OK; 469 enum vxge_hw_status status = VXGE_HW_OK;
670 470
671 if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev)) { 471 if (VXGE_HW_OK == __vxge_hw_device_is_privilaged(hldev->host_type,
472 hldev->func_id)) {
672 /* Validate the pci-e link width and speed */ 473 /* Validate the pci-e link width and speed */
673 status = __vxge_hw_verify_pci_e_info(hldev); 474 status = __vxge_hw_verify_pci_e_info(hldev);
674 if (status != VXGE_HW_OK) 475 if (status != VXGE_HW_OK)
675 goto exit; 476 goto exit;
676 } 477 }
677 478
678 vxge_hw_wrr_rebalance(hldev);
679exit: 479exit:
680 return status; 480 return status;
681} 481}
@@ -953,7 +753,8 @@ vxge_hw_mrpcim_stats_access(struct __vxge_hw_device *hldev,
953 u64 val64; 753 u64 val64;
954 enum vxge_hw_status status = VXGE_HW_OK; 754 enum vxge_hw_status status = VXGE_HW_OK;
955 755
956 status = __vxge_hw_device_is_privilaged(hldev); 756 status = __vxge_hw_device_is_privilaged(hldev->host_type,
757 hldev->func_id);
957 if (status != VXGE_HW_OK) 758 if (status != VXGE_HW_OK)
958 goto exit; 759 goto exit;
959 760
@@ -990,7 +791,8 @@ vxge_hw_device_xmac_aggr_stats_get(struct __vxge_hw_device *hldev, u32 port,
990 791
991 val64 = (u64 *)aggr_stats; 792 val64 = (u64 *)aggr_stats;
992 793
993 status = __vxge_hw_device_is_privilaged(hldev); 794 status = __vxge_hw_device_is_privilaged(hldev->host_type,
795 hldev->func_id);
994 if (status != VXGE_HW_OK) 796 if (status != VXGE_HW_OK)
995 goto exit; 797 goto exit;
996 798
@@ -1023,7 +825,8 @@ vxge_hw_device_xmac_port_stats_get(struct __vxge_hw_device *hldev, u32 port,
1023 u32 offset = 0x0; 825 u32 offset = 0x0;
1024 val64 = (u64 *) port_stats; 826 val64 = (u64 *) port_stats;
1025 827
1026 status = __vxge_hw_device_is_privilaged(hldev); 828 status = __vxge_hw_device_is_privilaged(hldev->host_type,
829 hldev->func_id);
1027 if (status != VXGE_HW_OK) 830 if (status != VXGE_HW_OK)
1028 goto exit; 831 goto exit;
1029 832
@@ -1221,7 +1024,8 @@ enum vxge_hw_status vxge_hw_device_setpause_data(struct __vxge_hw_device *hldev,
1221 goto exit; 1024 goto exit;
1222 } 1025 }
1223 1026
1224 status = __vxge_hw_device_is_privilaged(hldev); 1027 status = __vxge_hw_device_is_privilaged(hldev->host_type,
1028 hldev->func_id);
1225 if (status != VXGE_HW_OK) 1029 if (status != VXGE_HW_OK)
1226 goto exit; 1030 goto exit;
1227 1031
@@ -2353,6 +2157,28 @@ exit:
2353} 2157}
2354 2158
2355/* 2159/*
2160 * vxge_hw_vpath_strip_fcs_check - Check for FCS strip.
2161 */
2162enum vxge_hw_status
2163vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask)
2164{
2165 struct vxge_hw_vpmgmt_reg __iomem *vpmgmt_reg;
2166 enum vxge_hw_status status = VXGE_HW_OK;
2167 int i = 0, j = 0;
2168
2169 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
2170 if (!((vpath_mask) & vxge_mBIT(i)))
2171 continue;
2172 vpmgmt_reg = hldev->vpmgmt_reg[i];
2173 for (j = 0; j < VXGE_HW_MAC_MAX_MAC_PORT_ID; j++) {
2174 if (readq(&vpmgmt_reg->rxmac_cfg0_port_vpmgmt_clone[j])
2175 & VXGE_HW_RXMAC_CFG0_PORT_VPMGMT_CLONE_STRIP_FCS)
2176 return VXGE_HW_FAIL;
2177 }
2178 }
2179 return status;
2180}
2181/*
2356 * vxge_hw_mgmt_reg_Write - Write Titan register. 2182 * vxge_hw_mgmt_reg_Write - Write Titan register.
2357 */ 2183 */
2358enum vxge_hw_status 2184enum vxge_hw_status
@@ -4056,6 +3882,30 @@ __vxge_hw_vpath_tim_configure(struct __vxge_hw_device *hldev, u32 vp_id)
4056 return status; 3882 return status;
4057} 3883}
4058 3884
3885void
3886vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id)
3887{
3888 struct __vxge_hw_virtualpath *vpath;
3889 struct vxge_hw_vpath_reg __iomem *vp_reg;
3890 struct vxge_hw_vp_config *config;
3891 u64 val64;
3892
3893 vpath = &hldev->virtual_paths[vp_id];
3894 vp_reg = vpath->vp_reg;
3895 config = vpath->vp_config;
3896
3897 if (config->fifo.enable == VXGE_HW_FIFO_ENABLE) {
3898 val64 = readq(&vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3899
3900 if (config->tti.timer_ci_en != VXGE_HW_TIM_TIMER_CI_ENABLE) {
3901 config->tti.timer_ci_en = VXGE_HW_TIM_TIMER_CI_ENABLE;
3902 val64 |= VXGE_HW_TIM_CFG1_INT_NUM_TIMER_CI;
3903 writeq(val64,
3904 &vp_reg->tim_cfg1_int_num[VXGE_HW_VPATH_INTR_TX]);
3905 }
3906 }
3907 return;
3908}
4059/* 3909/*
4060 * __vxge_hw_vpath_initialize 3910 * __vxge_hw_vpath_initialize
4061 * This routine is the final phase of init which initializes the 3911 * This routine is the final phase of init which initializes the
@@ -4098,8 +3948,6 @@ __vxge_hw_vpath_initialize(struct __vxge_hw_device *hldev, u32 vp_id)
4098 if (status != VXGE_HW_OK) 3948 if (status != VXGE_HW_OK)
4099 goto exit; 3949 goto exit;
4100 3950
4101 writeq(0, &vp_reg->gendma_int);
4102
4103 val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl); 3951 val64 = readq(&vp_reg->rtdma_rd_optimization_ctrl);
4104 3952
4105 /* Get MRRS value from device control */ 3953 /* Get MRRS value from device control */
diff --git a/drivers/net/vxge/vxge-config.h b/drivers/net/vxge/vxge-config.h
index 3e94f0ce0900..e7877df092f3 100644
--- a/drivers/net/vxge/vxge-config.h
+++ b/drivers/net/vxge/vxge-config.h
@@ -2201,6 +2201,8 @@ __vxge_hw_vpath_func_id_get(
2201enum vxge_hw_status 2201enum vxge_hw_status
2202__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath); 2202__vxge_hw_vpath_reset_check(struct __vxge_hw_virtualpath *vpath);
2203 2203
2204enum vxge_hw_status
2205vxge_hw_vpath_strip_fcs_check(struct __vxge_hw_device *hldev, u64 vpath_mask);
2204/** 2206/**
2205 * vxge_debug 2207 * vxge_debug
2206 * @level: level of debug verbosity. 2208 * @level: level of debug verbosity.
diff --git a/drivers/net/vxge/vxge-main.c b/drivers/net/vxge/vxge-main.c
index 068d7a9d3e36..e21358e82c74 100644
--- a/drivers/net/vxge/vxge-main.c
+++ b/drivers/net/vxge/vxge-main.c
@@ -2435,7 +2435,6 @@ static int vxge_add_isr(struct vxgedev *vdev)
2435 int ret = 0; 2435 int ret = 0;
2436#ifdef CONFIG_PCI_MSI 2436#ifdef CONFIG_PCI_MSI
2437 int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0; 2437 int vp_idx = 0, intr_idx = 0, intr_cnt = 0, msix_idx = 0, irq_req = 0;
2438 u64 function_mode = vdev->config.device_hw_info.function_mode;
2439 int pci_fun = PCI_FUNC(vdev->pdev->devfn); 2438 int pci_fun = PCI_FUNC(vdev->pdev->devfn);
2440 2439
2441 if (vdev->config.intr_type == MSI_X) 2440 if (vdev->config.intr_type == MSI_X)
@@ -2444,20 +2443,9 @@ static int vxge_add_isr(struct vxgedev *vdev)
2444 if (ret) { 2443 if (ret) {
2445 vxge_debug_init(VXGE_ERR, 2444 vxge_debug_init(VXGE_ERR,
2446 "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME); 2445 "%s: Enabling MSI-X Failed", VXGE_DRIVER_NAME);
2447 if ((function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) && 2446 vxge_debug_init(VXGE_ERR,
2448 test_and_set_bit(__VXGE_STATE_CARD_UP, 2447 "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
2449 &driver_config->inta_dev_open)) 2448 vdev->config.intr_type = INTA;
2450 return VXGE_HW_FAIL;
2451 else {
2452 vxge_debug_init(VXGE_ERR,
2453 "%s: Defaulting to INTA", VXGE_DRIVER_NAME);
2454 vdev->config.intr_type = INTA;
2455 vxge_hw_device_set_intr_type(vdev->devh,
2456 VXGE_HW_INTR_MODE_IRQLINE);
2457 vxge_close_vpaths(vdev, 1);
2458 vdev->no_of_vpath = 1;
2459 vdev->stats.vpaths_open = 1;
2460 }
2461 } 2449 }
2462 2450
2463 if (vdev->config.intr_type == MSI_X) { 2451 if (vdev->config.intr_type == MSI_X) {
@@ -2505,24 +2493,11 @@ static int vxge_add_isr(struct vxgedev *vdev)
2505 "%s: MSIX - %d Registration failed", 2493 "%s: MSIX - %d Registration failed",
2506 vdev->ndev->name, intr_cnt); 2494 vdev->ndev->name, intr_cnt);
2507 vxge_rem_msix_isr(vdev); 2495 vxge_rem_msix_isr(vdev);
2508 if ((function_mode == 2496 vdev->config.intr_type = INTA;
2509 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) && 2497 vxge_debug_init(VXGE_ERR,
2510 test_and_set_bit(__VXGE_STATE_CARD_UP, 2498 "%s: Defaulting to INTA"
2511 &driver_config->inta_dev_open)) 2499 , vdev->ndev->name);
2512 return VXGE_HW_FAIL;
2513 else {
2514 vxge_hw_device_set_intr_type(
2515 vdev->devh,
2516 VXGE_HW_INTR_MODE_IRQLINE);
2517 vdev->config.intr_type = INTA;
2518 vxge_debug_init(VXGE_ERR,
2519 "%s: Defaulting to INTA"
2520 , vdev->ndev->name);
2521 vxge_close_vpaths(vdev, 1);
2522 vdev->no_of_vpath = 1;
2523 vdev->stats.vpaths_open = 1;
2524 goto INTA_MODE; 2500 goto INTA_MODE;
2525 }
2526 } 2501 }
2527 2502
2528 if (irq_req) { 2503 if (irq_req) {
@@ -2555,23 +2530,11 @@ static int vxge_add_isr(struct vxgedev *vdev)
2555 "%s: MSIX - %d Registration failed", 2530 "%s: MSIX - %d Registration failed",
2556 vdev->ndev->name, intr_cnt); 2531 vdev->ndev->name, intr_cnt);
2557 vxge_rem_msix_isr(vdev); 2532 vxge_rem_msix_isr(vdev);
2558 if ((function_mode == 2533 vdev->config.intr_type = INTA;
2559 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) && 2534 vxge_debug_init(VXGE_ERR,
2560 test_and_set_bit(__VXGE_STATE_CARD_UP, 2535 "%s: Defaulting to INTA",
2561 &driver_config->inta_dev_open)) 2536 vdev->ndev->name);
2562 return VXGE_HW_FAIL;
2563 else {
2564 vxge_hw_device_set_intr_type(vdev->devh,
2565 VXGE_HW_INTR_MODE_IRQLINE);
2566 vdev->config.intr_type = INTA;
2567 vxge_debug_init(VXGE_ERR,
2568 "%s: Defaulting to INTA",
2569 vdev->ndev->name);
2570 vxge_close_vpaths(vdev, 1);
2571 vdev->no_of_vpath = 1;
2572 vdev->stats.vpaths_open = 1;
2573 goto INTA_MODE; 2537 goto INTA_MODE;
2574 }
2575 } 2538 }
2576 2539
2577 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle, 2540 vxge_hw_vpath_msix_unmask(vdev->vpaths[vp_idx].handle,
@@ -2584,6 +2547,10 @@ INTA_MODE:
2584 snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name); 2547 snprintf(vdev->desc[0], VXGE_INTR_STRLEN, "%s:vxge", vdev->ndev->name);
2585 2548
2586 if (vdev->config.intr_type == INTA) { 2549 if (vdev->config.intr_type == INTA) {
2550 vxge_hw_device_set_intr_type(vdev->devh,
2551 VXGE_HW_INTR_MODE_IRQLINE);
2552 vxge_hw_vpath_tti_ci_set(vdev->devh,
2553 vdev->vpaths[0].device_id);
2587 ret = request_irq((int) vdev->pdev->irq, 2554 ret = request_irq((int) vdev->pdev->irq,
2588 vxge_isr_napi, 2555 vxge_isr_napi,
2589 IRQF_SHARED, vdev->desc[0], vdev); 2556 IRQF_SHARED, vdev->desc[0], vdev);
@@ -2688,13 +2655,6 @@ vxge_open(struct net_device *dev)
2688 * initialized */ 2655 * initialized */
2689 netif_carrier_off(dev); 2656 netif_carrier_off(dev);
2690 2657
2691 /* Check for another device already opn with INTA */
2692 if ((function_mode == VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION) &&
2693 test_bit(__VXGE_STATE_CARD_UP, &driver_config->inta_dev_open)) {
2694 ret = -EPERM;
2695 goto out0;
2696 }
2697
2698 /* Open VPATHs */ 2658 /* Open VPATHs */
2699 status = vxge_open_vpaths(vdev); 2659 status = vxge_open_vpaths(vdev);
2700 if (status != VXGE_HW_OK) { 2660 if (status != VXGE_HW_OK) {
@@ -2983,7 +2943,6 @@ int do_vxge_close(struct net_device *dev, int do_io)
2983 vxge_debug_entryexit(VXGE_TRACE, 2943 vxge_debug_entryexit(VXGE_TRACE,
2984 "%s: %s:%d Exiting...", dev->name, __func__, __LINE__); 2944 "%s: %s:%d Exiting...", dev->name, __func__, __LINE__);
2985 2945
2986 clear_bit(__VXGE_STATE_CARD_UP, &driver_config->inta_dev_open);
2987 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state); 2946 clear_bit(__VXGE_STATE_RESET_CARD, &vdev->state);
2988 2947
2989 return 0; 2948 return 0;
@@ -3653,11 +3612,12 @@ static int __devinit vxge_config_vpaths(
3653 device_config->vp_config[i].fifo.enable = 3612 device_config->vp_config[i].fifo.enable =
3654 VXGE_HW_FIFO_ENABLE; 3613 VXGE_HW_FIFO_ENABLE;
3655 device_config->vp_config[i].fifo.max_frags = 3614 device_config->vp_config[i].fifo.max_frags =
3656 MAX_SKB_FRAGS; 3615 MAX_SKB_FRAGS + 1;
3657 device_config->vp_config[i].fifo.memblock_size = 3616 device_config->vp_config[i].fifo.memblock_size =
3658 VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE; 3617 VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE;
3659 3618
3660 txdl_size = MAX_SKB_FRAGS * sizeof(struct vxge_hw_fifo_txd); 3619 txdl_size = device_config->vp_config[i].fifo.max_frags *
3620 sizeof(struct vxge_hw_fifo_txd);
3661 txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size; 3621 txdl_per_memblock = VXGE_HW_MIN_FIFO_MEMBLOCK_SIZE / txdl_size;
3662 3622
3663 device_config->vp_config[i].fifo.fifo_blocks = 3623 device_config->vp_config[i].fifo.fifo_blocks =
@@ -4088,9 +4048,10 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4088 driver_config->config_dev_cnt = 0; 4048 driver_config->config_dev_cnt = 0;
4089 driver_config->total_dev_cnt = 0; 4049 driver_config->total_dev_cnt = 0;
4090 driver_config->g_no_cpus = 0; 4050 driver_config->g_no_cpus = 0;
4091 driver_config->vpath_per_dev = max_config_vpath;
4092 } 4051 }
4093 4052
4053 driver_config->vpath_per_dev = max_config_vpath;
4054
4094 driver_config->total_dev_cnt++; 4055 driver_config->total_dev_cnt++;
4095 if (++driver_config->config_dev_cnt > max_config_dev) { 4056 if (++driver_config->config_dev_cnt > max_config_dev) {
4096 ret = 0; 4057 ret = 0;
@@ -4243,6 +4204,15 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4243 goto _exit3; 4204 goto _exit3;
4244 } 4205 }
4245 4206
4207 /* if FCS stripping is not disabled in MAC fail driver load */
4208 if (vxge_hw_vpath_strip_fcs_check(hldev, vpath_mask) != VXGE_HW_OK) {
4209 vxge_debug_init(VXGE_ERR,
4210 "%s: FCS stripping is not disabled in MAC"
4211 " failing driver load", VXGE_DRIVER_NAME);
4212 ret = -EINVAL;
4213 goto _exit4;
4214 }
4215
4246 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL); 4216 vxge_hw_device_debug_set(hldev, VXGE_ERR, VXGE_COMPONENT_LL);
4247 4217
4248 /* set private device info */ 4218 /* set private device info */
@@ -4387,6 +4357,27 @@ vxge_probe(struct pci_dev *pdev, const struct pci_device_id *pre)
4387 } 4357 }
4388 4358
4389 kfree(device_config); 4359 kfree(device_config);
4360
4361 /*
4362 * INTA is shared in multi-function mode. This is unlike the INTA
4363 * implementation in MR mode, where each VH has its own INTA message.
4364 * - INTA is masked (disabled) as long as at least one function sets
4365 * its TITAN_MASK_ALL_INT.ALARM bit.
4366 * - INTA is unmasked (enabled) when all enabled functions have cleared
4367 * their own TITAN_MASK_ALL_INT.ALARM bit.
4368 * The TITAN_MASK_ALL_INT ALARM & TRAFFIC bits are cleared on power up.
4369 * Though this driver leaves the top level interrupts unmasked while
4370 * leaving the required module interrupt bits masked on exit, there
4371 * could be a rougue driver around that does not follow this procedure
4372 * resulting in a failure to generate interrupts. The following code is
4373 * present to prevent such a failure.
4374 */
4375
4376 if (ll_config.device_hw_info.function_mode ==
4377 VXGE_HW_FUNCTION_MODE_MULTI_FUNCTION)
4378 if (vdev->config.intr_type == INTA)
4379 vxge_hw_device_unmask_all(hldev);
4380
4390 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...", 4381 vxge_debug_entryexit(VXGE_TRACE, "%s: %s:%d Exiting...",
4391 vdev->ndev->name, __func__, __LINE__); 4382 vdev->ndev->name, __func__, __LINE__);
4392 4383
diff --git a/drivers/net/vxge/vxge-main.h b/drivers/net/vxge/vxge-main.h
index 9c36b3a9a63d..7c83ba4be9d7 100644
--- a/drivers/net/vxge/vxge-main.h
+++ b/drivers/net/vxge/vxge-main.h
@@ -112,7 +112,6 @@ enum vxge_mac_addr_state {
112struct vxge_drv_config { 112struct vxge_drv_config {
113 int config_dev_cnt; 113 int config_dev_cnt;
114 int total_dev_cnt; 114 int total_dev_cnt;
115 unsigned long inta_dev_open;
116 int g_no_cpus; 115 int g_no_cpus;
117 unsigned int vpath_per_dev; 116 unsigned int vpath_per_dev;
118}; 117};
diff --git a/drivers/net/vxge/vxge-reg.h b/drivers/net/vxge/vxge-reg.h
index 9a3b823e08d4..9a0cf8eaa328 100644
--- a/drivers/net/vxge/vxge-reg.h
+++ b/drivers/net/vxge/vxge-reg.h
@@ -4326,10 +4326,6 @@ struct vxge_hw_vpath_reg {
4326/*0x011e0*/ u64 umq_bwr_init_byte; 4326/*0x011e0*/ u64 umq_bwr_init_byte;
4327#define VXGE_HW_UMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32) 4327#define VXGE_HW_UMQ_BWR_INIT_BYTE_COUNT(val) vxge_vBIT(val, 0, 32)
4328/*0x011e8*/ u64 gendma_int; 4328/*0x011e8*/ u64 gendma_int;
4329#define VXGE_HW_GENDMA_INT_IMMED_ENABLE vxge_mBIT(6)
4330#define VXGE_HW_GENDMA_INT_EVENT_ENABLE vxge_mBIT(7)
4331#define VXGE_HW_GENDMA_INT_NUMBER(val) vxge_vBIT(val, 9, 7)
4332#define VXGE_HW_GENDMA_INT_BITMAP(val) vxge_vBIT(val, 16, 16)
4333/*0x011f0*/ u64 umqdmq_ir_init_notify; 4329/*0x011f0*/ u64 umqdmq_ir_init_notify;
4334#define VXGE_HW_UMQDMQ_IR_INIT_NOTIFY_PULSE vxge_mBIT(3) 4330#define VXGE_HW_UMQDMQ_IR_INIT_NOTIFY_PULSE vxge_mBIT(3)
4335/*0x011f8*/ u64 dmq_init_notify; 4331/*0x011f8*/ u64 dmq_init_notify;
diff --git a/drivers/net/vxge/vxge-traffic.c b/drivers/net/vxge/vxge-traffic.c
index fe3ae518c69c..61ce754fa9d0 100644
--- a/drivers/net/vxge/vxge-traffic.c
+++ b/drivers/net/vxge/vxge-traffic.c
@@ -295,6 +295,8 @@ void vxge_hw_device_intr_enable(struct __vxge_hw_device *hldev)
295 u64 val64; 295 u64 val64;
296 u32 val32; 296 u32 val32;
297 297
298 vxge_hw_device_mask_all(hldev);
299
298 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) { 300 for (i = 0; i < VXGE_HW_MAX_VIRTUAL_PATHS; i++) {
299 301
300 if (!(hldev->vpaths_deployed & vxge_mBIT(i))) 302 if (!(hldev->vpaths_deployed & vxge_mBIT(i)))
@@ -1232,7 +1234,7 @@ void vxge_hw_fifo_txdl_post(struct __vxge_hw_fifo *fifo, void *txdlh)
1232 vxge_hw_channel_dtr_post(&fifo->channel, txdlh); 1234 vxge_hw_channel_dtr_post(&fifo->channel, txdlh);
1233 1235
1234 __vxge_hw_non_offload_db_post(fifo, 1236 __vxge_hw_non_offload_db_post(fifo,
1235 (u64)(size_t)txdl_priv->dma_addr, 1237 (u64)txdl_priv->dma_addr,
1236 txdl_priv->frags - 1, 1238 txdl_priv->frags - 1,
1237 fifo->no_snoop_bits); 1239 fifo->no_snoop_bits);
1238 1240
diff --git a/drivers/net/vxge/vxge-traffic.h b/drivers/net/vxge/vxge-traffic.h
index 461742b4442b..861c853e3e84 100644
--- a/drivers/net/vxge/vxge-traffic.h
+++ b/drivers/net/vxge/vxge-traffic.h
@@ -2389,6 +2389,8 @@ vxge_hw_channel_dtr_free(struct __vxge_hw_channel *channel, void *dtrh);
2389 2389
2390int 2390int
2391vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel); 2391vxge_hw_channel_dtr_count(struct __vxge_hw_channel *channel);
2392void
2393vxge_hw_vpath_tti_ci_set(struct __vxge_hw_device *hldev, u32 vp_id);
2392 2394
2393/* ========================== PRIVATE API ================================= */ 2395/* ========================== PRIVATE API ================================= */
2394 2396
diff --git a/drivers/net/vxge/vxge-version.h b/drivers/net/vxge/vxge-version.h
index 8fbce7552035..77c2a754b7b8 100644
--- a/drivers/net/vxge/vxge-version.h
+++ b/drivers/net/vxge/vxge-version.h
@@ -17,7 +17,7 @@
17 17
18#define VXGE_VERSION_MAJOR "2" 18#define VXGE_VERSION_MAJOR "2"
19#define VXGE_VERSION_MINOR "0" 19#define VXGE_VERSION_MINOR "0"
20#define VXGE_VERSION_FIX "5" 20#define VXGE_VERSION_FIX "6"
21#define VXGE_VERSION_BUILD "18053" 21#define VXGE_VERSION_BUILD "18937"
22#define VXGE_VERSION_FOR "k" 22#define VXGE_VERSION_FOR "k"
23#endif 23#endif
diff --git a/drivers/net/wan/cosa.c b/drivers/net/wan/cosa.c
index 66360a2a14c2..2eceb1a24df2 100644
--- a/drivers/net/wan/cosa.c
+++ b/drivers/net/wan/cosa.c
@@ -296,8 +296,8 @@ static ssize_t cosa_write(struct file *file,
296static unsigned int cosa_poll(struct file *file, poll_table *poll); 296static unsigned int cosa_poll(struct file *file, poll_table *poll);
297static int cosa_open(struct inode *inode, struct file *file); 297static int cosa_open(struct inode *inode, struct file *file);
298static int cosa_release(struct inode *inode, struct file *file); 298static int cosa_release(struct inode *inode, struct file *file);
299static int cosa_chardev_ioctl(struct inode *inode, struct file *file, 299static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
300 unsigned int cmd, unsigned long arg); 300 unsigned long arg);
301#ifdef COSA_FASYNC_WORKING 301#ifdef COSA_FASYNC_WORKING
302static int cosa_fasync(struct inode *inode, struct file *file, int on); 302static int cosa_fasync(struct inode *inode, struct file *file, int on);
303#endif 303#endif
@@ -308,7 +308,7 @@ static const struct file_operations cosa_fops = {
308 .read = cosa_read, 308 .read = cosa_read,
309 .write = cosa_write, 309 .write = cosa_write,
310 .poll = cosa_poll, 310 .poll = cosa_poll,
311 .ioctl = cosa_chardev_ioctl, 311 .unlocked_ioctl = cosa_chardev_ioctl,
312 .open = cosa_open, 312 .open = cosa_open,
313 .release = cosa_release, 313 .release = cosa_release,
314#ifdef COSA_FASYNC_WORKING 314#ifdef COSA_FASYNC_WORKING
@@ -1203,12 +1203,18 @@ static int cosa_net_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1203 return hdlc_ioctl(dev, ifr, cmd); 1203 return hdlc_ioctl(dev, ifr, cmd);
1204} 1204}
1205 1205
1206static int cosa_chardev_ioctl(struct inode *inode, struct file *file, 1206static long cosa_chardev_ioctl(struct file *file, unsigned int cmd,
1207 unsigned int cmd, unsigned long arg) 1207 unsigned long arg)
1208{ 1208{
1209 struct channel_data *channel = file->private_data; 1209 struct channel_data *channel = file->private_data;
1210 struct cosa_data *cosa = channel->cosa; 1210 struct cosa_data *cosa;
1211 return cosa_ioctl_common(cosa, channel, cmd, arg); 1211 long ret;
1212
1213 lock_kernel();
1214 cosa = channel->cosa;
1215 ret = cosa_ioctl_common(cosa, channel, cmd, arg);
1216 unlock_kernel();
1217 return ret;
1212} 1218}
1213 1219
1214 1220
diff --git a/drivers/net/wan/x25_asy.c b/drivers/net/wan/x25_asy.c
index 27945049c9e1..3c325d77939b 100644
--- a/drivers/net/wan/x25_asy.c
+++ b/drivers/net/wan/x25_asy.c
@@ -33,6 +33,7 @@
33#include <linux/lapb.h> 33#include <linux/lapb.h>
34#include <linux/init.h> 34#include <linux/init.h>
35#include <linux/rtnetlink.h> 35#include <linux/rtnetlink.h>
36#include <linux/compat.h>
36#include "x25_asy.h" 37#include "x25_asy.h"
37 38
38#include <net/x25device.h> 39#include <net/x25device.h>
@@ -705,6 +706,21 @@ static int x25_asy_ioctl(struct tty_struct *tty, struct file *file,
705 } 706 }
706} 707}
707 708
709#ifdef CONFIG_COMPAT
710static long x25_asy_compat_ioctl(struct tty_struct *tty, struct file *file,
711 unsigned int cmd, unsigned long arg)
712{
713 switch (cmd) {
714 case SIOCGIFNAME:
715 case SIOCSIFHWADDR:
716 return x25_asy_ioctl(tty, file, cmd,
717 (unsigned long)compat_ptr(arg));
718 }
719
720 return -ENOIOCTLCMD;
721}
722#endif
723
708static int x25_asy_open_dev(struct net_device *dev) 724static int x25_asy_open_dev(struct net_device *dev)
709{ 725{
710 struct x25_asy *sl = netdev_priv(dev); 726 struct x25_asy *sl = netdev_priv(dev);
@@ -754,6 +770,9 @@ static struct tty_ldisc_ops x25_ldisc = {
754 .open = x25_asy_open_tty, 770 .open = x25_asy_open_tty,
755 .close = x25_asy_close_tty, 771 .close = x25_asy_close_tty,
756 .ioctl = x25_asy_ioctl, 772 .ioctl = x25_asy_ioctl,
773#ifdef CONFIG_COMPAT
774 .compat_ioctl = x25_asy_compat_ioctl,
775#endif
757 .receive_buf = x25_asy_receive_buf, 776 .receive_buf = x25_asy_receive_buf,
758 .write_wakeup = x25_asy_write_wakeup, 777 .write_wakeup = x25_asy_write_wakeup,
759}; 778};
diff --git a/drivers/net/wimax/i2400m/Kconfig b/drivers/net/wimax/i2400m/Kconfig
index d623b3d99a4b..3f703384295e 100644
--- a/drivers/net/wimax/i2400m/Kconfig
+++ b/drivers/net/wimax/i2400m/Kconfig
@@ -31,6 +31,14 @@ config WIMAX_I2400M_SDIO
31 31
32 If unsure, it is safe to select M (module). 32 If unsure, it is safe to select M (module).
33 33
34config WIMAX_IWMC3200_SDIO
35 bool "Intel Wireless Multicom WiMAX Connection 3200 over SDIO"
36 depends on WIMAX_I2400M_SDIO
37 select IWMC3200TOP
38 help
39 Select if you have a device based on the Intel Multicom WiMAX
40 Connection 3200 over SDIO.
41
34config WIMAX_I2400M_DEBUG_LEVEL 42config WIMAX_I2400M_DEBUG_LEVEL
35 int "WiMAX i2400m debug level" 43 int "WiMAX i2400m debug level"
36 depends on WIMAX_I2400M 44 depends on WIMAX_I2400M
diff --git a/drivers/net/wimax/i2400m/control.c b/drivers/net/wimax/i2400m/control.c
index 07308686dbcf..944945540391 100644
--- a/drivers/net/wimax/i2400m/control.c
+++ b/drivers/net/wimax/i2400m/control.c
@@ -54,7 +54,7 @@
54 * i2400m_set_init_config() 54 * i2400m_set_init_config()
55 * i2400m_cmd_get_state() 55 * i2400m_cmd_get_state()
56 * i2400m_dev_shutdown() Called by i2400m_dev_stop() 56 * i2400m_dev_shutdown() Called by i2400m_dev_stop()
57 * i2400m->bus_reset() 57 * i2400m_reset()
58 * 58 *
59 * i2400m_{cmd,get,set}_*() 59 * i2400m_{cmd,get,set}_*()
60 * i2400m_msg_to_dev() 60 * i2400m_msg_to_dev()
@@ -82,6 +82,13 @@
82#define D_SUBMODULE control 82#define D_SUBMODULE control
83#include "debug-levels.h" 83#include "debug-levels.h"
84 84
85int i2400m_passive_mode; /* 0 (passive mode disabled) by default */
86module_param_named(passive_mode, i2400m_passive_mode, int, 0644);
87MODULE_PARM_DESC(passive_mode,
88 "If true, the driver will not do any device setup "
89 "and leave it up to user space, who must be properly "
90 "setup.");
91
85 92
86/* 93/*
87 * Return if a TLV is of a give type and size 94 * Return if a TLV is of a give type and size
@@ -263,7 +270,7 @@ int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *l3l4_hdr,
263 270
264 if (status == 0) 271 if (status == 0)
265 return 0; 272 return 0;
266 if (status > ARRAY_SIZE(ms_to_errno)) { 273 if (status >= ARRAY_SIZE(ms_to_errno)) {
267 str = "unknown status code"; 274 str = "unknown status code";
268 result = -EBADR; 275 result = -EBADR;
269 } else { 276 } else {
@@ -336,7 +343,7 @@ void i2400m_report_tlv_system_state(struct i2400m *i2400m,
336 /* Huh? just in case, shut it down */ 343 /* Huh? just in case, shut it down */
337 dev_err(dev, "HW BUG? unknown state %u: shutting down\n", 344 dev_err(dev, "HW BUG? unknown state %u: shutting down\n",
338 i2400m_state); 345 i2400m_state);
339 i2400m->bus_reset(i2400m, I2400M_RT_WARM); 346 i2400m_reset(i2400m, I2400M_RT_WARM);
340 break; 347 break;
341 }; 348 };
342 d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n", 349 d_fnend(3, dev, "(i2400m %p ss %p [%u]) = void\n",
@@ -1335,6 +1342,8 @@ int i2400m_dev_initialize(struct i2400m *i2400m)
1335 unsigned argc = 0; 1342 unsigned argc = 0;
1336 1343
1337 d_fnstart(3, dev, "(i2400m %p)\n", i2400m); 1344 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
1345 if (i2400m_passive_mode)
1346 goto out_passive;
1338 /* Disable idle mode? (enabled by default) */ 1347 /* Disable idle mode? (enabled by default) */
1339 if (i2400m_idle_mode_disabled) { 1348 if (i2400m_idle_mode_disabled) {
1340 if (i2400m_le_v1_3(i2400m)) { 1349 if (i2400m_le_v1_3(i2400m)) {
@@ -1377,6 +1386,7 @@ int i2400m_dev_initialize(struct i2400m *i2400m)
1377 result = i2400m_set_init_config(i2400m, args, argc); 1386 result = i2400m_set_init_config(i2400m, args, argc);
1378 if (result < 0) 1387 if (result < 0)
1379 goto error; 1388 goto error;
1389out_passive:
1380 /* 1390 /*
1381 * Update state: Here it just calls a get state; parsing the 1391 * Update state: Here it just calls a get state; parsing the
1382 * result (System State TLV and RF Status TLV [done in the rx 1392 * result (System State TLV and RF Status TLV [done in the rx
diff --git a/drivers/net/wimax/i2400m/debugfs.c b/drivers/net/wimax/i2400m/debugfs.c
index 9b81af3f80a9..b1aec3e1892f 100644
--- a/drivers/net/wimax/i2400m/debugfs.c
+++ b/drivers/net/wimax/i2400m/debugfs.c
@@ -214,7 +214,7 @@ int debugfs_i2400m_reset_set(void *data, u64 val)
214 case I2400M_RT_WARM: 214 case I2400M_RT_WARM:
215 case I2400M_RT_COLD: 215 case I2400M_RT_COLD:
216 case I2400M_RT_BUS: 216 case I2400M_RT_BUS:
217 result = i2400m->bus_reset(i2400m, rt); 217 result = i2400m_reset(i2400m, rt);
218 if (result >= 0) 218 if (result >= 0)
219 result = 0; 219 result = 0;
220 default: 220 default:
diff --git a/drivers/net/wimax/i2400m/driver.c b/drivers/net/wimax/i2400m/driver.c
index 304f0443ca4b..96a615fe09de 100644
--- a/drivers/net/wimax/i2400m/driver.c
+++ b/drivers/net/wimax/i2400m/driver.c
@@ -41,8 +41,10 @@
41 * __i2400m_dev_start() 41 * __i2400m_dev_start()
42 * 42 *
43 * i2400m_setup() 43 * i2400m_setup()
44 * i2400m->bus_setup()
44 * i2400m_bootrom_init() 45 * i2400m_bootrom_init()
45 * register_netdev() 46 * register_netdev()
47 * wimax_dev_add()
46 * i2400m_dev_start() 48 * i2400m_dev_start()
47 * __i2400m_dev_start() 49 * __i2400m_dev_start()
48 * i2400m_dev_bootstrap() 50 * i2400m_dev_bootstrap()
@@ -50,15 +52,15 @@
50 * i2400m->bus_dev_start() 52 * i2400m->bus_dev_start()
51 * i2400m_firmware_check() 53 * i2400m_firmware_check()
52 * i2400m_check_mac_addr() 54 * i2400m_check_mac_addr()
53 * wimax_dev_add()
54 * 55 *
55 * i2400m_release() 56 * i2400m_release()
56 * wimax_dev_rm()
57 * i2400m_dev_stop() 57 * i2400m_dev_stop()
58 * __i2400m_dev_stop() 58 * __i2400m_dev_stop()
59 * i2400m_dev_shutdown() 59 * i2400m_dev_shutdown()
60 * i2400m->bus_dev_stop() 60 * i2400m->bus_dev_stop()
61 * i2400m_tx_release() 61 * i2400m_tx_release()
62 * i2400m->bus_release()
63 * wimax_dev_rm()
62 * unregister_netdev() 64 * unregister_netdev()
63 */ 65 */
64#include "i2400m.h" 66#include "i2400m.h"
@@ -66,6 +68,7 @@
66#include <linux/wimax/i2400m.h> 68#include <linux/wimax/i2400m.h>
67#include <linux/module.h> 69#include <linux/module.h>
68#include <linux/moduleparam.h> 70#include <linux/moduleparam.h>
71#include <linux/suspend.h>
69 72
70#define D_SUBMODULE driver 73#define D_SUBMODULE driver
71#include "debug-levels.h" 74#include "debug-levels.h"
@@ -90,76 +93,39 @@ MODULE_PARM_DESC(power_save_disabled,
90 "False by default (so the device is told to do power " 93 "False by default (so the device is told to do power "
91 "saving)."); 94 "saving).");
92 95
93/** 96static char i2400m_debug_params[128];
94 * i2400m_queue_work - schedule work on a i2400m's queue 97module_param_string(debug, i2400m_debug_params, sizeof(i2400m_debug_params),
95 * 98 0644);
96 * @i2400m: device descriptor 99MODULE_PARM_DESC(debug,
97 * 100 "String of space-separated NAME:VALUE pairs, where NAMEs "
98 * @fn: function to run to execute work. It gets passed a 'struct 101 "are the different debug submodules and VALUE are the "
99 * work_struct' that is wrapped in a 'struct i2400m_work'. Once 102 "initial debug value to set.");
100 * done, you have to (1) i2400m_put(i2400m_work->i2400m) and then 103
101 * (2) kfree(i2400m_work). 104static char i2400m_barkers_params[128];
102 * 105module_param_string(barkers, i2400m_barkers_params,
103 * @gfp_flags: GFP flags for memory allocation. 106 sizeof(i2400m_barkers_params), 0644);
104 * 107MODULE_PARM_DESC(barkers,
105 * @pl: pointer to a payload buffer that you want to pass to the _work 108 "String of comma-separated 32-bit values; each is "
106 * function. Use this to pack (for example) a struct with extra 109 "recognized as the value the device sends as a reboot "
107 * arguments. 110 "signal; values are appended to a list--setting one value "
108 * 111 "as zero cleans the existing list and starts a new one.");
109 * @pl_size: size of the payload buffer. 112
110 * 113static
111 * We do this quite often, so this just saves typing; allocate a 114struct i2400m_work *__i2400m_work_setup(
112 * wrapper for a i2400m, get a ref to it, pack arguments and launch 115 struct i2400m *i2400m, void (*fn)(struct work_struct *),
113 * the work. 116 gfp_t gfp_flags, const void *pl, size_t pl_size)
114 *
115 * A usual workflow is:
116 *
117 * struct my_work_args {
118 * void *something;
119 * int whatever;
120 * };
121 * ...
122 *
123 * struct my_work_args my_args = {
124 * .something = FOO,
125 * .whaetever = BLAH
126 * };
127 * i2400m_queue_work(i2400m, 1, my_work_function, GFP_KERNEL,
128 * &args, sizeof(args))
129 *
130 * And now the work function can unpack the arguments and call the
131 * real function (or do the job itself):
132 *
133 * static
134 * void my_work_fn((struct work_struct *ws)
135 * {
136 * struct i2400m_work *iw =
137 * container_of(ws, struct i2400m_work, ws);
138 * struct my_work_args *my_args = (void *) iw->pl;
139 *
140 * my_work(iw->i2400m, my_args->something, my_args->whatevert);
141 * }
142 */
143int i2400m_queue_work(struct i2400m *i2400m,
144 void (*fn)(struct work_struct *), gfp_t gfp_flags,
145 const void *pl, size_t pl_size)
146{ 117{
147 int result;
148 struct i2400m_work *iw; 118 struct i2400m_work *iw;
149 119
150 BUG_ON(i2400m->work_queue == NULL);
151 result = -ENOMEM;
152 iw = kzalloc(sizeof(*iw) + pl_size, gfp_flags); 120 iw = kzalloc(sizeof(*iw) + pl_size, gfp_flags);
153 if (iw == NULL) 121 if (iw == NULL)
154 goto error_kzalloc; 122 return NULL;
155 iw->i2400m = i2400m_get(i2400m); 123 iw->i2400m = i2400m_get(i2400m);
124 iw->pl_size = pl_size;
156 memcpy(iw->pl, pl, pl_size); 125 memcpy(iw->pl, pl, pl_size);
157 INIT_WORK(&iw->ws, fn); 126 INIT_WORK(&iw->ws, fn);
158 result = queue_work(i2400m->work_queue, &iw->ws); 127 return iw;
159error_kzalloc:
160 return result;
161} 128}
162EXPORT_SYMBOL_GPL(i2400m_queue_work);
163 129
164 130
165/* 131/*
@@ -175,21 +141,19 @@ EXPORT_SYMBOL_GPL(i2400m_queue_work);
175 * it should not happen. 141 * it should not happen.
176 */ 142 */
177int i2400m_schedule_work(struct i2400m *i2400m, 143int i2400m_schedule_work(struct i2400m *i2400m,
178 void (*fn)(struct work_struct *), gfp_t gfp_flags) 144 void (*fn)(struct work_struct *), gfp_t gfp_flags,
145 const void *pl, size_t pl_size)
179{ 146{
180 int result; 147 int result;
181 struct i2400m_work *iw; 148 struct i2400m_work *iw;
182 149
183 result = -ENOMEM; 150 result = -ENOMEM;
184 iw = kzalloc(sizeof(*iw), gfp_flags); 151 iw = __i2400m_work_setup(i2400m, fn, gfp_flags, pl, pl_size);
185 if (iw == NULL) 152 if (iw != NULL) {
186 goto error_kzalloc; 153 result = schedule_work(&iw->ws);
187 iw->i2400m = i2400m_get(i2400m); 154 if (WARN_ON(result == 0))
188 INIT_WORK(&iw->ws, fn); 155 result = -ENXIO;
189 result = schedule_work(&iw->ws); 156 }
190 if (result == 0)
191 result = -ENXIO;
192error_kzalloc:
193 return result; 157 return result;
194} 158}
195 159
@@ -291,7 +255,7 @@ int i2400m_op_reset(struct wimax_dev *wimax_dev)
291 mutex_lock(&i2400m->init_mutex); 255 mutex_lock(&i2400m->init_mutex);
292 i2400m->reset_ctx = &ctx; 256 i2400m->reset_ctx = &ctx;
293 mutex_unlock(&i2400m->init_mutex); 257 mutex_unlock(&i2400m->init_mutex);
294 result = i2400m->bus_reset(i2400m, I2400M_RT_WARM); 258 result = i2400m_reset(i2400m, I2400M_RT_WARM);
295 if (result < 0) 259 if (result < 0)
296 goto out; 260 goto out;
297 result = wait_for_completion_timeout(&ctx.completion, 4*HZ); 261 result = wait_for_completion_timeout(&ctx.completion, 4*HZ);
@@ -420,9 +384,15 @@ retry:
420 dev_err(dev, "cannot create workqueue\n"); 384 dev_err(dev, "cannot create workqueue\n");
421 goto error_create_workqueue; 385 goto error_create_workqueue;
422 } 386 }
423 result = i2400m->bus_dev_start(i2400m); 387 if (i2400m->bus_dev_start) {
424 if (result < 0) 388 result = i2400m->bus_dev_start(i2400m);
425 goto error_bus_dev_start; 389 if (result < 0)
390 goto error_bus_dev_start;
391 }
392 i2400m->ready = 1;
393 wmb(); /* see i2400m->ready's documentation */
394 /* process pending reports from the device */
395 queue_work(i2400m->work_queue, &i2400m->rx_report_ws);
426 result = i2400m_firmware_check(i2400m); /* fw versions ok? */ 396 result = i2400m_firmware_check(i2400m); /* fw versions ok? */
427 if (result < 0) 397 if (result < 0)
428 goto error_fw_check; 398 goto error_fw_check;
@@ -430,8 +400,6 @@ retry:
430 result = i2400m_check_mac_addr(i2400m); 400 result = i2400m_check_mac_addr(i2400m);
431 if (result < 0) 401 if (result < 0)
432 goto error_check_mac_addr; 402 goto error_check_mac_addr;
433 i2400m->ready = 1;
434 wimax_state_change(wimax_dev, WIMAX_ST_UNINITIALIZED);
435 result = i2400m_dev_initialize(i2400m); 403 result = i2400m_dev_initialize(i2400m);
436 if (result < 0) 404 if (result < 0)
437 goto error_dev_initialize; 405 goto error_dev_initialize;
@@ -443,8 +411,12 @@ retry:
443 411
444error_dev_initialize: 412error_dev_initialize:
445error_check_mac_addr: 413error_check_mac_addr:
414 i2400m->ready = 0;
415 wmb(); /* see i2400m->ready's documentation */
416 flush_workqueue(i2400m->work_queue);
446error_fw_check: 417error_fw_check:
447 i2400m->bus_dev_stop(i2400m); 418 if (i2400m->bus_dev_stop)
419 i2400m->bus_dev_stop(i2400m);
448error_bus_dev_start: 420error_bus_dev_start:
449 destroy_workqueue(i2400m->work_queue); 421 destroy_workqueue(i2400m->work_queue);
450error_create_workqueue: 422error_create_workqueue:
@@ -466,11 +438,15 @@ error_bootstrap:
466static 438static
467int i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri bm_flags) 439int i2400m_dev_start(struct i2400m *i2400m, enum i2400m_bri bm_flags)
468{ 440{
469 int result; 441 int result = 0;
470 mutex_lock(&i2400m->init_mutex); /* Well, start the device */ 442 mutex_lock(&i2400m->init_mutex); /* Well, start the device */
471 result = __i2400m_dev_start(i2400m, bm_flags); 443 if (i2400m->updown == 0) {
472 if (result >= 0) 444 result = __i2400m_dev_start(i2400m, bm_flags);
473 i2400m->updown = 1; 445 if (result >= 0) {
446 i2400m->updown = 1;
447 wmb(); /* see i2400m->updown's documentation */
448 }
449 }
474 mutex_unlock(&i2400m->init_mutex); 450 mutex_unlock(&i2400m->init_mutex);
475 return result; 451 return result;
476} 452}
@@ -495,9 +471,20 @@ void __i2400m_dev_stop(struct i2400m *i2400m)
495 471
496 d_fnstart(3, dev, "(i2400m %p)\n", i2400m); 472 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
497 wimax_state_change(wimax_dev, __WIMAX_ST_QUIESCING); 473 wimax_state_change(wimax_dev, __WIMAX_ST_QUIESCING);
474 i2400m_msg_to_dev_cancel_wait(i2400m, -EL3RST);
475 complete(&i2400m->msg_completion);
476 i2400m_net_wake_stop(i2400m);
498 i2400m_dev_shutdown(i2400m); 477 i2400m_dev_shutdown(i2400m);
499 i2400m->ready = 0; 478 /*
500 i2400m->bus_dev_stop(i2400m); 479 * Make sure no report hooks are running *before* we stop the
480 * communication infrastructure with the device.
481 */
482 i2400m->ready = 0; /* nobody can queue work anymore */
483 wmb(); /* see i2400m->ready's documentation */
484 flush_workqueue(i2400m->work_queue);
485
486 if (i2400m->bus_dev_stop)
487 i2400m->bus_dev_stop(i2400m);
501 destroy_workqueue(i2400m->work_queue); 488 destroy_workqueue(i2400m->work_queue);
502 i2400m_rx_release(i2400m); 489 i2400m_rx_release(i2400m);
503 i2400m_tx_release(i2400m); 490 i2400m_tx_release(i2400m);
@@ -518,12 +505,139 @@ void i2400m_dev_stop(struct i2400m *i2400m)
518 if (i2400m->updown) { 505 if (i2400m->updown) {
519 __i2400m_dev_stop(i2400m); 506 __i2400m_dev_stop(i2400m);
520 i2400m->updown = 0; 507 i2400m->updown = 0;
508 wmb(); /* see i2400m->updown's documentation */
521 } 509 }
522 mutex_unlock(&i2400m->init_mutex); 510 mutex_unlock(&i2400m->init_mutex);
523} 511}
524 512
525 513
526/* 514/*
515 * Listen to PM events to cache the firmware before suspend/hibernation
516 *
517 * When the device comes out of suspend, it might go into reset and
518 * firmware has to be uploaded again. At resume, most of the times, we
519 * can't load firmware images from disk, so we need to cache it.
520 *
521 * i2400m_fw_cache() will allocate a kobject and attach the firmware
522 * to it; that way we don't have to worry too much about the fw loader
523 * hitting a race condition.
524 *
525 * Note: modus operandi stolen from the Orinoco driver; thx.
526 */
527static
528int i2400m_pm_notifier(struct notifier_block *notifier,
529 unsigned long pm_event,
530 void *unused)
531{
532 struct i2400m *i2400m =
533 container_of(notifier, struct i2400m, pm_notifier);
534 struct device *dev = i2400m_dev(i2400m);
535
536 d_fnstart(3, dev, "(i2400m %p pm_event %lx)\n", i2400m, pm_event);
537 switch (pm_event) {
538 case PM_HIBERNATION_PREPARE:
539 case PM_SUSPEND_PREPARE:
540 i2400m_fw_cache(i2400m);
541 break;
542 case PM_POST_RESTORE:
543 /* Restore from hibernation failed. We need to clean
544 * up in exactly the same way, so fall through. */
545 case PM_POST_HIBERNATION:
546 case PM_POST_SUSPEND:
547 i2400m_fw_uncache(i2400m);
548 break;
549
550 case PM_RESTORE_PREPARE:
551 default:
552 break;
553 }
554 d_fnend(3, dev, "(i2400m %p pm_event %lx) = void\n", i2400m, pm_event);
555 return NOTIFY_DONE;
556}
557
558
559/*
560 * pre-reset is called before a device is going on reset
561 *
562 * This has to be followed by a call to i2400m_post_reset(), otherwise
563 * bad things might happen.
564 */
565int i2400m_pre_reset(struct i2400m *i2400m)
566{
567 int result;
568 struct device *dev = i2400m_dev(i2400m);
569
570 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
571 d_printf(1, dev, "pre-reset shut down\n");
572
573 result = 0;
574 mutex_lock(&i2400m->init_mutex);
575 if (i2400m->updown) {
576 netif_tx_disable(i2400m->wimax_dev.net_dev);
577 __i2400m_dev_stop(i2400m);
578 result = 0;
579 /* down't set updown to zero -- this way
580 * post_reset can restore properly */
581 }
582 mutex_unlock(&i2400m->init_mutex);
583 if (i2400m->bus_release)
584 i2400m->bus_release(i2400m);
585 d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
586 return result;
587}
588EXPORT_SYMBOL_GPL(i2400m_pre_reset);
589
590
591/*
592 * Restore device state after a reset
593 *
594 * Do the work needed after a device reset to bring it up to the same
595 * state as it was before the reset.
596 *
597 * NOTE: this requires i2400m->init_mutex taken
598 */
599int i2400m_post_reset(struct i2400m *i2400m)
600{
601 int result = 0;
602 struct device *dev = i2400m_dev(i2400m);
603
604 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
605 d_printf(1, dev, "post-reset start\n");
606 if (i2400m->bus_setup) {
607 result = i2400m->bus_setup(i2400m);
608 if (result < 0) {
609 dev_err(dev, "bus-specific setup failed: %d\n",
610 result);
611 goto error_bus_setup;
612 }
613 }
614 mutex_lock(&i2400m->init_mutex);
615 if (i2400m->updown) {
616 result = __i2400m_dev_start(
617 i2400m, I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
618 if (result < 0)
619 goto error_dev_start;
620 }
621 mutex_unlock(&i2400m->init_mutex);
622 d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
623 return result;
624
625error_dev_start:
626 if (i2400m->bus_release)
627 i2400m->bus_release(i2400m);
628error_bus_setup:
629 /* even if the device was up, it could not be recovered, so we
630 * mark it as down. */
631 i2400m->updown = 0;
632 wmb(); /* see i2400m->updown's documentation */
633 mutex_unlock(&i2400m->init_mutex);
634 d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
635 return result;
636}
637EXPORT_SYMBOL_GPL(i2400m_post_reset);
638
639
640/*
527 * The device has rebooted; fix up the device and the driver 641 * The device has rebooted; fix up the device and the driver
528 * 642 *
529 * Tear down the driver communication with the device, reload the 643 * Tear down the driver communication with the device, reload the
@@ -542,56 +656,69 @@ void i2400m_dev_stop(struct i2400m *i2400m)
542 * _stop()], don't do anything, let it fail and handle it. 656 * _stop()], don't do anything, let it fail and handle it.
543 * 657 *
544 * This function is ran always in a thread context 658 * This function is ran always in a thread context
659 *
660 * This function gets passed, as payload to i2400m_work() a 'const
661 * char *' ptr with a "reason" why the reset happened (for messages).
545 */ 662 */
546static 663static
547void __i2400m_dev_reset_handle(struct work_struct *ws) 664void __i2400m_dev_reset_handle(struct work_struct *ws)
548{ 665{
549 int result; 666 int result;
550 struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws); 667 struct i2400m_work *iw = container_of(ws, struct i2400m_work, ws);
668 const char *reason;
551 struct i2400m *i2400m = iw->i2400m; 669 struct i2400m *i2400m = iw->i2400m;
552 struct device *dev = i2400m_dev(i2400m); 670 struct device *dev = i2400m_dev(i2400m);
553 enum wimax_st wimax_state;
554 struct i2400m_reset_ctx *ctx = i2400m->reset_ctx; 671 struct i2400m_reset_ctx *ctx = i2400m->reset_ctx;
555 672
556 d_fnstart(3, dev, "(ws %p i2400m %p)\n", ws, i2400m); 673 if (WARN_ON(iw->pl_size != sizeof(reason)))
674 reason = "SW BUG: reason n/a";
675 else
676 memcpy(&reason, iw->pl, sizeof(reason));
677
678 d_fnstart(3, dev, "(ws %p i2400m %p reason %s)\n", ws, i2400m, reason);
679
557 result = 0; 680 result = 0;
558 if (mutex_trylock(&i2400m->init_mutex) == 0) { 681 if (mutex_trylock(&i2400m->init_mutex) == 0) {
559 /* We are still in i2400m_dev_start() [let it fail] or 682 /* We are still in i2400m_dev_start() [let it fail] or
560 * i2400m_dev_stop() [we are shutting down anyway, so 683 * i2400m_dev_stop() [we are shutting down anyway, so
561 * ignore it] or we are resetting somewhere else. */ 684 * ignore it] or we are resetting somewhere else. */
562 dev_err(dev, "device rebooted\n"); 685 dev_err(dev, "device rebooted somewhere else?\n");
563 i2400m_msg_to_dev_cancel_wait(i2400m, -EL3RST); 686 i2400m_msg_to_dev_cancel_wait(i2400m, -EL3RST);
564 complete(&i2400m->msg_completion); 687 complete(&i2400m->msg_completion);
565 goto out; 688 goto out;
566 } 689 }
567 wimax_state = wimax_state_get(&i2400m->wimax_dev); 690 if (i2400m->updown == 0) {
568 if (wimax_state < WIMAX_ST_UNINITIALIZED) { 691 dev_info(dev, "%s: device is down, doing nothing\n", reason);
569 dev_info(dev, "device rebooted: it is down, ignoring\n"); 692 goto out_unlock;
570 goto out_unlock; /* ifconfig up/down wasn't called */
571 } 693 }
572 dev_err(dev, "device rebooted: reinitializing driver\n"); 694 dev_err(dev, "%s: reinitializing driver\n", reason);
573 __i2400m_dev_stop(i2400m); 695 __i2400m_dev_stop(i2400m);
574 i2400m->updown = 0;
575 result = __i2400m_dev_start(i2400m, 696 result = __i2400m_dev_start(i2400m,
576 I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT); 697 I2400M_BRI_SOFT | I2400M_BRI_MAC_REINIT);
577 if (result < 0) { 698 if (result < 0) {
578 dev_err(dev, "device reboot: cannot start the device: %d\n", 699 i2400m->updown = 0;
579 result); 700 wmb(); /* see i2400m->updown's documentation */
580 result = i2400m->bus_reset(i2400m, I2400M_RT_BUS); 701 dev_err(dev, "%s: cannot start the device: %d\n",
581 if (result >= 0) 702 reason, result);
582 result = -ENODEV; 703 result = -EUCLEAN;
583 } else 704 }
584 i2400m->updown = 1;
585out_unlock: 705out_unlock:
586 if (i2400m->reset_ctx) { 706 if (i2400m->reset_ctx) {
587 ctx->result = result; 707 ctx->result = result;
588 complete(&ctx->completion); 708 complete(&ctx->completion);
589 } 709 }
590 mutex_unlock(&i2400m->init_mutex); 710 mutex_unlock(&i2400m->init_mutex);
711 if (result == -EUCLEAN) {
712 /* ops, need to clean up [w/ init_mutex not held] */
713 result = i2400m_reset(i2400m, I2400M_RT_BUS);
714 if (result >= 0)
715 result = -ENODEV;
716 }
591out: 717out:
592 i2400m_put(i2400m); 718 i2400m_put(i2400m);
593 kfree(iw); 719 kfree(iw);
594 d_fnend(3, dev, "(ws %p i2400m %p) = void\n", ws, i2400m); 720 d_fnend(3, dev, "(ws %p i2400m %p reason %s) = void\n",
721 ws, i2400m, reason);
595 return; 722 return;
596} 723}
597 724
@@ -608,16 +735,104 @@ out:
608 * reinitializing the driver to handle the reset, calling into the 735 * reinitializing the driver to handle the reset, calling into the
609 * bus-specific functions ops as needed. 736 * bus-specific functions ops as needed.
610 */ 737 */
611int i2400m_dev_reset_handle(struct i2400m *i2400m) 738int i2400m_dev_reset_handle(struct i2400m *i2400m, const char *reason)
612{ 739{
613 i2400m->boot_mode = 1; 740 i2400m->boot_mode = 1;
614 wmb(); /* Make sure i2400m_msg_to_dev() sees boot_mode */ 741 wmb(); /* Make sure i2400m_msg_to_dev() sees boot_mode */
615 return i2400m_schedule_work(i2400m, __i2400m_dev_reset_handle, 742 return i2400m_schedule_work(i2400m, __i2400m_dev_reset_handle,
616 GFP_ATOMIC); 743 GFP_ATOMIC, &reason, sizeof(reason));
617} 744}
618EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle); 745EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
619 746
620 747
748/*
749 * Alloc the command and ack buffers for boot mode
750 *
751 * Get the buffers needed to deal with boot mode messages. These
752 * buffers need to be allocated before the sdio recieve irq is setup.
753 */
754static
755int i2400m_bm_buf_alloc(struct i2400m *i2400m)
756{
757 int result;
758
759 result = -ENOMEM;
760 i2400m->bm_cmd_buf = kzalloc(I2400M_BM_CMD_BUF_SIZE, GFP_KERNEL);
761 if (i2400m->bm_cmd_buf == NULL)
762 goto error_bm_cmd_kzalloc;
763 i2400m->bm_ack_buf = kzalloc(I2400M_BM_ACK_BUF_SIZE, GFP_KERNEL);
764 if (i2400m->bm_ack_buf == NULL)
765 goto error_bm_ack_buf_kzalloc;
766 return 0;
767
768error_bm_ack_buf_kzalloc:
769 kfree(i2400m->bm_cmd_buf);
770error_bm_cmd_kzalloc:
771 return result;
772}
773
774
775/*
776 * Free boot mode command and ack buffers.
777 */
778static
779void i2400m_bm_buf_free(struct i2400m *i2400m)
780{
781 kfree(i2400m->bm_ack_buf);
782 kfree(i2400m->bm_cmd_buf);
783}
784
785
786/**
787 * i2400m_init - Initialize a 'struct i2400m' from all zeroes
788 *
789 * This is a bus-generic API call.
790 */
791void i2400m_init(struct i2400m *i2400m)
792{
793 wimax_dev_init(&i2400m->wimax_dev);
794
795 i2400m->boot_mode = 1;
796 i2400m->rx_reorder = 1;
797 init_waitqueue_head(&i2400m->state_wq);
798
799 spin_lock_init(&i2400m->tx_lock);
800 i2400m->tx_pl_min = UINT_MAX;
801 i2400m->tx_size_min = UINT_MAX;
802
803 spin_lock_init(&i2400m->rx_lock);
804 i2400m->rx_pl_min = UINT_MAX;
805 i2400m->rx_size_min = UINT_MAX;
806 INIT_LIST_HEAD(&i2400m->rx_reports);
807 INIT_WORK(&i2400m->rx_report_ws, i2400m_report_hook_work);
808
809 mutex_init(&i2400m->msg_mutex);
810 init_completion(&i2400m->msg_completion);
811
812 mutex_init(&i2400m->init_mutex);
813 /* wake_tx_ws is initialized in i2400m_tx_setup() */
814}
815EXPORT_SYMBOL_GPL(i2400m_init);
816
817
818int i2400m_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
819{
820 struct net_device *net_dev = i2400m->wimax_dev.net_dev;
821
822 /*
823 * Make sure we stop TXs and down the carrier before
824 * resetting; this is needed to avoid things like
825 * i2400m_wake_tx() scheduling stuff in parallel.
826 */
827 if (net_dev->reg_state == NETREG_REGISTERED) {
828 netif_tx_disable(net_dev);
829 netif_carrier_off(net_dev);
830 }
831 return i2400m->bus_reset(i2400m, rt);
832}
833EXPORT_SYMBOL_GPL(i2400m_reset);
834
835
621/** 836/**
622 * i2400m_setup - bus-generic setup function for the i2400m device 837 * i2400m_setup - bus-generic setup function for the i2400m device
623 * 838 *
@@ -625,13 +840,9 @@ EXPORT_SYMBOL_GPL(i2400m_dev_reset_handle);
625 * 840 *
626 * Returns: 0 if ok, < 0 errno code on error. 841 * Returns: 0 if ok, < 0 errno code on error.
627 * 842 *
628 * Initializes the bus-generic parts of the i2400m driver; the 843 * Sets up basic device comunication infrastructure, boots the ROM to
629 * bus-specific parts have been initialized, function pointers filled 844 * read the MAC address, registers with the WiMAX and network stacks
630 * out by the bus-specific probe function. 845 * and then brings up the device.
631 *
632 * As well, this registers the WiMAX and net device nodes. Once this
633 * function returns, the device is operative and has to be ready to
634 * receive and send network traffic and WiMAX control operations.
635 */ 846 */
636int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags) 847int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
637{ 848{
@@ -645,16 +856,21 @@ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
645 snprintf(wimax_dev->name, sizeof(wimax_dev->name), 856 snprintf(wimax_dev->name, sizeof(wimax_dev->name),
646 "i2400m-%s:%s", dev->bus->name, dev_name(dev)); 857 "i2400m-%s:%s", dev->bus->name, dev_name(dev));
647 858
648 i2400m->bm_cmd_buf = kzalloc(I2400M_BM_CMD_BUF_SIZE, GFP_KERNEL); 859 result = i2400m_bm_buf_alloc(i2400m);
649 if (i2400m->bm_cmd_buf == NULL) { 860 if (result < 0) {
650 dev_err(dev, "cannot allocate USB command buffer\n"); 861 dev_err(dev, "cannot allocate bootmode scratch buffers\n");
651 goto error_bm_cmd_kzalloc; 862 goto error_bm_buf_alloc;
652 } 863 }
653 i2400m->bm_ack_buf = kzalloc(I2400M_BM_ACK_BUF_SIZE, GFP_KERNEL); 864
654 if (i2400m->bm_ack_buf == NULL) { 865 if (i2400m->bus_setup) {
655 dev_err(dev, "cannot allocate USB ack buffer\n"); 866 result = i2400m->bus_setup(i2400m);
656 goto error_bm_ack_buf_kzalloc; 867 if (result < 0) {
868 dev_err(dev, "bus-specific setup failed: %d\n",
869 result);
870 goto error_bus_setup;
871 }
657 } 872 }
873
658 result = i2400m_bootrom_init(i2400m, bm_flags); 874 result = i2400m_bootrom_init(i2400m, bm_flags);
659 if (result < 0) { 875 if (result < 0) {
660 dev_err(dev, "read mac addr: bootrom init " 876 dev_err(dev, "read mac addr: bootrom init "
@@ -666,6 +882,9 @@ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
666 goto error_read_mac_addr; 882 goto error_read_mac_addr;
667 random_ether_addr(i2400m->src_mac_addr); 883 random_ether_addr(i2400m->src_mac_addr);
668 884
885 i2400m->pm_notifier.notifier_call = i2400m_pm_notifier;
886 register_pm_notifier(&i2400m->pm_notifier);
887
669 result = register_netdev(net_dev); /* Okey dokey, bring it up */ 888 result = register_netdev(net_dev); /* Okey dokey, bring it up */
670 if (result < 0) { 889 if (result < 0) {
671 dev_err(dev, "cannot register i2400m network device: %d\n", 890 dev_err(dev, "cannot register i2400m network device: %d\n",
@@ -674,18 +893,13 @@ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
674 } 893 }
675 netif_carrier_off(net_dev); 894 netif_carrier_off(net_dev);
676 895
677 result = i2400m_dev_start(i2400m, bm_flags);
678 if (result < 0)
679 goto error_dev_start;
680
681 i2400m->wimax_dev.op_msg_from_user = i2400m_op_msg_from_user; 896 i2400m->wimax_dev.op_msg_from_user = i2400m_op_msg_from_user;
682 i2400m->wimax_dev.op_rfkill_sw_toggle = i2400m_op_rfkill_sw_toggle; 897 i2400m->wimax_dev.op_rfkill_sw_toggle = i2400m_op_rfkill_sw_toggle;
683 i2400m->wimax_dev.op_reset = i2400m_op_reset; 898 i2400m->wimax_dev.op_reset = i2400m_op_reset;
899
684 result = wimax_dev_add(&i2400m->wimax_dev, net_dev); 900 result = wimax_dev_add(&i2400m->wimax_dev, net_dev);
685 if (result < 0) 901 if (result < 0)
686 goto error_wimax_dev_add; 902 goto error_wimax_dev_add;
687 /* User space needs to do some init stuff */
688 wimax_state_change(wimax_dev, WIMAX_ST_UNINITIALIZED);
689 903
690 /* Now setup all that requires a registered net and wimax device. */ 904 /* Now setup all that requires a registered net and wimax device. */
691 result = sysfs_create_group(&net_dev->dev.kobj, &i2400m_dev_attr_group); 905 result = sysfs_create_group(&net_dev->dev.kobj, &i2400m_dev_attr_group);
@@ -693,30 +907,37 @@ int i2400m_setup(struct i2400m *i2400m, enum i2400m_bri bm_flags)
693 dev_err(dev, "cannot setup i2400m's sysfs: %d\n", result); 907 dev_err(dev, "cannot setup i2400m's sysfs: %d\n", result);
694 goto error_sysfs_setup; 908 goto error_sysfs_setup;
695 } 909 }
910
696 result = i2400m_debugfs_add(i2400m); 911 result = i2400m_debugfs_add(i2400m);
697 if (result < 0) { 912 if (result < 0) {
698 dev_err(dev, "cannot setup i2400m's debugfs: %d\n", result); 913 dev_err(dev, "cannot setup i2400m's debugfs: %d\n", result);
699 goto error_debugfs_setup; 914 goto error_debugfs_setup;
700 } 915 }
916
917 result = i2400m_dev_start(i2400m, bm_flags);
918 if (result < 0)
919 goto error_dev_start;
701 d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result); 920 d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
702 return result; 921 return result;
703 922
923error_dev_start:
924 i2400m_debugfs_rm(i2400m);
704error_debugfs_setup: 925error_debugfs_setup:
705 sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj, 926 sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj,
706 &i2400m_dev_attr_group); 927 &i2400m_dev_attr_group);
707error_sysfs_setup: 928error_sysfs_setup:
708 wimax_dev_rm(&i2400m->wimax_dev); 929 wimax_dev_rm(&i2400m->wimax_dev);
709error_wimax_dev_add: 930error_wimax_dev_add:
710 i2400m_dev_stop(i2400m);
711error_dev_start:
712 unregister_netdev(net_dev); 931 unregister_netdev(net_dev);
713error_register_netdev: 932error_register_netdev:
933 unregister_pm_notifier(&i2400m->pm_notifier);
714error_read_mac_addr: 934error_read_mac_addr:
715error_bootrom_init: 935error_bootrom_init:
716 kfree(i2400m->bm_ack_buf); 936 if (i2400m->bus_release)
717error_bm_ack_buf_kzalloc: 937 i2400m->bus_release(i2400m);
718 kfree(i2400m->bm_cmd_buf); 938error_bus_setup:
719error_bm_cmd_kzalloc: 939 i2400m_bm_buf_free(i2400m);
940error_bm_buf_alloc:
720 d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result); 941 d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result);
721 return result; 942 return result;
722} 943}
@@ -735,14 +956,17 @@ void i2400m_release(struct i2400m *i2400m)
735 d_fnstart(3, dev, "(i2400m %p)\n", i2400m); 956 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
736 netif_stop_queue(i2400m->wimax_dev.net_dev); 957 netif_stop_queue(i2400m->wimax_dev.net_dev);
737 958
959 i2400m_dev_stop(i2400m);
960
738 i2400m_debugfs_rm(i2400m); 961 i2400m_debugfs_rm(i2400m);
739 sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj, 962 sysfs_remove_group(&i2400m->wimax_dev.net_dev->dev.kobj,
740 &i2400m_dev_attr_group); 963 &i2400m_dev_attr_group);
741 wimax_dev_rm(&i2400m->wimax_dev); 964 wimax_dev_rm(&i2400m->wimax_dev);
742 i2400m_dev_stop(i2400m);
743 unregister_netdev(i2400m->wimax_dev.net_dev); 965 unregister_netdev(i2400m->wimax_dev.net_dev);
744 kfree(i2400m->bm_ack_buf); 966 unregister_pm_notifier(&i2400m->pm_notifier);
745 kfree(i2400m->bm_cmd_buf); 967 if (i2400m->bus_release)
968 i2400m->bus_release(i2400m);
969 i2400m_bm_buf_free(i2400m);
746 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); 970 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
747} 971}
748EXPORT_SYMBOL_GPL(i2400m_release); 972EXPORT_SYMBOL_GPL(i2400m_release);
@@ -759,6 +983,7 @@ struct d_level D_LEVEL[] = {
759 D_SUBMODULE_DEFINE(netdev), 983 D_SUBMODULE_DEFINE(netdev),
760 D_SUBMODULE_DEFINE(rfkill), 984 D_SUBMODULE_DEFINE(rfkill),
761 D_SUBMODULE_DEFINE(rx), 985 D_SUBMODULE_DEFINE(rx),
986 D_SUBMODULE_DEFINE(sysfs),
762 D_SUBMODULE_DEFINE(tx), 987 D_SUBMODULE_DEFINE(tx),
763}; 988};
764size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL); 989size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
@@ -767,7 +992,9 @@ size_t D_LEVEL_SIZE = ARRAY_SIZE(D_LEVEL);
767static 992static
768int __init i2400m_driver_init(void) 993int __init i2400m_driver_init(void)
769{ 994{
770 return 0; 995 d_parse_params(D_LEVEL, D_LEVEL_SIZE, i2400m_debug_params,
996 "i2400m.debug");
997 return i2400m_barker_db_init(i2400m_barkers_params);
771} 998}
772module_init(i2400m_driver_init); 999module_init(i2400m_driver_init);
773 1000
@@ -776,6 +1003,7 @@ void __exit i2400m_driver_exit(void)
776{ 1003{
777 /* for scheds i2400m_dev_reset_handle() */ 1004 /* for scheds i2400m_dev_reset_handle() */
778 flush_scheduled_work(); 1005 flush_scheduled_work();
1006 i2400m_barker_db_exit();
779 return; 1007 return;
780} 1008}
781module_exit(i2400m_driver_exit); 1009module_exit(i2400m_driver_exit);
diff --git a/drivers/net/wimax/i2400m/fw.c b/drivers/net/wimax/i2400m/fw.c
index e81750e54452..64cdfeb299ca 100644
--- a/drivers/net/wimax/i2400m/fw.c
+++ b/drivers/net/wimax/i2400m/fw.c
@@ -40,11 +40,9 @@
40 * 40 *
41 * THE PROCEDURE 41 * THE PROCEDURE
42 * 42 *
43 * (this is decribed for USB, but for SDIO is similar) 43 * The 2400m and derived devices work in two modes: boot-mode or
44 * 44 * normal mode. In boot mode we can execute only a handful of commands
45 * The 2400m works in two modes: boot-mode or normal mode. In boot 45 * targeted at uploading the firmware and launching it.
46 * mode we can execute only a handful of commands targeted at
47 * uploading the firmware and launching it.
48 * 46 *
49 * The 2400m enters boot mode when it is first connected to the 47 * The 2400m enters boot mode when it is first connected to the
50 * system, when it crashes and when you ask it to reboot. There are 48 * system, when it crashes and when you ask it to reboot. There are
@@ -52,18 +50,26 @@
52 * firmwares signed with a certain private key, non-signed takes any 50 * firmwares signed with a certain private key, non-signed takes any
53 * firmware. Normal hardware takes only signed firmware. 51 * firmware. Normal hardware takes only signed firmware.
54 * 52 *
55 * Upon entrance to boot mode, the device sends a few zero length 53 * On boot mode, in USB, we write to the device using the bulk out
56 * packets (ZLPs) on the notification endpoint, then a reboot barker 54 * endpoint and read from it in the notification endpoint. In SDIO we
57 * (4 le32 words with value I2400M_{S,N}BOOT_BARKER). We ack it by 55 * talk to it via the write address and read from the read address.
58 * sending the same barker on the bulk out endpoint. The device acks 56 *
59 * with a reboot ack barker (4 le32 words with value 0xfeedbabe) and 57 * Upon entrance to boot mode, the device sends (preceeded with a few
60 * then the device is fully rebooted. At this point we can upload the 58 * zero length packets (ZLPs) on the notification endpoint in USB) a
61 * firmware. 59 * reboot barker (4 le32 words with the same value). We ack it by
60 * sending the same barker to the device. The device acks with a
61 * reboot ack barker (4 le32 words with value I2400M_ACK_BARKER) and
62 * then is fully booted. At this point we can upload the firmware.
63 *
64 * Note that different iterations of the device and EEPROM
65 * configurations will send different [re]boot barkers; these are
66 * collected in i2400m_barker_db along with the firmware
67 * characteristics they require.
62 * 68 *
63 * This process is accomplished by the i2400m_bootrom_init() 69 * This process is accomplished by the i2400m_bootrom_init()
64 * function. All the device interaction happens through the 70 * function. All the device interaction happens through the
65 * i2400m_bm_cmd() [boot mode command]. Special return values will 71 * i2400m_bm_cmd() [boot mode command]. Special return values will
66 * indicate if the device resets. 72 * indicate if the device did reset during the process.
67 * 73 *
68 * After this, we read the MAC address and then (if needed) 74 * After this, we read the MAC address and then (if needed)
69 * reinitialize the device. We need to read it ahead of time because 75 * reinitialize the device. We need to read it ahead of time because
@@ -72,11 +78,11 @@
72 * 78 *
73 * We can then upload the firmware file. The file is composed of a BCF 79 * We can then upload the firmware file. The file is composed of a BCF
74 * header (basic data, keys and signatures) and a list of write 80 * header (basic data, keys and signatures) and a list of write
75 * commands and payloads. We first upload the header 81 * commands and payloads. Optionally more BCF headers might follow the
76 * [i2400m_dnload_init()] and then pass the commands and payloads 82 * main payload. We first upload the header [i2400m_dnload_init()] and
77 * verbatim to the i2400m_bm_cmd() function 83 * then pass the commands and payloads verbatim to the i2400m_bm_cmd()
78 * [i2400m_dnload_bcf()]. Then we tell the device to jump to the new 84 * function [i2400m_dnload_bcf()]. Then we tell the device to jump to
79 * firmware [i2400m_dnload_finalize()]. 85 * the new firmware [i2400m_dnload_finalize()].
80 * 86 *
81 * Once firmware is uploaded, we are good to go :) 87 * Once firmware is uploaded, we are good to go :)
82 * 88 *
@@ -99,18 +105,32 @@
99 * read an acknolwedgement from it (or an asynchronous notification) 105 * read an acknolwedgement from it (or an asynchronous notification)
100 * from it. 106 * from it.
101 * 107 *
108 * FIRMWARE LOADING
109 *
110 * Note that in some cases, we can't just load a firmware file (for
111 * example, when resuming). For that, we might cache the firmware
112 * file. Thus, when doing the bootstrap, if there is a cache firmware
113 * file, it is used; if not, loading from disk is attempted.
114 *
102 * ROADMAP 115 * ROADMAP
103 * 116 *
117 * i2400m_barker_db_init Called by i2400m_driver_init()
118 * i2400m_barker_db_add
119 *
120 * i2400m_barker_db_exit Called by i2400m_driver_exit()
121 *
104 * i2400m_dev_bootstrap Called by __i2400m_dev_start() 122 * i2400m_dev_bootstrap Called by __i2400m_dev_start()
105 * request_firmware 123 * request_firmware
106 * i2400m_fw_check 124 * i2400m_fw_bootstrap
107 * i2400m_fw_dnload 125 * i2400m_fw_check
126 * i2400m_fw_hdr_check
127 * i2400m_fw_dnload
108 * release_firmware 128 * release_firmware
109 * 129 *
110 * i2400m_fw_dnload 130 * i2400m_fw_dnload
111 * i2400m_bootrom_init 131 * i2400m_bootrom_init
112 * i2400m_bm_cmd 132 * i2400m_bm_cmd
113 * i2400m->bus_reset 133 * i2400m_reset
114 * i2400m_dnload_init 134 * i2400m_dnload_init
115 * i2400m_dnload_init_signed 135 * i2400m_dnload_init_signed
116 * i2400m_dnload_init_nonsigned 136 * i2400m_dnload_init_nonsigned
@@ -125,9 +145,14 @@
125 * i2400m->bus_bm_cmd_send() 145 * i2400m->bus_bm_cmd_send()
126 * i2400m->bus_bm_wait_for_ack 146 * i2400m->bus_bm_wait_for_ack
127 * __i2400m_bm_ack_verify 147 * __i2400m_bm_ack_verify
148 * i2400m_is_boot_barker
128 * 149 *
129 * i2400m_bm_cmd_prepare Used by bus-drivers to prep 150 * i2400m_bm_cmd_prepare Used by bus-drivers to prep
130 * commands before sending 151 * commands before sending
152 *
153 * i2400m_pm_notifier Called on Power Management events
154 * i2400m_fw_cache
155 * i2400m_fw_uncache
131 */ 156 */
132#include <linux/firmware.h> 157#include <linux/firmware.h>
133#include <linux/sched.h> 158#include <linux/sched.h>
@@ -175,6 +200,240 @@ EXPORT_SYMBOL_GPL(i2400m_bm_cmd_prepare);
175 200
176 201
177/* 202/*
203 * Database of known barkers.
204 *
205 * A barker is what the device sends indicating he is ready to be
206 * bootloaded. Different versions of the device will send different
207 * barkers. Depending on the barker, it might mean the device wants
208 * some kind of firmware or the other.
209 */
210static struct i2400m_barker_db {
211 __le32 data[4];
212} *i2400m_barker_db;
213static size_t i2400m_barker_db_used, i2400m_barker_db_size;
214
215
216static
217int i2400m_zrealloc_2x(void **ptr, size_t *_count, size_t el_size,
218 gfp_t gfp_flags)
219{
220 size_t old_count = *_count,
221 new_count = old_count ? 2 * old_count : 2,
222 old_size = el_size * old_count,
223 new_size = el_size * new_count;
224 void *nptr = krealloc(*ptr, new_size, gfp_flags);
225 if (nptr) {
226 /* zero the other half or the whole thing if old_count
227 * was zero */
228 if (old_size == 0)
229 memset(nptr, 0, new_size);
230 else
231 memset(nptr + old_size, 0, old_size);
232 *_count = new_count;
233 *ptr = nptr;
234 return 0;
235 } else
236 return -ENOMEM;
237}
238
239
240/*
241 * Add a barker to the database
242 *
243 * This cannot used outside of this module and only at at module_init
244 * time. This is to avoid the need to do locking.
245 */
246static
247int i2400m_barker_db_add(u32 barker_id)
248{
249 int result;
250
251 struct i2400m_barker_db *barker;
252 if (i2400m_barker_db_used >= i2400m_barker_db_size) {
253 result = i2400m_zrealloc_2x(
254 (void **) &i2400m_barker_db, &i2400m_barker_db_size,
255 sizeof(i2400m_barker_db[0]), GFP_KERNEL);
256 if (result < 0)
257 return result;
258 }
259 barker = i2400m_barker_db + i2400m_barker_db_used++;
260 barker->data[0] = le32_to_cpu(barker_id);
261 barker->data[1] = le32_to_cpu(barker_id);
262 barker->data[2] = le32_to_cpu(barker_id);
263 barker->data[3] = le32_to_cpu(barker_id);
264 return 0;
265}
266
267
268void i2400m_barker_db_exit(void)
269{
270 kfree(i2400m_barker_db);
271 i2400m_barker_db = NULL;
272 i2400m_barker_db_size = 0;
273 i2400m_barker_db_used = 0;
274}
275
276
277/*
278 * Helper function to add all the known stable barkers to the barker
279 * database.
280 */
281static
282int i2400m_barker_db_known_barkers(void)
283{
284 int result;
285
286 result = i2400m_barker_db_add(I2400M_NBOOT_BARKER);
287 if (result < 0)
288 goto error_add;
289 result = i2400m_barker_db_add(I2400M_SBOOT_BARKER);
290 if (result < 0)
291 goto error_add;
292 result = i2400m_barker_db_add(I2400M_SBOOT_BARKER_6050);
293 if (result < 0)
294 goto error_add;
295error_add:
296 return result;
297}
298
299
300/*
301 * Initialize the barker database
302 *
303 * This can only be used from the module_init function for this
304 * module; this is to avoid the need to do locking.
305 *
306 * @options: command line argument with extra barkers to
307 * recognize. This is a comma-separated list of 32-bit hex
308 * numbers. They are appended to the existing list. Setting 0
309 * cleans the existing list and starts a new one.
310 */
311int i2400m_barker_db_init(const char *_options)
312{
313 int result;
314 char *options = NULL, *options_orig, *token;
315
316 i2400m_barker_db = NULL;
317 i2400m_barker_db_size = 0;
318 i2400m_barker_db_used = 0;
319
320 result = i2400m_barker_db_known_barkers();
321 if (result < 0)
322 goto error_add;
323 /* parse command line options from i2400m.barkers */
324 if (_options != NULL) {
325 unsigned barker;
326
327 options_orig = kstrdup(_options, GFP_KERNEL);
328 if (options_orig == NULL)
329 goto error_parse;
330 options = options_orig;
331
332 while ((token = strsep(&options, ",")) != NULL) {
333 if (*token == '\0') /* eat joint commas */
334 continue;
335 if (sscanf(token, "%x", &barker) != 1
336 || barker > 0xffffffff) {
337 printk(KERN_ERR "%s: can't recognize "
338 "i2400m.barkers value '%s' as "
339 "a 32-bit number\n",
340 __func__, token);
341 result = -EINVAL;
342 goto error_parse;
343 }
344 if (barker == 0) {
345 /* clean list and start new */
346 i2400m_barker_db_exit();
347 continue;
348 }
349 result = i2400m_barker_db_add(barker);
350 if (result < 0)
351 goto error_add;
352 }
353 kfree(options_orig);
354 }
355 return 0;
356
357error_parse:
358error_add:
359 kfree(i2400m_barker_db);
360 return result;
361}
362
363
364/*
365 * Recognize a boot barker
366 *
367 * @buf: buffer where the boot barker.
368 * @buf_size: size of the buffer (has to be 16 bytes). It is passed
369 * here so the function can check it for the caller.
370 *
371 * Note that as a side effect, upon identifying the obtained boot
372 * barker, this function will set i2400m->barker to point to the right
373 * barker database entry. Subsequent calls to the function will result
374 * in verifying that the same type of boot barker is returned when the
375 * device [re]boots (as long as the same device instance is used).
376 *
377 * Return: 0 if @buf matches a known boot barker. -ENOENT if the
378 * buffer in @buf doesn't match any boot barker in the database or
379 * -EILSEQ if the buffer doesn't have the right size.
380 */
381int i2400m_is_boot_barker(struct i2400m *i2400m,
382 const void *buf, size_t buf_size)
383{
384 int result;
385 struct device *dev = i2400m_dev(i2400m);
386 struct i2400m_barker_db *barker;
387 int i;
388
389 result = -ENOENT;
390 if (buf_size != sizeof(i2400m_barker_db[i].data))
391 return result;
392
393 /* Short circuit if we have already discovered the barker
394 * associated with the device. */
395 if (i2400m->barker
396 && !memcmp(buf, i2400m->barker, sizeof(i2400m->barker->data))) {
397 unsigned index = (i2400m->barker - i2400m_barker_db)
398 / sizeof(*i2400m->barker);
399 d_printf(2, dev, "boot barker cache-confirmed #%u/%08x\n",
400 index, le32_to_cpu(i2400m->barker->data[0]));
401 return 0;
402 }
403
404 for (i = 0; i < i2400m_barker_db_used; i++) {
405 barker = &i2400m_barker_db[i];
406 BUILD_BUG_ON(sizeof(barker->data) != 16);
407 if (memcmp(buf, barker->data, sizeof(barker->data)))
408 continue;
409
410 if (i2400m->barker == NULL) {
411 i2400m->barker = barker;
412 d_printf(1, dev, "boot barker set to #%u/%08x\n",
413 i, le32_to_cpu(barker->data[0]));
414 if (barker->data[0] == le32_to_cpu(I2400M_NBOOT_BARKER))
415 i2400m->sboot = 0;
416 else
417 i2400m->sboot = 1;
418 } else if (i2400m->barker != barker) {
419 dev_err(dev, "HW inconsistency: device "
420 "reports a different boot barker "
421 "than set (from %08x to %08x)\n",
422 le32_to_cpu(i2400m->barker->data[0]),
423 le32_to_cpu(barker->data[0]));
424 result = -EIO;
425 } else
426 d_printf(2, dev, "boot barker confirmed #%u/%08x\n",
427 i, le32_to_cpu(barker->data[0]));
428 result = 0;
429 break;
430 }
431 return result;
432}
433EXPORT_SYMBOL_GPL(i2400m_is_boot_barker);
434
435
436/*
178 * Verify the ack data received 437 * Verify the ack data received
179 * 438 *
180 * Given a reply to a boot mode command, chew it and verify everything 439 * Given a reply to a boot mode command, chew it and verify everything
@@ -204,20 +463,10 @@ ssize_t __i2400m_bm_ack_verify(struct i2400m *i2400m, int opcode,
204 opcode, ack_size, sizeof(*ack)); 463 opcode, ack_size, sizeof(*ack));
205 goto error_ack_short; 464 goto error_ack_short;
206 } 465 }
207 if (ack_size == sizeof(i2400m_NBOOT_BARKER) 466 result = i2400m_is_boot_barker(i2400m, ack, ack_size);
208 && memcmp(ack, i2400m_NBOOT_BARKER, sizeof(*ack)) == 0) { 467 if (result >= 0) {
209 result = -ERESTARTSYS; 468 result = -ERESTARTSYS;
210 i2400m->sboot = 0; 469 d_printf(6, dev, "boot-mode cmd %d: HW boot barker\n", opcode);
211 d_printf(6, dev, "boot-mode cmd %d: "
212 "HW non-signed boot barker\n", opcode);
213 goto error_reboot;
214 }
215 if (ack_size == sizeof(i2400m_SBOOT_BARKER)
216 && memcmp(ack, i2400m_SBOOT_BARKER, sizeof(*ack)) == 0) {
217 result = -ERESTARTSYS;
218 i2400m->sboot = 1;
219 d_printf(6, dev, "boot-mode cmd %d: HW signed reboot barker\n",
220 opcode);
221 goto error_reboot; 470 goto error_reboot;
222 } 471 }
223 if (ack_size == sizeof(i2400m_ACK_BARKER) 472 if (ack_size == sizeof(i2400m_ACK_BARKER)
@@ -343,7 +592,6 @@ ssize_t i2400m_bm_cmd(struct i2400m *i2400m,
343 BUG_ON(i2400m->boot_mode == 0); 592 BUG_ON(i2400m->boot_mode == 0);
344 593
345 if (cmd != NULL) { /* send the command */ 594 if (cmd != NULL) { /* send the command */
346 memcpy(i2400m->bm_cmd_buf, cmd, cmd_size);
347 result = i2400m->bus_bm_cmd_send(i2400m, cmd, cmd_size, flags); 595 result = i2400m->bus_bm_cmd_send(i2400m, cmd, cmd_size, flags);
348 if (result < 0) 596 if (result < 0)
349 goto error_cmd_send; 597 goto error_cmd_send;
@@ -432,8 +680,8 @@ static int i2400m_download_chunk(struct i2400m *i2400m, const void *chunk,
432 * Download a BCF file's sections to the device 680 * Download a BCF file's sections to the device
433 * 681 *
434 * @i2400m: device descriptor 682 * @i2400m: device descriptor
435 * @bcf: pointer to firmware data (followed by the payloads). Assumed 683 * @bcf: pointer to firmware data (first header followed by the
436 * verified and consistent. 684 * payloads). Assumed verified and consistent.
437 * @bcf_len: length (in bytes) of the @bcf buffer. 685 * @bcf_len: length (in bytes) of the @bcf buffer.
438 * 686 *
439 * Returns: < 0 errno code on error or the offset to the jump instruction. 687 * Returns: < 0 errno code on error or the offset to the jump instruction.
@@ -472,14 +720,17 @@ ssize_t i2400m_dnload_bcf(struct i2400m *i2400m,
472 "downloading section #%zu (@%zu %zu B) to 0x%08x\n", 720 "downloading section #%zu (@%zu %zu B) to 0x%08x\n",
473 section, offset, sizeof(*bh) + data_size, 721 section, offset, sizeof(*bh) + data_size,
474 le32_to_cpu(bh->target_addr)); 722 le32_to_cpu(bh->target_addr));
475 if (i2400m_brh_get_opcode(bh) == I2400M_BRH_SIGNED_JUMP) { 723 /*
476 /* Secure boot needs to stop here */ 724 * We look for JUMP cmd from the bootmode header,
477 d_printf(5, dev, "signed jump found @%zu\n", offset); 725 * either I2400M_BRH_SIGNED_JUMP for secure boot
726 * or I2400M_BRH_JUMP for unsecure boot, the last chunk
727 * should be the bootmode header with JUMP cmd.
728 */
729 if (i2400m_brh_get_opcode(bh) == I2400M_BRH_SIGNED_JUMP ||
730 i2400m_brh_get_opcode(bh) == I2400M_BRH_JUMP) {
731 d_printf(5, dev, "jump found @%zu\n", offset);
478 break; 732 break;
479 } 733 }
480 if (offset + section_size == bcf_len)
481 /* Non-secure boot stops here */
482 break;
483 if (offset + section_size > bcf_len) { 734 if (offset + section_size > bcf_len) {
484 dev_err(dev, "fw %s: bad section #%zu, " 735 dev_err(dev, "fw %s: bad section #%zu, "
485 "end (@%zu) beyond EOF (@%zu)\n", 736 "end (@%zu) beyond EOF (@%zu)\n",
@@ -510,13 +761,30 @@ error_send:
510 761
511 762
512/* 763/*
764 * Indicate if the device emitted a reboot barker that indicates
765 * "signed boot"
766 */
767static
768unsigned i2400m_boot_is_signed(struct i2400m *i2400m)
769{
770 return likely(i2400m->sboot);
771}
772
773
774/*
513 * Do the final steps of uploading firmware 775 * Do the final steps of uploading firmware
514 * 776 *
777 * @bcf_hdr: BCF header we are actually using
778 * @bcf: pointer to the firmware image (which matches the first header
779 * that is followed by the actual payloads).
780 * @offset: [byte] offset into @bcf for the command we need to send.
781 *
515 * Depending on the boot mode (signed vs non-signed), different 782 * Depending on the boot mode (signed vs non-signed), different
516 * actions need to be taken. 783 * actions need to be taken.
517 */ 784 */
518static 785static
519int i2400m_dnload_finalize(struct i2400m *i2400m, 786int i2400m_dnload_finalize(struct i2400m *i2400m,
787 const struct i2400m_bcf_hdr *bcf_hdr,
520 const struct i2400m_bcf_hdr *bcf, size_t offset) 788 const struct i2400m_bcf_hdr *bcf, size_t offset)
521{ 789{
522 int ret = 0; 790 int ret = 0;
@@ -530,10 +798,14 @@ int i2400m_dnload_finalize(struct i2400m *i2400m,
530 798
531 d_fnstart(3, dev, "offset %zu\n", offset); 799 d_fnstart(3, dev, "offset %zu\n", offset);
532 cmd = (void *) bcf + offset; 800 cmd = (void *) bcf + offset;
533 if (i2400m->sboot == 0) { 801 if (i2400m_boot_is_signed(i2400m) == 0) {
534 struct i2400m_bootrom_header jump_ack; 802 struct i2400m_bootrom_header jump_ack;
535 d_printf(1, dev, "unsecure boot, jumping to 0x%08x\n", 803 d_printf(1, dev, "unsecure boot, jumping to 0x%08x\n",
536 le32_to_cpu(cmd->target_addr)); 804 le32_to_cpu(cmd->target_addr));
805 cmd_buf = i2400m->bm_cmd_buf;
806 memcpy(&cmd_buf->cmd, cmd, sizeof(*cmd));
807 cmd = &cmd_buf->cmd;
808 /* now cmd points to the actual bootrom_header in cmd_buf */
537 i2400m_brh_set_opcode(cmd, I2400M_BRH_JUMP); 809 i2400m_brh_set_opcode(cmd, I2400M_BRH_JUMP);
538 cmd->data_size = 0; 810 cmd->data_size = 0;
539 ret = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd), 811 ret = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
@@ -544,12 +816,13 @@ int i2400m_dnload_finalize(struct i2400m *i2400m,
544 cmd_buf = i2400m->bm_cmd_buf; 816 cmd_buf = i2400m->bm_cmd_buf;
545 memcpy(&cmd_buf->cmd, cmd, sizeof(*cmd)); 817 memcpy(&cmd_buf->cmd, cmd, sizeof(*cmd));
546 signature_block_offset = 818 signature_block_offset =
547 sizeof(*bcf) 819 sizeof(*bcf_hdr)
548 + le32_to_cpu(bcf->key_size) * sizeof(u32) 820 + le32_to_cpu(bcf_hdr->key_size) * sizeof(u32)
549 + le32_to_cpu(bcf->exponent_size) * sizeof(u32); 821 + le32_to_cpu(bcf_hdr->exponent_size) * sizeof(u32);
550 signature_block_size = 822 signature_block_size =
551 le32_to_cpu(bcf->modulus_size) * sizeof(u32); 823 le32_to_cpu(bcf_hdr->modulus_size) * sizeof(u32);
552 memcpy(cmd_buf->cmd_pl, (void *) bcf + signature_block_offset, 824 memcpy(cmd_buf->cmd_pl,
825 (void *) bcf_hdr + signature_block_offset,
553 signature_block_size); 826 signature_block_size);
554 ret = i2400m_bm_cmd(i2400m, &cmd_buf->cmd, 827 ret = i2400m_bm_cmd(i2400m, &cmd_buf->cmd,
555 sizeof(cmd_buf->cmd) + signature_block_size, 828 sizeof(cmd_buf->cmd) + signature_block_size,
@@ -565,7 +838,7 @@ int i2400m_dnload_finalize(struct i2400m *i2400m,
565 * 838 *
566 * @i2400m: device descriptor 839 * @i2400m: device descriptor
567 * @flags: 840 * @flags:
568 * I2400M_BRI_SOFT: a reboot notification has been seen 841 * I2400M_BRI_SOFT: a reboot barker has been seen
569 * already, so don't wait for it. 842 * already, so don't wait for it.
570 * 843 *
571 * I2400M_BRI_NO_REBOOT: Don't send a reboot command, but wait 844 * I2400M_BRI_NO_REBOOT: Don't send a reboot command, but wait
@@ -576,17 +849,15 @@ int i2400m_dnload_finalize(struct i2400m *i2400m,
576 * 849 *
577 * < 0 errno code on error, 0 if ok. 850 * < 0 errno code on error, 0 if ok.
578 * 851 *
579 * i2400m->sboot set to 0 for unsecure boot process, 1 for secure
580 * boot process.
581 *
582 * Description: 852 * Description:
583 * 853 *
584 * Tries hard enough to put the device in boot-mode. There are two 854 * Tries hard enough to put the device in boot-mode. There are two
585 * main phases to this: 855 * main phases to this:
586 * 856 *
587 * a. (1) send a reboot command and (2) get a reboot barker 857 * a. (1) send a reboot command and (2) get a reboot barker
588 * b. (1) ack the reboot sending a reboot barker and (2) getting an 858 *
589 * ack barker in return 859 * b. (1) echo/ack the reboot sending the reboot barker back and (2)
860 * getting an ack barker in return
590 * 861 *
591 * We want to skip (a) in some cases [soft]. The state machine is 862 * We want to skip (a) in some cases [soft]. The state machine is
592 * horrible, but it is basically: on each phase, send what has to be 863 * horrible, but it is basically: on each phase, send what has to be
@@ -594,6 +865,16 @@ int i2400m_dnload_finalize(struct i2400m *i2400m,
594 * have to backtrack and retry, so we keep a max tries counter for 865 * have to backtrack and retry, so we keep a max tries counter for
595 * that. 866 * that.
596 * 867 *
868 * It sucks because we don't know ahead of time which is going to be
869 * the reboot barker (the device might send different ones depending
870 * on its EEPROM config) and once the device reboots and waits for the
871 * echo/ack reboot barker being sent back, it doesn't understand
872 * anything else. So we can be left at the point where we don't know
873 * what to send to it -- cold reset and bus reset seem to have little
874 * effect. So the function iterates (in this case) through all the
875 * known barkers and tries them all until an ACK is
876 * received. Otherwise, it gives up.
877 *
597 * If we get a timeout after sending a warm reset, we do it again. 878 * If we get a timeout after sending a warm reset, we do it again.
598 */ 879 */
599int i2400m_bootrom_init(struct i2400m *i2400m, enum i2400m_bri flags) 880int i2400m_bootrom_init(struct i2400m *i2400m, enum i2400m_bri flags)
@@ -602,10 +883,11 @@ int i2400m_bootrom_init(struct i2400m *i2400m, enum i2400m_bri flags)
602 struct device *dev = i2400m_dev(i2400m); 883 struct device *dev = i2400m_dev(i2400m);
603 struct i2400m_bootrom_header *cmd; 884 struct i2400m_bootrom_header *cmd;
604 struct i2400m_bootrom_header ack; 885 struct i2400m_bootrom_header ack;
605 int count = I2400M_BOOT_RETRIES; 886 int count = i2400m->bus_bm_retries;
606 int ack_timeout_cnt = 1; 887 int ack_timeout_cnt = 1;
888 unsigned i;
607 889
608 BUILD_BUG_ON(sizeof(*cmd) != sizeof(i2400m_NBOOT_BARKER)); 890 BUILD_BUG_ON(sizeof(*cmd) != sizeof(i2400m_barker_db[0].data));
609 BUILD_BUG_ON(sizeof(ack) != sizeof(i2400m_ACK_BARKER)); 891 BUILD_BUG_ON(sizeof(ack) != sizeof(i2400m_ACK_BARKER));
610 892
611 d_fnstart(4, dev, "(i2400m %p flags 0x%08x)\n", i2400m, flags); 893 d_fnstart(4, dev, "(i2400m %p flags 0x%08x)\n", i2400m, flags);
@@ -614,27 +896,59 @@ int i2400m_bootrom_init(struct i2400m *i2400m, enum i2400m_bri flags)
614 if (flags & I2400M_BRI_SOFT) 896 if (flags & I2400M_BRI_SOFT)
615 goto do_reboot_ack; 897 goto do_reboot_ack;
616do_reboot: 898do_reboot:
899 ack_timeout_cnt = 1;
617 if (--count < 0) 900 if (--count < 0)
618 goto error_timeout; 901 goto error_timeout;
619 d_printf(4, dev, "device reboot: reboot command [%d # left]\n", 902 d_printf(4, dev, "device reboot: reboot command [%d # left]\n",
620 count); 903 count);
621 if ((flags & I2400M_BRI_NO_REBOOT) == 0) 904 if ((flags & I2400M_BRI_NO_REBOOT) == 0)
622 i2400m->bus_reset(i2400m, I2400M_RT_WARM); 905 i2400m_reset(i2400m, I2400M_RT_WARM);
623 result = i2400m_bm_cmd(i2400m, NULL, 0, &ack, sizeof(ack), 906 result = i2400m_bm_cmd(i2400m, NULL, 0, &ack, sizeof(ack),
624 I2400M_BM_CMD_RAW); 907 I2400M_BM_CMD_RAW);
625 flags &= ~I2400M_BRI_NO_REBOOT; 908 flags &= ~I2400M_BRI_NO_REBOOT;
626 switch (result) { 909 switch (result) {
627 case -ERESTARTSYS: 910 case -ERESTARTSYS:
911 /*
912 * at this point, i2400m_bm_cmd(), through
913 * __i2400m_bm_ack_process(), has updated
914 * i2400m->barker and we are good to go.
915 */
628 d_printf(4, dev, "device reboot: got reboot barker\n"); 916 d_printf(4, dev, "device reboot: got reboot barker\n");
629 break; 917 break;
630 case -EISCONN: /* we don't know how it got here...but we follow it */ 918 case -EISCONN: /* we don't know how it got here...but we follow it */
631 d_printf(4, dev, "device reboot: got ack barker - whatever\n"); 919 d_printf(4, dev, "device reboot: got ack barker - whatever\n");
632 goto do_reboot; 920 goto do_reboot;
633 case -ETIMEDOUT: /* device has timed out, we might be in boot 921 case -ETIMEDOUT:
634 * mode already and expecting an ack, let's try 922 /*
635 * that */ 923 * Device has timed out, we might be in boot mode
636 dev_info(dev, "warm reset timed out, trying an ack\n"); 924 * already and expecting an ack; if we don't know what
637 goto do_reboot_ack; 925 * the barker is, we just send them all. Cold reset
926 * and bus reset don't work. Beats me.
927 */
928 if (i2400m->barker != NULL) {
929 dev_err(dev, "device boot: reboot barker timed out, "
930 "trying (set) %08x echo/ack\n",
931 le32_to_cpu(i2400m->barker->data[0]));
932 goto do_reboot_ack;
933 }
934 for (i = 0; i < i2400m_barker_db_used; i++) {
935 struct i2400m_barker_db *barker = &i2400m_barker_db[i];
936 memcpy(cmd, barker->data, sizeof(barker->data));
937 result = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
938 &ack, sizeof(ack),
939 I2400M_BM_CMD_RAW);
940 if (result == -EISCONN) {
941 dev_warn(dev, "device boot: got ack barker "
942 "after sending echo/ack barker "
943 "#%d/%08x; rebooting j.i.c.\n",
944 i, le32_to_cpu(barker->data[0]));
945 flags &= ~I2400M_BRI_NO_REBOOT;
946 goto do_reboot;
947 }
948 }
949 dev_err(dev, "device boot: tried all the echo/acks, could "
950 "not get device to respond; giving up");
951 result = -ESHUTDOWN;
638 case -EPROTO: 952 case -EPROTO:
639 case -ESHUTDOWN: /* dev is gone */ 953 case -ESHUTDOWN: /* dev is gone */
640 case -EINTR: /* user cancelled */ 954 case -EINTR: /* user cancelled */
@@ -642,6 +956,7 @@ do_reboot:
642 default: 956 default:
643 dev_err(dev, "device reboot: error %d while waiting " 957 dev_err(dev, "device reboot: error %d while waiting "
644 "for reboot barker - rebooting\n", result); 958 "for reboot barker - rebooting\n", result);
959 d_dump(1, dev, &ack, result);
645 goto do_reboot; 960 goto do_reboot;
646 } 961 }
647 /* At this point we ack back with 4 REBOOT barkers and expect 962 /* At this point we ack back with 4 REBOOT barkers and expect
@@ -650,12 +965,7 @@ do_reboot:
650 * notification and report it as -EISCONN. */ 965 * notification and report it as -EISCONN. */
651do_reboot_ack: 966do_reboot_ack:
652 d_printf(4, dev, "device reboot ack: sending ack [%d # left]\n", count); 967 d_printf(4, dev, "device reboot ack: sending ack [%d # left]\n", count);
653 if (i2400m->sboot == 0) 968 memcpy(cmd, i2400m->barker->data, sizeof(i2400m->barker->data));
654 memcpy(cmd, i2400m_NBOOT_BARKER,
655 sizeof(i2400m_NBOOT_BARKER));
656 else
657 memcpy(cmd, i2400m_SBOOT_BARKER,
658 sizeof(i2400m_SBOOT_BARKER));
659 result = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd), 969 result = i2400m_bm_cmd(i2400m, cmd, sizeof(*cmd),
660 &ack, sizeof(ack), I2400M_BM_CMD_RAW); 970 &ack, sizeof(ack), I2400M_BM_CMD_RAW);
661 switch (result) { 971 switch (result) {
@@ -668,10 +978,8 @@ do_reboot_ack:
668 d_printf(4, dev, "reboot ack: got ack barker - good\n"); 978 d_printf(4, dev, "reboot ack: got ack barker - good\n");
669 break; 979 break;
670 case -ETIMEDOUT: /* no response, maybe it is the other type? */ 980 case -ETIMEDOUT: /* no response, maybe it is the other type? */
671 if (ack_timeout_cnt-- >= 0) { 981 if (ack_timeout_cnt-- < 0) {
672 d_printf(4, dev, "reboot ack timedout: " 982 d_printf(4, dev, "reboot ack timedout: retrying\n");
673 "trying the other type?\n");
674 i2400m->sboot = !i2400m->sboot;
675 goto do_reboot_ack; 983 goto do_reboot_ack;
676 } else { 984 } else {
677 dev_err(dev, "reboot ack timedout too long: " 985 dev_err(dev, "reboot ack timedout too long: "
@@ -839,32 +1147,29 @@ int i2400m_dnload_init_signed(struct i2400m *i2400m,
839 * (signed or non-signed). 1147 * (signed or non-signed).
840 */ 1148 */
841static 1149static
842int i2400m_dnload_init(struct i2400m *i2400m, const struct i2400m_bcf_hdr *bcf) 1150int i2400m_dnload_init(struct i2400m *i2400m,
1151 const struct i2400m_bcf_hdr *bcf_hdr)
843{ 1152{
844 int result; 1153 int result;
845 struct device *dev = i2400m_dev(i2400m); 1154 struct device *dev = i2400m_dev(i2400m);
846 u32 module_id = le32_to_cpu(bcf->module_id);
847 1155
848 if (i2400m->sboot == 0 1156 if (i2400m_boot_is_signed(i2400m)) {
849 && (module_id & I2400M_BCF_MOD_ID_POKES) == 0) { 1157 d_printf(1, dev, "signed boot\n");
850 /* non-signed boot process without pokes */ 1158 result = i2400m_dnload_init_signed(i2400m, bcf_hdr);
851 result = i2400m_dnload_init_nonsigned(i2400m);
852 if (result == -ERESTARTSYS) 1159 if (result == -ERESTARTSYS)
853 return result; 1160 return result;
854 if (result < 0) 1161 if (result < 0)
855 dev_err(dev, "fw %s: non-signed download " 1162 dev_err(dev, "firmware %s: signed boot download "
856 "initialization failed: %d\n", 1163 "initialization failed: %d\n",
857 i2400m->fw_name, result); 1164 i2400m->fw_name, result);
858 } else if (i2400m->sboot == 0 1165 } else {
859 && (module_id & I2400M_BCF_MOD_ID_POKES)) { 1166 /* non-signed boot process without pokes */
860 /* non-signed boot process with pokes, nothing to do */ 1167 d_printf(1, dev, "non-signed boot\n");
861 result = 0; 1168 result = i2400m_dnload_init_nonsigned(i2400m);
862 } else { /* signed boot process */
863 result = i2400m_dnload_init_signed(i2400m, bcf);
864 if (result == -ERESTARTSYS) 1169 if (result == -ERESTARTSYS)
865 return result; 1170 return result;
866 if (result < 0) 1171 if (result < 0)
867 dev_err(dev, "fw %s: signed boot download " 1172 dev_err(dev, "firmware %s: non-signed download "
868 "initialization failed: %d\n", 1173 "initialization failed: %d\n",
869 i2400m->fw_name, result); 1174 i2400m->fw_name, result);
870 } 1175 }
@@ -873,74 +1178,201 @@ int i2400m_dnload_init(struct i2400m *i2400m, const struct i2400m_bcf_hdr *bcf)
873 1178
874 1179
875/* 1180/*
876 * Run quick consistency tests on the firmware file 1181 * Run consistency tests on the firmware file and load up headers
877 * 1182 *
878 * Check for the firmware being made for the i2400m device, 1183 * Check for the firmware being made for the i2400m device,
879 * etc...These checks are mostly informative, as the device will make 1184 * etc...These checks are mostly informative, as the device will make
880 * them too; but the driver's response is more informative on what 1185 * them too; but the driver's response is more informative on what
881 * went wrong. 1186 * went wrong.
1187 *
1188 * This will also look at all the headers present on the firmware
1189 * file, and update i2400m->fw_bcf_hdr to point to them.
882 */ 1190 */
883static 1191static
884int i2400m_fw_check(struct i2400m *i2400m, 1192int i2400m_fw_hdr_check(struct i2400m *i2400m,
885 const struct i2400m_bcf_hdr *bcf, 1193 const struct i2400m_bcf_hdr *bcf_hdr,
886 size_t bcf_size) 1194 size_t index, size_t offset)
887{ 1195{
888 int result;
889 struct device *dev = i2400m_dev(i2400m); 1196 struct device *dev = i2400m_dev(i2400m);
1197
890 unsigned module_type, header_len, major_version, minor_version, 1198 unsigned module_type, header_len, major_version, minor_version,
891 module_id, module_vendor, date, size; 1199 module_id, module_vendor, date, size;
892 1200
893 /* Check hard errors */ 1201 module_type = bcf_hdr->module_type;
894 result = -EINVAL; 1202 header_len = sizeof(u32) * le32_to_cpu(bcf_hdr->header_len);
895 if (bcf_size < sizeof(*bcf)) { /* big enough header? */ 1203 major_version = (le32_to_cpu(bcf_hdr->header_version) & 0xffff0000)
896 dev_err(dev, "firmware %s too short: " 1204 >> 16;
897 "%zu B vs %zu (at least) expected\n", 1205 minor_version = le32_to_cpu(bcf_hdr->header_version) & 0x0000ffff;
898 i2400m->fw_name, bcf_size, sizeof(*bcf)); 1206 module_id = le32_to_cpu(bcf_hdr->module_id);
899 goto error; 1207 module_vendor = le32_to_cpu(bcf_hdr->module_vendor);
900 } 1208 date = le32_to_cpu(bcf_hdr->date);
1209 size = sizeof(u32) * le32_to_cpu(bcf_hdr->size);
901 1210
902 module_type = bcf->module_type; 1211 d_printf(1, dev, "firmware %s #%zd@%08zx: BCF header "
903 header_len = sizeof(u32) * le32_to_cpu(bcf->header_len); 1212 "type:vendor:id 0x%x:%x:%x v%u.%u (%u/%u B) built %08x\n",
904 major_version = le32_to_cpu(bcf->header_version) & 0xffff0000 >> 16; 1213 i2400m->fw_name, index, offset,
905 minor_version = le32_to_cpu(bcf->header_version) & 0x0000ffff; 1214 module_type, module_vendor, module_id,
906 module_id = le32_to_cpu(bcf->module_id); 1215 major_version, minor_version, header_len, size, date);
907 module_vendor = le32_to_cpu(bcf->module_vendor);
908 date = le32_to_cpu(bcf->date);
909 size = sizeof(u32) * le32_to_cpu(bcf->size);
910
911 if (bcf_size != size) { /* annoyingly paranoid */
912 dev_err(dev, "firmware %s: bad size, got "
913 "%zu B vs %u expected\n",
914 i2400m->fw_name, bcf_size, size);
915 goto error;
916 }
917 1216
918 d_printf(2, dev, "type 0x%x id 0x%x vendor 0x%x; header v%u.%u (%zu B) " 1217 /* Hard errors */
919 "date %08x (%zu B)\n", 1218 if (major_version != 1) {
920 module_type, module_id, module_vendor, 1219 dev_err(dev, "firmware %s #%zd@%08zx: major header version "
921 major_version, minor_version, (size_t) header_len, 1220 "v%u.%u not supported\n",
922 date, (size_t) size); 1221 i2400m->fw_name, index, offset,
1222 major_version, minor_version);
1223 return -EBADF;
1224 }
923 1225
924 if (module_type != 6) { /* built for the right hardware? */ 1226 if (module_type != 6) { /* built for the right hardware? */
925 dev_err(dev, "bad fw %s: unexpected module type 0x%x; " 1227 dev_err(dev, "firmware %s #%zd@%08zx: unexpected module "
926 "aborting\n", i2400m->fw_name, module_type); 1228 "type 0x%x; aborting\n",
927 goto error; 1229 i2400m->fw_name, index, offset,
1230 module_type);
1231 return -EBADF;
1232 }
1233
1234 if (module_vendor != 0x8086) {
1235 dev_err(dev, "firmware %s #%zd@%08zx: unexpected module "
1236 "vendor 0x%x; aborting\n",
1237 i2400m->fw_name, index, offset, module_vendor);
1238 return -EBADF;
928 } 1239 }
929 1240
930 /* Check soft-er errors */
931 result = 0;
932 if (module_vendor != 0x8086)
933 dev_err(dev, "bad fw %s? unexpected vendor 0x%04x\n",
934 i2400m->fw_name, module_vendor);
935 if (date < 0x20080300) 1241 if (date < 0x20080300)
936 dev_err(dev, "bad fw %s? build date too old %08x\n", 1242 dev_warn(dev, "firmware %s #%zd@%08zx: build date %08x "
937 i2400m->fw_name, date); 1243 "too old; unsupported\n",
938error: 1244 i2400m->fw_name, index, offset, date);
1245 return 0;
1246}
1247
1248
1249/*
1250 * Run consistency tests on the firmware file and load up headers
1251 *
1252 * Check for the firmware being made for the i2400m device,
1253 * etc...These checks are mostly informative, as the device will make
1254 * them too; but the driver's response is more informative on what
1255 * went wrong.
1256 *
1257 * This will also look at all the headers present on the firmware
1258 * file, and update i2400m->fw_hdrs to point to them.
1259 */
1260static
1261int i2400m_fw_check(struct i2400m *i2400m, const void *bcf, size_t bcf_size)
1262{
1263 int result;
1264 struct device *dev = i2400m_dev(i2400m);
1265 size_t headers = 0;
1266 const struct i2400m_bcf_hdr *bcf_hdr;
1267 const void *itr, *next, *top;
1268 size_t slots = 0, used_slots = 0;
1269
1270 for (itr = bcf, top = itr + bcf_size;
1271 itr < top;
1272 headers++, itr = next) {
1273 size_t leftover, offset, header_len, size;
1274
1275 leftover = top - itr;
1276 offset = itr - (const void *) bcf;
1277 if (leftover <= sizeof(*bcf_hdr)) {
1278 dev_err(dev, "firmware %s: %zu B left at @%zx, "
1279 "not enough for BCF header\n",
1280 i2400m->fw_name, leftover, offset);
1281 break;
1282 }
1283 bcf_hdr = itr;
1284 /* Only the first header is supposed to be followed by
1285 * payload */
1286 header_len = sizeof(u32) * le32_to_cpu(bcf_hdr->header_len);
1287 size = sizeof(u32) * le32_to_cpu(bcf_hdr->size);
1288 if (headers == 0)
1289 next = itr + size;
1290 else
1291 next = itr + header_len;
1292
1293 result = i2400m_fw_hdr_check(i2400m, bcf_hdr, headers, offset);
1294 if (result < 0)
1295 continue;
1296 if (used_slots + 1 >= slots) {
1297 /* +1 -> we need to account for the one we'll
1298 * occupy and at least an extra one for
1299 * always being NULL */
1300 result = i2400m_zrealloc_2x(
1301 (void **) &i2400m->fw_hdrs, &slots,
1302 sizeof(i2400m->fw_hdrs[0]),
1303 GFP_KERNEL);
1304 if (result < 0)
1305 goto error_zrealloc;
1306 }
1307 i2400m->fw_hdrs[used_slots] = bcf_hdr;
1308 used_slots++;
1309 }
1310 if (headers == 0) {
1311 dev_err(dev, "firmware %s: no usable headers found\n",
1312 i2400m->fw_name);
1313 result = -EBADF;
1314 } else
1315 result = 0;
1316error_zrealloc:
939 return result; 1317 return result;
940} 1318}
941 1319
942 1320
943/* 1321/*
1322 * Match a barker to a BCF header module ID
1323 *
1324 * The device sends a barker which tells the firmware loader which
1325 * header in the BCF file has to be used. This does the matching.
1326 */
1327static
1328unsigned i2400m_bcf_hdr_match(struct i2400m *i2400m,
1329 const struct i2400m_bcf_hdr *bcf_hdr)
1330{
1331 u32 barker = le32_to_cpu(i2400m->barker->data[0])
1332 & 0x7fffffff;
1333 u32 module_id = le32_to_cpu(bcf_hdr->module_id)
1334 & 0x7fffffff; /* high bit used for something else */
1335
1336 /* special case for 5x50 */
1337 if (barker == I2400M_SBOOT_BARKER && module_id == 0)
1338 return 1;
1339 if (module_id == barker)
1340 return 1;
1341 return 0;
1342}
1343
1344static
1345const struct i2400m_bcf_hdr *i2400m_bcf_hdr_find(struct i2400m *i2400m)
1346{
1347 struct device *dev = i2400m_dev(i2400m);
1348 const struct i2400m_bcf_hdr **bcf_itr, *bcf_hdr;
1349 unsigned i = 0;
1350 u32 barker = le32_to_cpu(i2400m->barker->data[0]);
1351
1352 d_printf(2, dev, "finding BCF header for barker %08x\n", barker);
1353 if (barker == I2400M_NBOOT_BARKER) {
1354 bcf_hdr = i2400m->fw_hdrs[0];
1355 d_printf(1, dev, "using BCF header #%u/%08x for non-signed "
1356 "barker\n", 0, le32_to_cpu(bcf_hdr->module_id));
1357 return bcf_hdr;
1358 }
1359 for (bcf_itr = i2400m->fw_hdrs; *bcf_itr != NULL; bcf_itr++, i++) {
1360 bcf_hdr = *bcf_itr;
1361 if (i2400m_bcf_hdr_match(i2400m, bcf_hdr)) {
1362 d_printf(1, dev, "hit on BCF hdr #%u/%08x\n",
1363 i, le32_to_cpu(bcf_hdr->module_id));
1364 return bcf_hdr;
1365 } else
1366 d_printf(1, dev, "miss on BCF hdr #%u/%08x\n",
1367 i, le32_to_cpu(bcf_hdr->module_id));
1368 }
1369 dev_err(dev, "cannot find a matching BCF header for barker %08x\n",
1370 barker);
1371 return NULL;
1372}
1373
1374
1375/*
944 * Download the firmware to the device 1376 * Download the firmware to the device
945 * 1377 *
946 * @i2400m: device descriptor 1378 * @i2400m: device descriptor
@@ -956,14 +1388,16 @@ error:
956 */ 1388 */
957static 1389static
958int i2400m_fw_dnload(struct i2400m *i2400m, const struct i2400m_bcf_hdr *bcf, 1390int i2400m_fw_dnload(struct i2400m *i2400m, const struct i2400m_bcf_hdr *bcf,
959 size_t bcf_size, enum i2400m_bri flags) 1391 size_t fw_size, enum i2400m_bri flags)
960{ 1392{
961 int ret = 0; 1393 int ret = 0;
962 struct device *dev = i2400m_dev(i2400m); 1394 struct device *dev = i2400m_dev(i2400m);
963 int count = i2400m->bus_bm_retries; 1395 int count = i2400m->bus_bm_retries;
1396 const struct i2400m_bcf_hdr *bcf_hdr;
1397 size_t bcf_size;
964 1398
965 d_fnstart(5, dev, "(i2400m %p bcf %p size %zu)\n", 1399 d_fnstart(5, dev, "(i2400m %p bcf %p fw size %zu)\n",
966 i2400m, bcf, bcf_size); 1400 i2400m, bcf, fw_size);
967 i2400m->boot_mode = 1; 1401 i2400m->boot_mode = 1;
968 wmb(); /* Make sure other readers see it */ 1402 wmb(); /* Make sure other readers see it */
969hw_reboot: 1403hw_reboot:
@@ -985,13 +1419,28 @@ hw_reboot:
985 * Initialize the download, push the bytes to the device and 1419 * Initialize the download, push the bytes to the device and
986 * then jump to the new firmware. Note @ret is passed with the 1420 * then jump to the new firmware. Note @ret is passed with the
987 * offset of the jump instruction to _dnload_finalize() 1421 * offset of the jump instruction to _dnload_finalize()
1422 *
1423 * Note we need to use the BCF header in the firmware image
1424 * that matches the barker that the device sent when it
1425 * rebooted, so it has to be passed along.
988 */ 1426 */
989 ret = i2400m_dnload_init(i2400m, bcf); /* Init device's dnload */ 1427 ret = -EBADF;
1428 bcf_hdr = i2400m_bcf_hdr_find(i2400m);
1429 if (bcf_hdr == NULL)
1430 goto error_bcf_hdr_find;
1431
1432 ret = i2400m_dnload_init(i2400m, bcf_hdr);
990 if (ret == -ERESTARTSYS) 1433 if (ret == -ERESTARTSYS)
991 goto error_dev_rebooted; 1434 goto error_dev_rebooted;
992 if (ret < 0) 1435 if (ret < 0)
993 goto error_dnload_init; 1436 goto error_dnload_init;
994 1437
1438 /*
1439 * bcf_size refers to one header size plus the fw sections size
1440 * indicated by the header,ie. if there are other extended headers
1441 * at the tail, they are not counted
1442 */
1443 bcf_size = sizeof(u32) * le32_to_cpu(bcf_hdr->size);
995 ret = i2400m_dnload_bcf(i2400m, bcf, bcf_size); 1444 ret = i2400m_dnload_bcf(i2400m, bcf, bcf_size);
996 if (ret == -ERESTARTSYS) 1445 if (ret == -ERESTARTSYS)
997 goto error_dev_rebooted; 1446 goto error_dev_rebooted;
@@ -1001,7 +1450,7 @@ hw_reboot:
1001 goto error_dnload_bcf; 1450 goto error_dnload_bcf;
1002 } 1451 }
1003 1452
1004 ret = i2400m_dnload_finalize(i2400m, bcf, ret); 1453 ret = i2400m_dnload_finalize(i2400m, bcf_hdr, bcf, ret);
1005 if (ret == -ERESTARTSYS) 1454 if (ret == -ERESTARTSYS)
1006 goto error_dev_rebooted; 1455 goto error_dev_rebooted;
1007 if (ret < 0) { 1456 if (ret < 0) {
@@ -1018,10 +1467,11 @@ hw_reboot:
1018error_dnload_finalize: 1467error_dnload_finalize:
1019error_dnload_bcf: 1468error_dnload_bcf:
1020error_dnload_init: 1469error_dnload_init:
1470error_bcf_hdr_find:
1021error_bootrom_init: 1471error_bootrom_init:
1022error_too_many_reboots: 1472error_too_many_reboots:
1023 d_fnend(5, dev, "(i2400m %p bcf %p size %zu) = %d\n", 1473 d_fnend(5, dev, "(i2400m %p bcf %p size %zu) = %d\n",
1024 i2400m, bcf, bcf_size, ret); 1474 i2400m, bcf, fw_size, ret);
1025 return ret; 1475 return ret;
1026 1476
1027error_dev_rebooted: 1477error_dev_rebooted:
@@ -1031,6 +1481,61 @@ error_dev_rebooted:
1031 goto hw_reboot; 1481 goto hw_reboot;
1032} 1482}
1033 1483
1484static
1485int i2400m_fw_bootstrap(struct i2400m *i2400m, const struct firmware *fw,
1486 enum i2400m_bri flags)
1487{
1488 int ret;
1489 struct device *dev = i2400m_dev(i2400m);
1490 const struct i2400m_bcf_hdr *bcf; /* Firmware data */
1491
1492 d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
1493 bcf = (void *) fw->data;
1494 ret = i2400m_fw_check(i2400m, bcf, fw->size);
1495 if (ret >= 0)
1496 ret = i2400m_fw_dnload(i2400m, bcf, fw->size, flags);
1497 if (ret < 0)
1498 dev_err(dev, "%s: cannot use: %d, skipping\n",
1499 i2400m->fw_name, ret);
1500 kfree(i2400m->fw_hdrs);
1501 i2400m->fw_hdrs = NULL;
1502 d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, ret);
1503 return ret;
1504}
1505
1506
1507/* Refcounted container for firmware data */
1508struct i2400m_fw {
1509 struct kref kref;
1510 const struct firmware *fw;
1511};
1512
1513
1514static
1515void i2400m_fw_destroy(struct kref *kref)
1516{
1517 struct i2400m_fw *i2400m_fw =
1518 container_of(kref, struct i2400m_fw, kref);
1519 release_firmware(i2400m_fw->fw);
1520 kfree(i2400m_fw);
1521}
1522
1523
1524static
1525struct i2400m_fw *i2400m_fw_get(struct i2400m_fw *i2400m_fw)
1526{
1527 if (i2400m_fw != NULL && i2400m_fw != (void *) ~0)
1528 kref_get(&i2400m_fw->kref);
1529 return i2400m_fw;
1530}
1531
1532
1533static
1534void i2400m_fw_put(struct i2400m_fw *i2400m_fw)
1535{
1536 kref_put(&i2400m_fw->kref, i2400m_fw_destroy);
1537}
1538
1034 1539
1035/** 1540/**
1036 * i2400m_dev_bootstrap - Bring the device to a known state and upload firmware 1541 * i2400m_dev_bootstrap - Bring the device to a known state and upload firmware
@@ -1049,42 +1554,109 @@ error_dev_rebooted:
1049 */ 1554 */
1050int i2400m_dev_bootstrap(struct i2400m *i2400m, enum i2400m_bri flags) 1555int i2400m_dev_bootstrap(struct i2400m *i2400m, enum i2400m_bri flags)
1051{ 1556{
1052 int ret = 0, itr = 0; 1557 int ret, itr;
1053 struct device *dev = i2400m_dev(i2400m); 1558 struct device *dev = i2400m_dev(i2400m);
1054 const struct firmware *fw; 1559 struct i2400m_fw *i2400m_fw;
1055 const struct i2400m_bcf_hdr *bcf; /* Firmware data */ 1560 const struct i2400m_bcf_hdr *bcf; /* Firmware data */
1561 const struct firmware *fw;
1056 const char *fw_name; 1562 const char *fw_name;
1057 1563
1058 d_fnstart(5, dev, "(i2400m %p)\n", i2400m); 1564 d_fnstart(5, dev, "(i2400m %p)\n", i2400m);
1059 1565
1566 ret = -ENODEV;
1567 spin_lock(&i2400m->rx_lock);
1568 i2400m_fw = i2400m_fw_get(i2400m->fw_cached);
1569 spin_unlock(&i2400m->rx_lock);
1570 if (i2400m_fw == (void *) ~0) {
1571 dev_err(dev, "can't load firmware now!");
1572 goto out;
1573 } else if (i2400m_fw != NULL) {
1574 dev_info(dev, "firmware %s: loading from cache\n",
1575 i2400m->fw_name);
1576 ret = i2400m_fw_bootstrap(i2400m, i2400m_fw->fw, flags);
1577 i2400m_fw_put(i2400m_fw);
1578 goto out;
1579 }
1580
1060 /* Load firmware files to memory. */ 1581 /* Load firmware files to memory. */
1061 itr = 0; 1582 for (itr = 0, bcf = NULL, ret = -ENOENT; ; itr++) {
1062 while(1) {
1063 fw_name = i2400m->bus_fw_names[itr]; 1583 fw_name = i2400m->bus_fw_names[itr];
1064 if (fw_name == NULL) { 1584 if (fw_name == NULL) {
1065 dev_err(dev, "Could not find a usable firmware image\n"); 1585 dev_err(dev, "Could not find a usable firmware image\n");
1066 ret = -ENOENT; 1586 break;
1067 goto error_no_fw;
1068 } 1587 }
1588 d_printf(1, dev, "trying firmware %s (%d)\n", fw_name, itr);
1069 ret = request_firmware(&fw, fw_name, dev); 1589 ret = request_firmware(&fw, fw_name, dev);
1070 if (ret == 0) 1590 if (ret < 0) {
1071 break; /* got it */
1072 if (ret < 0)
1073 dev_err(dev, "fw %s: cannot load file: %d\n", 1591 dev_err(dev, "fw %s: cannot load file: %d\n",
1074 fw_name, ret); 1592 fw_name, ret);
1075 itr++; 1593 continue;
1594 }
1595 i2400m->fw_name = fw_name;
1596 ret = i2400m_fw_bootstrap(i2400m, fw, flags);
1597 release_firmware(fw);
1598 if (ret >= 0) /* firmware loaded succesfully */
1599 break;
1600 i2400m->fw_name = NULL;
1076 } 1601 }
1077 1602out:
1078 bcf = (void *) fw->data;
1079 i2400m->fw_name = fw_name;
1080 ret = i2400m_fw_check(i2400m, bcf, fw->size);
1081 if (ret < 0)
1082 goto error_fw_bad;
1083 ret = i2400m_fw_dnload(i2400m, bcf, fw->size, flags);
1084error_fw_bad:
1085 release_firmware(fw);
1086error_no_fw:
1087 d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, ret); 1603 d_fnend(5, dev, "(i2400m %p) = %d\n", i2400m, ret);
1088 return ret; 1604 return ret;
1089} 1605}
1090EXPORT_SYMBOL_GPL(i2400m_dev_bootstrap); 1606EXPORT_SYMBOL_GPL(i2400m_dev_bootstrap);
1607
1608
1609void i2400m_fw_cache(struct i2400m *i2400m)
1610{
1611 int result;
1612 struct i2400m_fw *i2400m_fw;
1613 struct device *dev = i2400m_dev(i2400m);
1614
1615 /* if there is anything there, free it -- now, this'd be weird */
1616 spin_lock(&i2400m->rx_lock);
1617 i2400m_fw = i2400m->fw_cached;
1618 spin_unlock(&i2400m->rx_lock);
1619 if (i2400m_fw != NULL && i2400m_fw != (void *) ~0) {
1620 i2400m_fw_put(i2400m_fw);
1621 WARN(1, "%s:%u: still cached fw still present?\n",
1622 __func__, __LINE__);
1623 }
1624
1625 if (i2400m->fw_name == NULL) {
1626 dev_err(dev, "firmware n/a: can't cache\n");
1627 i2400m_fw = (void *) ~0;
1628 goto out;
1629 }
1630
1631 i2400m_fw = kzalloc(sizeof(*i2400m_fw), GFP_ATOMIC);
1632 if (i2400m_fw == NULL)
1633 goto out;
1634 kref_init(&i2400m_fw->kref);
1635 result = request_firmware(&i2400m_fw->fw, i2400m->fw_name, dev);
1636 if (result < 0) {
1637 dev_err(dev, "firmware %s: failed to cache: %d\n",
1638 i2400m->fw_name, result);
1639 kfree(i2400m_fw);
1640 i2400m_fw = (void *) ~0;
1641 } else
1642 dev_info(dev, "firmware %s: cached\n", i2400m->fw_name);
1643out:
1644 spin_lock(&i2400m->rx_lock);
1645 i2400m->fw_cached = i2400m_fw;
1646 spin_unlock(&i2400m->rx_lock);
1647}
1648
1649
1650void i2400m_fw_uncache(struct i2400m *i2400m)
1651{
1652 struct i2400m_fw *i2400m_fw;
1653
1654 spin_lock(&i2400m->rx_lock);
1655 i2400m_fw = i2400m->fw_cached;
1656 i2400m->fw_cached = NULL;
1657 spin_unlock(&i2400m->rx_lock);
1658
1659 if (i2400m_fw != NULL && i2400m_fw != (void *) ~0)
1660 i2400m_fw_put(i2400m_fw);
1661}
1662
diff --git a/drivers/net/wimax/i2400m/i2400m-sdio.h b/drivers/net/wimax/i2400m/i2400m-sdio.h
index 9c4e3189f7b5..b9c4bed3b457 100644
--- a/drivers/net/wimax/i2400m/i2400m-sdio.h
+++ b/drivers/net/wimax/i2400m/i2400m-sdio.h
@@ -67,6 +67,7 @@
67 67
68/* Host-Device interface for SDIO */ 68/* Host-Device interface for SDIO */
69enum { 69enum {
70 I2400M_SDIO_BOOT_RETRIES = 3,
70 I2400MS_BLK_SIZE = 256, 71 I2400MS_BLK_SIZE = 256,
71 I2400MS_PL_SIZE_MAX = 0x3E00, 72 I2400MS_PL_SIZE_MAX = 0x3E00,
72 73
@@ -77,9 +78,11 @@ enum {
77 I2400MS_INTR_GET_SIZE_ADDR = 0x2C, 78 I2400MS_INTR_GET_SIZE_ADDR = 0x2C,
78 /* The number of ticks to wait for the device to signal that 79 /* The number of ticks to wait for the device to signal that
79 * it is ready */ 80 * it is ready */
80 I2400MS_INIT_SLEEP_INTERVAL = 10, 81 I2400MS_INIT_SLEEP_INTERVAL = 100,
81 /* How long to wait for the device to settle after reset */ 82 /* How long to wait for the device to settle after reset */
82 I2400MS_SETTLE_TIME = 40, 83 I2400MS_SETTLE_TIME = 40,
84 /* The number of msec to wait for IOR after sending IOE */
85 IWMC3200_IOR_TIMEOUT = 10,
83}; 86};
84 87
85 88
@@ -97,6 +100,14 @@ enum {
97 * @tx_workqueue: workqeueue used for data TX; we don't use the 100 * @tx_workqueue: workqeueue used for data TX; we don't use the
98 * system's workqueue as that might cause deadlocks with code in 101 * system's workqueue as that might cause deadlocks with code in
99 * the bus-generic driver. 102 * the bus-generic driver.
103 *
104 * @debugfs_dentry: dentry for the SDIO specific debugfs files
105 *
106 * Note this value is set to NULL upon destruction; this is
107 * because some routinges use it to determine if we are inside the
108 * probe() path or some other path. When debugfs is disabled,
109 * creation sets the dentry to '(void*) -ENODEV', which is valid
110 * for the test.
100 */ 111 */
101struct i2400ms { 112struct i2400ms {
102 struct i2400m i2400m; /* FIRST! See doc */ 113 struct i2400m i2400m; /* FIRST! See doc */
@@ -111,6 +122,9 @@ struct i2400ms {
111 wait_queue_head_t bm_wfa_wq; 122 wait_queue_head_t bm_wfa_wq;
112 int bm_wait_result; 123 int bm_wait_result;
113 size_t bm_ack_size; 124 size_t bm_ack_size;
125
126 /* Device is any of the iwmc3200 SKUs */
127 unsigned iwmc3200:1;
114}; 128};
115 129
116 130
diff --git a/drivers/net/wimax/i2400m/i2400m-usb.h b/drivers/net/wimax/i2400m/i2400m-usb.h
index 6f76558b170f..5cc0f279417e 100644
--- a/drivers/net/wimax/i2400m/i2400m-usb.h
+++ b/drivers/net/wimax/i2400m/i2400m-usb.h
@@ -88,6 +88,13 @@ struct edc {
88 u16 errorcount; 88 u16 errorcount;
89}; 89};
90 90
91struct i2400m_endpoint_cfg {
92 unsigned char bulk_out;
93 unsigned char notification;
94 unsigned char reset_cold;
95 unsigned char bulk_in;
96};
97
91static inline void edc_init(struct edc *edc) 98static inline void edc_init(struct edc *edc)
92{ 99{
93 edc->timestart = jiffies; 100 edc->timestart = jiffies;
@@ -137,15 +144,13 @@ static inline int edc_inc(struct edc *edc, u16 max_err, u16 timeframe)
137 144
138/* Host-Device interface for USB */ 145/* Host-Device interface for USB */
139enum { 146enum {
147 I2400M_USB_BOOT_RETRIES = 3,
140 I2400MU_MAX_NOTIFICATION_LEN = 256, 148 I2400MU_MAX_NOTIFICATION_LEN = 256,
141 I2400MU_BLK_SIZE = 16, 149 I2400MU_BLK_SIZE = 16,
142 I2400MU_PL_SIZE_MAX = 0x3EFF, 150 I2400MU_PL_SIZE_MAX = 0x3EFF,
143 151
144 /* Endpoints */ 152 /* Device IDs */
145 I2400MU_EP_BULK_OUT = 0, 153 USB_DEVICE_ID_I6050 = 0x0186,
146 I2400MU_EP_NOTIFICATION,
147 I2400MU_EP_RESET_COLD,
148 I2400MU_EP_BULK_IN,
149}; 154};
150 155
151 156
@@ -215,6 +220,7 @@ struct i2400mu {
215 struct usb_device *usb_dev; 220 struct usb_device *usb_dev;
216 struct usb_interface *usb_iface; 221 struct usb_interface *usb_iface;
217 struct edc urb_edc; /* Error density counter */ 222 struct edc urb_edc; /* Error density counter */
223 struct i2400m_endpoint_cfg endpoint_cfg;
218 224
219 struct urb *notif_urb; 225 struct urb *notif_urb;
220 struct task_struct *tx_kthread; 226 struct task_struct *tx_kthread;
diff --git a/drivers/net/wimax/i2400m/i2400m.h b/drivers/net/wimax/i2400m/i2400m.h
index 60330f313f27..04df9bbe340f 100644
--- a/drivers/net/wimax/i2400m/i2400m.h
+++ b/drivers/net/wimax/i2400m/i2400m.h
@@ -117,16 +117,30 @@
117 * well as i2400m->wimax_dev.net_dev and call i2400m_setup(). The 117 * well as i2400m->wimax_dev.net_dev and call i2400m_setup(). The
118 * i2400m driver will only register with the WiMAX and network stacks; 118 * i2400m driver will only register with the WiMAX and network stacks;
119 * the only access done to the device is to read the MAC address so we 119 * the only access done to the device is to read the MAC address so we
120 * can register a network device. This calls i2400m_dev_start() to 120 * can register a network device.
121 * load firmware, setup communication with the device and configure it
122 * for operation.
123 * 121 *
124 * At this point, control and data communications are possible. 122 * The high-level call flow is:
123 *
124 * bus_probe()
125 * i2400m_setup()
126 * i2400m->bus_setup()
127 * boot rom initialization / read mac addr
128 * network / WiMAX stacks registration
129 * i2400m_dev_start()
130 * i2400m->bus_dev_start()
131 * i2400m_dev_initialize()
125 * 132 *
126 * On disconnect/driver unload, the bus-specific disconnect function 133 * The reverse applies for a disconnect() call:
127 * calls i2400m_release() to undo i2400m_setup(). i2400m_dev_stop() 134 *
128 * shuts the firmware down and releases resources uses to communicate 135 * bus_disconnect()
129 * with the device. 136 * i2400m_release()
137 * i2400m_dev_stop()
138 * i2400m_dev_shutdown()
139 * i2400m->bus_dev_stop()
140 * network / WiMAX stack unregistration
141 * i2400m->bus_release()
142 *
143 * At this point, control and data communications are possible.
130 * 144 *
131 * While the device is up, it might reset. The bus-specific driver has 145 * While the device is up, it might reset. The bus-specific driver has
132 * to catch that situation and call i2400m_dev_reset_handle() to deal 146 * to catch that situation and call i2400m_dev_reset_handle() to deal
@@ -148,9 +162,6 @@
148 162
149/* Misc constants */ 163/* Misc constants */
150enum { 164enum {
151 /* Firmware uploading */
152 I2400M_BOOT_RETRIES = 3,
153 I3200_BOOT_RETRIES = 3,
154 /* Size of the Boot Mode Command buffer */ 165 /* Size of the Boot Mode Command buffer */
155 I2400M_BM_CMD_BUF_SIZE = 16 * 1024, 166 I2400M_BM_CMD_BUF_SIZE = 16 * 1024,
156 I2400M_BM_ACK_BUF_SIZE = 256, 167 I2400M_BM_ACK_BUF_SIZE = 256,
@@ -197,6 +208,7 @@ enum i2400m_reset_type {
197 208
198struct i2400m_reset_ctx; 209struct i2400m_reset_ctx;
199struct i2400m_roq; 210struct i2400m_roq;
211struct i2400m_barker_db;
200 212
201/** 213/**
202 * struct i2400m - descriptor for an Intel 2400m 214 * struct i2400m - descriptor for an Intel 2400m
@@ -204,27 +216,50 @@ struct i2400m_roq;
204 * Members marked with [fill] must be filled out/initialized before 216 * Members marked with [fill] must be filled out/initialized before
205 * calling i2400m_setup(). 217 * calling i2400m_setup().
206 * 218 *
219 * Note the @bus_setup/@bus_release, @bus_dev_start/@bus_dev_release
220 * call pairs are very much doing almost the same, and depending on
221 * the underlying bus, some stuff has to be put in one or the
222 * other. The idea of setup/release is that they setup the minimal
223 * amount needed for loading firmware, where us dev_start/stop setup
224 * the rest needed to do full data/control traffic.
225 *
207 * @bus_tx_block_size: [fill] SDIO imposes a 256 block size, USB 16, 226 * @bus_tx_block_size: [fill] SDIO imposes a 256 block size, USB 16,
208 * so we have a tx_blk_size variable that the bus layer sets to 227 * so we have a tx_blk_size variable that the bus layer sets to
209 * tell the engine how much of that we need. 228 * tell the engine how much of that we need.
210 * 229 *
211 * @bus_pl_size_max: [fill] Maximum payload size. 230 * @bus_pl_size_max: [fill] Maximum payload size.
212 * 231 *
213 * @bus_dev_start: [fill] Function called by the bus-generic code 232 * @bus_setup: [optional fill] Function called by the bus-generic code
214 * [i2400m_dev_start()] to setup the bus-specific communications 233 * [i2400m_setup()] to setup the basic bus-specific communications
215 * to the the device. See LIFE CYCLE above. 234 * to the the device needed to load firmware. See LIFE CYCLE above.
216 * 235 *
217 * NOTE: Doesn't need to upload the firmware, as that is taken 236 * NOTE: Doesn't need to upload the firmware, as that is taken
218 * care of by the bus-generic code. 237 * care of by the bus-generic code.
219 * 238 *
220 * @bus_dev_stop: [fill] Function called by the bus-generic code 239 * @bus_release: [optional fill] Function called by the bus-generic
221 * [i2400m_dev_stop()] to shutdown the bus-specific communications 240 * code [i2400m_release()] to shutdown the basic bus-specific
222 * to the the device. See LIFE CYCLE above. 241 * communications to the the device needed to load firmware. See
242 * LIFE CYCLE above.
223 * 243 *
224 * This function does not need to reset the device, just tear down 244 * This function does not need to reset the device, just tear down
225 * all the host resources created to handle communication with 245 * all the host resources created to handle communication with
226 * the device. 246 * the device.
227 * 247 *
248 * @bus_dev_start: [optional fill] Function called by the bus-generic
249 * code [i2400m_dev_start()] to do things needed to start the
250 * device. See LIFE CYCLE above.
251 *
252 * NOTE: Doesn't need to upload the firmware, as that is taken
253 * care of by the bus-generic code.
254 *
255 * @bus_dev_stop: [optional fill] Function called by the bus-generic
256 * code [i2400m_dev_stop()] to do things needed for stopping the
257 * device. See LIFE CYCLE above.
258 *
259 * This function does not need to reset the device, just tear down
260 * all the host resources created to handle communication with
261 * the device.
262 *
228 * @bus_tx_kick: [fill] Function called by the bus-generic code to let 263 * @bus_tx_kick: [fill] Function called by the bus-generic code to let
229 * the bus-specific code know that there is data available in the 264 * the bus-specific code know that there is data available in the
230 * TX FIFO for transmission to the device. 265 * TX FIFO for transmission to the device.
@@ -246,6 +281,9 @@ struct i2400m_roq;
246 * process, so it cannot rely on common infrastructure being laid 281 * process, so it cannot rely on common infrastructure being laid
247 * out. 282 * out.
248 * 283 *
284 * IMPORTANT: don't call reset on RT_BUS with i2400m->init_mutex
285 * held, as the .pre/.post reset handlers will deadlock.
286 *
249 * @bus_bm_retries: [fill] How many times shall a firmware upload / 287 * @bus_bm_retries: [fill] How many times shall a firmware upload /
250 * device initialization be retried? Different models of the same 288 * device initialization be retried? Different models of the same
251 * device might need different values, hence it is set by the 289 * device might need different values, hence it is set by the
@@ -297,6 +335,27 @@ struct i2400m_roq;
297 * force this to be the first field so that we can get from 335 * force this to be the first field so that we can get from
298 * netdev_priv() the right pointer. 336 * netdev_priv() the right pointer.
299 * 337 *
338 * @updown: the device is up and ready for transmitting control and
339 * data packets. This implies @ready (communication infrastructure
340 * with the device is ready) and the device's firmware has been
341 * loaded and the device initialized.
342 *
343 * Write to it only inside a i2400m->init_mutex protected area
344 * followed with a wmb(); rmb() before accesing (unless locked
345 * inside i2400m->init_mutex). Read access can be loose like that
346 * [just using rmb()] because the paths that use this also do
347 * other error checks later on.
348 *
349 * @ready: Communication infrastructure with the device is ready, data
350 * frames can start to be passed around (this is lighter than
351 * using the WiMAX state for certain hot paths).
352 *
353 * Write to it only inside a i2400m->init_mutex protected area
354 * followed with a wmb(); rmb() before accesing (unless locked
355 * inside i2400m->init_mutex). Read access can be loose like that
356 * [just using rmb()] because the paths that use this also do
357 * other error checks later on.
358 *
300 * @rx_reorder: 1 if RX reordering is enabled; this can only be 359 * @rx_reorder: 1 if RX reordering is enabled; this can only be
301 * set at probe time. 360 * set at probe time.
302 * 361 *
@@ -362,6 +421,13 @@ struct i2400m_roq;
362 * delivered. Then the driver can release them to the host. See 421 * delivered. Then the driver can release them to the host. See
363 * drivers/net/i2400m/rx.c for details. 422 * drivers/net/i2400m/rx.c for details.
364 * 423 *
424 * @rx_reports: reports received from the device that couldn't be
425 * processed because the driver wasn't still ready; when ready,
426 * they are pulled from here and chewed.
427 *
428 * @rx_reports_ws: Work struct used to kick a scan of the RX reports
429 * list and to process each.
430 *
365 * @src_mac_addr: MAC address used to make ethernet packets be coming 431 * @src_mac_addr: MAC address used to make ethernet packets be coming
366 * from. This is generated at i2400m_setup() time and used during 432 * from. This is generated at i2400m_setup() time and used during
367 * the life cycle of the instance. See i2400m_fake_eth_header(). 433 * the life cycle of the instance. See i2400m_fake_eth_header().
@@ -422,6 +488,25 @@ struct i2400m_roq;
422 * 488 *
423 * @fw_version: version of the firmware interface, Major.minor, 489 * @fw_version: version of the firmware interface, Major.minor,
424 * encoded in the high word and low word (major << 16 | minor). 490 * encoded in the high word and low word (major << 16 | minor).
491 *
492 * @fw_hdrs: NULL terminated array of pointers to the firmware
493 * headers. This is only available during firmware load time.
494 *
495 * @fw_cached: Used to cache firmware when the system goes to
496 * suspend/standby/hibernation (as on resume we can't read it). If
497 * NULL, no firmware was cached, read it. If ~0, you can't read
498 * any firmware files (the system still didn't come out of suspend
499 * and failed to cache one), so abort; otherwise, a valid cached
500 * firmware to be used. Access to this variable is protected by
501 * the spinlock i2400m->rx_lock.
502 *
503 * @barker: barker type that the device uses; this is initialized by
504 * i2400m_is_boot_barker() the first time it is called. Then it
505 * won't change during the life cycle of the device and everytime
506 * a boot barker is received, it is just verified for it being the
507 * same.
508 *
509 * @pm_notifier: used to register for PM events
425 */ 510 */
426struct i2400m { 511struct i2400m {
427 struct wimax_dev wimax_dev; /* FIRST! See doc */ 512 struct wimax_dev wimax_dev; /* FIRST! See doc */
@@ -429,7 +514,7 @@ struct i2400m {
429 unsigned updown:1; /* Network device is up or down */ 514 unsigned updown:1; /* Network device is up or down */
430 unsigned boot_mode:1; /* is the device in boot mode? */ 515 unsigned boot_mode:1; /* is the device in boot mode? */
431 unsigned sboot:1; /* signed or unsigned fw boot */ 516 unsigned sboot:1; /* signed or unsigned fw boot */
432 unsigned ready:1; /* all probing steps done */ 517 unsigned ready:1; /* Device comm infrastructure ready */
433 unsigned rx_reorder:1; /* RX reorder is enabled */ 518 unsigned rx_reorder:1; /* RX reorder is enabled */
434 u8 trace_msg_from_user; /* echo rx msgs to 'trace' pipe */ 519 u8 trace_msg_from_user; /* echo rx msgs to 'trace' pipe */
435 /* typed u8 so /sys/kernel/debug/u8 can tweak */ 520 /* typed u8 so /sys/kernel/debug/u8 can tweak */
@@ -440,8 +525,10 @@ struct i2400m {
440 size_t bus_pl_size_max; 525 size_t bus_pl_size_max;
441 unsigned bus_bm_retries; 526 unsigned bus_bm_retries;
442 527
528 int (*bus_setup)(struct i2400m *);
443 int (*bus_dev_start)(struct i2400m *); 529 int (*bus_dev_start)(struct i2400m *);
444 void (*bus_dev_stop)(struct i2400m *); 530 void (*bus_dev_stop)(struct i2400m *);
531 void (*bus_release)(struct i2400m *);
445 void (*bus_tx_kick)(struct i2400m *); 532 void (*bus_tx_kick)(struct i2400m *);
446 int (*bus_reset)(struct i2400m *, enum i2400m_reset_type); 533 int (*bus_reset)(struct i2400m *, enum i2400m_reset_type);
447 ssize_t (*bus_bm_cmd_send)(struct i2400m *, 534 ssize_t (*bus_bm_cmd_send)(struct i2400m *,
@@ -468,6 +555,8 @@ struct i2400m {
468 rx_num, rx_size_acc, rx_size_min, rx_size_max; 555 rx_num, rx_size_acc, rx_size_min, rx_size_max;
469 struct i2400m_roq *rx_roq; /* not under rx_lock! */ 556 struct i2400m_roq *rx_roq; /* not under rx_lock! */
470 u8 src_mac_addr[ETH_HLEN]; 557 u8 src_mac_addr[ETH_HLEN];
558 struct list_head rx_reports; /* under rx_lock! */
559 struct work_struct rx_report_ws;
471 560
472 struct mutex msg_mutex; /* serialize command execution */ 561 struct mutex msg_mutex; /* serialize command execution */
473 struct completion msg_completion; 562 struct completion msg_completion;
@@ -487,37 +576,12 @@ struct i2400m {
487 struct dentry *debugfs_dentry; 576 struct dentry *debugfs_dentry;
488 const char *fw_name; /* name of the current firmware image */ 577 const char *fw_name; /* name of the current firmware image */
489 unsigned long fw_version; /* version of the firmware interface */ 578 unsigned long fw_version; /* version of the firmware interface */
490}; 579 const struct i2400m_bcf_hdr **fw_hdrs;
491 580 struct i2400m_fw *fw_cached; /* protected by rx_lock */
581 struct i2400m_barker_db *barker;
492 582
493/* 583 struct notifier_block pm_notifier;
494 * Initialize a 'struct i2400m' from all zeroes 584};
495 *
496 * This is a bus-generic API call.
497 */
498static inline
499void i2400m_init(struct i2400m *i2400m)
500{
501 wimax_dev_init(&i2400m->wimax_dev);
502
503 i2400m->boot_mode = 1;
504 i2400m->rx_reorder = 1;
505 init_waitqueue_head(&i2400m->state_wq);
506
507 spin_lock_init(&i2400m->tx_lock);
508 i2400m->tx_pl_min = UINT_MAX;
509 i2400m->tx_size_min = UINT_MAX;
510
511 spin_lock_init(&i2400m->rx_lock);
512 i2400m->rx_pl_min = UINT_MAX;
513 i2400m->rx_size_min = UINT_MAX;
514
515 mutex_init(&i2400m->msg_mutex);
516 init_completion(&i2400m->msg_completion);
517
518 mutex_init(&i2400m->init_mutex);
519 /* wake_tx_ws is initialized in i2400m_tx_setup() */
520}
521 585
522 586
523/* 587/*
@@ -577,6 +641,14 @@ extern void i2400m_bm_cmd_prepare(struct i2400m_bootrom_header *);
577extern int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri); 641extern int i2400m_dev_bootstrap(struct i2400m *, enum i2400m_bri);
578extern int i2400m_read_mac_addr(struct i2400m *); 642extern int i2400m_read_mac_addr(struct i2400m *);
579extern int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri); 643extern int i2400m_bootrom_init(struct i2400m *, enum i2400m_bri);
644extern int i2400m_is_boot_barker(struct i2400m *, const void *, size_t);
645static inline
646int i2400m_is_d2h_barker(const void *buf)
647{
648 const __le32 *barker = buf;
649 return le32_to_cpu(*barker) == I2400M_D2H_MSG_BARKER;
650}
651extern void i2400m_unknown_barker(struct i2400m *, const void *, size_t);
580 652
581/* Make/grok boot-rom header commands */ 653/* Make/grok boot-rom header commands */
582 654
@@ -644,6 +716,8 @@ unsigned i2400m_brh_get_signature(const struct i2400m_bootrom_header *hdr)
644/* 716/*
645 * Driver / device setup and internal functions 717 * Driver / device setup and internal functions
646 */ 718 */
719extern void i2400m_init(struct i2400m *);
720extern int i2400m_reset(struct i2400m *, enum i2400m_reset_type);
647extern void i2400m_netdev_setup(struct net_device *net_dev); 721extern void i2400m_netdev_setup(struct net_device *net_dev);
648extern int i2400m_sysfs_setup(struct device_driver *); 722extern int i2400m_sysfs_setup(struct device_driver *);
649extern void i2400m_sysfs_release(struct device_driver *); 723extern void i2400m_sysfs_release(struct device_driver *);
@@ -654,10 +728,14 @@ extern void i2400m_tx_release(struct i2400m *);
654extern int i2400m_rx_setup(struct i2400m *); 728extern int i2400m_rx_setup(struct i2400m *);
655extern void i2400m_rx_release(struct i2400m *); 729extern void i2400m_rx_release(struct i2400m *);
656 730
731extern void i2400m_fw_cache(struct i2400m *);
732extern void i2400m_fw_uncache(struct i2400m *);
733
657extern void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned, 734extern void i2400m_net_rx(struct i2400m *, struct sk_buff *, unsigned,
658 const void *, int); 735 const void *, int);
659extern void i2400m_net_erx(struct i2400m *, struct sk_buff *, 736extern void i2400m_net_erx(struct i2400m *, struct sk_buff *,
660 enum i2400m_cs); 737 enum i2400m_cs);
738extern void i2400m_net_wake_stop(struct i2400m *);
661enum i2400m_pt; 739enum i2400m_pt;
662extern int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt); 740extern int i2400m_tx(struct i2400m *, const void *, size_t, enum i2400m_pt);
663 741
@@ -672,14 +750,12 @@ static inline int i2400m_debugfs_add(struct i2400m *i2400m)
672static inline void i2400m_debugfs_rm(struct i2400m *i2400m) {} 750static inline void i2400m_debugfs_rm(struct i2400m *i2400m) {}
673#endif 751#endif
674 752
675/* Called by _dev_start()/_dev_stop() to initialize the device itself */ 753/* Initialize/shutdown the device */
676extern int i2400m_dev_initialize(struct i2400m *); 754extern int i2400m_dev_initialize(struct i2400m *);
677extern void i2400m_dev_shutdown(struct i2400m *); 755extern void i2400m_dev_shutdown(struct i2400m *);
678 756
679extern struct attribute_group i2400m_dev_attr_group; 757extern struct attribute_group i2400m_dev_attr_group;
680 758
681extern int i2400m_schedule_work(struct i2400m *,
682 void (*)(struct work_struct *), gfp_t);
683 759
684/* HDI message's payload description handling */ 760/* HDI message's payload description handling */
685 761
@@ -724,7 +800,9 @@ void i2400m_put(struct i2400m *i2400m)
724 dev_put(i2400m->wimax_dev.net_dev); 800 dev_put(i2400m->wimax_dev.net_dev);
725} 801}
726 802
727extern int i2400m_dev_reset_handle(struct i2400m *); 803extern int i2400m_dev_reset_handle(struct i2400m *, const char *);
804extern int i2400m_pre_reset(struct i2400m *);
805extern int i2400m_post_reset(struct i2400m *);
728 806
729/* 807/*
730 * _setup()/_release() are called by the probe/disconnect functions of 808 * _setup()/_release() are called by the probe/disconnect functions of
@@ -737,20 +815,6 @@ extern int i2400m_rx(struct i2400m *, struct sk_buff *);
737extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *); 815extern struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *, size_t *);
738extern void i2400m_tx_msg_sent(struct i2400m *); 816extern void i2400m_tx_msg_sent(struct i2400m *);
739 817
740static const __le32 i2400m_NBOOT_BARKER[4] = {
741 cpu_to_le32(I2400M_NBOOT_BARKER),
742 cpu_to_le32(I2400M_NBOOT_BARKER),
743 cpu_to_le32(I2400M_NBOOT_BARKER),
744 cpu_to_le32(I2400M_NBOOT_BARKER)
745};
746
747static const __le32 i2400m_SBOOT_BARKER[4] = {
748 cpu_to_le32(I2400M_SBOOT_BARKER),
749 cpu_to_le32(I2400M_SBOOT_BARKER),
750 cpu_to_le32(I2400M_SBOOT_BARKER),
751 cpu_to_le32(I2400M_SBOOT_BARKER)
752};
753
754extern int i2400m_power_save_disabled; 818extern int i2400m_power_save_disabled;
755 819
756/* 820/*
@@ -773,10 +837,12 @@ struct device *i2400m_dev(struct i2400m *i2400m)
773struct i2400m_work { 837struct i2400m_work {
774 struct work_struct ws; 838 struct work_struct ws;
775 struct i2400m *i2400m; 839 struct i2400m *i2400m;
840 size_t pl_size;
776 u8 pl[0]; 841 u8 pl[0];
777}; 842};
778extern int i2400m_queue_work(struct i2400m *, 843
779 void (*)(struct work_struct *), gfp_t, 844extern int i2400m_schedule_work(struct i2400m *,
845 void (*)(struct work_struct *), gfp_t,
780 const void *, size_t); 846 const void *, size_t);
781 847
782extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *, 848extern int i2400m_msg_check_status(const struct i2400m_l3l4_hdr *,
@@ -789,6 +855,7 @@ extern void i2400m_msg_ack_hook(struct i2400m *,
789 const struct i2400m_l3l4_hdr *, size_t); 855 const struct i2400m_l3l4_hdr *, size_t);
790extern void i2400m_report_hook(struct i2400m *, 856extern void i2400m_report_hook(struct i2400m *,
791 const struct i2400m_l3l4_hdr *, size_t); 857 const struct i2400m_l3l4_hdr *, size_t);
858extern void i2400m_report_hook_work(struct work_struct *);
792extern int i2400m_cmd_enter_powersave(struct i2400m *); 859extern int i2400m_cmd_enter_powersave(struct i2400m *);
793extern int i2400m_cmd_get_state(struct i2400m *); 860extern int i2400m_cmd_get_state(struct i2400m *);
794extern int i2400m_cmd_exit_idle(struct i2400m *); 861extern int i2400m_cmd_exit_idle(struct i2400m *);
@@ -849,6 +916,12 @@ void __i2400m_msleep(unsigned ms)
849#endif 916#endif
850} 917}
851 918
919
920/* module initialization helpers */
921extern int i2400m_barker_db_init(const char *);
922extern void i2400m_barker_db_exit(void);
923
924
852/* Module parameters */ 925/* Module parameters */
853 926
854extern int i2400m_idle_mode_disabled; 927extern int i2400m_idle_mode_disabled;
diff --git a/drivers/net/wimax/i2400m/netdev.c b/drivers/net/wimax/i2400m/netdev.c
index 796396cb4c82..599aa4eb9baa 100644
--- a/drivers/net/wimax/i2400m/netdev.c
+++ b/drivers/net/wimax/i2400m/netdev.c
@@ -74,6 +74,7 @@
74 */ 74 */
75#include <linux/if_arp.h> 75#include <linux/if_arp.h>
76#include <linux/netdevice.h> 76#include <linux/netdevice.h>
77#include <linux/ethtool.h>
77#include "i2400m.h" 78#include "i2400m.h"
78 79
79 80
@@ -88,7 +89,10 @@ enum {
88 * The MTU is 1400 or less 89 * The MTU is 1400 or less
89 */ 90 */
90 I2400M_MAX_MTU = 1400, 91 I2400M_MAX_MTU = 1400,
91 I2400M_TX_TIMEOUT = HZ, 92 /* 20 secs? yep, this is the maximum timeout that the device
93 * might take to get out of IDLE / negotiate it with the base
94 * station. We add 1sec for good measure. */
95 I2400M_TX_TIMEOUT = 21 * HZ,
92 I2400M_TX_QLEN = 5, 96 I2400M_TX_QLEN = 5,
93}; 97};
94 98
@@ -101,22 +105,19 @@ int i2400m_open(struct net_device *net_dev)
101 struct device *dev = i2400m_dev(i2400m); 105 struct device *dev = i2400m_dev(i2400m);
102 106
103 d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m); 107 d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
104 if (i2400m->ready == 0) { 108 /* Make sure we wait until init is complete... */
105 dev_err(dev, "Device is still initializing\n"); 109 mutex_lock(&i2400m->init_mutex);
106 result = -EBUSY; 110 if (i2400m->updown)
107 } else
108 result = 0; 111 result = 0;
112 else
113 result = -EBUSY;
114 mutex_unlock(&i2400m->init_mutex);
109 d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n", 115 d_fnend(3, dev, "(net_dev %p [i2400m %p]) = %d\n",
110 net_dev, i2400m, result); 116 net_dev, i2400m, result);
111 return result; 117 return result;
112} 118}
113 119
114 120
115/*
116 *
117 * On kernel versions where cancel_work_sync() didn't return anything,
118 * we rely on wake_tx_skb() being non-NULL.
119 */
120static 121static
121int i2400m_stop(struct net_device *net_dev) 122int i2400m_stop(struct net_device *net_dev)
122{ 123{
@@ -124,21 +125,7 @@ int i2400m_stop(struct net_device *net_dev)
124 struct device *dev = i2400m_dev(i2400m); 125 struct device *dev = i2400m_dev(i2400m);
125 126
126 d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m); 127 d_fnstart(3, dev, "(net_dev %p [i2400m %p])\n", net_dev, i2400m);
127 /* See i2400m_hard_start_xmit(), references are taken there 128 i2400m_net_wake_stop(i2400m);
128 * and here we release them if the work was still
129 * pending. Note we can't differentiate work not pending vs
130 * never scheduled, so the NULL check does that. */
131 if (cancel_work_sync(&i2400m->wake_tx_ws) == 0
132 && i2400m->wake_tx_skb != NULL) {
133 unsigned long flags;
134 struct sk_buff *wake_tx_skb;
135 spin_lock_irqsave(&i2400m->tx_lock, flags);
136 wake_tx_skb = i2400m->wake_tx_skb; /* compat help */
137 i2400m->wake_tx_skb = NULL; /* compat help */
138 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
139 i2400m_put(i2400m);
140 kfree_skb(wake_tx_skb);
141 }
142 d_fnend(3, dev, "(net_dev %p [i2400m %p]) = 0\n", net_dev, i2400m); 129 d_fnend(3, dev, "(net_dev %p [i2400m %p]) = 0\n", net_dev, i2400m);
143 return 0; 130 return 0;
144} 131}
@@ -167,6 +154,7 @@ void i2400m_wake_tx_work(struct work_struct *ws)
167{ 154{
168 int result; 155 int result;
169 struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws); 156 struct i2400m *i2400m = container_of(ws, struct i2400m, wake_tx_ws);
157 struct net_device *net_dev = i2400m->wimax_dev.net_dev;
170 struct device *dev = i2400m_dev(i2400m); 158 struct device *dev = i2400m_dev(i2400m);
171 struct sk_buff *skb = i2400m->wake_tx_skb; 159 struct sk_buff *skb = i2400m->wake_tx_skb;
172 unsigned long flags; 160 unsigned long flags;
@@ -182,27 +170,36 @@ void i2400m_wake_tx_work(struct work_struct *ws)
182 dev_err(dev, "WAKE&TX: skb dissapeared!\n"); 170 dev_err(dev, "WAKE&TX: skb dissapeared!\n");
183 goto out_put; 171 goto out_put;
184 } 172 }
173 /* If we have, somehow, lost the connection after this was
174 * queued, don't do anything; this might be the device got
175 * reset or just disconnected. */
176 if (unlikely(!netif_carrier_ok(net_dev)))
177 goto out_kfree;
185 result = i2400m_cmd_exit_idle(i2400m); 178 result = i2400m_cmd_exit_idle(i2400m);
186 if (result == -EILSEQ) 179 if (result == -EILSEQ)
187 result = 0; 180 result = 0;
188 if (result < 0) { 181 if (result < 0) {
189 dev_err(dev, "WAKE&TX: device didn't get out of idle: " 182 dev_err(dev, "WAKE&TX: device didn't get out of idle: "
190 "%d\n", result); 183 "%d - resetting\n", result);
191 goto error; 184 i2400m_reset(i2400m, I2400M_RT_BUS);
185 goto error;
192 } 186 }
193 result = wait_event_timeout(i2400m->state_wq, 187 result = wait_event_timeout(i2400m->state_wq,
194 i2400m->state != I2400M_SS_IDLE, 5 * HZ); 188 i2400m->state != I2400M_SS_IDLE,
189 net_dev->watchdog_timeo - HZ/2);
195 if (result == 0) 190 if (result == 0)
196 result = -ETIMEDOUT; 191 result = -ETIMEDOUT;
197 if (result < 0) { 192 if (result < 0) {
198 dev_err(dev, "WAKE&TX: error waiting for device to exit IDLE: " 193 dev_err(dev, "WAKE&TX: error waiting for device to exit IDLE: "
199 "%d\n", result); 194 "%d - resetting\n", result);
195 i2400m_reset(i2400m, I2400M_RT_BUS);
200 goto error; 196 goto error;
201 } 197 }
202 msleep(20); /* device still needs some time or it drops it */ 198 msleep(20); /* device still needs some time or it drops it */
203 result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA); 199 result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
204 netif_wake_queue(i2400m->wimax_dev.net_dev);
205error: 200error:
201 netif_wake_queue(net_dev);
202out_kfree:
206 kfree_skb(skb); /* refcount transferred by _hard_start_xmit() */ 203 kfree_skb(skb); /* refcount transferred by _hard_start_xmit() */
207out_put: 204out_put:
208 i2400m_put(i2400m); 205 i2400m_put(i2400m);
@@ -229,6 +226,38 @@ void i2400m_tx_prep_header(struct sk_buff *skb)
229} 226}
230 227
231 228
229
230/*
231 * Cleanup resources acquired during i2400m_net_wake_tx()
232 *
233 * This is called by __i2400m_dev_stop and means we have to make sure
234 * the workqueue is flushed from any pending work.
235 */
236void i2400m_net_wake_stop(struct i2400m *i2400m)
237{
238 struct device *dev = i2400m_dev(i2400m);
239
240 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
241 /* See i2400m_hard_start_xmit(), references are taken there
242 * and here we release them if the work was still
243 * pending. Note we can't differentiate work not pending vs
244 * never scheduled, so the NULL check does that. */
245 if (cancel_work_sync(&i2400m->wake_tx_ws) == 0
246 && i2400m->wake_tx_skb != NULL) {
247 unsigned long flags;
248 struct sk_buff *wake_tx_skb;
249 spin_lock_irqsave(&i2400m->tx_lock, flags);
250 wake_tx_skb = i2400m->wake_tx_skb; /* compat help */
251 i2400m->wake_tx_skb = NULL; /* compat help */
252 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
253 i2400m_put(i2400m);
254 kfree_skb(wake_tx_skb);
255 }
256 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
257 return;
258}
259
260
232/* 261/*
233 * TX an skb to an idle device 262 * TX an skb to an idle device
234 * 263 *
@@ -342,6 +371,20 @@ netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
342 int result; 371 int result;
343 372
344 d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev); 373 d_fnstart(3, dev, "(skb %p net_dev %p)\n", skb, net_dev);
374 if (skb_header_cloned(skb)) {
375 /*
376 * Make tcpdump/wireshark happy -- if they are
377 * running, the skb is cloned and we will overwrite
378 * the mac fields in i2400m_tx_prep_header. Expand
379 * seems to fix this...
380 */
381 result = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
382 if (result) {
383 result = NETDEV_TX_BUSY;
384 goto error_expand;
385 }
386 }
387
345 if (i2400m->state == I2400M_SS_IDLE) 388 if (i2400m->state == I2400M_SS_IDLE)
346 result = i2400m_net_wake_tx(i2400m, net_dev, skb); 389 result = i2400m_net_wake_tx(i2400m, net_dev, skb);
347 else 390 else
@@ -352,10 +395,11 @@ netdev_tx_t i2400m_hard_start_xmit(struct sk_buff *skb,
352 net_dev->stats.tx_packets++; 395 net_dev->stats.tx_packets++;
353 net_dev->stats.tx_bytes += skb->len; 396 net_dev->stats.tx_bytes += skb->len;
354 } 397 }
398 result = NETDEV_TX_OK;
399error_expand:
355 kfree_skb(skb); 400 kfree_skb(skb);
356 401 d_fnend(3, dev, "(skb %p net_dev %p) = %d\n", skb, net_dev, result);
357 d_fnend(3, dev, "(skb %p net_dev %p)\n", skb, net_dev); 402 return result;
358 return NETDEV_TX_OK;
359} 403}
360 404
361 405
@@ -559,6 +603,22 @@ static const struct net_device_ops i2400m_netdev_ops = {
559 .ndo_change_mtu = i2400m_change_mtu, 603 .ndo_change_mtu = i2400m_change_mtu,
560}; 604};
561 605
606static void i2400m_get_drvinfo(struct net_device *net_dev,
607 struct ethtool_drvinfo *info)
608{
609 struct i2400m *i2400m = net_dev_to_i2400m(net_dev);
610
611 strncpy(info->driver, KBUILD_MODNAME, sizeof(info->driver) - 1);
612 strncpy(info->fw_version, i2400m->fw_name, sizeof(info->fw_version) - 1);
613 if (net_dev->dev.parent)
614 strncpy(info->bus_info, dev_name(net_dev->dev.parent),
615 sizeof(info->bus_info) - 1);
616}
617
618static const struct ethtool_ops i2400m_ethtool_ops = {
619 .get_drvinfo = i2400m_get_drvinfo,
620 .get_link = ethtool_op_get_link,
621};
562 622
563/** 623/**
564 * i2400m_netdev_setup - Setup setup @net_dev's i2400m private data 624 * i2400m_netdev_setup - Setup setup @net_dev's i2400m private data
@@ -580,6 +640,7 @@ void i2400m_netdev_setup(struct net_device *net_dev)
580 & ~IFF_MULTICAST); 640 & ~IFF_MULTICAST);
581 net_dev->watchdog_timeo = I2400M_TX_TIMEOUT; 641 net_dev->watchdog_timeo = I2400M_TX_TIMEOUT;
582 net_dev->netdev_ops = &i2400m_netdev_ops; 642 net_dev->netdev_ops = &i2400m_netdev_ops;
643 net_dev->ethtool_ops = &i2400m_ethtool_ops;
583 d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev); 644 d_fnend(3, NULL, "(net_dev %p) = void\n", net_dev);
584} 645}
585EXPORT_SYMBOL_GPL(i2400m_netdev_setup); 646EXPORT_SYMBOL_GPL(i2400m_netdev_setup);
diff --git a/drivers/net/wimax/i2400m/rx.c b/drivers/net/wimax/i2400m/rx.c
index 07c32e68909f..e3d2a9de023c 100644
--- a/drivers/net/wimax/i2400m/rx.c
+++ b/drivers/net/wimax/i2400m/rx.c
@@ -158,30 +158,104 @@ struct i2400m_report_hook_args {
158 struct sk_buff *skb_rx; 158 struct sk_buff *skb_rx;
159 const struct i2400m_l3l4_hdr *l3l4_hdr; 159 const struct i2400m_l3l4_hdr *l3l4_hdr;
160 size_t size; 160 size_t size;
161 struct list_head list_node;
161}; 162};
162 163
163 164
164/* 165/*
165 * Execute i2400m_report_hook in a workqueue 166 * Execute i2400m_report_hook in a workqueue
166 * 167 *
167 * Unpacks arguments from the deferred call, executes it and then 168 * Goes over the list of queued reports in i2400m->rx_reports and
168 * drops the references. 169 * processes them.
169 * 170 *
170 * Obvious NOTE: References are needed because we are a separate 171 * NOTE: refcounts on i2400m are not needed because we flush the
171 * thread; otherwise the buffer changes under us because it is 172 * workqueue this runs on (i2400m->work_queue) before destroying
172 * released by the original caller. 173 * i2400m.
173 */ 174 */
174static
175void i2400m_report_hook_work(struct work_struct *ws) 175void i2400m_report_hook_work(struct work_struct *ws)
176{ 176{
177 struct i2400m_work *iw = 177 struct i2400m *i2400m = container_of(ws, struct i2400m, rx_report_ws);
178 container_of(ws, struct i2400m_work, ws); 178 struct device *dev = i2400m_dev(i2400m);
179 struct i2400m_report_hook_args *args = (void *) iw->pl; 179 struct i2400m_report_hook_args *args, *args_next;
180 if (iw->i2400m->ready) 180 LIST_HEAD(list);
181 i2400m_report_hook(iw->i2400m, args->l3l4_hdr, args->size); 181 unsigned long flags;
182 kfree_skb(args->skb_rx); 182
183 i2400m_put(iw->i2400m); 183 while (1) {
184 kfree(iw); 184 spin_lock_irqsave(&i2400m->rx_lock, flags);
185 list_splice_init(&i2400m->rx_reports, &list);
186 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
187 if (list_empty(&list))
188 break;
189 else
190 d_printf(1, dev, "processing queued reports\n");
191 list_for_each_entry_safe(args, args_next, &list, list_node) {
192 d_printf(2, dev, "processing queued report %p\n", args);
193 i2400m_report_hook(i2400m, args->l3l4_hdr, args->size);
194 kfree_skb(args->skb_rx);
195 list_del(&args->list_node);
196 kfree(args);
197 }
198 }
199}
200
201
202/*
203 * Flush the list of queued reports
204 */
205static
206void i2400m_report_hook_flush(struct i2400m *i2400m)
207{
208 struct device *dev = i2400m_dev(i2400m);
209 struct i2400m_report_hook_args *args, *args_next;
210 LIST_HEAD(list);
211 unsigned long flags;
212
213 d_printf(1, dev, "flushing queued reports\n");
214 spin_lock_irqsave(&i2400m->rx_lock, flags);
215 list_splice_init(&i2400m->rx_reports, &list);
216 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
217 list_for_each_entry_safe(args, args_next, &list, list_node) {
218 d_printf(2, dev, "flushing queued report %p\n", args);
219 kfree_skb(args->skb_rx);
220 list_del(&args->list_node);
221 kfree(args);
222 }
223}
224
225
226/*
227 * Queue a report for later processing
228 *
229 * @i2400m: device descriptor
230 * @skb_rx: skb that contains the payload (for reference counting)
231 * @l3l4_hdr: pointer to the control
232 * @size: size of the message
233 */
234static
235void i2400m_report_hook_queue(struct i2400m *i2400m, struct sk_buff *skb_rx,
236 const void *l3l4_hdr, size_t size)
237{
238 struct device *dev = i2400m_dev(i2400m);
239 unsigned long flags;
240 struct i2400m_report_hook_args *args;
241
242 args = kzalloc(sizeof(*args), GFP_NOIO);
243 if (args) {
244 args->skb_rx = skb_get(skb_rx);
245 args->l3l4_hdr = l3l4_hdr;
246 args->size = size;
247 spin_lock_irqsave(&i2400m->rx_lock, flags);
248 list_add_tail(&args->list_node, &i2400m->rx_reports);
249 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
250 d_printf(2, dev, "queued report %p\n", args);
251 rmb(); /* see i2400m->ready's documentation */
252 if (likely(i2400m->ready)) /* only send if up */
253 queue_work(i2400m->work_queue, &i2400m->rx_report_ws);
254 } else {
255 if (printk_ratelimit())
256 dev_err(dev, "%s:%u: Can't allocate %zu B\n",
257 __func__, __LINE__, sizeof(*args));
258 }
185} 259}
186 260
187 261
@@ -295,21 +369,29 @@ void i2400m_rx_ctl(struct i2400m *i2400m, struct sk_buff *skb_rx,
295 msg_type, size); 369 msg_type, size);
296 d_dump(2, dev, l3l4_hdr, size); 370 d_dump(2, dev, l3l4_hdr, size);
297 if (msg_type & I2400M_MT_REPORT_MASK) { 371 if (msg_type & I2400M_MT_REPORT_MASK) {
298 /* These hooks have to be ran serialized; as well, the 372 /*
299 * handling might force the execution of commands, and 373 * Process each report
300 * that might cause reentrancy issues with 374 *
301 * bus-specific subdrivers and workqueues. So we run 375 * - has to be ran serialized as well
302 * it in a separate workqueue. */ 376 *
303 struct i2400m_report_hook_args args = { 377 * - the handling might force the execution of
304 .skb_rx = skb_rx, 378 * commands. That might cause reentrancy issues with
305 .l3l4_hdr = l3l4_hdr, 379 * bus-specific subdrivers and workqueues, so the we
306 .size = size 380 * run it in a separate workqueue.
307 }; 381 *
308 if (unlikely(i2400m->ready == 0)) /* only send if up */ 382 * - when the driver is not yet ready to handle them,
309 return; 383 * they are queued and at some point the queue is
310 skb_get(skb_rx); 384 * restarted [NOTE: we can't queue SKBs directly, as
311 i2400m_queue_work(i2400m, i2400m_report_hook_work, 385 * this might be a piece of a SKB, not the whole
312 GFP_KERNEL, &args, sizeof(args)); 386 * thing, and this is cheaper than cloning the
387 * SKB].
388 *
389 * Note we don't do refcounting for the device
390 * structure; this is because before destroying
391 * 'i2400m', we make sure to flush the
392 * i2400m->work_queue, so there are no issues.
393 */
394 i2400m_report_hook_queue(i2400m, skb_rx, l3l4_hdr, size);
313 if (unlikely(i2400m->trace_msg_from_user)) 395 if (unlikely(i2400m->trace_msg_from_user))
314 wimax_msg(&i2400m->wimax_dev, "echo", 396 wimax_msg(&i2400m->wimax_dev, "echo",
315 l3l4_hdr, size, GFP_KERNEL); 397 l3l4_hdr, size, GFP_KERNEL);
@@ -363,8 +445,6 @@ void i2400m_rx_trace(struct i2400m *i2400m,
363 msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET", 445 msg_type & I2400M_MT_REPORT_MASK ? "REPORT" : "CMD/SET/GET",
364 msg_type, size); 446 msg_type, size);
365 d_dump(2, dev, l3l4_hdr, size); 447 d_dump(2, dev, l3l4_hdr, size);
366 if (unlikely(i2400m->ready == 0)) /* only send if up */
367 return;
368 result = wimax_msg(wimax_dev, "trace", l3l4_hdr, size, GFP_KERNEL); 448 result = wimax_msg(wimax_dev, "trace", l3l4_hdr, size, GFP_KERNEL);
369 if (result < 0) 449 if (result < 0)
370 dev_err(dev, "error sending trace to userspace: %d\n", 450 dev_err(dev, "error sending trace to userspace: %d\n",
@@ -748,7 +828,7 @@ void i2400m_roq_queue(struct i2400m *i2400m, struct i2400m_roq *roq,
748 dev_err(dev, "SW BUG? queue nsn %d (lbn %u ws %u)\n", 828 dev_err(dev, "SW BUG? queue nsn %d (lbn %u ws %u)\n",
749 nsn, lbn, roq->ws); 829 nsn, lbn, roq->ws);
750 i2400m_roq_log_dump(i2400m, roq); 830 i2400m_roq_log_dump(i2400m, roq);
751 i2400m->bus_reset(i2400m, I2400M_RT_WARM); 831 i2400m_reset(i2400m, I2400M_RT_WARM);
752 } else { 832 } else {
753 __i2400m_roq_queue(i2400m, roq, skb, lbn, nsn); 833 __i2400m_roq_queue(i2400m, roq, skb, lbn, nsn);
754 i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET, 834 i2400m_roq_log_add(i2400m, roq, I2400M_RO_TYPE_PACKET,
@@ -814,7 +894,7 @@ void i2400m_roq_queue_update_ws(struct i2400m *i2400m, struct i2400m_roq *roq,
814 dev_err(dev, "SW BUG? queue_update_ws nsn %u (sn %u ws %u)\n", 894 dev_err(dev, "SW BUG? queue_update_ws nsn %u (sn %u ws %u)\n",
815 nsn, sn, roq->ws); 895 nsn, sn, roq->ws);
816 i2400m_roq_log_dump(i2400m, roq); 896 i2400m_roq_log_dump(i2400m, roq);
817 i2400m->bus_reset(i2400m, I2400M_RT_WARM); 897 i2400m_reset(i2400m, I2400M_RT_WARM);
818 } else { 898 } else {
819 /* if the queue is empty, don't bother as we'd queue 899 /* if the queue is empty, don't bother as we'd queue
820 * it and inmediately unqueue it -- just deliver it */ 900 * it and inmediately unqueue it -- just deliver it */
@@ -1194,6 +1274,28 @@ error_msg_hdr_check:
1194EXPORT_SYMBOL_GPL(i2400m_rx); 1274EXPORT_SYMBOL_GPL(i2400m_rx);
1195 1275
1196 1276
1277void i2400m_unknown_barker(struct i2400m *i2400m,
1278 const void *buf, size_t size)
1279{
1280 struct device *dev = i2400m_dev(i2400m);
1281 char prefix[64];
1282 const __le32 *barker = buf;
1283 dev_err(dev, "RX: HW BUG? unknown barker %08x, "
1284 "dropping %zu bytes\n", le32_to_cpu(*barker), size);
1285 snprintf(prefix, sizeof(prefix), "%s %s: ",
1286 dev_driver_string(dev), dev_name(dev));
1287 if (size > 64) {
1288 print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
1289 8, 4, buf, 64, 0);
1290 printk(KERN_ERR "%s... (only first 64 bytes "
1291 "dumped)\n", prefix);
1292 } else
1293 print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
1294 8, 4, buf, size, 0);
1295}
1296EXPORT_SYMBOL(i2400m_unknown_barker);
1297
1298
1197/* 1299/*
1198 * Initialize the RX queue and infrastructure 1300 * Initialize the RX queue and infrastructure
1199 * 1301 *
@@ -1261,4 +1363,6 @@ void i2400m_rx_release(struct i2400m *i2400m)
1261 kfree(i2400m->rx_roq[0].log); 1363 kfree(i2400m->rx_roq[0].log);
1262 kfree(i2400m->rx_roq); 1364 kfree(i2400m->rx_roq);
1263 } 1365 }
1366 /* at this point, nothing can be received... */
1367 i2400m_report_hook_flush(i2400m);
1264} 1368}
diff --git a/drivers/net/wimax/i2400m/sdio-fw.c b/drivers/net/wimax/i2400m/sdio-fw.c
index 7d6ec0f475f8..8e025418f5be 100644
--- a/drivers/net/wimax/i2400m/sdio-fw.c
+++ b/drivers/net/wimax/i2400m/sdio-fw.c
@@ -118,7 +118,8 @@ ssize_t i2400ms_bus_bm_cmd_send(struct i2400m *i2400m,
118 if (cmd_size > I2400M_BM_CMD_BUF_SIZE) 118 if (cmd_size > I2400M_BM_CMD_BUF_SIZE)
119 goto error_too_big; 119 goto error_too_big;
120 120
121 memcpy(i2400m->bm_cmd_buf, _cmd, cmd_size); /* Prep command */ 121 if (_cmd != i2400m->bm_cmd_buf)
122 memmove(i2400m->bm_cmd_buf, _cmd, cmd_size);
122 cmd = i2400m->bm_cmd_buf; 123 cmd = i2400m->bm_cmd_buf;
123 if (cmd_size_a > cmd_size) /* Zero pad space */ 124 if (cmd_size_a > cmd_size) /* Zero pad space */
124 memset(i2400m->bm_cmd_buf + cmd_size, 0, cmd_size_a - cmd_size); 125 memset(i2400m->bm_cmd_buf + cmd_size, 0, cmd_size_a - cmd_size);
@@ -177,10 +178,6 @@ ssize_t i2400ms_bus_bm_wait_for_ack(struct i2400m *i2400m,
177 d_fnstart(5, dev, "(i2400m %p ack %p size %zu)\n", 178 d_fnstart(5, dev, "(i2400m %p ack %p size %zu)\n",
178 i2400m, ack, ack_size); 179 i2400m, ack, ack_size);
179 180
180 spin_lock(&i2400m->rx_lock);
181 i2400ms->bm_ack_size = -EINPROGRESS;
182 spin_unlock(&i2400m->rx_lock);
183
184 result = wait_event_timeout(i2400ms->bm_wfa_wq, 181 result = wait_event_timeout(i2400ms->bm_wfa_wq,
185 i2400ms->bm_ack_size != -EINPROGRESS, 182 i2400ms->bm_ack_size != -EINPROGRESS,
186 2 * HZ); 183 2 * HZ);
@@ -199,6 +196,10 @@ ssize_t i2400ms_bus_bm_wait_for_ack(struct i2400m *i2400m,
199 size = min(ack_size, i2400ms->bm_ack_size); 196 size = min(ack_size, i2400ms->bm_ack_size);
200 memcpy(ack, i2400m->bm_ack_buf, size); 197 memcpy(ack, i2400m->bm_ack_buf, size);
201 } 198 }
199 /*
200 * Remember always to clear the bm_ack_size to -EINPROGRESS
201 * after the RX data is processed
202 */
202 i2400ms->bm_ack_size = -EINPROGRESS; 203 i2400ms->bm_ack_size = -EINPROGRESS;
203 spin_unlock(&i2400m->rx_lock); 204 spin_unlock(&i2400m->rx_lock);
204 205
diff --git a/drivers/net/wimax/i2400m/sdio-rx.c b/drivers/net/wimax/i2400m/sdio-rx.c
index 321beadf6e47..8adf6c9b6f8f 100644
--- a/drivers/net/wimax/i2400m/sdio-rx.c
+++ b/drivers/net/wimax/i2400m/sdio-rx.c
@@ -53,6 +53,7 @@
53 * i2400ms_irq() 53 * i2400ms_irq()
54 * i2400ms_rx() 54 * i2400ms_rx()
55 * __i2400ms_rx_get_size() 55 * __i2400ms_rx_get_size()
56 * i2400m_is_boot_barker()
56 * i2400m_rx() 57 * i2400m_rx()
57 * 58 *
58 * i2400ms_rx_setup() 59 * i2400ms_rx_setup()
@@ -138,6 +139,11 @@ void i2400ms_rx(struct i2400ms *i2400ms)
138 ret = rx_size; 139 ret = rx_size;
139 goto error_get_size; 140 goto error_get_size;
140 } 141 }
142 /*
143 * Hardware quirk: make sure to clear the INTR status register
144 * AFTER getting the data transfer size.
145 */
146 sdio_writeb(func, 1, I2400MS_INTR_CLEAR_ADDR, &ret);
141 147
142 ret = -ENOMEM; 148 ret = -ENOMEM;
143 skb = alloc_skb(rx_size, GFP_ATOMIC); 149 skb = alloc_skb(rx_size, GFP_ATOMIC);
@@ -153,25 +159,34 @@ void i2400ms_rx(struct i2400ms *i2400ms)
153 } 159 }
154 160
155 rmb(); /* make sure we get boot_mode from dev_reset_handle */ 161 rmb(); /* make sure we get boot_mode from dev_reset_handle */
156 if (i2400m->boot_mode == 1) { 162 if (unlikely(i2400m->boot_mode == 1)) {
157 spin_lock(&i2400m->rx_lock); 163 spin_lock(&i2400m->rx_lock);
158 i2400ms->bm_ack_size = rx_size; 164 i2400ms->bm_ack_size = rx_size;
159 spin_unlock(&i2400m->rx_lock); 165 spin_unlock(&i2400m->rx_lock);
160 memcpy(i2400m->bm_ack_buf, skb->data, rx_size); 166 memcpy(i2400m->bm_ack_buf, skb->data, rx_size);
161 wake_up(&i2400ms->bm_wfa_wq); 167 wake_up(&i2400ms->bm_wfa_wq);
162 dev_err(dev, "RX: SDIO boot mode message\n"); 168 d_printf(5, dev, "RX: SDIO boot mode message\n");
163 kfree_skb(skb); 169 kfree_skb(skb);
164 } else if (unlikely(!memcmp(skb->data, i2400m_NBOOT_BARKER, 170 goto out;
165 sizeof(i2400m_NBOOT_BARKER)) 171 }
166 || !memcmp(skb->data, i2400m_SBOOT_BARKER, 172 ret = -EIO;
167 sizeof(i2400m_SBOOT_BARKER)))) { 173 if (unlikely(rx_size < sizeof(__le32))) {
168 ret = i2400m_dev_reset_handle(i2400m); 174 dev_err(dev, "HW BUG? only %zu bytes received\n", rx_size);
175 goto error_bad_size;
176 }
177 if (likely(i2400m_is_d2h_barker(skb->data))) {
178 skb_put(skb, rx_size);
179 i2400m_rx(i2400m, skb);
180 } else if (unlikely(i2400m_is_boot_barker(i2400m,
181 skb->data, rx_size))) {
182 ret = i2400m_dev_reset_handle(i2400m, "device rebooted");
169 dev_err(dev, "RX: SDIO reboot barker\n"); 183 dev_err(dev, "RX: SDIO reboot barker\n");
170 kfree_skb(skb); 184 kfree_skb(skb);
171 } else { 185 } else {
172 skb_put(skb, rx_size); 186 i2400m_unknown_barker(i2400m, skb->data, rx_size);
173 i2400m_rx(i2400m, skb); 187 kfree_skb(skb);
174 } 188 }
189out:
175 d_fnend(7, dev, "(i2400ms %p) = void\n", i2400ms); 190 d_fnend(7, dev, "(i2400ms %p) = void\n", i2400ms);
176 return; 191 return;
177 192
@@ -179,6 +194,7 @@ error_memcpy_fromio:
179 kfree_skb(skb); 194 kfree_skb(skb);
180error_alloc_skb: 195error_alloc_skb:
181error_get_size: 196error_get_size:
197error_bad_size:
182 d_fnend(7, dev, "(i2400ms %p) = %d\n", i2400ms, ret); 198 d_fnend(7, dev, "(i2400ms %p) = %d\n", i2400ms, ret);
183 return; 199 return;
184} 200}
@@ -209,7 +225,6 @@ void i2400ms_irq(struct sdio_func *func)
209 dev_err(dev, "RX: BUG? got IRQ but no interrupt ready?\n"); 225 dev_err(dev, "RX: BUG? got IRQ but no interrupt ready?\n");
210 goto error_no_irq; 226 goto error_no_irq;
211 } 227 }
212 sdio_writeb(func, 1, I2400MS_INTR_CLEAR_ADDR, &ret);
213 i2400ms_rx(i2400ms); 228 i2400ms_rx(i2400ms);
214error_no_irq: 229error_no_irq:
215 d_fnend(6, dev, "(i2400ms %p) = void\n", i2400ms); 230 d_fnend(6, dev, "(i2400ms %p) = void\n", i2400ms);
@@ -234,6 +249,13 @@ int i2400ms_rx_setup(struct i2400ms *i2400ms)
234 init_waitqueue_head(&i2400ms->bm_wfa_wq); 249 init_waitqueue_head(&i2400ms->bm_wfa_wq);
235 spin_lock(&i2400m->rx_lock); 250 spin_lock(&i2400m->rx_lock);
236 i2400ms->bm_wait_result = -EINPROGRESS; 251 i2400ms->bm_wait_result = -EINPROGRESS;
252 /*
253 * Before we are about to enable the RX interrupt, make sure
254 * bm_ack_size is cleared to -EINPROGRESS which indicates
255 * no RX interrupt happened yet or the previous interrupt
256 * has been handled, we are ready to take the new interrupt
257 */
258 i2400ms->bm_ack_size = -EINPROGRESS;
237 spin_unlock(&i2400m->rx_lock); 259 spin_unlock(&i2400m->rx_lock);
238 260
239 sdio_claim_host(func); 261 sdio_claim_host(func);
diff --git a/drivers/net/wimax/i2400m/sdio-tx.c b/drivers/net/wimax/i2400m/sdio-tx.c
index 5105a5ebc44f..de66d068c9cb 100644
--- a/drivers/net/wimax/i2400m/sdio-tx.c
+++ b/drivers/net/wimax/i2400m/sdio-tx.c
@@ -149,5 +149,8 @@ int i2400ms_tx_setup(struct i2400ms *i2400ms)
149 149
150void i2400ms_tx_release(struct i2400ms *i2400ms) 150void i2400ms_tx_release(struct i2400ms *i2400ms)
151{ 151{
152 destroy_workqueue(i2400ms->tx_workqueue); 152 if (i2400ms->tx_workqueue) {
153 destroy_workqueue(i2400ms->tx_workqueue);
154 i2400ms->tx_workqueue = NULL;
155 }
153} 156}
diff --git a/drivers/net/wimax/i2400m/sdio.c b/drivers/net/wimax/i2400m/sdio.c
index 2981e211e04f..76a50ac02ebb 100644
--- a/drivers/net/wimax/i2400m/sdio.c
+++ b/drivers/net/wimax/i2400m/sdio.c
@@ -43,18 +43,9 @@
43 * i2400m_release() 43 * i2400m_release()
44 * free_netdev(net_dev) 44 * free_netdev(net_dev)
45 * 45 *
46 * i2400ms_bus_reset() Called by i2400m->bus_reset 46 * i2400ms_bus_reset() Called by i2400m_reset
47 * __i2400ms_reset() 47 * __i2400ms_reset()
48 * __i2400ms_send_barker() 48 * __i2400ms_send_barker()
49 *
50 * i2400ms_bus_dev_start() Called by i2400m_dev_start() [who is
51 * i2400ms_tx_setup() called by i2400m_setup()]
52 * i2400ms_rx_setup()
53 *
54 * i2400ms_bus_dev_stop() Called by i2400m_dev_stop() [who is
55 * i2400ms_rx_release() is called by i2400m_release()]
56 * i2400ms_tx_release()
57 *
58 */ 49 */
59 50
60#include <linux/debugfs.h> 51#include <linux/debugfs.h>
@@ -71,6 +62,14 @@
71static int ioe_timeout = 2; 62static int ioe_timeout = 2;
72module_param(ioe_timeout, int, 0); 63module_param(ioe_timeout, int, 0);
73 64
65static char i2400ms_debug_params[128];
66module_param_string(debug, i2400ms_debug_params, sizeof(i2400ms_debug_params),
67 0644);
68MODULE_PARM_DESC(debug,
69 "String of space-separated NAME:VALUE pairs, where NAMEs "
70 "are the different debug submodules and VALUE are the "
71 "initial debug value to set.");
72
74/* Our firmware file name list */ 73/* Our firmware file name list */
75static const char *i2400ms_bus_fw_names[] = { 74static const char *i2400ms_bus_fw_names[] = {
76#define I2400MS_FW_FILE_NAME "i2400m-fw-sdio-1.3.sbcf" 75#define I2400MS_FW_FILE_NAME "i2400m-fw-sdio-1.3.sbcf"
@@ -95,17 +94,24 @@ static const struct i2400m_poke_table i2400ms_pokes[] = {
95 * when we ask it to explicitly doing). Tries until a timeout is 94 * when we ask it to explicitly doing). Tries until a timeout is
96 * reached. 95 * reached.
97 * 96 *
97 * The @maxtries argument indicates how many times (at most) it should
98 * be tried to enable the function. 0 means forever. This acts along
99 * with the timeout (ie: it'll stop trying as soon as the maximum
100 * number of tries is reached _or_ as soon as the timeout is reached).
101 *
98 * The reverse of this is...sdio_disable_function() 102 * The reverse of this is...sdio_disable_function()
99 * 103 *
100 * Returns: 0 if the SDIO function was enabled, < 0 errno code on 104 * Returns: 0 if the SDIO function was enabled, < 0 errno code on
101 * error (-ENODEV when it was unable to enable the function). 105 * error (-ENODEV when it was unable to enable the function).
102 */ 106 */
103static 107static
104int i2400ms_enable_function(struct sdio_func *func) 108int i2400ms_enable_function(struct i2400ms *i2400ms, unsigned maxtries)
105{ 109{
110 struct sdio_func *func = i2400ms->func;
106 u64 timeout; 111 u64 timeout;
107 int err; 112 int err;
108 struct device *dev = &func->dev; 113 struct device *dev = &func->dev;
114 unsigned tries = 0;
109 115
110 d_fnstart(3, dev, "(func %p)\n", func); 116 d_fnstart(3, dev, "(func %p)\n", func);
111 /* Setup timeout (FIXME: This needs to read the CIS table to 117 /* Setup timeout (FIXME: This needs to read the CIS table to
@@ -115,6 +121,14 @@ int i2400ms_enable_function(struct sdio_func *func)
115 err = -ENODEV; 121 err = -ENODEV;
116 while (err != 0 && time_before64(get_jiffies_64(), timeout)) { 122 while (err != 0 && time_before64(get_jiffies_64(), timeout)) {
117 sdio_claim_host(func); 123 sdio_claim_host(func);
124 /*
 125 * There is a silicon bug on the IWMC3200, where the
126 * IOE timeout will cause problems on Moorestown
127 * platforms (system hang). We explicitly overwrite
128 * func->enable_timeout here to work around the issue.
129 */
130 if (i2400ms->iwmc3200)
131 func->enable_timeout = IWMC3200_IOR_TIMEOUT;
118 err = sdio_enable_func(func); 132 err = sdio_enable_func(func);
119 if (0 == err) { 133 if (0 == err) {
120 sdio_release_host(func); 134 sdio_release_host(func);
@@ -122,8 +136,11 @@ int i2400ms_enable_function(struct sdio_func *func)
122 goto function_enabled; 136 goto function_enabled;
123 } 137 }
124 d_printf(2, dev, "SDIO function failed to enable: %d\n", err); 138 d_printf(2, dev, "SDIO function failed to enable: %d\n", err);
125 sdio_disable_func(func);
126 sdio_release_host(func); 139 sdio_release_host(func);
140 if (maxtries > 0 && ++tries >= maxtries) {
141 err = -ETIME;
142 break;
143 }
127 msleep(I2400MS_INIT_SLEEP_INTERVAL); 144 msleep(I2400MS_INIT_SLEEP_INTERVAL);
128 } 145 }
129 /* If timed out, device is not there yet -- get -ENODEV so 146 /* If timed out, device is not there yet -- get -ENODEV so
@@ -140,46 +157,99 @@ function_enabled:
140 157
141 158
142/* 159/*
143 * Setup driver resources needed to communicate with the device 160 * Setup minimal device communication infrastructure needed to at
161 * least be able to update the firmware.
144 * 162 *
145 * The fw needs some time to settle, and it was just uploaded, 163 * Note the ugly trick: if we are in the probe path
146 * so give it a break first. I'd prefer to just wait for the device to 164 * (i2400ms->debugfs_dentry == NULL), we only retry function
 147 * send something, but seems the poking we do to enable SDIO stuff 165 * enablement once, to avoid racing with the iwmc3200 top controller.
148 * interferes with it, so just give it a break before starting...
149 */ 166 */
150static 167static
151int i2400ms_bus_dev_start(struct i2400m *i2400m) 168int i2400ms_bus_setup(struct i2400m *i2400m)
152{ 169{
153 int result; 170 int result;
154 struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m); 171 struct i2400ms *i2400ms =
172 container_of(i2400m, struct i2400ms, i2400m);
173 struct device *dev = i2400m_dev(i2400m);
155 struct sdio_func *func = i2400ms->func; 174 struct sdio_func *func = i2400ms->func;
156 struct device *dev = &func->dev; 175 int retries;
176
177 sdio_claim_host(func);
178 result = sdio_set_block_size(func, I2400MS_BLK_SIZE);
179 sdio_release_host(func);
180 if (result < 0) {
181 dev_err(dev, "Failed to set block size: %d\n", result);
182 goto error_set_blk_size;
183 }
184
185 if (i2400ms->iwmc3200 && i2400ms->debugfs_dentry == NULL)
186 retries = 1;
187 else
188 retries = 0;
189 result = i2400ms_enable_function(i2400ms, retries);
190 if (result < 0) {
191 dev_err(dev, "Cannot enable SDIO function: %d\n", result);
192 goto error_func_enable;
193 }
157 194
158 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
159 msleep(200);
160 result = i2400ms_tx_setup(i2400ms); 195 result = i2400ms_tx_setup(i2400ms);
161 if (result < 0) 196 if (result < 0)
162 goto error_tx_setup; 197 goto error_tx_setup;
163 d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, result); 198 result = i2400ms_rx_setup(i2400ms);
164 return result; 199 if (result < 0)
200 goto error_rx_setup;
201 return 0;
165 202
166error_tx_setup: 203error_rx_setup:
167 i2400ms_tx_release(i2400ms); 204 i2400ms_tx_release(i2400ms);
168 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); 205error_tx_setup:
206 sdio_claim_host(func);
207 sdio_disable_func(func);
208 sdio_release_host(func);
209error_func_enable:
210error_set_blk_size:
169 return result; 211 return result;
170} 212}
171 213
172 214
215/*
216 * Tear down minimal device communication infrastructure needed to at
217 * least be able to update the firmware.
218 */
219static
220void i2400ms_bus_release(struct i2400m *i2400m)
221{
222 struct i2400ms *i2400ms =
223 container_of(i2400m, struct i2400ms, i2400m);
224 struct sdio_func *func = i2400ms->func;
225
226 i2400ms_rx_release(i2400ms);
227 i2400ms_tx_release(i2400ms);
228 sdio_claim_host(func);
229 sdio_disable_func(func);
230 sdio_release_host(func);
231}
232
233
234/*
235 * Setup driver resources needed to communicate with the device
236 *
237 * The fw needs some time to settle, and it was just uploaded,
238 * so give it a break first. I'd prefer to just wait for the device to
239 * send something, but seems the poking we do to enable SDIO stuff
240 * interferes with it, so just give it a break before starting...
241 */
173static 242static
174void i2400ms_bus_dev_stop(struct i2400m *i2400m) 243int i2400ms_bus_dev_start(struct i2400m *i2400m)
175{ 244{
176 struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m); 245 struct i2400ms *i2400ms = container_of(i2400m, struct i2400ms, i2400m);
177 struct sdio_func *func = i2400ms->func; 246 struct sdio_func *func = i2400ms->func;
178 struct device *dev = &func->dev; 247 struct device *dev = &func->dev;
179 248
180 d_fnstart(3, dev, "(i2400m %p)\n", i2400m); 249 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
181 i2400ms_tx_release(i2400ms); 250 msleep(200);
182 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); 251 d_fnend(3, dev, "(i2400m %p) = %d\n", i2400m, 0);
252 return 0;
183} 253}
184 254
185 255
@@ -233,18 +303,17 @@ error_kzalloc:
233 * Warm reset: 303 * Warm reset:
234 * 304 *
235 * The device will be fully reset internally, but won't be 305 * The device will be fully reset internally, but won't be
236 * disconnected from the USB bus (so no reenumeration will 306 * disconnected from the bus (so no reenumeration will
 237 * happen). Firmware upload will be necessary. 307 * happen). Firmware upload will be necessary.
238 * 308 *
239 * The device will send a reboot barker in the notification endpoint 309 * The device will send a reboot barker that will trigger the driver
240 * that will trigger the driver to reinitialize the state 310 * to reinitialize the state via __i2400m_dev_reset_handle.
241 * automatically from notif.c:i2400m_notification_grok() into
242 * i2400m_dev_bootstrap_delayed().
243 * 311 *
244 * Cold and bus (USB) reset: 312 *
313 * Cold and bus reset:
245 * 314 *
246 * The device will be fully reset internally, disconnected from the 315 * The device will be fully reset internally, disconnected from the
 247 * USB bus and a reenumeration will happen. Firmware upload will be 316 * bus and a reenumeration will happen. Firmware upload will be
 248 * necessary. Thus, we don't do any locking or struct 317 * necessary. Thus, we don't do any locking or struct
249 * reinitialization, as we are going to be fully disconnected and 318 * reinitialization, as we are going to be fully disconnected and
250 * reenumerated. 319 * reenumerated.
@@ -283,25 +352,13 @@ int i2400ms_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
283 sizeof(i2400m_COLD_BOOT_BARKER)); 352 sizeof(i2400m_COLD_BOOT_BARKER));
284 else if (rt == I2400M_RT_BUS) { 353 else if (rt == I2400M_RT_BUS) {
285do_bus_reset: 354do_bus_reset:
286 /* call netif_tx_disable() before sending IOE disable,
287 * so that all the tx from network layer are stopped
288 * while IOE is being reset. Make sure it is called
289 * only after register_netdev() was issued.
290 */
291 if (i2400m->wimax_dev.net_dev->reg_state == NETREG_REGISTERED)
292 netif_tx_disable(i2400m->wimax_dev.net_dev);
293 355
294 i2400ms_rx_release(i2400ms); 356 i2400ms_bus_release(i2400m);
295 sdio_claim_host(i2400ms->func);
296 sdio_disable_func(i2400ms->func);
297 sdio_release_host(i2400ms->func);
298 357
299 /* Wait for the device to settle */ 358 /* Wait for the device to settle */
300 msleep(40); 359 msleep(40);
301 360
302 result = i2400ms_enable_function(i2400ms->func); 361 result = i2400ms_bus_setup(i2400m);
303 if (result >= 0)
304 i2400ms_rx_setup(i2400ms);
305 } else 362 } else
306 BUG(); 363 BUG();
307 if (result < 0 && rt != I2400M_RT_BUS) { 364 if (result < 0 && rt != I2400M_RT_BUS) {
@@ -350,7 +407,7 @@ int i2400ms_debugfs_add(struct i2400ms *i2400ms)
350 int result; 407 int result;
351 struct dentry *dentry = i2400ms->i2400m.wimax_dev.debugfs_dentry; 408 struct dentry *dentry = i2400ms->i2400m.wimax_dev.debugfs_dentry;
352 409
353 dentry = debugfs_create_dir("i2400m-usb", dentry); 410 dentry = debugfs_create_dir("i2400m-sdio", dentry);
354 result = PTR_ERR(dentry); 411 result = PTR_ERR(dentry);
355 if (IS_ERR(dentry)) { 412 if (IS_ERR(dentry)) {
356 if (result == -ENODEV) 413 if (result == -ENODEV)
@@ -367,6 +424,7 @@ int i2400ms_debugfs_add(struct i2400ms *i2400ms)
367 424
368error: 425error:
369 debugfs_remove_recursive(i2400ms->debugfs_dentry); 426 debugfs_remove_recursive(i2400ms->debugfs_dentry);
427 i2400ms->debugfs_dentry = NULL;
370 return result; 428 return result;
371} 429}
372 430
@@ -425,37 +483,30 @@ int i2400ms_probe(struct sdio_func *func,
425 483
426 i2400m->bus_tx_block_size = I2400MS_BLK_SIZE; 484 i2400m->bus_tx_block_size = I2400MS_BLK_SIZE;
427 i2400m->bus_pl_size_max = I2400MS_PL_SIZE_MAX; 485 i2400m->bus_pl_size_max = I2400MS_PL_SIZE_MAX;
486 i2400m->bus_setup = i2400ms_bus_setup;
428 i2400m->bus_dev_start = i2400ms_bus_dev_start; 487 i2400m->bus_dev_start = i2400ms_bus_dev_start;
429 i2400m->bus_dev_stop = i2400ms_bus_dev_stop; 488 i2400m->bus_dev_stop = NULL;
489 i2400m->bus_release = i2400ms_bus_release;
430 i2400m->bus_tx_kick = i2400ms_bus_tx_kick; 490 i2400m->bus_tx_kick = i2400ms_bus_tx_kick;
431 i2400m->bus_reset = i2400ms_bus_reset; 491 i2400m->bus_reset = i2400ms_bus_reset;
432 /* The iwmc3200-wimax sometimes requires the driver to try 492 /* The iwmc3200-wimax sometimes requires the driver to try
433 * hard when we paint it into a corner. */ 493 * hard when we paint it into a corner. */
434 i2400m->bus_bm_retries = I3200_BOOT_RETRIES; 494 i2400m->bus_bm_retries = I2400M_SDIO_BOOT_RETRIES;
435 i2400m->bus_bm_cmd_send = i2400ms_bus_bm_cmd_send; 495 i2400m->bus_bm_cmd_send = i2400ms_bus_bm_cmd_send;
436 i2400m->bus_bm_wait_for_ack = i2400ms_bus_bm_wait_for_ack; 496 i2400m->bus_bm_wait_for_ack = i2400ms_bus_bm_wait_for_ack;
437 i2400m->bus_fw_names = i2400ms_bus_fw_names; 497 i2400m->bus_fw_names = i2400ms_bus_fw_names;
438 i2400m->bus_bm_mac_addr_impaired = 1; 498 i2400m->bus_bm_mac_addr_impaired = 1;
439 i2400m->bus_bm_pokes_table = &i2400ms_pokes[0]; 499 i2400m->bus_bm_pokes_table = &i2400ms_pokes[0];
440 500
441 sdio_claim_host(func); 501 switch (func->device) {
442 result = sdio_set_block_size(func, I2400MS_BLK_SIZE); 502 case SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX:
443 sdio_release_host(func); 503 case SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5:
444 if (result < 0) { 504 i2400ms->iwmc3200 = 1;
445 dev_err(dev, "Failed to set block size: %d\n", result); 505 break;
446 goto error_set_blk_size; 506 default:
447 } 507 i2400ms->iwmc3200 = 0;
448
449 result = i2400ms_enable_function(i2400ms->func);
450 if (result < 0) {
451 dev_err(dev, "Cannot enable SDIO function: %d\n", result);
452 goto error_func_enable;
453 } 508 }
454 509
455 result = i2400ms_rx_setup(i2400ms);
456 if (result < 0)
457 goto error_rx_setup;
458
459 result = i2400m_setup(i2400m, I2400M_BRI_NO_REBOOT); 510 result = i2400m_setup(i2400m, I2400M_BRI_NO_REBOOT);
460 if (result < 0) { 511 if (result < 0) {
461 dev_err(dev, "cannot setup device: %d\n", result); 512 dev_err(dev, "cannot setup device: %d\n", result);
@@ -473,13 +524,6 @@ int i2400ms_probe(struct sdio_func *func,
473error_debugfs_add: 524error_debugfs_add:
474 i2400m_release(i2400m); 525 i2400m_release(i2400m);
475error_setup: 526error_setup:
476 i2400ms_rx_release(i2400ms);
477error_rx_setup:
478 sdio_claim_host(func);
479 sdio_disable_func(func);
480 sdio_release_host(func);
481error_func_enable:
482error_set_blk_size:
483 sdio_set_drvdata(func, NULL); 527 sdio_set_drvdata(func, NULL);
484 free_netdev(net_dev); 528 free_netdev(net_dev);
485error_alloc_netdev: 529error_alloc_netdev:
@@ -497,12 +541,9 @@ void i2400ms_remove(struct sdio_func *func)
497 541
498 d_fnstart(3, dev, "SDIO func %p\n", func); 542 d_fnstart(3, dev, "SDIO func %p\n", func);
499 debugfs_remove_recursive(i2400ms->debugfs_dentry); 543 debugfs_remove_recursive(i2400ms->debugfs_dentry);
500 i2400ms_rx_release(i2400ms); 544 i2400ms->debugfs_dentry = NULL;
501 i2400m_release(i2400m); 545 i2400m_release(i2400m);
502 sdio_set_drvdata(func, NULL); 546 sdio_set_drvdata(func, NULL);
503 sdio_claim_host(func);
504 sdio_disable_func(func);
505 sdio_release_host(func);
506 free_netdev(net_dev); 547 free_netdev(net_dev);
507 d_fnend(3, dev, "SDIO func %p\n", func); 548 d_fnend(3, dev, "SDIO func %p\n", func);
508} 549}
@@ -512,6 +553,8 @@ const struct sdio_device_id i2400ms_sdio_ids[] = {
512 /* Intel: i2400m WiMAX (iwmc3200) over SDIO */ 553 /* Intel: i2400m WiMAX (iwmc3200) over SDIO */
513 { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 554 { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL,
514 SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX) }, 555 SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX) },
556 { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL,
557 SDIO_DEVICE_ID_INTEL_IWMC3200WIMAX_2G5) },
515 { /* end: all zeroes */ }, 558 { /* end: all zeroes */ },
516}; 559};
517MODULE_DEVICE_TABLE(sdio, i2400ms_sdio_ids); 560MODULE_DEVICE_TABLE(sdio, i2400ms_sdio_ids);
@@ -529,6 +572,8 @@ struct sdio_driver i2400m_sdio_driver = {
529static 572static
530int __init i2400ms_driver_init(void) 573int __init i2400ms_driver_init(void)
531{ 574{
575 d_parse_params(D_LEVEL, D_LEVEL_SIZE, i2400ms_debug_params,
576 "i2400m_sdio.debug");
532 return sdio_register_driver(&i2400m_sdio_driver); 577 return sdio_register_driver(&i2400m_sdio_driver);
533} 578}
534module_init(i2400ms_driver_init); 579module_init(i2400ms_driver_init);
diff --git a/drivers/net/wimax/i2400m/tx.c b/drivers/net/wimax/i2400m/tx.c
index fa16ccf8e26a..54480e8947f1 100644
--- a/drivers/net/wimax/i2400m/tx.c
+++ b/drivers/net/wimax/i2400m/tx.c
@@ -310,7 +310,7 @@ size_t __i2400m_tx_tail_room(struct i2400m *i2400m)
310 size_t tail_room; 310 size_t tail_room;
311 size_t tx_in; 311 size_t tx_in;
312 312
313 if (unlikely(i2400m->tx_in) == 0) 313 if (unlikely(i2400m->tx_in == 0))
314 return I2400M_TX_BUF_SIZE; 314 return I2400M_TX_BUF_SIZE;
315 tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE; 315 tx_in = i2400m->tx_in % I2400M_TX_BUF_SIZE;
316 tail_room = I2400M_TX_BUF_SIZE - tx_in; 316 tail_room = I2400M_TX_BUF_SIZE - tx_in;
@@ -642,6 +642,9 @@ int i2400m_tx(struct i2400m *i2400m, const void *buf, size_t buf_len,
642 * current one is out of payload slots or we have a singleton, 642 * current one is out of payload slots or we have a singleton,
643 * close it and start a new one */ 643 * close it and start a new one */
644 spin_lock_irqsave(&i2400m->tx_lock, flags); 644 spin_lock_irqsave(&i2400m->tx_lock, flags);
645 result = -ESHUTDOWN;
646 if (i2400m->tx_buf == NULL)
647 goto error_tx_new;
645try_new: 648try_new:
646 if (unlikely(i2400m->tx_msg == NULL)) 649 if (unlikely(i2400m->tx_msg == NULL))
647 i2400m_tx_new(i2400m); 650 i2400m_tx_new(i2400m);
@@ -697,7 +700,10 @@ try_new:
697 } 700 }
698error_tx_new: 701error_tx_new:
699 spin_unlock_irqrestore(&i2400m->tx_lock, flags); 702 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
700 i2400m->bus_tx_kick(i2400m); /* always kick, might free up space */ 703 /* kick in most cases, except when the TX subsys is down, as
704 * it might free space */
705 if (likely(result != -ESHUTDOWN))
706 i2400m->bus_tx_kick(i2400m);
701 d_fnend(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u) = %d\n", 707 d_fnend(3, dev, "(i2400m %p skb %p [%zu bytes] pt %u) = %d\n",
702 i2400m, buf, buf_len, pl_type, result); 708 i2400m, buf, buf_len, pl_type, result);
703 return result; 709 return result;
@@ -740,6 +746,9 @@ struct i2400m_msg_hdr *i2400m_tx_msg_get(struct i2400m *i2400m,
740 746
741 d_fnstart(3, dev, "(i2400m %p bus_size %p)\n", i2400m, bus_size); 747 d_fnstart(3, dev, "(i2400m %p bus_size %p)\n", i2400m, bus_size);
742 spin_lock_irqsave(&i2400m->tx_lock, flags); 748 spin_lock_irqsave(&i2400m->tx_lock, flags);
749 tx_msg_moved = NULL;
750 if (i2400m->tx_buf == NULL)
751 goto out_unlock;
743skip: 752skip:
744 tx_msg_moved = NULL; 753 tx_msg_moved = NULL;
745 if (i2400m->tx_in == i2400m->tx_out) { /* Empty FIFO? */ 754 if (i2400m->tx_in == i2400m->tx_out) { /* Empty FIFO? */
@@ -829,6 +838,8 @@ void i2400m_tx_msg_sent(struct i2400m *i2400m)
829 838
830 d_fnstart(3, dev, "(i2400m %p)\n", i2400m); 839 d_fnstart(3, dev, "(i2400m %p)\n", i2400m);
831 spin_lock_irqsave(&i2400m->tx_lock, flags); 840 spin_lock_irqsave(&i2400m->tx_lock, flags);
841 if (i2400m->tx_buf == NULL)
842 goto out_unlock;
832 i2400m->tx_out += i2400m->tx_msg_size; 843 i2400m->tx_out += i2400m->tx_msg_size;
833 d_printf(2, dev, "TX: sent %zu b\n", (size_t) i2400m->tx_msg_size); 844 d_printf(2, dev, "TX: sent %zu b\n", (size_t) i2400m->tx_msg_size);
834 i2400m->tx_msg_size = 0; 845 i2400m->tx_msg_size = 0;
@@ -837,6 +848,7 @@ void i2400m_tx_msg_sent(struct i2400m *i2400m)
837 n = i2400m->tx_out / I2400M_TX_BUF_SIZE; 848 n = i2400m->tx_out / I2400M_TX_BUF_SIZE;
838 i2400m->tx_out %= I2400M_TX_BUF_SIZE; 849 i2400m->tx_out %= I2400M_TX_BUF_SIZE;
839 i2400m->tx_in -= n * I2400M_TX_BUF_SIZE; 850 i2400m->tx_in -= n * I2400M_TX_BUF_SIZE;
851out_unlock:
840 spin_unlock_irqrestore(&i2400m->tx_lock, flags); 852 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
841 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m); 853 d_fnend(3, dev, "(i2400m %p) = void\n", i2400m);
842} 854}
@@ -876,5 +888,9 @@ int i2400m_tx_setup(struct i2400m *i2400m)
876 */ 888 */
877void i2400m_tx_release(struct i2400m *i2400m) 889void i2400m_tx_release(struct i2400m *i2400m)
878{ 890{
891 unsigned long flags;
892 spin_lock_irqsave(&i2400m->tx_lock, flags);
879 kfree(i2400m->tx_buf); 893 kfree(i2400m->tx_buf);
894 i2400m->tx_buf = NULL;
895 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
880} 896}
diff --git a/drivers/net/wimax/i2400m/usb-fw.c b/drivers/net/wimax/i2400m/usb-fw.c
index 5ad287c228b8..ce6b9938fde0 100644
--- a/drivers/net/wimax/i2400m/usb-fw.c
+++ b/drivers/net/wimax/i2400m/usb-fw.c
@@ -99,10 +99,10 @@ ssize_t i2400mu_tx_bulk_out(struct i2400mu *i2400mu, void *buf, size_t buf_size)
99 dev_err(dev, "BM-CMD: can't get autopm: %d\n", result); 99 dev_err(dev, "BM-CMD: can't get autopm: %d\n", result);
100 do_autopm = 0; 100 do_autopm = 0;
101 } 101 }
102 epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_BULK_OUT); 102 epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_out);
103 pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress); 103 pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
104retry: 104retry:
105 result = usb_bulk_msg(i2400mu->usb_dev, pipe, buf, buf_size, &len, HZ); 105 result = usb_bulk_msg(i2400mu->usb_dev, pipe, buf, buf_size, &len, 200);
106 switch (result) { 106 switch (result) {
107 case 0: 107 case 0:
108 if (len != buf_size) { 108 if (len != buf_size) {
@@ -113,6 +113,28 @@ retry:
113 } 113 }
114 result = len; 114 result = len;
115 break; 115 break;
116 case -EPIPE:
117 /*
118 * Stall -- maybe the device is choking with our
119 * requests. Clear it and give it some time. If they
 120 * happen too often, it might be another symptom, so we
121 * reset.
122 *
 123 * No error handling for usb_clear_halt(); if it
124 * works, the retry works; if it fails, this switch
125 * does the error handling for us.
126 */
127 if (edc_inc(&i2400mu->urb_edc,
128 10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
129 dev_err(dev, "BM-CMD: too many stalls in "
130 "URB; resetting device\n");
131 usb_queue_reset_device(i2400mu->usb_iface);
132 /* fallthrough */
133 } else {
134 usb_clear_halt(i2400mu->usb_dev, pipe);
135 msleep(10); /* give the device some time */
136 goto retry;
137 }
116 case -EINVAL: /* while removing driver */ 138 case -EINVAL: /* while removing driver */
117 case -ENODEV: /* dev disconnect ... */ 139 case -ENODEV: /* dev disconnect ... */
118 case -ENOENT: /* just ignore it */ 140 case -ENOENT: /* just ignore it */
@@ -135,7 +157,6 @@ retry:
135 result); 157 result);
136 goto retry; 158 goto retry;
137 } 159 }
138 result = len;
139 if (do_autopm) 160 if (do_autopm)
140 usb_autopm_put_interface(i2400mu->usb_iface); 161 usb_autopm_put_interface(i2400mu->usb_iface);
141 return result; 162 return result;
@@ -172,7 +193,8 @@ ssize_t i2400mu_bus_bm_cmd_send(struct i2400m *i2400m,
172 result = -E2BIG; 193 result = -E2BIG;
173 if (cmd_size > I2400M_BM_CMD_BUF_SIZE) 194 if (cmd_size > I2400M_BM_CMD_BUF_SIZE)
174 goto error_too_big; 195 goto error_too_big;
175 memcpy(i2400m->bm_cmd_buf, _cmd, cmd_size); 196 if (_cmd != i2400m->bm_cmd_buf)
197 memmove(i2400m->bm_cmd_buf, _cmd, cmd_size);
176 cmd = i2400m->bm_cmd_buf; 198 cmd = i2400m->bm_cmd_buf;
177 if (cmd_size_a > cmd_size) /* Zero pad space */ 199 if (cmd_size_a > cmd_size) /* Zero pad space */
178 memset(i2400m->bm_cmd_buf + cmd_size, 0, cmd_size_a - cmd_size); 200 memset(i2400m->bm_cmd_buf + cmd_size, 0, cmd_size_a - cmd_size);
@@ -226,7 +248,8 @@ int i2400mu_notif_submit(struct i2400mu *i2400mu, struct urb *urb,
226 struct usb_endpoint_descriptor *epd; 248 struct usb_endpoint_descriptor *epd;
227 int pipe; 249 int pipe;
228 250
229 epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_NOTIFICATION); 251 epd = usb_get_epd(i2400mu->usb_iface,
252 i2400mu->endpoint_cfg.notification);
230 pipe = usb_rcvintpipe(i2400mu->usb_dev, epd->bEndpointAddress); 253 pipe = usb_rcvintpipe(i2400mu->usb_dev, epd->bEndpointAddress);
231 usb_fill_int_urb(urb, i2400mu->usb_dev, pipe, 254 usb_fill_int_urb(urb, i2400mu->usb_dev, pipe,
232 i2400m->bm_ack_buf, I2400M_BM_ACK_BUF_SIZE, 255 i2400m->bm_ack_buf, I2400M_BM_ACK_BUF_SIZE,
@@ -328,8 +351,8 @@ error_dev_gone:
328out: 351out:
329 if (do_autopm) 352 if (do_autopm)
330 usb_autopm_put_interface(i2400mu->usb_iface); 353 usb_autopm_put_interface(i2400mu->usb_iface);
331 d_fnend(8, dev, "(i2400m %p ack %p size %zu) = %zd\n", 354 d_fnend(8, dev, "(i2400m %p ack %p size %zu) = %ld\n",
332 i2400m, ack, ack_size, result); 355 i2400m, ack, ack_size, (long) result);
333 return result; 356 return result;
334 357
335error_exceeded: 358error_exceeded:
diff --git a/drivers/net/wimax/i2400m/usb-notif.c b/drivers/net/wimax/i2400m/usb-notif.c
index 6add27c3f35c..f88d1c6e35cb 100644
--- a/drivers/net/wimax/i2400m/usb-notif.c
+++ b/drivers/net/wimax/i2400m/usb-notif.c
@@ -51,6 +51,7 @@
51 * 51 *
52 * i2400mu_usb_notification_cb() Called when a URB is ready 52 * i2400mu_usb_notification_cb() Called when a URB is ready
53 * i2400mu_notif_grok() 53 * i2400mu_notif_grok()
54 * i2400m_is_boot_barker()
54 * i2400m_dev_reset_handle() 55 * i2400m_dev_reset_handle()
55 * i2400mu_rx_kick() 56 * i2400mu_rx_kick()
56 */ 57 */
@@ -87,32 +88,21 @@ int i2400mu_notification_grok(struct i2400mu *i2400mu, const void *buf,
87 d_fnstart(4, dev, "(i2400m %p buf %p buf_len %zu)\n", 88 d_fnstart(4, dev, "(i2400m %p buf %p buf_len %zu)\n",
88 i2400mu, buf, buf_len); 89 i2400mu, buf, buf_len);
89 ret = -EIO; 90 ret = -EIO;
90 if (buf_len < sizeof(i2400m_NBOOT_BARKER)) 91 if (buf_len < sizeof(i2400m_ZERO_BARKER))
91 /* Not a bug, just ignore */ 92 /* Not a bug, just ignore */
92 goto error_bad_size; 93 goto error_bad_size;
93 if (!memcmp(i2400m_NBOOT_BARKER, buf, sizeof(i2400m_NBOOT_BARKER)) 94 ret = 0;
94 || !memcmp(i2400m_SBOOT_BARKER, buf, sizeof(i2400m_SBOOT_BARKER))) 95 if (!memcmp(i2400m_ZERO_BARKER, buf, sizeof(i2400m_ZERO_BARKER))) {
95 ret = i2400m_dev_reset_handle(i2400m);
96 else if (!memcmp(i2400m_ZERO_BARKER, buf, sizeof(i2400m_ZERO_BARKER))) {
97 i2400mu_rx_kick(i2400mu); 96 i2400mu_rx_kick(i2400mu);
98 ret = 0; 97 goto out;
99 } else { /* Unknown or unexpected data in the notif message */
100 char prefix[64];
101 ret = -EIO;
102 dev_err(dev, "HW BUG? Unknown/unexpected data in notification "
103 "message (%zu bytes)\n", buf_len);
104 snprintf(prefix, sizeof(prefix), "%s %s: ",
105 dev_driver_string(dev), dev_name(dev));
106 if (buf_len > 64) {
107 print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
108 8, 4, buf, 64, 0);
109 printk(KERN_ERR "%s... (only first 64 bytes "
110 "dumped)\n", prefix);
111 } else
112 print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
113 8, 4, buf, buf_len, 0);
114 } 98 }
99 ret = i2400m_is_boot_barker(i2400m, buf, buf_len);
100 if (unlikely(ret >= 0))
101 ret = i2400m_dev_reset_handle(i2400m, "device rebooted");
102 else /* Unknown or unexpected data in the notif message */
103 i2400m_unknown_barker(i2400m, buf, buf_len);
115error_bad_size: 104error_bad_size:
105out:
116 d_fnend(4, dev, "(i2400m %p buf %p buf_len %zu) = %d\n", 106 d_fnend(4, dev, "(i2400m %p buf %p buf_len %zu) = %d\n",
117 i2400mu, buf, buf_len, ret); 107 i2400mu, buf, buf_len, ret);
118 return ret; 108 return ret;
@@ -220,7 +210,8 @@ int i2400mu_notification_setup(struct i2400mu *i2400mu)
220 dev_err(dev, "notification: cannot allocate URB\n"); 210 dev_err(dev, "notification: cannot allocate URB\n");
221 goto error_alloc_urb; 211 goto error_alloc_urb;
222 } 212 }
223 epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_NOTIFICATION); 213 epd = usb_get_epd(i2400mu->usb_iface,
214 i2400mu->endpoint_cfg.notification);
224 usb_pipe = usb_rcvintpipe(i2400mu->usb_dev, epd->bEndpointAddress); 215 usb_pipe = usb_rcvintpipe(i2400mu->usb_dev, epd->bEndpointAddress);
225 usb_fill_int_urb(i2400mu->notif_urb, i2400mu->usb_dev, usb_pipe, 216 usb_fill_int_urb(i2400mu->notif_urb, i2400mu->usb_dev, usb_pipe,
226 buf, I2400MU_MAX_NOTIFICATION_LEN, 217 buf, I2400MU_MAX_NOTIFICATION_LEN,
diff --git a/drivers/net/wimax/i2400m/usb-rx.c b/drivers/net/wimax/i2400m/usb-rx.c
index a314799967cf..ba1b02362dfc 100644
--- a/drivers/net/wimax/i2400m/usb-rx.c
+++ b/drivers/net/wimax/i2400m/usb-rx.c
@@ -204,7 +204,7 @@ struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
204 dev_err(dev, "RX: can't get autopm: %d\n", result); 204 dev_err(dev, "RX: can't get autopm: %d\n", result);
205 do_autopm = 0; 205 do_autopm = 0;
206 } 206 }
207 epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_BULK_IN); 207 epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_in);
208 usb_pipe = usb_rcvbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress); 208 usb_pipe = usb_rcvbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
209retry: 209retry:
210 rx_size = skb_end_pointer(rx_skb) - rx_skb->data - rx_skb->len; 210 rx_size = skb_end_pointer(rx_skb) - rx_skb->data - rx_skb->len;
@@ -214,7 +214,7 @@ retry:
214 } 214 }
215 result = usb_bulk_msg( 215 result = usb_bulk_msg(
216 i2400mu->usb_dev, usb_pipe, rx_skb->data + rx_skb->len, 216 i2400mu->usb_dev, usb_pipe, rx_skb->data + rx_skb->len,
217 rx_size, &read_size, HZ); 217 rx_size, &read_size, 200);
218 usb_mark_last_busy(i2400mu->usb_dev); 218 usb_mark_last_busy(i2400mu->usb_dev);
219 switch (result) { 219 switch (result) {
220 case 0: 220 case 0:
@@ -222,6 +222,26 @@ retry:
222 goto retry; /* ZLP, just resubmit */ 222 goto retry; /* ZLP, just resubmit */
223 skb_put(rx_skb, read_size); 223 skb_put(rx_skb, read_size);
224 break; 224 break;
225 case -EPIPE:
226 /*
227 * Stall -- maybe the device is choking with our
228 * requests. Clear it and give it some time. If they
 229 * happen too often, it might be another symptom, so we
230 * reset.
231 *
 232 * No error handling for usb_clear_halt(); if it
233 * works, the retry works; if it fails, this switch
234 * does the error handling for us.
235 */
236 if (edc_inc(&i2400mu->urb_edc,
237 10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
238 dev_err(dev, "BM-CMD: too many stalls in "
239 "URB; resetting device\n");
240 goto do_reset;
241 }
242 usb_clear_halt(i2400mu->usb_dev, usb_pipe);
243 msleep(10); /* give the device some time */
244 goto retry;
225 case -EINVAL: /* while removing driver */ 245 case -EINVAL: /* while removing driver */
226 case -ENODEV: /* dev disconnect ... */ 246 case -ENODEV: /* dev disconnect ... */
227 case -ENOENT: /* just ignore it */ 247 case -ENOENT: /* just ignore it */
@@ -283,6 +303,7 @@ out:
283error_reset: 303error_reset:
284 dev_err(dev, "RX: maximum errors in URB exceeded; " 304 dev_err(dev, "RX: maximum errors in URB exceeded; "
285 "resetting device\n"); 305 "resetting device\n");
306do_reset:
286 usb_queue_reset_device(i2400mu->usb_iface); 307 usb_queue_reset_device(i2400mu->usb_iface);
287 rx_skb = ERR_PTR(result); 308 rx_skb = ERR_PTR(result);
288 goto out; 309 goto out;
@@ -316,10 +337,15 @@ int i2400mu_rxd(void *_i2400mu)
316 size_t pending; 337 size_t pending;
317 int rx_size; 338 int rx_size;
318 struct sk_buff *rx_skb; 339 struct sk_buff *rx_skb;
340 unsigned long flags;
319 341
320 d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu); 342 d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
343 spin_lock_irqsave(&i2400m->rx_lock, flags);
344 BUG_ON(i2400mu->rx_kthread != NULL);
345 i2400mu->rx_kthread = current;
346 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
321 while (1) { 347 while (1) {
322 d_printf(2, dev, "TX: waiting for messages\n"); 348 d_printf(2, dev, "RX: waiting for messages\n");
323 pending = 0; 349 pending = 0;
324 wait_event_interruptible( 350 wait_event_interruptible(
325 i2400mu->rx_wq, 351 i2400mu->rx_wq,
@@ -367,6 +393,9 @@ int i2400mu_rxd(void *_i2400mu)
367 } 393 }
368 result = 0; 394 result = 0;
369out: 395out:
396 spin_lock_irqsave(&i2400m->rx_lock, flags);
397 i2400mu->rx_kthread = NULL;
398 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
370 d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result); 399 d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
371 return result; 400 return result;
372 401
@@ -403,18 +432,33 @@ int i2400mu_rx_setup(struct i2400mu *i2400mu)
403 struct i2400m *i2400m = &i2400mu->i2400m; 432 struct i2400m *i2400m = &i2400mu->i2400m;
404 struct device *dev = &i2400mu->usb_iface->dev; 433 struct device *dev = &i2400mu->usb_iface->dev;
405 struct wimax_dev *wimax_dev = &i2400m->wimax_dev; 434 struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
435 struct task_struct *kthread;
406 436
407 i2400mu->rx_kthread = kthread_run(i2400mu_rxd, i2400mu, "%s-rx", 437 kthread = kthread_run(i2400mu_rxd, i2400mu, "%s-rx",
408 wimax_dev->name); 438 wimax_dev->name);
409 if (IS_ERR(i2400mu->rx_kthread)) { 439 /* the kthread function sets i2400mu->rx_thread */
410 result = PTR_ERR(i2400mu->rx_kthread); 440 if (IS_ERR(kthread)) {
441 result = PTR_ERR(kthread);
411 dev_err(dev, "RX: cannot start thread: %d\n", result); 442 dev_err(dev, "RX: cannot start thread: %d\n", result);
412 } 443 }
413 return result; 444 return result;
414} 445}
415 446
447
416void i2400mu_rx_release(struct i2400mu *i2400mu) 448void i2400mu_rx_release(struct i2400mu *i2400mu)
417{ 449{
418 kthread_stop(i2400mu->rx_kthread); 450 unsigned long flags;
451 struct i2400m *i2400m = &i2400mu->i2400m;
452 struct device *dev = i2400m_dev(i2400m);
453 struct task_struct *kthread;
454
455 spin_lock_irqsave(&i2400m->rx_lock, flags);
456 kthread = i2400mu->rx_kthread;
457 i2400mu->rx_kthread = NULL;
458 spin_unlock_irqrestore(&i2400m->rx_lock, flags);
459 if (kthread)
460 kthread_stop(kthread);
461 else
462 d_printf(1, dev, "RX: kthread had already exited\n");
419} 463}
420 464
diff --git a/drivers/net/wimax/i2400m/usb-tx.c b/drivers/net/wimax/i2400m/usb-tx.c
index dfd893356f49..c65b9979f87e 100644
--- a/drivers/net/wimax/i2400m/usb-tx.c
+++ b/drivers/net/wimax/i2400m/usb-tx.c
@@ -101,11 +101,11 @@ int i2400mu_tx(struct i2400mu *i2400mu, struct i2400m_msg_hdr *tx_msg,
101 dev_err(dev, "TX: can't get autopm: %d\n", result); 101 dev_err(dev, "TX: can't get autopm: %d\n", result);
102 do_autopm = 0; 102 do_autopm = 0;
103 } 103 }
104 epd = usb_get_epd(i2400mu->usb_iface, I2400MU_EP_BULK_OUT); 104 epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_out);
105 usb_pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress); 105 usb_pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
106retry: 106retry:
107 result = usb_bulk_msg(i2400mu->usb_dev, usb_pipe, 107 result = usb_bulk_msg(i2400mu->usb_dev, usb_pipe,
108 tx_msg, tx_msg_size, &sent_size, HZ); 108 tx_msg, tx_msg_size, &sent_size, 200);
109 usb_mark_last_busy(i2400mu->usb_dev); 109 usb_mark_last_busy(i2400mu->usb_dev);
110 switch (result) { 110 switch (result) {
111 case 0: 111 case 0:
@@ -115,6 +115,28 @@ retry:
115 result = -EIO; 115 result = -EIO;
116 } 116 }
117 break; 117 break;
118 case -EPIPE:
119 /*
120 * Stall -- maybe the device is choking with our
121 * requests. Clear it and give it some time. If they
122 * happen to often, it might be another symptom, so we
123 * reset.
124 *
125 * No error handling for usb_clear_halt(0; if it
126 * works, the retry works; if it fails, this switch
127 * does the error handling for us.
128 */
129 if (edc_inc(&i2400mu->urb_edc,
130 10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
131 dev_err(dev, "BM-CMD: too many stalls in "
132 "URB; resetting device\n");
133 usb_queue_reset_device(i2400mu->usb_iface);
134 /* fallthrough */
135 } else {
136 usb_clear_halt(i2400mu->usb_dev, usb_pipe);
137 msleep(10); /* give the device some time */
138 goto retry;
139 }
118 case -EINVAL: /* while removing driver */ 140 case -EINVAL: /* while removing driver */
119 case -ENODEV: /* dev disconnect ... */ 141 case -ENODEV: /* dev disconnect ... */
120 case -ENOENT: /* just ignore it */ 142 case -ENOENT: /* just ignore it */
@@ -161,9 +183,15 @@ int i2400mu_txd(void *_i2400mu)
161 struct device *dev = &i2400mu->usb_iface->dev; 183 struct device *dev = &i2400mu->usb_iface->dev;
162 struct i2400m_msg_hdr *tx_msg; 184 struct i2400m_msg_hdr *tx_msg;
163 size_t tx_msg_size; 185 size_t tx_msg_size;
186 unsigned long flags;
164 187
165 d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu); 188 d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
166 189
190 spin_lock_irqsave(&i2400m->tx_lock, flags);
191 BUG_ON(i2400mu->tx_kthread != NULL);
192 i2400mu->tx_kthread = current;
193 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
194
167 while (1) { 195 while (1) {
168 d_printf(2, dev, "TX: waiting for messages\n"); 196 d_printf(2, dev, "TX: waiting for messages\n");
169 tx_msg = NULL; 197 tx_msg = NULL;
@@ -183,6 +211,11 @@ int i2400mu_txd(void *_i2400mu)
183 if (result < 0) 211 if (result < 0)
184 break; 212 break;
185 } 213 }
214
215 spin_lock_irqsave(&i2400m->tx_lock, flags);
216 i2400mu->tx_kthread = NULL;
217 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
218
186 d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result); 219 d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
187 return result; 220 return result;
188} 221}
@@ -213,11 +246,13 @@ int i2400mu_tx_setup(struct i2400mu *i2400mu)
213 struct i2400m *i2400m = &i2400mu->i2400m; 246 struct i2400m *i2400m = &i2400mu->i2400m;
214 struct device *dev = &i2400mu->usb_iface->dev; 247 struct device *dev = &i2400mu->usb_iface->dev;
215 struct wimax_dev *wimax_dev = &i2400m->wimax_dev; 248 struct wimax_dev *wimax_dev = &i2400m->wimax_dev;
249 struct task_struct *kthread;
216 250
217 i2400mu->tx_kthread = kthread_run(i2400mu_txd, i2400mu, "%s-tx", 251 kthread = kthread_run(i2400mu_txd, i2400mu, "%s-tx",
218 wimax_dev->name); 252 wimax_dev->name);
219 if (IS_ERR(i2400mu->tx_kthread)) { 253 /* the kthread function sets i2400mu->tx_thread */
220 result = PTR_ERR(i2400mu->tx_kthread); 254 if (IS_ERR(kthread)) {
255 result = PTR_ERR(kthread);
221 dev_err(dev, "TX: cannot start thread: %d\n", result); 256 dev_err(dev, "TX: cannot start thread: %d\n", result);
222 } 257 }
223 return result; 258 return result;
@@ -225,5 +260,17 @@ int i2400mu_tx_setup(struct i2400mu *i2400mu)
225 260
226void i2400mu_tx_release(struct i2400mu *i2400mu) 261void i2400mu_tx_release(struct i2400mu *i2400mu)
227{ 262{
228 kthread_stop(i2400mu->tx_kthread); 263 unsigned long flags;
264 struct i2400m *i2400m = &i2400mu->i2400m;
265 struct device *dev = i2400m_dev(i2400m);
266 struct task_struct *kthread;
267
268 spin_lock_irqsave(&i2400m->tx_lock, flags);
269 kthread = i2400mu->tx_kthread;
270 i2400mu->tx_kthread = NULL;
271 spin_unlock_irqrestore(&i2400m->tx_lock, flags);
272 if (kthread)
273 kthread_stop(kthread);
274 else
275 d_printf(1, dev, "TX: kthread had already exited\n");
229} 276}
diff --git a/drivers/net/wimax/i2400m/usb.c b/drivers/net/wimax/i2400m/usb.c
index 7eadd11c815b..47e84ef355c5 100644
--- a/drivers/net/wimax/i2400m/usb.c
+++ b/drivers/net/wimax/i2400m/usb.c
@@ -58,7 +58,7 @@
58 * i2400mu_rx_release() 58 * i2400mu_rx_release()
59 * i2400mu_tx_release() 59 * i2400mu_tx_release()
60 * 60 *
61 * i2400mu_bus_reset() Called by i2400m->bus_reset 61 * i2400mu_bus_reset() Called by i2400m_reset
62 * __i2400mu_reset() 62 * __i2400mu_reset()
63 * __i2400mu_send_barker() 63 * __i2400mu_send_barker()
64 * usb_reset_device() 64 * usb_reset_device()
@@ -71,13 +71,25 @@
71#define D_SUBMODULE usb 71#define D_SUBMODULE usb
72#include "usb-debug-levels.h" 72#include "usb-debug-levels.h"
73 73
74static char i2400mu_debug_params[128];
75module_param_string(debug, i2400mu_debug_params, sizeof(i2400mu_debug_params),
76 0644);
77MODULE_PARM_DESC(debug,
78 "String of space-separated NAME:VALUE pairs, where NAMEs "
79 "are the different debug submodules and VALUE are the "
80 "initial debug value to set.");
74 81
75/* Our firmware file name */ 82/* Our firmware file name */
76static const char *i2400mu_bus_fw_names[] = { 83static const char *i2400mu_bus_fw_names_5x50[] = {
77#define I2400MU_FW_FILE_NAME_v1_4 "i2400m-fw-usb-1.4.sbcf" 84#define I2400MU_FW_FILE_NAME_v1_4 "i2400m-fw-usb-1.4.sbcf"
78 I2400MU_FW_FILE_NAME_v1_4, 85 I2400MU_FW_FILE_NAME_v1_4,
79#define I2400MU_FW_FILE_NAME_v1_3 "i2400m-fw-usb-1.3.sbcf" 86 NULL,
80 I2400MU_FW_FILE_NAME_v1_3, 87};
88
89
90static const char *i2400mu_bus_fw_names_6050[] = {
91#define I6050U_FW_FILE_NAME_v1_5 "i6050-fw-usb-1.5.sbcf"
92 I6050U_FW_FILE_NAME_v1_5,
81 NULL, 93 NULL,
82}; 94};
83 95
@@ -160,14 +172,59 @@ int __i2400mu_send_barker(struct i2400mu *i2400mu,
160 epd = usb_get_epd(i2400mu->usb_iface, endpoint); 172 epd = usb_get_epd(i2400mu->usb_iface, endpoint);
161 pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress); 173 pipe = usb_sndbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
162 memcpy(buffer, barker, barker_size); 174 memcpy(buffer, barker, barker_size);
175retry:
163 ret = usb_bulk_msg(i2400mu->usb_dev, pipe, buffer, barker_size, 176 ret = usb_bulk_msg(i2400mu->usb_dev, pipe, buffer, barker_size,
164 &actual_len, HZ); 177 &actual_len, 200);
165 if (ret < 0) { 178 switch (ret) {
166 if (ret != -EINVAL) 179 case 0:
167 dev_err(dev, "E: barker error: %d\n", ret); 180 if (actual_len != barker_size) { /* Too short? drop it */
168 } else if (actual_len != barker_size) { 181 dev_err(dev, "E: %s: short write (%d B vs %zu "
169 dev_err(dev, "E: only %d bytes transmitted\n", actual_len); 182 "expected)\n",
170 ret = -EIO; 183 __func__, actual_len, barker_size);
184 ret = -EIO;
185 }
186 break;
187 case -EPIPE:
188 /*
189 * Stall -- maybe the device is choking with our
190 * requests. Clear it and give it some time. If they
191 * happen to often, it might be another symptom, so we
192 * reset.
193 *
194 * No error handling for usb_clear_halt(0; if it
195 * works, the retry works; if it fails, this switch
196 * does the error handling for us.
197 */
198 if (edc_inc(&i2400mu->urb_edc,
199 10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
200 dev_err(dev, "E: %s: too many stalls in "
201 "URB; resetting device\n", __func__);
202 usb_queue_reset_device(i2400mu->usb_iface);
203 /* fallthrough */
204 } else {
205 usb_clear_halt(i2400mu->usb_dev, pipe);
206 msleep(10); /* give the device some time */
207 goto retry;
208 }
209 case -EINVAL: /* while removing driver */
210 case -ENODEV: /* dev disconnect ... */
211 case -ENOENT: /* just ignore it */
212 case -ESHUTDOWN: /* and exit */
213 case -ECONNRESET:
214 ret = -ESHUTDOWN;
215 break;
216 default: /* Some error? */
217 if (edc_inc(&i2400mu->urb_edc,
218 EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
219 dev_err(dev, "E: %s: maximum errors in URB "
220 "exceeded; resetting device\n",
221 __func__);
222 usb_queue_reset_device(i2400mu->usb_iface);
223 } else {
224 dev_warn(dev, "W: %s: cannot send URB: %d\n",
225 __func__, ret);
226 goto retry;
227 }
171 } 228 }
172 kfree(buffer); 229 kfree(buffer);
173error_kzalloc: 230error_kzalloc:
@@ -232,15 +289,16 @@ int i2400mu_bus_reset(struct i2400m *i2400m, enum i2400m_reset_type rt)
232 289
233 d_fnstart(3, dev, "(i2400m %p rt %u)\n", i2400m, rt); 290 d_fnstart(3, dev, "(i2400m %p rt %u)\n", i2400m, rt);
234 if (rt == I2400M_RT_WARM) 291 if (rt == I2400M_RT_WARM)
235 result = __i2400mu_send_barker(i2400mu, i2400m_WARM_BOOT_BARKER, 292 result = __i2400mu_send_barker(
236 sizeof(i2400m_WARM_BOOT_BARKER), 293 i2400mu, i2400m_WARM_BOOT_BARKER,
237 I2400MU_EP_BULK_OUT); 294 sizeof(i2400m_WARM_BOOT_BARKER),
295 i2400mu->endpoint_cfg.bulk_out);
238 else if (rt == I2400M_RT_COLD) 296 else if (rt == I2400M_RT_COLD)
239 result = __i2400mu_send_barker(i2400mu, i2400m_COLD_BOOT_BARKER, 297 result = __i2400mu_send_barker(
240 sizeof(i2400m_COLD_BOOT_BARKER), 298 i2400mu, i2400m_COLD_BOOT_BARKER,
241 I2400MU_EP_RESET_COLD); 299 sizeof(i2400m_COLD_BOOT_BARKER),
300 i2400mu->endpoint_cfg.reset_cold);
242 else if (rt == I2400M_RT_BUS) { 301 else if (rt == I2400M_RT_BUS) {
243do_bus_reset:
244 result = usb_reset_device(i2400mu->usb_dev); 302 result = usb_reset_device(i2400mu->usb_dev);
245 switch (result) { 303 switch (result) {
246 case 0: 304 case 0:
@@ -248,7 +306,7 @@ do_bus_reset:
248 case -ENODEV: 306 case -ENODEV:
249 case -ENOENT: 307 case -ENOENT:
250 case -ESHUTDOWN: 308 case -ESHUTDOWN:
251 result = rt == I2400M_RT_WARM ? -ENODEV : 0; 309 result = 0;
252 break; /* We assume the device is disconnected */ 310 break; /* We assume the device is disconnected */
253 default: 311 default:
254 dev_err(dev, "USB reset failed (%d), giving up!\n", 312 dev_err(dev, "USB reset failed (%d), giving up!\n",
@@ -261,10 +319,17 @@ do_bus_reset:
261 if (result < 0 319 if (result < 0
262 && result != -EINVAL /* device is gone */ 320 && result != -EINVAL /* device is gone */
263 && rt != I2400M_RT_BUS) { 321 && rt != I2400M_RT_BUS) {
322 /*
323 * Things failed -- resort to lower level reset, that
324 * we queue in another context; the reason for this is
325 * that the pre and post reset functionality requires
326 * the i2400m->init_mutex; RT_WARM and RT_COLD can
327 * come from areas where i2400m->init_mutex is taken.
328 */
264 dev_err(dev, "%s reset failed (%d); trying USB reset\n", 329 dev_err(dev, "%s reset failed (%d); trying USB reset\n",
265 rt == I2400M_RT_WARM ? "warm" : "cold", result); 330 rt == I2400M_RT_WARM ? "warm" : "cold", result);
266 rt = I2400M_RT_BUS; 331 usb_queue_reset_device(i2400mu->usb_iface);
267 goto do_bus_reset; 332 result = -ENODEV;
268 } 333 }
269 d_fnend(3, dev, "(i2400m %p rt %u) = %d\n", i2400m, rt, result); 334 d_fnend(3, dev, "(i2400m %p rt %u) = %d\n", i2400m, rt, result);
270 return result; 335 return result;
@@ -402,20 +467,33 @@ int i2400mu_probe(struct usb_interface *iface,
402 467
403 i2400m->bus_tx_block_size = I2400MU_BLK_SIZE; 468 i2400m->bus_tx_block_size = I2400MU_BLK_SIZE;
404 i2400m->bus_pl_size_max = I2400MU_PL_SIZE_MAX; 469 i2400m->bus_pl_size_max = I2400MU_PL_SIZE_MAX;
470 i2400m->bus_setup = NULL;
405 i2400m->bus_dev_start = i2400mu_bus_dev_start; 471 i2400m->bus_dev_start = i2400mu_bus_dev_start;
406 i2400m->bus_dev_stop = i2400mu_bus_dev_stop; 472 i2400m->bus_dev_stop = i2400mu_bus_dev_stop;
473 i2400m->bus_release = NULL;
407 i2400m->bus_tx_kick = i2400mu_bus_tx_kick; 474 i2400m->bus_tx_kick = i2400mu_bus_tx_kick;
408 i2400m->bus_reset = i2400mu_bus_reset; 475 i2400m->bus_reset = i2400mu_bus_reset;
409 i2400m->bus_bm_retries = I2400M_BOOT_RETRIES; 476 i2400m->bus_bm_retries = I2400M_USB_BOOT_RETRIES;
410 i2400m->bus_bm_cmd_send = i2400mu_bus_bm_cmd_send; 477 i2400m->bus_bm_cmd_send = i2400mu_bus_bm_cmd_send;
411 i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack; 478 i2400m->bus_bm_wait_for_ack = i2400mu_bus_bm_wait_for_ack;
412 i2400m->bus_fw_names = i2400mu_bus_fw_names;
413 i2400m->bus_bm_mac_addr_impaired = 0; 479 i2400m->bus_bm_mac_addr_impaired = 0;
414 480
481 if (id->idProduct == USB_DEVICE_ID_I6050) {
482 i2400m->bus_fw_names = i2400mu_bus_fw_names_6050;
483 i2400mu->endpoint_cfg.bulk_out = 0;
484 i2400mu->endpoint_cfg.notification = 3;
485 i2400mu->endpoint_cfg.reset_cold = 2;
486 i2400mu->endpoint_cfg.bulk_in = 1;
487 } else {
488 i2400m->bus_fw_names = i2400mu_bus_fw_names_5x50;
489 i2400mu->endpoint_cfg.bulk_out = 0;
490 i2400mu->endpoint_cfg.notification = 1;
491 i2400mu->endpoint_cfg.reset_cold = 2;
492 i2400mu->endpoint_cfg.bulk_in = 3;
493 }
415#ifdef CONFIG_PM 494#ifdef CONFIG_PM
416 iface->needs_remote_wakeup = 1; /* autosuspend (15s delay) */ 495 iface->needs_remote_wakeup = 1; /* autosuspend (15s delay) */
417 device_init_wakeup(dev, 1); 496 device_init_wakeup(dev, 1);
418 usb_autopm_enable(i2400mu->usb_iface);
419 usb_dev->autosuspend_delay = 15 * HZ; 497 usb_dev->autosuspend_delay = 15 * HZ;
420 usb_dev->autosuspend_disabled = 0; 498 usb_dev->autosuspend_disabled = 0;
421#endif 499#endif
@@ -483,7 +561,10 @@ void i2400mu_disconnect(struct usb_interface *iface)
483 * So at the end, the three cases require common handling. 561 * So at the end, the three cases require common handling.
484 * 562 *
485 * If at the time of this call the device's firmware is not loaded, 563 * If at the time of this call the device's firmware is not loaded,
486 * nothing has to be done. 564 * nothing has to be done. Note we can be "loose" about not reading
565 * i2400m->updown under i2400m->init_mutex. If it happens to change
566 * inmediately, other parts of the call flow will fail and effectively
567 * catch it.
487 * 568 *
488 * If the firmware is loaded, we need to: 569 * If the firmware is loaded, we need to:
489 * 570 *
@@ -522,6 +603,7 @@ int i2400mu_suspend(struct usb_interface *iface, pm_message_t pm_msg)
522#endif 603#endif
523 604
524 d_fnstart(3, dev, "(iface %p pm_msg %u)\n", iface, pm_msg.event); 605 d_fnstart(3, dev, "(iface %p pm_msg %u)\n", iface, pm_msg.event);
606 rmb(); /* see i2400m->updown's documentation */
525 if (i2400m->updown == 0) 607 if (i2400m->updown == 0)
526 goto no_firmware; 608 goto no_firmware;
527 if (i2400m->state == I2400M_SS_DATA_PATH_CONNECTED && is_autosuspend) { 609 if (i2400m->state == I2400M_SS_DATA_PATH_CONNECTED && is_autosuspend) {
@@ -575,6 +657,7 @@ int i2400mu_resume(struct usb_interface *iface)
575 struct i2400m *i2400m = &i2400mu->i2400m; 657 struct i2400m *i2400m = &i2400mu->i2400m;
576 658
577 d_fnstart(3, dev, "(iface %p)\n", iface); 659 d_fnstart(3, dev, "(iface %p)\n", iface);
660 rmb(); /* see i2400m->updown's documentation */
578 if (i2400m->updown == 0) { 661 if (i2400m->updown == 0) {
579 d_printf(1, dev, "fw was down, no resume neeed\n"); 662 d_printf(1, dev, "fw was down, no resume neeed\n");
580 goto out; 663 goto out;
@@ -591,7 +674,54 @@ out:
591 674
592 675
593static 676static
677int i2400mu_reset_resume(struct usb_interface *iface)
678{
679 int result;
680 struct device *dev = &iface->dev;
681 struct i2400mu *i2400mu = usb_get_intfdata(iface);
682 struct i2400m *i2400m = &i2400mu->i2400m;
683
684 d_fnstart(3, dev, "(iface %p)\n", iface);
685 result = i2400m_dev_reset_handle(i2400m, "device reset on resume");
686 d_fnend(3, dev, "(iface %p) = %d\n", iface, result);
687 return result < 0 ? result : 0;
688}
689
690
691/*
692 * Another driver or user space is triggering a reset on the device
693 * which contains the interface passed as an argument. Cease IO and
694 * save any device state you need to restore.
695 *
696 * If you need to allocate memory here, use GFP_NOIO or GFP_ATOMIC, if
697 * you are in atomic context.
698 */
699static
700int i2400mu_pre_reset(struct usb_interface *iface)
701{
702 struct i2400mu *i2400mu = usb_get_intfdata(iface);
703 return i2400m_pre_reset(&i2400mu->i2400m);
704}
705
706
707/*
708 * The reset has completed. Restore any saved device state and begin
709 * using the device again.
710 *
711 * If you need to allocate memory here, use GFP_NOIO or GFP_ATOMIC, if
712 * you are in atomic context.
713 */
714static
715int i2400mu_post_reset(struct usb_interface *iface)
716{
717 struct i2400mu *i2400mu = usb_get_intfdata(iface);
718 return i2400m_post_reset(&i2400mu->i2400m);
719}
720
721
722static
594struct usb_device_id i2400mu_id_table[] = { 723struct usb_device_id i2400mu_id_table[] = {
724 { USB_DEVICE(0x8086, USB_DEVICE_ID_I6050) },
595 { USB_DEVICE(0x8086, 0x0181) }, 725 { USB_DEVICE(0x8086, 0x0181) },
596 { USB_DEVICE(0x8086, 0x1403) }, 726 { USB_DEVICE(0x8086, 0x1403) },
597 { USB_DEVICE(0x8086, 0x1405) }, 727 { USB_DEVICE(0x8086, 0x1405) },
@@ -609,8 +739,11 @@ struct usb_driver i2400mu_driver = {
609 .name = KBUILD_MODNAME, 739 .name = KBUILD_MODNAME,
610 .suspend = i2400mu_suspend, 740 .suspend = i2400mu_suspend,
611 .resume = i2400mu_resume, 741 .resume = i2400mu_resume,
742 .reset_resume = i2400mu_reset_resume,
612 .probe = i2400mu_probe, 743 .probe = i2400mu_probe,
613 .disconnect = i2400mu_disconnect, 744 .disconnect = i2400mu_disconnect,
745 .pre_reset = i2400mu_pre_reset,
746 .post_reset = i2400mu_post_reset,
614 .id_table = i2400mu_id_table, 747 .id_table = i2400mu_id_table,
615 .supports_autosuspend = 1, 748 .supports_autosuspend = 1,
616}; 749};
@@ -618,6 +751,8 @@ struct usb_driver i2400mu_driver = {
618static 751static
619int __init i2400mu_driver_init(void) 752int __init i2400mu_driver_init(void)
620{ 753{
754 d_parse_params(D_LEVEL, D_LEVEL_SIZE, i2400mu_debug_params,
755 "i2400m_usb.debug");
621 return usb_register(&i2400mu_driver); 756 return usb_register(&i2400mu_driver);
622} 757}
623module_init(i2400mu_driver_init); 758module_init(i2400mu_driver_init);
@@ -632,7 +767,7 @@ void __exit i2400mu_driver_exit(void)
632module_exit(i2400mu_driver_exit); 767module_exit(i2400mu_driver_exit);
633 768
634MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>"); 769MODULE_AUTHOR("Intel Corporation <linux-wimax@intel.com>");
635MODULE_DESCRIPTION("Intel 2400M WiMAX networking for USB"); 770MODULE_DESCRIPTION("Driver for USB based Intel Wireless WiMAX Connection 2400M "
771 "(5x50 & 6050)");
636MODULE_LICENSE("GPL"); 772MODULE_LICENSE("GPL");
637MODULE_FIRMWARE(I2400MU_FW_FILE_NAME_v1_4); 773MODULE_FIRMWARE(I2400MU_FW_FILE_NAME_v1_4);
638MODULE_FIRMWARE(I2400MU_FW_FILE_NAME_v1_3);
diff --git a/drivers/net/wireless/Kconfig b/drivers/net/wireless/Kconfig
index d7a764a2fc1a..56dd6650c97a 100644
--- a/drivers/net/wireless/Kconfig
+++ b/drivers/net/wireless/Kconfig
@@ -5,6 +5,7 @@
5menuconfig WLAN 5menuconfig WLAN
6 bool "Wireless LAN" 6 bool "Wireless LAN"
7 depends on !S390 7 depends on !S390
8 select WIRELESS
8 default y 9 default y
9 ---help--- 10 ---help---
10 This section contains all the pre 802.11 and 802.11 wireless 11 This section contains all the pre 802.11 and 802.11 wireless
@@ -15,114 +16,12 @@ menuconfig WLAN
15 16
16if WLAN 17if WLAN
17 18
18menuconfig WLAN_PRE80211
19 bool "Wireless LAN (pre-802.11)"
20 depends on NETDEVICES
21 ---help---
22 Say Y if you have any pre-802.11 wireless LAN hardware.
23
24 This option does not affect the kernel build, it only
25 lets you choose drivers.
26
27config STRIP
28 tristate "STRIP (Metricom starmode radio IP)"
29 depends on INET && WLAN_PRE80211
30 select WIRELESS_EXT
31 ---help---
32 Say Y if you have a Metricom radio and intend to use Starmode Radio
33 IP. STRIP is a radio protocol developed for the MosquitoNet project
34 to send Internet traffic using Metricom radios. Metricom radios are
35 small, battery powered, 100kbit/sec packet radio transceivers, about
36 the size and weight of a cellular telephone. (You may also have heard
37 them called "Metricom modems" but we avoid the term "modem" because
38 it misleads many people into thinking that you can plug a Metricom
39 modem into a phone line and use it as a modem.)
40
41 You can use STRIP on any Linux machine with a serial port, although
42 it is obviously most useful for people with laptop computers. If you
43 think you might get a Metricom radio in the future, there is no harm
44 in saying Y to STRIP now, except that it makes the kernel a bit
45 bigger.
46
47 To compile this as a module, choose M here: the module will be
48 called strip.
49
50config ARLAN
51 tristate "Aironet Arlan 655 & IC2200 DS support"
52 depends on ISA && !64BIT && WLAN_PRE80211
53 select WIRELESS_EXT
54 ---help---
55 Aironet makes Arlan, a class of wireless LAN adapters. These use the
56 www.Telxon.com chip, which is also used on several similar cards.
57 This driver is tested on the 655 and IC2200 series cards. Look at
58 <http://www.ylenurme.ee/~elmer/655/> for the latest information.
59
60 The driver is built as two modules, arlan and arlan-proc. The latter
61 is the /proc interface and is not needed most of time.
62
63 On some computers the card ends up in non-valid state after some
64 time. Use a ping-reset script to clear it.
65
66config WAVELAN
67 tristate "AT&T/Lucent old WaveLAN & DEC RoamAbout DS ISA support"
68 depends on ISA && WLAN_PRE80211
69 select WIRELESS_EXT
70 ---help---
71 The Lucent WaveLAN (formerly NCR and AT&T; or DEC RoamAbout DS) is
72 a Radio LAN (wireless Ethernet-like Local Area Network) using the
73 radio frequencies 900 MHz and 2.4 GHz.
74
75 If you want to use an ISA WaveLAN card under Linux, say Y and read
76 the Ethernet-HOWTO, available from
77 <http://www.tldp.org/docs.html#howto>. Some more specific
78 information is contained in
79 <file:Documentation/networking/wavelan.txt> and in the source code
80 <file:drivers/net/wireless/wavelan.p.h>.
81
82 You will also need the wireless tools package available from
83 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
84 Please read the man pages contained therein.
85
86 To compile this driver as a module, choose M here: the module will be
87 called wavelan.
88
89config PCMCIA_WAVELAN
90 tristate "AT&T/Lucent old WaveLAN Pcmcia wireless support"
91 depends on PCMCIA && WLAN_PRE80211
92 select WIRELESS_EXT
93 help
94 Say Y here if you intend to attach an AT&T/Lucent Wavelan PCMCIA
95 (PC-card) wireless Ethernet networking card to your computer. This
96 driver is for the non-IEEE-802.11 Wavelan cards.
97
98 To compile this driver as a module, choose M here: the module will be
99 called wavelan_cs. If unsure, say N.
100
101config PCMCIA_NETWAVE
102 tristate "Xircom Netwave AirSurfer Pcmcia wireless support"
103 depends on PCMCIA && WLAN_PRE80211
104 select WIRELESS_EXT
105 help
106 Say Y here if you intend to attach this type of PCMCIA (PC-card)
107 wireless Ethernet networking card to your computer.
108
109 To compile this driver as a module, choose M here: the module will be
110 called netwave_cs. If unsure, say N.
111
112
113menuconfig WLAN_80211
114 bool "Wireless LAN (IEEE 802.11)"
115 depends on NETDEVICES
116 ---help---
117 Say Y if you have any 802.11 wireless LAN hardware.
118
119 This option does not affect the kernel build, it only
120 lets you choose drivers.
121
122config PCMCIA_RAYCS 19config PCMCIA_RAYCS
123 tristate "Aviator/Raytheon 2.4GHz wireless support" 20 tristate "Aviator/Raytheon 2.4GHz wireless support"
124 depends on PCMCIA && WLAN_80211 21 depends on PCMCIA
125 select WIRELESS_EXT 22 select WIRELESS_EXT
23 select WEXT_SPY
24 select WEXT_PRIV
126 ---help--- 25 ---help---
127 Say Y here if you intend to attach an Aviator/Raytheon PCMCIA 26 Say Y here if you intend to attach an Aviator/Raytheon PCMCIA
128 (PC-card) wireless Ethernet networking card to your computer. 27 (PC-card) wireless Ethernet networking card to your computer.
@@ -132,49 +31,9 @@ config PCMCIA_RAYCS
132 To compile this driver as a module, choose M here: the module will be 31 To compile this driver as a module, choose M here: the module will be
133 called ray_cs. If unsure, say N. 32 called ray_cs. If unsure, say N.
134 33
135config LIBERTAS
136 tristate "Marvell 8xxx Libertas WLAN driver support"
137 depends on WLAN_80211
138 select WIRELESS_EXT
139 select LIB80211
140 select FW_LOADER
141 ---help---
142 A library for Marvell Libertas 8xxx devices.
143
144config LIBERTAS_USB
145 tristate "Marvell Libertas 8388 USB 802.11b/g cards"
146 depends on LIBERTAS && USB
147 ---help---
148 A driver for Marvell Libertas 8388 USB devices.
149
150config LIBERTAS_CS
151 tristate "Marvell Libertas 8385 CompactFlash 802.11b/g cards"
152 depends on LIBERTAS && PCMCIA
153 select FW_LOADER
154 ---help---
155 A driver for Marvell Libertas 8385 CompactFlash devices.
156
157config LIBERTAS_SDIO
158 tristate "Marvell Libertas 8385/8686/8688 SDIO 802.11b/g cards"
159 depends on LIBERTAS && MMC
160 ---help---
161 A driver for Marvell Libertas 8385/8686/8688 SDIO devices.
162
163config LIBERTAS_SPI
164 tristate "Marvell Libertas 8686 SPI 802.11b/g cards"
165 depends on LIBERTAS && SPI
166 ---help---
167 A driver for Marvell Libertas 8686 SPI devices.
168
169config LIBERTAS_DEBUG
170 bool "Enable full debugging output in the Libertas module."
171 depends on LIBERTAS
172 ---help---
173 Debugging support.
174
175config LIBERTAS_THINFIRM 34config LIBERTAS_THINFIRM
176 tristate "Marvell 8xxx Libertas WLAN driver support with thin firmware" 35 tristate "Marvell 8xxx Libertas WLAN driver support with thin firmware"
177 depends on WLAN_80211 && MAC80211 36 depends on MAC80211
178 select FW_LOADER 37 select FW_LOADER
179 ---help--- 38 ---help---
180 A library for Marvell Libertas 8xxx devices using thinfirm. 39 A library for Marvell Libertas 8xxx devices using thinfirm.
@@ -187,9 +46,11 @@ config LIBERTAS_THINFIRM_USB
187 46
188config AIRO 47config AIRO
189 tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards" 48 tristate "Cisco/Aironet 34X/35X/4500/4800 ISA and PCI cards"
190 depends on ISA_DMA_API && WLAN_80211 && (PCI || BROKEN) 49 depends on ISA_DMA_API && (PCI || BROKEN)
191 select WIRELESS_EXT 50 select WIRELESS_EXT
192 select CRYPTO 51 select CRYPTO
52 select WEXT_SPY
53 select WEXT_PRIV
193 ---help--- 54 ---help---
194 This is the standard Linux driver to support Cisco/Aironet ISA and 55 This is the standard Linux driver to support Cisco/Aironet ISA and
195 PCI 802.11 wireless cards. 56 PCI 802.11 wireless cards.
@@ -205,8 +66,9 @@ config AIRO
205 66
206config ATMEL 67config ATMEL
207 tristate "Atmel at76c50x chipset 802.11b support" 68 tristate "Atmel at76c50x chipset 802.11b support"
208 depends on (PCI || PCMCIA) && WLAN_80211 69 depends on (PCI || PCMCIA)
209 select WIRELESS_EXT 70 select WIRELESS_EXT
71 select WEXT_PRIV
210 select FW_LOADER 72 select FW_LOADER
211 select CRC32 73 select CRC32
212 ---help--- 74 ---help---
@@ -239,7 +101,7 @@ config PCMCIA_ATMEL
239 101
240config AT76C50X_USB 102config AT76C50X_USB
241 tristate "Atmel at76c503/at76c505/at76c505a USB cards" 103 tristate "Atmel at76c503/at76c505/at76c505a USB cards"
242 depends on MAC80211 && WLAN_80211 && USB 104 depends on MAC80211 && USB
243 select FW_LOADER 105 select FW_LOADER
244 ---help--- 106 ---help---
245 Enable support for USB Wireless devices using Atmel at76c503, 107 Enable support for USB Wireless devices using Atmel at76c503,
@@ -247,8 +109,9 @@ config AT76C50X_USB
247 109
248config AIRO_CS 110config AIRO_CS
249 tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards" 111 tristate "Cisco/Aironet 34X/35X/4500/4800 PCMCIA cards"
250 depends on PCMCIA && (BROKEN || !M32R) && WLAN_80211 112 depends on PCMCIA && (BROKEN || !M32R)
251 select WIRELESS_EXT 113 select WIRELESS_EXT
114 select WEXT_SPY
252 select CRYPTO 115 select CRYPTO
253 select CRYPTO_AES 116 select CRYPTO_AES
254 ---help--- 117 ---help---
@@ -266,18 +129,21 @@ config AIRO_CS
266 Cisco Linux utilities can be used to configure the card. 129 Cisco Linux utilities can be used to configure the card.
267 130
268config PCMCIA_WL3501 131config PCMCIA_WL3501
269 tristate "Planet WL3501 PCMCIA cards" 132 tristate "Planet WL3501 PCMCIA cards"
270 depends on EXPERIMENTAL && PCMCIA && WLAN_80211 133 depends on EXPERIMENTAL && PCMCIA
271 select WIRELESS_EXT 134 select WIRELESS_EXT
272 ---help--- 135 select WEXT_SPY
273 A driver for WL3501 PCMCIA 802.11 wireless cards made by Planet. 136 help
274 It has basic support for Linux wireless extensions and initial 137 A driver for WL3501 PCMCIA 802.11 wireless cards made by Planet.
275 micro support for ethtool. 138 It has basic support for Linux wireless extensions and initial
139 micro support for ethtool.
276 140
277config PRISM54 141config PRISM54
278 tristate 'Intersil Prism GT/Duette/Indigo PCI/Cardbus (DEPRECATED)' 142 tristate 'Intersil Prism GT/Duette/Indigo PCI/Cardbus (DEPRECATED)'
279 depends on PCI && EXPERIMENTAL && WLAN_80211 143 depends on PCI && EXPERIMENTAL
280 select WIRELESS_EXT 144 select WIRELESS_EXT
145 select WEXT_SPY
146 select WEXT_PRIV
281 select FW_LOADER 147 select FW_LOADER
282 ---help--- 148 ---help---
283 This enables support for FullMAC PCI/Cardbus prism54 devices. This 149 This enables support for FullMAC PCI/Cardbus prism54 devices. This
@@ -298,8 +164,9 @@ config PRISM54
298 164
299config USB_ZD1201 165config USB_ZD1201
300 tristate "USB ZD1201 based Wireless device support" 166 tristate "USB ZD1201 based Wireless device support"
301 depends on USB && WLAN_80211 167 depends on USB
302 select WIRELESS_EXT 168 select WIRELESS_EXT
169 select WEXT_PRIV
303 select FW_LOADER 170 select FW_LOADER
304 ---help--- 171 ---help---
305 Say Y if you want to use wireless LAN adapters based on the ZyDAS 172 Say Y if you want to use wireless LAN adapters based on the ZyDAS
@@ -316,7 +183,7 @@ config USB_ZD1201
316 183
317config USB_NET_RNDIS_WLAN 184config USB_NET_RNDIS_WLAN
318 tristate "Wireless RNDIS USB support" 185 tristate "Wireless RNDIS USB support"
319 depends on USB && WLAN_80211 && EXPERIMENTAL 186 depends on USB && EXPERIMENTAL
320 depends on CFG80211 187 depends on CFG80211
321 select USB_USBNET 188 select USB_USBNET
322 select USB_NET_CDCETHER 189 select USB_NET_CDCETHER
@@ -344,7 +211,7 @@ config USB_NET_RNDIS_WLAN
344 211
345config RTL8180 212config RTL8180
346 tristate "Realtek 8180/8185 PCI support" 213 tristate "Realtek 8180/8185 PCI support"
347 depends on MAC80211 && PCI && WLAN_80211 && EXPERIMENTAL 214 depends on MAC80211 && PCI && EXPERIMENTAL
348 select EEPROM_93CX6 215 select EEPROM_93CX6
349 ---help--- 216 ---help---
350 This is a driver for RTL8180 and RTL8185 based cards. 217 This is a driver for RTL8180 and RTL8185 based cards.
@@ -400,7 +267,7 @@ config RTL8180
400 267
401config RTL8187 268config RTL8187
402 tristate "Realtek 8187 and 8187B USB support" 269 tristate "Realtek 8187 and 8187B USB support"
403 depends on MAC80211 && USB && WLAN_80211 270 depends on MAC80211 && USB
404 select EEPROM_93CX6 271 select EEPROM_93CX6
405 ---help--- 272 ---help---
406 This is a driver for RTL8187 and RTL8187B based cards. 273 This is a driver for RTL8187 and RTL8187B based cards.
@@ -429,7 +296,7 @@ config RTL8187_LEDS
429 296
430config ADM8211 297config ADM8211
431 tristate "ADMtek ADM8211 support" 298 tristate "ADMtek ADM8211 support"
432 depends on MAC80211 && PCI && WLAN_80211 && EXPERIMENTAL 299 depends on MAC80211 && PCI && EXPERIMENTAL
433 select CRC32 300 select CRC32
434 select EEPROM_93CX6 301 select EEPROM_93CX6
435 ---help--- 302 ---help---
@@ -456,7 +323,7 @@ config ADM8211
456 323
457config MAC80211_HWSIM 324config MAC80211_HWSIM
458 tristate "Simulated radio testing tool for mac80211" 325 tristate "Simulated radio testing tool for mac80211"
459 depends on MAC80211 && WLAN_80211 326 depends on MAC80211
460 ---help--- 327 ---help---
461 This driver is a developer testing tool that can be used to test 328 This driver is a developer testing tool that can be used to test
462 IEEE 802.11 networking stack (mac80211) functionality. This is not 329 IEEE 802.11 networking stack (mac80211) functionality. This is not
@@ -469,24 +336,25 @@ config MAC80211_HWSIM
469 336
470config MWL8K 337config MWL8K
471 tristate "Marvell 88W8xxx PCI/PCIe Wireless support" 338 tristate "Marvell 88W8xxx PCI/PCIe Wireless support"
472 depends on MAC80211 && PCI && WLAN_80211 && EXPERIMENTAL 339 depends on MAC80211 && PCI && EXPERIMENTAL
473 ---help--- 340 ---help---
474 This driver supports Marvell TOPDOG 802.11 wireless cards. 341 This driver supports Marvell TOPDOG 802.11 wireless cards.
475 342
476 To compile this driver as a module, choose M here: the module 343 To compile this driver as a module, choose M here: the module
477 will be called mwl8k. If unsure, say N. 344 will be called mwl8k. If unsure, say N.
478 345
479source "drivers/net/wireless/p54/Kconfig"
480source "drivers/net/wireless/ath/Kconfig" 346source "drivers/net/wireless/ath/Kconfig"
481source "drivers/net/wireless/ipw2x00/Kconfig"
482source "drivers/net/wireless/iwlwifi/Kconfig"
483source "drivers/net/wireless/hostap/Kconfig"
484source "drivers/net/wireless/b43/Kconfig" 347source "drivers/net/wireless/b43/Kconfig"
485source "drivers/net/wireless/b43legacy/Kconfig" 348source "drivers/net/wireless/b43legacy/Kconfig"
486source "drivers/net/wireless/zd1211rw/Kconfig" 349source "drivers/net/wireless/hostap/Kconfig"
487source "drivers/net/wireless/rt2x00/Kconfig" 350source "drivers/net/wireless/ipw2x00/Kconfig"
351source "drivers/net/wireless/iwlwifi/Kconfig"
352source "drivers/net/wireless/iwmc3200wifi/Kconfig"
353source "drivers/net/wireless/libertas/Kconfig"
488source "drivers/net/wireless/orinoco/Kconfig" 354source "drivers/net/wireless/orinoco/Kconfig"
355source "drivers/net/wireless/p54/Kconfig"
356source "drivers/net/wireless/rt2x00/Kconfig"
489source "drivers/net/wireless/wl12xx/Kconfig" 357source "drivers/net/wireless/wl12xx/Kconfig"
490source "drivers/net/wireless/iwmc3200wifi/Kconfig" 358source "drivers/net/wireless/zd1211rw/Kconfig"
491 359
492endif # WLAN 360endif # WLAN
diff --git a/drivers/net/wireless/Makefile b/drivers/net/wireless/Makefile
index 7a4647e78fd3..5d4ce4d2b32b 100644
--- a/drivers/net/wireless/Makefile
+++ b/drivers/net/wireless/Makefile
@@ -5,16 +5,6 @@
5obj-$(CONFIG_IPW2100) += ipw2x00/ 5obj-$(CONFIG_IPW2100) += ipw2x00/
6obj-$(CONFIG_IPW2200) += ipw2x00/ 6obj-$(CONFIG_IPW2200) += ipw2x00/
7 7
8obj-$(CONFIG_STRIP) += strip.o
9obj-$(CONFIG_ARLAN) += arlan.o
10
11arlan-objs := arlan-main.o arlan-proc.o
12
13# Obsolete cards
14obj-$(CONFIG_WAVELAN) += wavelan.o
15obj-$(CONFIG_PCMCIA_NETWAVE) += netwave_cs.o
16obj-$(CONFIG_PCMCIA_WAVELAN) += wavelan_cs.o
17
18obj-$(CONFIG_HERMES) += orinoco/ 8obj-$(CONFIG_HERMES) += orinoco/
19 9
20obj-$(CONFIG_AIRO) += airo.o 10obj-$(CONFIG_AIRO) += airo.o
diff --git a/drivers/net/wireless/airo.c b/drivers/net/wireless/airo.c
index abf896a7390e..4eec87c3be2b 100644
--- a/drivers/net/wireless/airo.c
+++ b/drivers/net/wireless/airo.c
@@ -5659,7 +5659,8 @@ static int airo_pci_suspend(struct pci_dev *pdev, pm_message_t state)
5659 5659
5660 pci_enable_wake(pdev, pci_choose_state(pdev, state), 1); 5660 pci_enable_wake(pdev, pci_choose_state(pdev, state), 1);
5661 pci_save_state(pdev); 5661 pci_save_state(pdev);
5662 return pci_set_power_state(pdev, pci_choose_state(pdev, state)); 5662 pci_set_power_state(pdev, pci_choose_state(pdev, state));
5663 return 0;
5663} 5664}
5664 5665
5665static int airo_pci_resume(struct pci_dev *pdev) 5666static int airo_pci_resume(struct pci_dev *pdev)
diff --git a/drivers/net/wireless/at76c50x-usb.c b/drivers/net/wireless/at76c50x-usb.c
index 8e1a55dec351..e559dc960552 100644
--- a/drivers/net/wireless/at76c50x-usb.c
+++ b/drivers/net/wireless/at76c50x-usb.c
@@ -2217,6 +2217,8 @@ static struct ieee80211_supported_band at76_supported_band = {
2217static int at76_init_new_device(struct at76_priv *priv, 2217static int at76_init_new_device(struct at76_priv *priv,
2218 struct usb_interface *interface) 2218 struct usb_interface *interface)
2219{ 2219{
2220 struct wiphy *wiphy;
2221 size_t len;
2220 int ret; 2222 int ret;
2221 2223
2222 /* set up the endpoint information */ 2224 /* set up the endpoint information */
@@ -2254,6 +2256,7 @@ static int at76_init_new_device(struct at76_priv *priv,
2254 priv->device_unplugged = 0; 2256 priv->device_unplugged = 0;
2255 2257
2256 /* mac80211 initialisation */ 2258 /* mac80211 initialisation */
2259 wiphy = priv->hw->wiphy;
2257 priv->hw->wiphy->max_scan_ssids = 1; 2260 priv->hw->wiphy->max_scan_ssids = 1;
2258 priv->hw->wiphy->max_scan_ie_len = 0; 2261 priv->hw->wiphy->max_scan_ie_len = 0;
2259 priv->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 2262 priv->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
@@ -2265,6 +2268,13 @@ static int at76_init_new_device(struct at76_priv *priv,
2265 SET_IEEE80211_DEV(priv->hw, &interface->dev); 2268 SET_IEEE80211_DEV(priv->hw, &interface->dev);
2266 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr); 2269 SET_IEEE80211_PERM_ADDR(priv->hw, priv->mac_addr);
2267 2270
2271 len = sizeof(wiphy->fw_version);
2272 snprintf(wiphy->fw_version, len, "%d.%d.%d-%d",
2273 priv->fw_version.major, priv->fw_version.minor,
2274 priv->fw_version.patch, priv->fw_version.build);
2275
2276 wiphy->hw_version = priv->board_type;
2277
2268 ret = ieee80211_register_hw(priv->hw); 2278 ret = ieee80211_register_hw(priv->hw);
2269 if (ret) { 2279 if (ret) {
2270 printk(KERN_ERR "cannot register mac80211 hw (status %d)!\n", 2280 printk(KERN_ERR "cannot register mac80211 hw (status %d)!\n",
diff --git a/drivers/net/wireless/ath/Kconfig b/drivers/net/wireless/ath/Kconfig
index 11ded150b932..4e7a7fd695c8 100644
--- a/drivers/net/wireless/ath/Kconfig
+++ b/drivers/net/wireless/ath/Kconfig
@@ -1,6 +1,5 @@
1menuconfig ATH_COMMON 1menuconfig ATH_COMMON
2 tristate "Atheros Wireless Cards" 2 tristate "Atheros Wireless Cards"
3 depends on WLAN_80211
4 depends on CFG80211 3 depends on CFG80211
5 ---help--- 4 ---help---
6 This will enable the support for the Atheros wireless drivers. 5 This will enable the support for the Atheros wireless drivers.
@@ -16,7 +15,15 @@ menuconfig ATH_COMMON
16 http://wireless.kernel.org/en/users/Drivers/Atheros 15 http://wireless.kernel.org/en/users/Drivers/Atheros
17 16
18if ATH_COMMON 17if ATH_COMMON
18
19config ATH_DEBUG
20 bool "Atheros wireless debugging"
21 ---help---
22 Say Y, if you want to debug atheros wireless drivers.
23 Right now only ath9k makes use of this.
24
19source "drivers/net/wireless/ath/ath5k/Kconfig" 25source "drivers/net/wireless/ath/ath5k/Kconfig"
20source "drivers/net/wireless/ath/ath9k/Kconfig" 26source "drivers/net/wireless/ath/ath9k/Kconfig"
21source "drivers/net/wireless/ath/ar9170/Kconfig" 27source "drivers/net/wireless/ath/ar9170/Kconfig"
28
22endif 29endif
diff --git a/drivers/net/wireless/ath/Makefile b/drivers/net/wireless/ath/Makefile
index 4bb0132ada37..8113a5042afa 100644
--- a/drivers/net/wireless/ath/Makefile
+++ b/drivers/net/wireless/ath/Makefile
@@ -1,6 +1,11 @@
1obj-$(CONFIG_ATH5K) += ath5k/ 1obj-$(CONFIG_ATH5K) += ath5k/
2obj-$(CONFIG_ATH9K) += ath9k/ 2obj-$(CONFIG_ATH9K_HW) += ath9k/
3obj-$(CONFIG_AR9170_USB) += ar9170/ 3obj-$(CONFIG_AR9170_USB) += ar9170/
4 4
5obj-$(CONFIG_ATH_COMMON) += ath.o 5obj-$(CONFIG_ATH_COMMON) += ath.o
6ath-objs := main.o regd.o 6
7ath-objs := main.o \
8 regd.o \
9 hw.o
10
11ath-$(CONFIG_ATH_DEBUG) += debug.o
diff --git a/drivers/net/wireless/ath/ar9170/Kconfig b/drivers/net/wireless/ath/ar9170/Kconfig
index 05918f1e685a..d7a4799d20fb 100644
--- a/drivers/net/wireless/ath/ar9170/Kconfig
+++ b/drivers/net/wireless/ath/ar9170/Kconfig
@@ -1,6 +1,6 @@
1config AR9170_USB 1config AR9170_USB
2 tristate "Atheros AR9170 802.11n USB support" 2 tristate "Atheros AR9170 802.11n USB support"
3 depends on USB && MAC80211 && WLAN_80211 3 depends on USB && MAC80211
4 select FW_LOADER 4 select FW_LOADER
5 help 5 help
6 This is a driver for the Atheros "otus" 802.11n USB devices. 6 This is a driver for the Atheros "otus" 802.11n USB devices.
diff --git a/drivers/net/wireless/ath/ar9170/ar9170.h b/drivers/net/wireless/ath/ar9170/ar9170.h
index 914e4718a9a8..9f9459860d82 100644
--- a/drivers/net/wireless/ath/ar9170/ar9170.h
+++ b/drivers/net/wireless/ath/ar9170/ar9170.h
@@ -172,8 +172,6 @@ struct ar9170 {
172 172
173 /* interface mode settings */ 173 /* interface mode settings */
174 struct ieee80211_vif *vif; 174 struct ieee80211_vif *vif;
175 u8 mac_addr[ETH_ALEN];
176 u8 bssid[ETH_ALEN];
177 175
178 /* beaconing */ 176 /* beaconing */
179 struct sk_buff *beacon; 177 struct sk_buff *beacon;
@@ -204,6 +202,8 @@ struct ar9170 {
204 u8 power_2G_ht20[8]; 202 u8 power_2G_ht20[8];
205 u8 power_2G_ht40[8]; 203 u8 power_2G_ht40[8];
206 204
205 u8 phy_heavy_clip;
206
207#ifdef CONFIG_AR9170_LEDS 207#ifdef CONFIG_AR9170_LEDS
208 struct delayed_work led_work; 208 struct delayed_work led_work;
209 struct ar9170_led leds[AR9170_NUM_LEDS]; 209 struct ar9170_led leds[AR9170_NUM_LEDS];
@@ -231,7 +231,7 @@ struct ar9170 {
231 struct sk_buff_head tx_status_ampdu; 231 struct sk_buff_head tx_status_ampdu;
232 spinlock_t tx_ampdu_list_lock; 232 spinlock_t tx_ampdu_list_lock;
233 struct list_head tx_ampdu_list; 233 struct list_head tx_ampdu_list;
234 unsigned int tx_ampdu_pending; 234 atomic_t tx_ampdu_pending;
235 235
236 /* rxstream mpdu merge */ 236 /* rxstream mpdu merge */
237 struct ar9170_rxstream_mpdu_merge rx_mpdu; 237 struct ar9170_rxstream_mpdu_merge rx_mpdu;
diff --git a/drivers/net/wireless/ath/ar9170/cmd.c b/drivers/net/wireless/ath/ar9170/cmd.c
index f57a6200167b..cf6f5c4174a6 100644
--- a/drivers/net/wireless/ath/ar9170/cmd.c
+++ b/drivers/net/wireless/ath/ar9170/cmd.c
@@ -72,8 +72,7 @@ int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val)
72 return err; 72 return err;
73} 73}
74 74
75static int ar9170_read_mreg(struct ar9170 *ar, int nregs, 75int ar9170_read_mreg(struct ar9170 *ar, int nregs, const u32 *regs, u32 *out)
76 const u32 *regs, u32 *out)
77{ 76{
78 int i, err; 77 int i, err;
79 __le32 *offs, *res; 78 __le32 *offs, *res;
diff --git a/drivers/net/wireless/ath/ar9170/cmd.h b/drivers/net/wireless/ath/ar9170/cmd.h
index a4f0e50e52b4..826c45e6b274 100644
--- a/drivers/net/wireless/ath/ar9170/cmd.h
+++ b/drivers/net/wireless/ath/ar9170/cmd.h
@@ -44,6 +44,7 @@
44int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len); 44int ar9170_write_mem(struct ar9170 *ar, const __le32 *data, size_t len);
45int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val); 45int ar9170_write_reg(struct ar9170 *ar, const u32 reg, const u32 val);
46int ar9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val); 46int ar9170_read_reg(struct ar9170 *ar, u32 reg, u32 *val);
47int ar9170_read_mreg(struct ar9170 *ar, int nregs, const u32 *regs, u32 *out);
47int ar9170_echo_test(struct ar9170 *ar, u32 v); 48int ar9170_echo_test(struct ar9170 *ar, u32 v);
48 49
49/* 50/*
diff --git a/drivers/net/wireless/ath/ar9170/hw.h b/drivers/net/wireless/ath/ar9170/hw.h
index 6cbfb2f83391..701ddb7d8400 100644
--- a/drivers/net/wireless/ath/ar9170/hw.h
+++ b/drivers/net/wireless/ath/ar9170/hw.h
@@ -152,14 +152,14 @@ enum ar9170_cmd {
152#define AR9170_MAC_REG_FTF_BIT14 BIT(14) 152#define AR9170_MAC_REG_FTF_BIT14 BIT(14)
153#define AR9170_MAC_REG_FTF_BIT15 BIT(15) 153#define AR9170_MAC_REG_FTF_BIT15 BIT(15)
154#define AR9170_MAC_REG_FTF_BAR BIT(24) 154#define AR9170_MAC_REG_FTF_BAR BIT(24)
155#define AR9170_MAC_REG_FTF_BIT25 BIT(25) 155#define AR9170_MAC_REG_FTF_BA BIT(25)
156#define AR9170_MAC_REG_FTF_PSPOLL BIT(26) 156#define AR9170_MAC_REG_FTF_PSPOLL BIT(26)
157#define AR9170_MAC_REG_FTF_RTS BIT(27) 157#define AR9170_MAC_REG_FTF_RTS BIT(27)
158#define AR9170_MAC_REG_FTF_CTS BIT(28) 158#define AR9170_MAC_REG_FTF_CTS BIT(28)
159#define AR9170_MAC_REG_FTF_ACK BIT(29) 159#define AR9170_MAC_REG_FTF_ACK BIT(29)
160#define AR9170_MAC_REG_FTF_CFE BIT(30) 160#define AR9170_MAC_REG_FTF_CFE BIT(30)
161#define AR9170_MAC_REG_FTF_CFE_ACK BIT(31) 161#define AR9170_MAC_REG_FTF_CFE_ACK BIT(31)
162#define AR9170_MAC_REG_FTF_DEFAULTS 0x0500ffff 162#define AR9170_MAC_REG_FTF_DEFAULTS 0x0700ffff
163#define AR9170_MAC_REG_FTF_MONITOR 0xfd00ffff 163#define AR9170_MAC_REG_FTF_MONITOR 0xfd00ffff
164 164
165#define AR9170_MAC_REG_RX_TOTAL (AR9170_MAC_REG_BASE + 0x6A0) 165#define AR9170_MAC_REG_RX_TOTAL (AR9170_MAC_REG_BASE + 0x6A0)
@@ -311,6 +311,8 @@ struct ar9170_tx_control {
311 311
312#define AR9170_TX_PHY_SHORT_GI 0x80000000 312#define AR9170_TX_PHY_SHORT_GI 0x80000000
313 313
314#define AR5416_MAX_RATE_POWER 63
315
314struct ar9170_rx_head { 316struct ar9170_rx_head {
315 u8 plcp[12]; 317 u8 plcp[12];
316} __packed; 318} __packed;
diff --git a/drivers/net/wireless/ath/ar9170/mac.c b/drivers/net/wireless/ath/ar9170/mac.c
index 614e3218a2bc..ddc8c09dc79e 100644
--- a/drivers/net/wireless/ath/ar9170/mac.c
+++ b/drivers/net/wireless/ath/ar9170/mac.c
@@ -35,6 +35,9 @@
35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF 35 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 36 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
37 */ 37 */
38
39#include <asm/unaligned.h>
40
38#include "ar9170.h" 41#include "ar9170.h"
39#include "cmd.h" 42#include "cmd.h"
40 43
@@ -227,11 +230,8 @@ static int ar9170_set_mac_reg(struct ar9170 *ar, const u32 reg, const u8 *mac)
227 230
228 ar9170_regwrite_begin(ar); 231 ar9170_regwrite_begin(ar);
229 232
230 ar9170_regwrite(reg, 233 ar9170_regwrite(reg, get_unaligned_le32(mac));
231 (mac[3] << 24) | (mac[2] << 16) | 234 ar9170_regwrite(reg + 4, get_unaligned_le16(mac + 4));
232 (mac[1] << 8) | mac[0]);
233
234 ar9170_regwrite(reg + 4, (mac[5] << 8) | mac[4]);
235 235
236 ar9170_regwrite_finish(); 236 ar9170_regwrite_finish();
237 237
@@ -311,13 +311,14 @@ static int ar9170_set_promiscouous(struct ar9170 *ar)
311 311
312int ar9170_set_operating_mode(struct ar9170 *ar) 312int ar9170_set_operating_mode(struct ar9170 *ar)
313{ 313{
314 struct ath_common *common = &ar->common;
314 u32 pm_mode = AR9170_MAC_REG_POWERMGT_DEFAULTS; 315 u32 pm_mode = AR9170_MAC_REG_POWERMGT_DEFAULTS;
315 u8 *mac_addr, *bssid; 316 u8 *mac_addr, *bssid;
316 int err; 317 int err;
317 318
318 if (ar->vif) { 319 if (ar->vif) {
319 mac_addr = ar->mac_addr; 320 mac_addr = common->macaddr;
320 bssid = ar->bssid; 321 bssid = common->curbssid;
321 322
322 switch (ar->vif->type) { 323 switch (ar->vif->type) {
323 case NL80211_IFTYPE_MESH_POINT: 324 case NL80211_IFTYPE_MESH_POINT:
diff --git a/drivers/net/wireless/ath/ar9170/main.c b/drivers/net/wireless/ath/ar9170/main.c
index c1f8c69db165..7e59b82e64d3 100644
--- a/drivers/net/wireless/ath/ar9170/main.c
+++ b/drivers/net/wireless/ath/ar9170/main.c
@@ -414,9 +414,9 @@ static void ar9170_tx_ampdu_callback(struct ar9170 *ar, struct sk_buff *skb)
414 414
415 skb_queue_tail(&ar->tx_status_ampdu, skb); 415 skb_queue_tail(&ar->tx_status_ampdu, skb);
416 ar9170_tx_fake_ampdu_status(ar); 416 ar9170_tx_fake_ampdu_status(ar);
417 ar->tx_ampdu_pending--;
418 417
419 if (!list_empty(&ar->tx_ampdu_list) && !ar->tx_ampdu_pending) 418 if (atomic_dec_and_test(&ar->tx_ampdu_pending) &&
419 !list_empty(&ar->tx_ampdu_list))
420 ar9170_tx_ampdu(ar); 420 ar9170_tx_ampdu(ar);
421} 421}
422 422
@@ -1248,6 +1248,7 @@ static int ar9170_op_start(struct ieee80211_hw *hw)
1248 ar->global_ampdu_density = 6; 1248 ar->global_ampdu_density = 6;
1249 ar->global_ampdu_factor = 3; 1249 ar->global_ampdu_factor = 3;
1250 1250
1251 atomic_set(&ar->tx_ampdu_pending, 0);
1251 ar->bad_hw_nagger = jiffies; 1252 ar->bad_hw_nagger = jiffies;
1252 1253
1253 err = ar->open(ar); 1254 err = ar->open(ar);
@@ -1773,7 +1774,7 @@ static void ar9170_tx(struct ar9170 *ar)
1773 msecs_to_jiffies(AR9170_TX_TIMEOUT); 1774 msecs_to_jiffies(AR9170_TX_TIMEOUT);
1774 1775
1775 if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK) 1776 if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
1776 ar->tx_ampdu_pending++; 1777 atomic_inc(&ar->tx_ampdu_pending);
1777 1778
1778#ifdef AR9170_QUEUE_DEBUG 1779#ifdef AR9170_QUEUE_DEBUG
1779 printk(KERN_DEBUG "%s: send frame q:%d =>\n", 1780 printk(KERN_DEBUG "%s: send frame q:%d =>\n",
@@ -1784,7 +1785,7 @@ static void ar9170_tx(struct ar9170 *ar)
1784 err = ar->tx(ar, skb); 1785 err = ar->tx(ar, skb);
1785 if (unlikely(err)) { 1786 if (unlikely(err)) {
1786 if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK) 1787 if (arinfo->flags == AR9170_TX_FLAG_BLOCK_ACK)
1787 ar->tx_ampdu_pending--; 1788 atomic_dec(&ar->tx_ampdu_pending);
1788 1789
1789 frames_failed++; 1790 frames_failed++;
1790 dev_kfree_skb_any(skb); 1791 dev_kfree_skb_any(skb);
@@ -1931,7 +1932,7 @@ int ar9170_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
1931 if (info->flags & IEEE80211_TX_CTL_AMPDU) { 1932 if (info->flags & IEEE80211_TX_CTL_AMPDU) {
1932 bool run = ar9170_tx_ampdu_queue(ar, skb); 1933 bool run = ar9170_tx_ampdu_queue(ar, skb);
1933 1934
1934 if (run || !ar->tx_ampdu_pending) 1935 if (run || !atomic_read(&ar->tx_ampdu_pending))
1935 ar9170_tx_ampdu(ar); 1936 ar9170_tx_ampdu(ar);
1936 } else { 1937 } else {
1937 unsigned int queue = skb_get_queue_mapping(skb); 1938 unsigned int queue = skb_get_queue_mapping(skb);
@@ -1952,6 +1953,7 @@ static int ar9170_op_add_interface(struct ieee80211_hw *hw,
1952 struct ieee80211_if_init_conf *conf) 1953 struct ieee80211_if_init_conf *conf)
1953{ 1954{
1954 struct ar9170 *ar = hw->priv; 1955 struct ar9170 *ar = hw->priv;
1956 struct ath_common *common = &ar->common;
1955 int err = 0; 1957 int err = 0;
1956 1958
1957 mutex_lock(&ar->mutex); 1959 mutex_lock(&ar->mutex);
@@ -1962,7 +1964,7 @@ static int ar9170_op_add_interface(struct ieee80211_hw *hw,
1962 } 1964 }
1963 1965
1964 ar->vif = conf->vif; 1966 ar->vif = conf->vif;
1965 memcpy(ar->mac_addr, conf->mac_addr, ETH_ALEN); 1967 memcpy(common->macaddr, conf->mac_addr, ETH_ALEN);
1966 1968
1967 if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) { 1969 if (modparam_nohwcrypt || (ar->vif->type != NL80211_IFTYPE_STATION)) {
1968 ar->rx_software_decryption = true; 1970 ar->rx_software_decryption = true;
@@ -2131,12 +2133,13 @@ static void ar9170_op_bss_info_changed(struct ieee80211_hw *hw,
2131 u32 changed) 2133 u32 changed)
2132{ 2134{
2133 struct ar9170 *ar = hw->priv; 2135 struct ar9170 *ar = hw->priv;
2136 struct ath_common *common = &ar->common;
2134 int err = 0; 2137 int err = 0;
2135 2138
2136 mutex_lock(&ar->mutex); 2139 mutex_lock(&ar->mutex);
2137 2140
2138 if (changed & BSS_CHANGED_BSSID) { 2141 if (changed & BSS_CHANGED_BSSID) {
2139 memcpy(ar->bssid, bss_conf->bssid, ETH_ALEN); 2142 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
2140 err = ar9170_set_operating_mode(ar); 2143 err = ar9170_set_operating_mode(ar);
2141 if (err) 2144 if (err)
2142 goto out; 2145 goto out;
@@ -2190,22 +2193,30 @@ static u64 ar9170_op_get_tsf(struct ieee80211_hw *hw)
2190{ 2193{
2191 struct ar9170 *ar = hw->priv; 2194 struct ar9170 *ar = hw->priv;
2192 int err; 2195 int err;
2193 u32 tsf_low;
2194 u32 tsf_high;
2195 u64 tsf; 2196 u64 tsf;
2197#define NR 3
2198 static const u32 addr[NR] = { AR9170_MAC_REG_TSF_H,
2199 AR9170_MAC_REG_TSF_L,
2200 AR9170_MAC_REG_TSF_H };
2201 u32 val[NR];
2202 int loops = 0;
2196 2203
2197 mutex_lock(&ar->mutex); 2204 mutex_lock(&ar->mutex);
2198 err = ar9170_read_reg(ar, AR9170_MAC_REG_TSF_L, &tsf_low); 2205
2199 if (!err) 2206 while (loops++ < 10) {
2200 err = ar9170_read_reg(ar, AR9170_MAC_REG_TSF_H, &tsf_high); 2207 err = ar9170_read_mreg(ar, NR, addr, val);
2208 if (err || val[0] == val[2])
2209 break;
2210 }
2211
2201 mutex_unlock(&ar->mutex); 2212 mutex_unlock(&ar->mutex);
2202 2213
2203 if (WARN_ON(err)) 2214 if (WARN_ON(err))
2204 return 0; 2215 return 0;
2205 2216 tsf = val[0];
2206 tsf = tsf_high; 2217 tsf = (tsf << 32) | val[1];
2207 tsf = (tsf << 32) | tsf_low;
2208 return tsf; 2218 return tsf;
2219#undef NR
2209} 2220}
2210 2221
2211static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 2222static int ar9170_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
diff --git a/drivers/net/wireless/ath/ar9170/phy.c b/drivers/net/wireless/ath/ar9170/phy.c
index dbd488da18b1..45a415ea809a 100644
--- a/drivers/net/wireless/ath/ar9170/phy.c
+++ b/drivers/net/wireless/ath/ar9170/phy.c
@@ -1239,9 +1239,6 @@ static u8 ar9170_get_max_edge_power(struct ar9170 *ar,
1239 struct ar9170_calctl_edges edges[], 1239 struct ar9170_calctl_edges edges[],
1240 u32 freq) 1240 u32 freq)
1241{ 1241{
1242/* TODO: move somewhere else */
1243#define AR5416_MAX_RATE_POWER 63
1244
1245 int i; 1242 int i;
1246 u8 rc = AR5416_MAX_RATE_POWER; 1243 u8 rc = AR5416_MAX_RATE_POWER;
1247 u8 f; 1244 u8 f;
@@ -1259,10 +1256,11 @@ static u8 ar9170_get_max_edge_power(struct ar9170 *ar,
1259 break; 1256 break;
1260 } 1257 }
1261 if (i > 0 && f < edges[i].channel) { 1258 if (i > 0 && f < edges[i].channel) {
1262 if (f > edges[i-1].channel && 1259 if (f > edges[i - 1].channel &&
1263 edges[i-1].power_flags & AR9170_CALCTL_EDGE_FLAGS) { 1260 edges[i - 1].power_flags &
1261 AR9170_CALCTL_EDGE_FLAGS) {
1264 /* lower channel has the inband flag set */ 1262 /* lower channel has the inband flag set */
1265 rc = edges[i-1].power_flags & 1263 rc = edges[i - 1].power_flags &
1266 ~AR9170_CALCTL_EDGE_FLAGS; 1264 ~AR9170_CALCTL_EDGE_FLAGS;
1267 } 1265 }
1268 break; 1266 break;
@@ -1270,18 +1268,48 @@ static u8 ar9170_get_max_edge_power(struct ar9170 *ar,
1270 } 1268 }
1271 1269
1272 if (i == AR5416_NUM_BAND_EDGES) { 1270 if (i == AR5416_NUM_BAND_EDGES) {
1273 if (f > edges[i-1].channel && 1271 if (f > edges[i - 1].channel &&
1274 edges[i-1].power_flags & AR9170_CALCTL_EDGE_FLAGS) { 1272 edges[i - 1].power_flags & AR9170_CALCTL_EDGE_FLAGS) {
1275 /* lower channel has the inband flag set */ 1273 /* lower channel has the inband flag set */
1276 rc = edges[i-1].power_flags & 1274 rc = edges[i - 1].power_flags &
1277 ~AR9170_CALCTL_EDGE_FLAGS; 1275 ~AR9170_CALCTL_EDGE_FLAGS;
1278 } 1276 }
1279 } 1277 }
1280 return rc; 1278 return rc;
1281} 1279}
1282 1280
1283/* calculate the conformance test limits and apply them to ar->power* 1281static u8 ar9170_get_heavy_clip(struct ar9170 *ar,
1284 * (derived from otus hal/hpmain.c, line 3706 ff.) 1282 struct ar9170_calctl_edges edges[],
1283 u32 freq, enum ar9170_bw bw)
1284{
1285 u8 f;
1286 int i;
1287 u8 rc = 0;
1288
1289 if (freq < 3000)
1290 f = freq - 2300;
1291 else
1292 f = (freq - 4800) / 5;
1293
1294 if (bw == AR9170_BW_40_BELOW || bw == AR9170_BW_40_ABOVE)
1295 rc |= 0xf0;
1296
1297 for (i = 0; i < AR5416_NUM_BAND_EDGES; i++) {
1298 if (edges[i].channel == 0xff)
1299 break;
1300 if (f == edges[i].channel) {
1301 if (!(edges[i].power_flags & AR9170_CALCTL_EDGE_FLAGS))
1302 rc |= 0x0f;
1303 break;
1304 }
1305 }
1306
1307 return rc;
1308}
1309
1310/*
1311 * calculate the conformance test limits and the heavy clip parameter
1312 * and apply them to ar->power* (derived from otus hal/hpmain.c, line 3706)
1285 */ 1313 */
1286static void ar9170_calc_ctl(struct ar9170 *ar, u32 freq, enum ar9170_bw bw) 1314static void ar9170_calc_ctl(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
1287{ 1315{
@@ -1295,7 +1323,8 @@ static void ar9170_calc_ctl(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
1295 int pwr_cal_len; 1323 int pwr_cal_len;
1296 } *modes; 1324 } *modes;
1297 1325
1298 /* order is relevant in the mode_list_*: we fall back to the 1326 /*
1327 * order is relevant in the mode_list_*: we fall back to the
1299 * lower indices if any mode is missed in the EEPROM. 1328 * lower indices if any mode is missed in the EEPROM.
1300 */ 1329 */
1301 struct ctl_modes mode_list_2ghz[] = { 1330 struct ctl_modes mode_list_2ghz[] = {
@@ -1313,7 +1342,10 @@ static void ar9170_calc_ctl(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
1313 1342
1314#define EDGES(c, n) (ar->eeprom.ctl_data[c].control_edges[n]) 1343#define EDGES(c, n) (ar->eeprom.ctl_data[c].control_edges[n])
1315 1344
1316 /* TODO: investigate the differences between OTUS' 1345 ar->phy_heavy_clip = 0;
1346
1347 /*
1348 * TODO: investigate the differences between OTUS'
1317 * hpreg.c::zfHpGetRegulatoryDomain() and 1349 * hpreg.c::zfHpGetRegulatoryDomain() and
1318 * ath/regd.c::ath_regd_get_band_ctl() - 1350 * ath/regd.c::ath_regd_get_band_ctl() -
1319 * e.g. for FCC3_WORLD the OTUS procedure 1351 * e.g. for FCC3_WORLD the OTUS procedure
@@ -1347,6 +1379,15 @@ static void ar9170_calc_ctl(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
1347 if (ctl_idx < AR5416_NUM_CTLS) { 1379 if (ctl_idx < AR5416_NUM_CTLS) {
1348 int f_off = 0; 1380 int f_off = 0;
1349 1381
1382 /* determine heav clip parameter from
1383 the 11G edges array */
1384 if (modes[i].ctl_mode == CTL_11G) {
1385 ar->phy_heavy_clip =
1386 ar9170_get_heavy_clip(ar,
1387 EDGES(ctl_idx, 1),
1388 freq, bw);
1389 }
1390
1350 /* adjust freq for 40MHz */ 1391 /* adjust freq for 40MHz */
1351 if (modes[i].ctl_mode == CTL_2GHT40 || 1392 if (modes[i].ctl_mode == CTL_2GHT40 ||
1352 modes[i].ctl_mode == CTL_5GHT40) { 1393 modes[i].ctl_mode == CTL_5GHT40) {
@@ -1360,13 +1401,15 @@ static void ar9170_calc_ctl(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
1360 ar9170_get_max_edge_power(ar, EDGES(ctl_idx, 1), 1401 ar9170_get_max_edge_power(ar, EDGES(ctl_idx, 1),
1361 freq+f_off); 1402 freq+f_off);
1362 1403
1363 /* TODO: check if the regulatory max. power is 1404 /*
1405 * TODO: check if the regulatory max. power is
1364 * controlled by cfg80211 for DFS 1406 * controlled by cfg80211 for DFS
1365 * (hpmain applies it to max_power itself for DFS freq) 1407 * (hpmain applies it to max_power itself for DFS freq)
1366 */ 1408 */
1367 1409
1368 } else { 1410 } else {
1369 /* Workaround in otus driver, hpmain.c, line 3906: 1411 /*
1412 * Workaround in otus driver, hpmain.c, line 3906:
1370 * if no data for 5GHT20 are found, take the 1413 * if no data for 5GHT20 are found, take the
1371 * legacy 5G value. 1414 * legacy 5G value.
1372 * We extend this here to fallback from any other *HT or 1415 * We extend this here to fallback from any other *HT or
@@ -1390,6 +1433,19 @@ static void ar9170_calc_ctl(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
1390 modes[i].max_power); 1433 modes[i].max_power);
1391 } 1434 }
1392 } 1435 }
1436
1437 if (ar->phy_heavy_clip & 0xf0) {
1438 ar->power_2G_ht40[0]--;
1439 ar->power_2G_ht40[1]--;
1440 ar->power_2G_ht40[2]--;
1441 }
1442 if (ar->phy_heavy_clip & 0xf) {
1443 ar->power_2G_ht20[0]++;
1444 ar->power_2G_ht20[1]++;
1445 ar->power_2G_ht20[2]++;
1446 }
1447
1448
1393#undef EDGES 1449#undef EDGES
1394} 1450}
1395 1451
@@ -1499,8 +1555,6 @@ static int ar9170_set_power_cal(struct ar9170 *ar, u32 freq, enum ar9170_bw bw)
1499 /* calc. conformance test limits and apply to ar->power*[] */ 1555 /* calc. conformance test limits and apply to ar->power*[] */
1500 ar9170_calc_ctl(ar, freq, bw); 1556 ar9170_calc_ctl(ar, freq, bw);
1501 1557
1502 /* TODO: (heavy clip) regulatory domain power level fine-tuning. */
1503
1504 /* set ACK/CTS TX power */ 1558 /* set ACK/CTS TX power */
1505 ar9170_regwrite_begin(ar); 1559 ar9170_regwrite_begin(ar);
1506 1560
@@ -1643,6 +1697,17 @@ int ar9170_set_channel(struct ar9170 *ar, struct ieee80211_channel *channel,
1643 if (err) 1697 if (err)
1644 return err; 1698 return err;
1645 1699
1700 if (ar->phy_heavy_clip) {
1701 err = ar9170_write_reg(ar, 0x1c59e0,
1702 0x200 | ar->phy_heavy_clip);
1703 if (err) {
1704 if (ar9170_nag_limiter(ar))
1705 printk(KERN_ERR "%s: failed to set "
1706 "heavy clip\n",
1707 wiphy_name(ar->hw->wiphy));
1708 }
1709 }
1710
1646 for (i = 0; i < 2; i++) { 1711 for (i = 0; i < 2; i++) {
1647 ar->noise[i] = ar9170_calc_noise_dbm( 1712 ar->noise[i] = ar9170_calc_noise_dbm(
1648 (le32_to_cpu(vals[2 + i]) >> 19) & 0x1ff); 1713 (le32_to_cpu(vals[2 + i]) >> 19) & 0x1ff);
diff --git a/drivers/net/wireless/ath/ar9170/usb.c b/drivers/net/wireless/ath/ar9170/usb.c
index e974e5829e1a..6bdcdf6d1cc0 100644
--- a/drivers/net/wireless/ath/ar9170/usb.c
+++ b/drivers/net/wireless/ath/ar9170/usb.c
@@ -108,15 +108,15 @@ static void ar9170_usb_submit_urb(struct ar9170_usb *aru)
108 return ; 108 return ;
109 109
110 spin_lock_irqsave(&aru->tx_urb_lock, flags); 110 spin_lock_irqsave(&aru->tx_urb_lock, flags);
111 if (aru->tx_submitted_urbs >= AR9170_NUM_TX_URBS) { 111 if (atomic_read(&aru->tx_submitted_urbs) >= AR9170_NUM_TX_URBS) {
112 spin_unlock_irqrestore(&aru->tx_urb_lock, flags); 112 spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
113 return ; 113 return ;
114 } 114 }
115 aru->tx_submitted_urbs++; 115 atomic_inc(&aru->tx_submitted_urbs);
116 116
117 urb = usb_get_from_anchor(&aru->tx_pending); 117 urb = usb_get_from_anchor(&aru->tx_pending);
118 if (!urb) { 118 if (!urb) {
119 aru->tx_submitted_urbs--; 119 atomic_dec(&aru->tx_submitted_urbs);
120 spin_unlock_irqrestore(&aru->tx_urb_lock, flags); 120 spin_unlock_irqrestore(&aru->tx_urb_lock, flags);
121 121
122 return ; 122 return ;
@@ -133,7 +133,7 @@ static void ar9170_usb_submit_urb(struct ar9170_usb *aru)
133 err); 133 err);
134 134
135 usb_unanchor_urb(urb); 135 usb_unanchor_urb(urb);
136 aru->tx_submitted_urbs--; 136 atomic_dec(&aru->tx_submitted_urbs);
137 ar9170_tx_callback(&aru->common, urb->context); 137 ar9170_tx_callback(&aru->common, urb->context);
138 } 138 }
139 139
@@ -151,7 +151,7 @@ static void ar9170_usb_tx_urb_complete_frame(struct urb *urb)
151 return ; 151 return ;
152 } 152 }
153 153
154 aru->tx_submitted_urbs--; 154 atomic_dec(&aru->tx_submitted_urbs);
155 155
156 ar9170_tx_callback(&aru->common, skb); 156 ar9170_tx_callback(&aru->common, skb);
157 157
@@ -794,7 +794,7 @@ static int ar9170_usb_probe(struct usb_interface *intf,
794 spin_lock_init(&aru->tx_urb_lock); 794 spin_lock_init(&aru->tx_urb_lock);
795 795
796 aru->tx_pending_urbs = 0; 796 aru->tx_pending_urbs = 0;
797 aru->tx_submitted_urbs = 0; 797 atomic_set(&aru->tx_submitted_urbs, 0);
798 798
799 aru->common.stop = ar9170_usb_stop; 799 aru->common.stop = ar9170_usb_stop;
800 aru->common.flush = ar9170_usb_flush; 800 aru->common.flush = ar9170_usb_flush;
diff --git a/drivers/net/wireless/ath/ar9170/usb.h b/drivers/net/wireless/ath/ar9170/usb.h
index d098f4d5d2f2..a2ce3b169ceb 100644
--- a/drivers/net/wireless/ath/ar9170/usb.h
+++ b/drivers/net/wireless/ath/ar9170/usb.h
@@ -67,7 +67,7 @@ struct ar9170_usb {
67 bool req_one_stage_fw; 67 bool req_one_stage_fw;
68 68
69 spinlock_t tx_urb_lock; 69 spinlock_t tx_urb_lock;
70 unsigned int tx_submitted_urbs; 70 atomic_t tx_submitted_urbs;
71 unsigned int tx_pending_urbs; 71 unsigned int tx_pending_urbs;
72 72
73 struct completion cmd_wait; 73 struct completion cmd_wait;
diff --git a/drivers/net/wireless/ath/ath.h b/drivers/net/wireless/ath/ath.h
index a63e90cbf9e5..5e19a7330d39 100644
--- a/drivers/net/wireless/ath/ath.h
+++ b/drivers/net/wireless/ath/ath.h
@@ -18,6 +18,15 @@
18#define ATH_H 18#define ATH_H
19 19
20#include <linux/skbuff.h> 20#include <linux/skbuff.h>
21#include <linux/if_ether.h>
22#include <net/mac80211.h>
23
24static const u8 ath_bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
25
26enum ath_device_state {
27 ATH_HW_UNAVAILABLE,
28 ATH_HW_INITIALIZED,
29};
21 30
22struct reg_dmn_pair_mapping { 31struct reg_dmn_pair_mapping {
23 u16 regDmnEnum; 32 u16 regDmnEnum;
@@ -36,13 +45,45 @@ struct ath_regulatory {
36 struct reg_dmn_pair_mapping *regpair; 45 struct reg_dmn_pair_mapping *regpair;
37}; 46};
38 47
48struct ath_ops {
49 unsigned int (*read)(void *, u32 reg_offset);
50 void (*write)(void *, u32 val, u32 reg_offset);
51};
52
53struct ath_common;
54
55struct ath_bus_ops {
56 void (*read_cachesize)(struct ath_common *common, int *csz);
57 void (*cleanup)(struct ath_common *common);
58 bool (*eeprom_read)(struct ath_common *common, u32 off, u16 *data);
59 void (*bt_coex_prep)(struct ath_common *common);
60};
61
39struct ath_common { 62struct ath_common {
63 void *ah;
64 void *priv;
65 struct ieee80211_hw *hw;
66 int debug_mask;
67 enum ath_device_state state;
68
40 u16 cachelsz; 69 u16 cachelsz;
70 u16 curaid;
71 u8 macaddr[ETH_ALEN];
72 u8 curbssid[ETH_ALEN];
73 u8 bssidmask[ETH_ALEN];
74
75 u8 tx_chainmask;
76 u8 rx_chainmask;
77
41 struct ath_regulatory regulatory; 78 struct ath_regulatory regulatory;
79 const struct ath_ops *ops;
80 const struct ath_bus_ops *bus_ops;
42}; 81};
43 82
44struct sk_buff *ath_rxbuf_alloc(struct ath_common *common, 83struct sk_buff *ath_rxbuf_alloc(struct ath_common *common,
45 u32 len, 84 u32 len,
46 gfp_t gfp_mask); 85 gfp_t gfp_mask);
47 86
87void ath_hw_setbssidmask(struct ath_common *common);
88
48#endif /* ATH_H */ 89#endif /* ATH_H */
diff --git a/drivers/net/wireless/ath/ath5k/Kconfig b/drivers/net/wireless/ath/ath5k/Kconfig
index 06d006675d7d..eb83b7b4d0e3 100644
--- a/drivers/net/wireless/ath/ath5k/Kconfig
+++ b/drivers/net/wireless/ath/ath5k/Kconfig
@@ -1,6 +1,6 @@
1config ATH5K 1config ATH5K
2 tristate "Atheros 5xxx wireless cards support" 2 tristate "Atheros 5xxx wireless cards support"
3 depends on PCI && MAC80211 && WLAN_80211 3 depends on PCI && MAC80211
4 select MAC80211_LEDS 4 select MAC80211_LEDS
5 select LEDS_CLASS 5 select LEDS_CLASS
6 select NEW_LEDS 6 select NEW_LEDS
diff --git a/drivers/net/wireless/ath/ath5k/ath5k.h b/drivers/net/wireless/ath/ath5k/ath5k.h
index 6cd5efcec417..6a2a96761111 100644
--- a/drivers/net/wireless/ath/ath5k/ath5k.h
+++ b/drivers/net/wireless/ath/ath5k/ath5k.h
@@ -35,6 +35,7 @@
35 * TODO: Make a more generic struct (eg. add more stuff to ath5k_capabilities) 35 * TODO: Make a more generic struct (eg. add more stuff to ath5k_capabilities)
36 * and clean up common bits, then introduce set/get functions in eeprom.c */ 36 * and clean up common bits, then introduce set/get functions in eeprom.c */
37#include "eeprom.h" 37#include "eeprom.h"
38#include "../ath.h"
38 39
39/* PCI IDs */ 40/* PCI IDs */
40#define PCI_DEVICE_ID_ATHEROS_AR5210 0x0007 /* AR5210 */ 41#define PCI_DEVICE_ID_ATHEROS_AR5210 0x0007 /* AR5210 */
@@ -165,13 +166,6 @@
165#define AR5K_INI_VAL_XR 0 166#define AR5K_INI_VAL_XR 0
166#define AR5K_INI_VAL_MAX 5 167#define AR5K_INI_VAL_MAX 5
167 168
168/* Used for BSSID etc manipulation */
169#define AR5K_LOW_ID(_a)( \
170(_a)[0] | (_a)[1] << 8 | (_a)[2] << 16 | (_a)[3] << 24 \
171)
172
173#define AR5K_HIGH_ID(_a) ((_a)[4] | (_a)[5] << 8)
174
175/* 169/*
176 * Some tuneable values (these should be changeable by the user) 170 * Some tuneable values (these should be changeable by the user)
177 * TODO: Make use of them and add more options OR use debug/configfs 171 * TODO: Make use of them and add more options OR use debug/configfs
@@ -204,6 +198,7 @@
204#define AR5K_TUNE_CWMAX_11B 1023 198#define AR5K_TUNE_CWMAX_11B 1023
205#define AR5K_TUNE_CWMAX_XR 7 199#define AR5K_TUNE_CWMAX_XR 7
206#define AR5K_TUNE_NOISE_FLOOR -72 200#define AR5K_TUNE_NOISE_FLOOR -72
201#define AR5K_TUNE_CCA_MAX_GOOD_VALUE -95
207#define AR5K_TUNE_MAX_TXPOWER 63 202#define AR5K_TUNE_MAX_TXPOWER 63
208#define AR5K_TUNE_DEFAULT_TXPOWER 25 203#define AR5K_TUNE_DEFAULT_TXPOWER 25
209#define AR5K_TUNE_TPC_TXPOWER false 204#define AR5K_TUNE_TPC_TXPOWER false
@@ -1012,6 +1007,14 @@ struct ath5k_capabilities {
1012 } cap_queues; 1007 } cap_queues;
1013}; 1008};
1014 1009
1010/* size of noise floor history (keep it a power of two) */
1011#define ATH5K_NF_CAL_HIST_MAX 8
1012struct ath5k_nfcal_hist
1013{
1014 s16 index; /* current index into nfval */
1015 s16 nfval[ATH5K_NF_CAL_HIST_MAX]; /* last few noise floors */
1016};
1017
1015 1018
1016/***************************************\ 1019/***************************************\
1017 HARDWARE ABSTRACTION LAYER STRUCTURE 1020 HARDWARE ABSTRACTION LAYER STRUCTURE
@@ -1027,6 +1030,7 @@ struct ath5k_capabilities {
1027/* TODO: Clean up and merge with ath5k_softc */ 1030/* TODO: Clean up and merge with ath5k_softc */
1028struct ath5k_hw { 1031struct ath5k_hw {
1029 u32 ah_magic; 1032 u32 ah_magic;
1033 struct ath_common common;
1030 1034
1031 struct ath5k_softc *ah_sc; 1035 struct ath5k_softc *ah_sc;
1032 void __iomem *ah_iobase; 1036 void __iomem *ah_iobase;
@@ -1067,14 +1071,6 @@ struct ath5k_hw {
1067 u8 ah_def_ant; 1071 u8 ah_def_ant;
1068 bool ah_software_retry; 1072 bool ah_software_retry;
1069 1073
1070 u8 ah_sta_id[ETH_ALEN];
1071
1072 /* Current BSSID we are trying to assoc to / create.
1073 * This is passed by mac80211 on config_interface() and cached here for
1074 * use in resets */
1075 u8 ah_bssid[ETH_ALEN];
1076 u8 ah_bssid_mask[ETH_ALEN];
1077
1078 int ah_gpio_npins; 1074 int ah_gpio_npins;
1079 1075
1080 struct ath5k_capabilities ah_capabilities; 1076 struct ath5k_capabilities ah_capabilities;
@@ -1125,6 +1121,8 @@ struct ath5k_hw {
1125 struct ieee80211_channel r_last_channel; 1121 struct ieee80211_channel r_last_channel;
1126 } ah_radar; 1122 } ah_radar;
1127 1123
1124 struct ath5k_nfcal_hist ah_nfcal_hist;
1125
1128 /* noise floor from last periodic calibration */ 1126 /* noise floor from last periodic calibration */
1129 s32 ah_noise_floor; 1127 s32 ah_noise_floor;
1130 1128
@@ -1160,7 +1158,7 @@ struct ath5k_hw {
1160 */ 1158 */
1161 1159
1162/* Attach/Detach Functions */ 1160/* Attach/Detach Functions */
1163extern struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc); 1161extern int ath5k_hw_attach(struct ath5k_softc *sc);
1164extern void ath5k_hw_detach(struct ath5k_hw *ah); 1162extern void ath5k_hw_detach(struct ath5k_hw *ah);
1165 1163
1166/* LED functions */ 1164/* LED functions */
@@ -1203,10 +1201,9 @@ extern bool ath5k_eeprom_is_hb63(struct ath5k_hw *ah);
1203/* Protocol Control Unit Functions */ 1201/* Protocol Control Unit Functions */
1204extern int ath5k_hw_set_opmode(struct ath5k_hw *ah); 1202extern int ath5k_hw_set_opmode(struct ath5k_hw *ah);
1205/* BSSID Functions */ 1203/* BSSID Functions */
1206extern void ath5k_hw_get_lladdr(struct ath5k_hw *ah, u8 *mac);
1207extern int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac); 1204extern int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac);
1208extern void ath5k_hw_set_associd(struct ath5k_hw *ah, const u8 *bssid, u16 assoc_id); 1205extern void ath5k_hw_set_associd(struct ath5k_hw *ah);
1209extern int ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask); 1206extern void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask);
1210/* Receive start/stop functions */ 1207/* Receive start/stop functions */
1211extern void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah); 1208extern void ath5k_hw_start_rx_pcu(struct ath5k_hw *ah);
1212extern void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah); 1209extern void ath5k_hw_stop_rx_pcu(struct ath5k_hw *ah);
@@ -1288,8 +1285,10 @@ extern int ath5k_hw_rfgain_opt_init(struct ath5k_hw *ah);
1288extern bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags); 1285extern bool ath5k_channel_ok(struct ath5k_hw *ah, u16 freq, unsigned int flags);
1289extern int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel); 1286extern int ath5k_hw_channel(struct ath5k_hw *ah, struct ieee80211_channel *channel);
1290/* PHY calibration */ 1287/* PHY calibration */
1288void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah);
1291extern int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel); 1289extern int ath5k_hw_phy_calibrate(struct ath5k_hw *ah, struct ieee80211_channel *channel);
1292extern int ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq); 1290extern int ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq);
1291extern s16 ath5k_hw_get_noise_floor(struct ath5k_hw *ah);
1293extern void ath5k_hw_calibration_poll(struct ath5k_hw *ah); 1292extern void ath5k_hw_calibration_poll(struct ath5k_hw *ah);
1294/* Spur mitigation */ 1293/* Spur mitigation */
1295bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah, 1294bool ath5k_hw_chan_has_spur_noise(struct ath5k_hw *ah,
@@ -1329,17 +1328,21 @@ static inline unsigned int ath5k_hw_clocktoh(unsigned int clock, bool turbo)
1329 return turbo ? (clock / 80) : (clock / 40); 1328 return turbo ? (clock / 80) : (clock / 40);
1330} 1329}
1331 1330
1332/* 1331static inline struct ath_common *ath5k_hw_common(struct ath5k_hw *ah)
1333 * Read from a register 1332{
1334 */ 1333 return &ah->common;
1334}
1335
1336static inline struct ath_regulatory *ath5k_hw_regulatory(struct ath5k_hw *ah)
1337{
1338 return &(ath5k_hw_common(ah)->regulatory);
1339}
1340
1335static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg) 1341static inline u32 ath5k_hw_reg_read(struct ath5k_hw *ah, u16 reg)
1336{ 1342{
1337 return ioread32(ah->ah_iobase + reg); 1343 return ioread32(ah->ah_iobase + reg);
1338} 1344}
1339 1345
1340/*
1341 * Write to a register
1342 */
1343static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg) 1346static inline void ath5k_hw_reg_write(struct ath5k_hw *ah, u32 val, u16 reg)
1344{ 1347{
1345 iowrite32(val, ah->ah_iobase + reg); 1348 iowrite32(val, ah->ah_iobase + reg);
diff --git a/drivers/net/wireless/ath/ath5k/attach.c b/drivers/net/wireless/ath/ath5k/attach.c
index 71a1bd254517..42284445b75e 100644
--- a/drivers/net/wireless/ath/ath5k/attach.c
+++ b/drivers/net/wireless/ath/ath5k/attach.c
@@ -101,25 +101,15 @@ static int ath5k_hw_post(struct ath5k_hw *ah)
101 * -ENODEV if the device is not supported or prints an error msg if something 101 * -ENODEV if the device is not supported or prints an error msg if something
102 * else went wrong. 102 * else went wrong.
103 */ 103 */
104struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc) 104int ath5k_hw_attach(struct ath5k_softc *sc)
105{ 105{
106 struct ath5k_hw *ah; 106 struct ath5k_hw *ah = sc->ah;
107 struct ath_common *common = ath5k_hw_common(ah);
107 struct pci_dev *pdev = sc->pdev; 108 struct pci_dev *pdev = sc->pdev;
108 struct ath5k_eeprom_info *ee; 109 struct ath5k_eeprom_info *ee;
109 int ret; 110 int ret;
110 u32 srev; 111 u32 srev;
111 112
112 /*If we passed the test malloc a ath5k_hw struct*/
113 ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
114 if (ah == NULL) {
115 ret = -ENOMEM;
116 ATH5K_ERR(sc, "out of memory\n");
117 goto err;
118 }
119
120 ah->ah_sc = sc;
121 ah->ah_iobase = sc->iobase;
122
123 /* 113 /*
124 * HW information 114 * HW information
125 */ 115 */
@@ -278,12 +268,12 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc)
278 goto err_free; 268 goto err_free;
279 } 269 }
280 270
271 ee = &ah->ah_capabilities.cap_eeprom;
272
281 /* 273 /*
282 * Write PCI-E power save settings 274 * Write PCI-E power save settings
283 */ 275 */
284 if ((ah->ah_version == AR5K_AR5212) && (pdev->is_pcie)) { 276 if ((ah->ah_version == AR5K_AR5212) && (pdev->is_pcie)) {
285 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
286
287 ath5k_hw_reg_write(ah, 0x9248fc00, AR5K_PCIE_SERDES); 277 ath5k_hw_reg_write(ah, 0x9248fc00, AR5K_PCIE_SERDES);
288 ath5k_hw_reg_write(ah, 0x24924924, AR5K_PCIE_SERDES); 278 ath5k_hw_reg_write(ah, 0x24924924, AR5K_PCIE_SERDES);
289 279
@@ -321,7 +311,6 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc)
321 } 311 }
322 312
323 /* Crypto settings */ 313 /* Crypto settings */
324 ee = &ah->ah_capabilities.cap_eeprom;
325 ah->ah_aes_support = srev >= AR5K_SREV_AR5212_V4 && 314 ah->ah_aes_support = srev >= AR5K_SREV_AR5212_V4 &&
326 (ee->ee_version >= AR5K_EEPROM_VERSION_5_0 && 315 (ee->ee_version >= AR5K_EEPROM_VERSION_5_0 &&
327 !AR5K_EEPROM_AES_DIS(ee->ee_misc5)); 316 !AR5K_EEPROM_AES_DIS(ee->ee_misc5));
@@ -336,20 +325,21 @@ struct ath5k_hw *ath5k_hw_attach(struct ath5k_softc *sc)
336 ath5k_hw_set_lladdr(ah, (u8[ETH_ALEN]){}); 325 ath5k_hw_set_lladdr(ah, (u8[ETH_ALEN]){});
337 326
338 /* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */ 327 /* Set BSSID to bcast address: ff:ff:ff:ff:ff:ff for now */
339 memset(ah->ah_bssid, 0xff, ETH_ALEN); 328 memcpy(common->curbssid, ath_bcast_mac, ETH_ALEN);
340 ath5k_hw_set_associd(ah, ah->ah_bssid, 0); 329 ath5k_hw_set_associd(ah);
341 ath5k_hw_set_opmode(ah); 330 ath5k_hw_set_opmode(ah);
342 331
343 ath5k_hw_rfgain_opt_init(ah); 332 ath5k_hw_rfgain_opt_init(ah);
344 333
334 ath5k_hw_init_nfcal_hist(ah);
335
345 /* turn on HW LEDs */ 336 /* turn on HW LEDs */
346 ath5k_hw_set_ledstate(ah, AR5K_LED_INIT); 337 ath5k_hw_set_ledstate(ah, AR5K_LED_INIT);
347 338
348 return ah; 339 return 0;
349err_free: 340err_free:
350 kfree(ah); 341 kfree(ah);
351err: 342 return ret;
352 return ERR_PTR(ret);
353} 343}
354 344
355/** 345/**
@@ -369,5 +359,4 @@ void ath5k_hw_detach(struct ath5k_hw *ah)
369 ath5k_eeprom_detach(ah); 359 ath5k_eeprom_detach(ah);
370 360
371 /* assume interrupts are down */ 361 /* assume interrupts are down */
372 kfree(ah);
373} 362}
diff --git a/drivers/net/wireless/ath/ath5k/base.c b/drivers/net/wireless/ath/ath5k/base.c
index 95a8e232b58f..cb3dc892d697 100644
--- a/drivers/net/wireless/ath/ath5k/base.c
+++ b/drivers/net/wireless/ath/ath5k/base.c
@@ -195,12 +195,13 @@ static int __devinit ath5k_pci_probe(struct pci_dev *pdev,
195 const struct pci_device_id *id); 195 const struct pci_device_id *id);
196static void __devexit ath5k_pci_remove(struct pci_dev *pdev); 196static void __devexit ath5k_pci_remove(struct pci_dev *pdev);
197#ifdef CONFIG_PM 197#ifdef CONFIG_PM
198static int ath5k_pci_suspend(struct pci_dev *pdev, 198static int ath5k_pci_suspend(struct device *dev);
199 pm_message_t state); 199static int ath5k_pci_resume(struct device *dev);
200static int ath5k_pci_resume(struct pci_dev *pdev); 200
201SIMPLE_DEV_PM_OPS(ath5k_pm_ops, ath5k_pci_suspend, ath5k_pci_resume);
202#define ATH5K_PM_OPS (&ath5k_pm_ops)
201#else 203#else
202#define ath5k_pci_suspend NULL 204#define ATH5K_PM_OPS NULL
203#define ath5k_pci_resume NULL
204#endif /* CONFIG_PM */ 205#endif /* CONFIG_PM */
205 206
206static struct pci_driver ath5k_pci_driver = { 207static struct pci_driver ath5k_pci_driver = {
@@ -208,8 +209,7 @@ static struct pci_driver ath5k_pci_driver = {
208 .id_table = ath5k_pci_id_table, 209 .id_table = ath5k_pci_id_table,
209 .probe = ath5k_pci_probe, 210 .probe = ath5k_pci_probe,
210 .remove = __devexit_p(ath5k_pci_remove), 211 .remove = __devexit_p(ath5k_pci_remove),
211 .suspend = ath5k_pci_suspend, 212 .driver.pm = ATH5K_PM_OPS,
212 .resume = ath5k_pci_resume,
213}; 213};
214 214
215 215
@@ -437,6 +437,22 @@ ath5k_chip_name(enum ath5k_srev_type type, u_int16_t val)
437 437
438 return name; 438 return name;
439} 439}
440static unsigned int ath5k_ioread32(void *hw_priv, u32 reg_offset)
441{
442 struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
443 return ath5k_hw_reg_read(ah, reg_offset);
444}
445
446static void ath5k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
447{
448 struct ath5k_hw *ah = (struct ath5k_hw *) hw_priv;
449 ath5k_hw_reg_write(ah, val, reg_offset);
450}
451
452static const struct ath_ops ath5k_common_ops = {
453 .read = ath5k_ioread32,
454 .write = ath5k_iowrite32,
455};
440 456
441static int __devinit 457static int __devinit
442ath5k_pci_probe(struct pci_dev *pdev, 458ath5k_pci_probe(struct pci_dev *pdev,
@@ -444,6 +460,7 @@ ath5k_pci_probe(struct pci_dev *pdev,
444{ 460{
445 void __iomem *mem; 461 void __iomem *mem;
446 struct ath5k_softc *sc; 462 struct ath5k_softc *sc;
463 struct ath_common *common;
447 struct ieee80211_hw *hw; 464 struct ieee80211_hw *hw;
448 int ret; 465 int ret;
449 u8 csz; 466 u8 csz;
@@ -547,7 +564,6 @@ ath5k_pci_probe(struct pci_dev *pdev,
547 __set_bit(ATH_STAT_INVALID, sc->status); 564 __set_bit(ATH_STAT_INVALID, sc->status);
548 565
549 sc->iobase = mem; /* So we can unmap it on detach */ 566 sc->iobase = mem; /* So we can unmap it on detach */
550 sc->common.cachelsz = csz << 2; /* convert to bytes */
551 sc->opmode = NL80211_IFTYPE_STATION; 567 sc->opmode = NL80211_IFTYPE_STATION;
552 sc->bintval = 1000; 568 sc->bintval = 1000;
553 mutex_init(&sc->lock); 569 mutex_init(&sc->lock);
@@ -565,13 +581,28 @@ ath5k_pci_probe(struct pci_dev *pdev,
565 goto err_free; 581 goto err_free;
566 } 582 }
567 583
568 /* Initialize device */ 584 /*If we passed the test malloc a ath5k_hw struct*/
569 sc->ah = ath5k_hw_attach(sc); 585 sc->ah = kzalloc(sizeof(struct ath5k_hw), GFP_KERNEL);
570 if (IS_ERR(sc->ah)) { 586 if (!sc->ah) {
571 ret = PTR_ERR(sc->ah); 587 ret = -ENOMEM;
588 ATH5K_ERR(sc, "out of memory\n");
572 goto err_irq; 589 goto err_irq;
573 } 590 }
574 591
592 sc->ah->ah_sc = sc;
593 sc->ah->ah_iobase = sc->iobase;
594 common = ath5k_hw_common(sc->ah);
595 common->ops = &ath5k_common_ops;
596 common->ah = sc->ah;
597 common->hw = hw;
598 common->cachelsz = csz << 2; /* convert to bytes */
599
600 /* Initialize device */
601 ret = ath5k_hw_attach(sc);
602 if (ret) {
603 goto err_free_ah;
604 }
605
575 /* set up multi-rate retry capabilities */ 606 /* set up multi-rate retry capabilities */
576 if (sc->ah->ah_version == AR5K_AR5212) { 607 if (sc->ah->ah_version == AR5K_AR5212) {
577 hw->max_rates = 4; 608 hw->max_rates = 4;
@@ -640,6 +671,8 @@ err_ah:
640 ath5k_hw_detach(sc->ah); 671 ath5k_hw_detach(sc->ah);
641err_irq: 672err_irq:
642 free_irq(pdev->irq, sc); 673 free_irq(pdev->irq, sc);
674err_free_ah:
675 kfree(sc->ah);
643err_free: 676err_free:
644 ieee80211_free_hw(hw); 677 ieee80211_free_hw(hw);
645err_map: 678err_map:
@@ -661,6 +694,7 @@ ath5k_pci_remove(struct pci_dev *pdev)
661 ath5k_debug_finish_device(sc); 694 ath5k_debug_finish_device(sc);
662 ath5k_detach(pdev, hw); 695 ath5k_detach(pdev, hw);
663 ath5k_hw_detach(sc->ah); 696 ath5k_hw_detach(sc->ah);
697 kfree(sc->ah);
664 free_irq(pdev->irq, sc); 698 free_irq(pdev->irq, sc);
665 pci_iounmap(pdev, sc->iobase); 699 pci_iounmap(pdev, sc->iobase);
666 pci_release_region(pdev, 0); 700 pci_release_region(pdev, 0);
@@ -669,33 +703,20 @@ ath5k_pci_remove(struct pci_dev *pdev)
669} 703}
670 704
671#ifdef CONFIG_PM 705#ifdef CONFIG_PM
672static int 706static int ath5k_pci_suspend(struct device *dev)
673ath5k_pci_suspend(struct pci_dev *pdev, pm_message_t state)
674{ 707{
675 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 708 struct ieee80211_hw *hw = pci_get_drvdata(to_pci_dev(dev));
676 struct ath5k_softc *sc = hw->priv; 709 struct ath5k_softc *sc = hw->priv;
677 710
678 ath5k_led_off(sc); 711 ath5k_led_off(sc);
679
680 pci_save_state(pdev);
681 pci_disable_device(pdev);
682 pci_set_power_state(pdev, PCI_D3hot);
683
684 return 0; 712 return 0;
685} 713}
686 714
687static int 715static int ath5k_pci_resume(struct device *dev)
688ath5k_pci_resume(struct pci_dev *pdev)
689{ 716{
717 struct pci_dev *pdev = to_pci_dev(dev);
690 struct ieee80211_hw *hw = pci_get_drvdata(pdev); 718 struct ieee80211_hw *hw = pci_get_drvdata(pdev);
691 struct ath5k_softc *sc = hw->priv; 719 struct ath5k_softc *sc = hw->priv;
692 int err;
693
694 pci_restore_state(pdev);
695
696 err = pci_enable_device(pdev);
697 if (err)
698 return err;
699 720
700 /* 721 /*
701 * Suspend/Resume resets the PCI configuration space, so we have to 722 * Suspend/Resume resets the PCI configuration space, so we have to
@@ -718,7 +739,7 @@ static int ath5k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *re
718{ 739{
719 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 740 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
720 struct ath5k_softc *sc = hw->priv; 741 struct ath5k_softc *sc = hw->priv;
721 struct ath_regulatory *regulatory = &sc->common.regulatory; 742 struct ath_regulatory *regulatory = ath5k_hw_regulatory(sc->ah);
722 743
723 return ath_reg_notifier_apply(wiphy, request, regulatory); 744 return ath_reg_notifier_apply(wiphy, request, regulatory);
724} 745}
@@ -728,7 +749,7 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
728{ 749{
729 struct ath5k_softc *sc = hw->priv; 750 struct ath5k_softc *sc = hw->priv;
730 struct ath5k_hw *ah = sc->ah; 751 struct ath5k_hw *ah = sc->ah;
731 struct ath_regulatory *regulatory = &sc->common.regulatory; 752 struct ath_regulatory *regulatory = ath5k_hw_regulatory(ah);
732 u8 mac[ETH_ALEN] = {}; 753 u8 mac[ETH_ALEN] = {};
733 int ret; 754 int ret;
734 755
@@ -815,7 +836,7 @@ ath5k_attach(struct pci_dev *pdev, struct ieee80211_hw *hw)
815 836
816 SET_IEEE80211_PERM_ADDR(hw, mac); 837 SET_IEEE80211_PERM_ADDR(hw, mac);
817 /* All MAC address bits matter for ACKs */ 838 /* All MAC address bits matter for ACKs */
818 memset(sc->bssidmask, 0xff, ETH_ALEN); 839 memcpy(sc->bssidmask, ath_bcast_mac, ETH_ALEN);
819 ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask); 840 ath5k_hw_set_bssid_mask(sc->ah, sc->bssidmask);
820 841
821 regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain; 842 regulatory->current_rd = ah->ah_capabilities.cap_eeprom.ee_regdomain;
@@ -1152,19 +1173,20 @@ ath5k_hw_to_driver_rix(struct ath5k_softc *sc, int hw_rix)
1152static 1173static
1153struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr) 1174struct sk_buff *ath5k_rx_skb_alloc(struct ath5k_softc *sc, dma_addr_t *skb_addr)
1154{ 1175{
1176 struct ath_common *common = ath5k_hw_common(sc->ah);
1155 struct sk_buff *skb; 1177 struct sk_buff *skb;
1156 1178
1157 /* 1179 /*
1158 * Allocate buffer with headroom_needed space for the 1180 * Allocate buffer with headroom_needed space for the
1159 * fake physical layer header at the start. 1181 * fake physical layer header at the start.
1160 */ 1182 */
1161 skb = ath_rxbuf_alloc(&sc->common, 1183 skb = ath_rxbuf_alloc(common,
1162 sc->rxbufsize + sc->common.cachelsz - 1, 1184 sc->rxbufsize + common->cachelsz - 1,
1163 GFP_ATOMIC); 1185 GFP_ATOMIC);
1164 1186
1165 if (!skb) { 1187 if (!skb) {
1166 ATH5K_ERR(sc, "can't alloc skbuff of size %u\n", 1188 ATH5K_ERR(sc, "can't alloc skbuff of size %u\n",
1167 sc->rxbufsize + sc->common.cachelsz - 1); 1189 sc->rxbufsize + common->cachelsz - 1);
1168 return NULL; 1190 return NULL;
1169 } 1191 }
1170 1192
@@ -1605,13 +1627,14 @@ static int
1605ath5k_rx_start(struct ath5k_softc *sc) 1627ath5k_rx_start(struct ath5k_softc *sc)
1606{ 1628{
1607 struct ath5k_hw *ah = sc->ah; 1629 struct ath5k_hw *ah = sc->ah;
1630 struct ath_common *common = ath5k_hw_common(ah);
1608 struct ath5k_buf *bf; 1631 struct ath5k_buf *bf;
1609 int ret; 1632 int ret;
1610 1633
1611 sc->rxbufsize = roundup(IEEE80211_MAX_LEN, sc->common.cachelsz); 1634 sc->rxbufsize = roundup(IEEE80211_MAX_LEN, common->cachelsz);
1612 1635
1613 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rxbufsize %u\n", 1636 ATH5K_DBG(sc, ATH5K_DEBUG_RESET, "cachelsz %u rxbufsize %u\n",
1614 sc->common.cachelsz, sc->rxbufsize); 1637 common->cachelsz, sc->rxbufsize);
1615 1638
1616 spin_lock_bh(&sc->rxbuflock); 1639 spin_lock_bh(&sc->rxbuflock);
1617 sc->rxlink = NULL; 1640 sc->rxlink = NULL;
@@ -1684,13 +1707,14 @@ static void
1684ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb, 1707ath5k_check_ibss_tsf(struct ath5k_softc *sc, struct sk_buff *skb,
1685 struct ieee80211_rx_status *rxs) 1708 struct ieee80211_rx_status *rxs)
1686{ 1709{
1710 struct ath_common *common = ath5k_hw_common(sc->ah);
1687 u64 tsf, bc_tstamp; 1711 u64 tsf, bc_tstamp;
1688 u32 hw_tu; 1712 u32 hw_tu;
1689 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data; 1713 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
1690 1714
1691 if (ieee80211_is_beacon(mgmt->frame_control) && 1715 if (ieee80211_is_beacon(mgmt->frame_control) &&
1692 le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS && 1716 le16_to_cpu(mgmt->u.beacon.capab_info) & WLAN_CAPABILITY_IBSS &&
1693 memcmp(mgmt->bssid, sc->ah->ah_bssid, ETH_ALEN) == 0) { 1717 memcmp(mgmt->bssid, common->curbssid, ETH_ALEN) == 0) {
1694 /* 1718 /*
1695 * Received an IBSS beacon with the same BSSID. Hardware *must* 1719 * Received an IBSS beacon with the same BSSID. Hardware *must*
1696 * have updated the local TSF. We have to work around various 1720 * have updated the local TSF. We have to work around various
@@ -3176,6 +3200,7 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
3176{ 3200{
3177 struct ath5k_softc *sc = hw->priv; 3201 struct ath5k_softc *sc = hw->priv;
3178 struct ath5k_hw *ah = sc->ah; 3202 struct ath5k_hw *ah = sc->ah;
3203 struct ath_common *common = ath5k_hw_common(ah);
3179 unsigned long flags; 3204 unsigned long flags;
3180 3205
3181 mutex_lock(&sc->lock); 3206 mutex_lock(&sc->lock);
@@ -3184,10 +3209,9 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
3184 3209
3185 if (changes & BSS_CHANGED_BSSID) { 3210 if (changes & BSS_CHANGED_BSSID) {
3186 /* Cache for later use during resets */ 3211 /* Cache for later use during resets */
3187 memcpy(ah->ah_bssid, bss_conf->bssid, ETH_ALEN); 3212 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
3188 /* XXX: assoc id is set to 0 for now, mac80211 doesn't have 3213 common->curaid = 0;
3189 * a clean way of letting us retrieve this yet. */ 3214 ath5k_hw_set_associd(ah);
3190 ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
3191 mmiowb(); 3215 mmiowb();
3192 } 3216 }
3193 3217
@@ -3200,6 +3224,14 @@ static void ath5k_bss_info_changed(struct ieee80211_hw *hw,
3200 set_beacon_filter(hw, sc->assoc); 3224 set_beacon_filter(hw, sc->assoc);
3201 ath5k_hw_set_ledstate(sc->ah, sc->assoc ? 3225 ath5k_hw_set_ledstate(sc->ah, sc->assoc ?
3202 AR5K_LED_ASSOC : AR5K_LED_INIT); 3226 AR5K_LED_ASSOC : AR5K_LED_INIT);
3227 if (bss_conf->assoc) {
3228 ATH5K_DBG(sc, ATH5K_DEBUG_ANY,
3229 "Bss Info ASSOC %d, bssid: %pM\n",
3230 bss_conf->aid, common->curbssid);
3231 common->curaid = bss_conf->aid;
3232 ath5k_hw_set_associd(ah);
3233 /* Once ANI is available you would start it here */
3234 }
3203 } 3235 }
3204 3236
3205 if (changes & BSS_CHANGED_BEACON) { 3237 if (changes & BSS_CHANGED_BEACON) {
diff --git a/drivers/net/wireless/ath/ath5k/base.h b/drivers/net/wireless/ath/ath5k/base.h
index a28c42f32c9d..b14ba07e9157 100644
--- a/drivers/net/wireless/ath/ath5k/base.h
+++ b/drivers/net/wireless/ath/ath5k/base.h
@@ -115,7 +115,6 @@ struct ath5k_rfkill {
115 * associated with an instance of a device */ 115 * associated with an instance of a device */
116struct ath5k_softc { 116struct ath5k_softc {
117 struct pci_dev *pdev; /* for dma mapping */ 117 struct pci_dev *pdev; /* for dma mapping */
118 struct ath_common common;
119 void __iomem *iobase; /* address of the device */ 118 void __iomem *iobase; /* address of the device */
120 struct mutex lock; /* dev-level lock */ 119 struct mutex lock; /* dev-level lock */
121 struct ieee80211_tx_queue_stats tx_stats[AR5K_NUM_TX_QUEUES]; 120 struct ieee80211_tx_queue_stats tx_stats[AR5K_NUM_TX_QUEUES];
@@ -202,15 +201,4 @@ struct ath5k_softc {
202#define ath5k_hw_hasveol(_ah) \ 201#define ath5k_hw_hasveol(_ah) \
203 (ath5k_hw_get_capability(_ah, AR5K_CAP_VEOL, 0, NULL) == 0) 202 (ath5k_hw_get_capability(_ah, AR5K_CAP_VEOL, 0, NULL) == 0)
204 203
205static inline struct ath_common *ath5k_hw_common(struct ath5k_hw *ah)
206{
207 return &ah->ah_sc->common;
208}
209
210static inline struct ath_regulatory *ath5k_hw_regulatory(struct ath5k_hw *ah)
211{
212 return &(ath5k_hw_common(ah)->regulatory);
213
214}
215
216#endif 204#endif
diff --git a/drivers/net/wireless/ath/ath5k/initvals.c b/drivers/net/wireless/ath/ath5k/initvals.c
index 18eb5190ce4b..8fa439308828 100644
--- a/drivers/net/wireless/ath/ath5k/initvals.c
+++ b/drivers/net/wireless/ath/ath5k/initvals.c
@@ -560,8 +560,8 @@ static const struct ath5k_ini ar5212_ini_common_start[] = {
560 { AR5K_SLEEP0, 0x0002aaaa }, 560 { AR5K_SLEEP0, 0x0002aaaa },
561 { AR5K_SLEEP1, 0x02005555 }, 561 { AR5K_SLEEP1, 0x02005555 },
562 { AR5K_SLEEP2, 0x00000000 }, 562 { AR5K_SLEEP2, 0x00000000 },
563 { AR5K_BSS_IDM0, 0xffffffff }, 563 { AR_BSSMSKL, 0xffffffff },
564 { AR5K_BSS_IDM1, 0x0000ffff }, 564 { AR_BSSMSKU, 0x0000ffff },
565 { AR5K_TXPC, 0x00000000 }, 565 { AR5K_TXPC, 0x00000000 },
566 { AR5K_PROFCNT_TX, 0x00000000 }, 566 { AR5K_PROFCNT_TX, 0x00000000 },
567 { AR5K_PROFCNT_RX, 0x00000000 }, 567 { AR5K_PROFCNT_RX, 0x00000000 },
diff --git a/drivers/net/wireless/ath/ath5k/led.c b/drivers/net/wireless/ath/ath5k/led.c
index b548c8eaaae1..d495890355d9 100644
--- a/drivers/net/wireless/ath/ath5k/led.c
+++ b/drivers/net/wireless/ath/ath5k/led.c
@@ -59,6 +59,8 @@ static const struct pci_device_id ath5k_led_devices[] = {
59 { ATH_SDEVICE(PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID), ATH_LED(1, 1) }, 59 { ATH_SDEVICE(PCI_VENDOR_ID_COMPAQ, PCI_ANY_ID), ATH_LED(1, 1) },
60 /* Acer Aspire One A150 (maximlevitsky@gmail.com) */ 60 /* Acer Aspire One A150 (maximlevitsky@gmail.com) */
61 { ATH_SDEVICE(PCI_VENDOR_ID_FOXCONN, 0xe008), ATH_LED(3, 0) }, 61 { ATH_SDEVICE(PCI_VENDOR_ID_FOXCONN, 0xe008), ATH_LED(3, 0) },
62 /* Acer Aspire One AO531h AO751h (keng-yu.lin@canonical.com) */
63 { ATH_SDEVICE(PCI_VENDOR_ID_FOXCONN, 0xe00d), ATH_LED(3, 0) },
62 /* Acer Ferrari 5000 (russ.dill@gmail.com) */ 64 /* Acer Ferrari 5000 (russ.dill@gmail.com) */
63 { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0422), ATH_LED(1, 1) }, 65 { ATH_SDEVICE(PCI_VENDOR_ID_AMBIT, 0x0422), ATH_LED(1, 1) },
64 /* E-machines E510 (tuliom@gmail.com) */ 66 /* E-machines E510 (tuliom@gmail.com) */
diff --git a/drivers/net/wireless/ath/ath5k/pcu.c b/drivers/net/wireless/ath/ath5k/pcu.c
index 2942f13c9c4a..64fc1eb9b6d9 100644
--- a/drivers/net/wireless/ath/ath5k/pcu.c
+++ b/drivers/net/wireless/ath/ath5k/pcu.c
@@ -24,6 +24,8 @@
24* Protocol Control Unit Functions * 24* Protocol Control Unit Functions *
25\*********************************/ 25\*********************************/
26 26
27#include <asm/unaligned.h>
28
27#include "ath5k.h" 29#include "ath5k.h"
28#include "reg.h" 30#include "reg.h"
29#include "debug.h" 31#include "debug.h"
@@ -44,6 +46,7 @@
44 */ 46 */
45int ath5k_hw_set_opmode(struct ath5k_hw *ah) 47int ath5k_hw_set_opmode(struct ath5k_hw *ah)
46{ 48{
49 struct ath_common *common = ath5k_hw_common(ah);
47 u32 pcu_reg, beacon_reg, low_id, high_id; 50 u32 pcu_reg, beacon_reg, low_id, high_id;
48 51
49 52
@@ -95,8 +98,8 @@ int ath5k_hw_set_opmode(struct ath5k_hw *ah)
95 /* 98 /*
96 * Set PCU registers 99 * Set PCU registers
97 */ 100 */
98 low_id = AR5K_LOW_ID(ah->ah_sta_id); 101 low_id = get_unaligned_le32(common->macaddr);
99 high_id = AR5K_HIGH_ID(ah->ah_sta_id); 102 high_id = get_unaligned_le16(common->macaddr + 4);
100 ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0); 103 ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
101 ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1); 104 ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);
102 105
@@ -238,28 +241,6 @@ int ath5k_hw_set_cts_timeout(struct ath5k_hw *ah, unsigned int timeout)
238 return 0; 241 return 0;
239} 242}
240 243
241
242/****************\
243* BSSID handling *
244\****************/
245
246/**
247 * ath5k_hw_get_lladdr - Get station id
248 *
249 * @ah: The &struct ath5k_hw
250 * @mac: The card's mac address
251 *
252 * Initialize ah->ah_sta_id using the mac address provided
253 * (just a memcpy).
254 *
255 * TODO: Remove it once we merge ath5k_softc and ath5k_hw
256 */
257void ath5k_hw_get_lladdr(struct ath5k_hw *ah, u8 *mac)
258{
259 ATH5K_TRACE(ah->ah_sc);
260 memcpy(mac, ah->ah_sta_id, ETH_ALEN);
261}
262
263/** 244/**
264 * ath5k_hw_set_lladdr - Set station id 245 * ath5k_hw_set_lladdr - Set station id
265 * 246 *
@@ -270,17 +251,18 @@ void ath5k_hw_get_lladdr(struct ath5k_hw *ah, u8 *mac)
270 */ 251 */
271int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac) 252int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
272{ 253{
254 struct ath_common *common = ath5k_hw_common(ah);
273 u32 low_id, high_id; 255 u32 low_id, high_id;
274 u32 pcu_reg; 256 u32 pcu_reg;
275 257
276 ATH5K_TRACE(ah->ah_sc); 258 ATH5K_TRACE(ah->ah_sc);
277 /* Set new station ID */ 259 /* Set new station ID */
278 memcpy(ah->ah_sta_id, mac, ETH_ALEN); 260 memcpy(common->macaddr, mac, ETH_ALEN);
279 261
280 pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000; 262 pcu_reg = ath5k_hw_reg_read(ah, AR5K_STA_ID1) & 0xffff0000;
281 263
282 low_id = AR5K_LOW_ID(mac); 264 low_id = get_unaligned_le32(mac);
283 high_id = AR5K_HIGH_ID(mac); 265 high_id = get_unaligned_le16(mac + 4);
284 266
285 ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0); 267 ath5k_hw_reg_write(ah, low_id, AR5K_STA_ID0);
286 ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1); 268 ath5k_hw_reg_write(ah, pcu_reg | high_id, AR5K_STA_ID1);
@@ -297,159 +279,51 @@ int ath5k_hw_set_lladdr(struct ath5k_hw *ah, const u8 *mac)
297 * 279 *
298 * Sets the BSSID which trigers the "SME Join" operation 280 * Sets the BSSID which trigers the "SME Join" operation
299 */ 281 */
300void ath5k_hw_set_associd(struct ath5k_hw *ah, const u8 *bssid, u16 assoc_id) 282void ath5k_hw_set_associd(struct ath5k_hw *ah)
301{ 283{
302 u32 low_id, high_id; 284 struct ath_common *common = ath5k_hw_common(ah);
303 u16 tim_offset = 0; 285 u16 tim_offset = 0;
304 286
305 /* 287 /*
306 * Set simple BSSID mask on 5212 288 * Set simple BSSID mask on 5212
307 */ 289 */
308 if (ah->ah_version == AR5K_AR5212) { 290 if (ah->ah_version == AR5K_AR5212)
309 ath5k_hw_reg_write(ah, AR5K_LOW_ID(ah->ah_bssid_mask), 291 ath_hw_setbssidmask(common);
310 AR5K_BSS_IDM0);
311 ath5k_hw_reg_write(ah, AR5K_HIGH_ID(ah->ah_bssid_mask),
312 AR5K_BSS_IDM1);
313 }
314 292
315 /* 293 /*
316 * Set BSSID which triggers the "SME Join" operation 294 * Set BSSID which triggers the "SME Join" operation
317 */ 295 */
318 low_id = AR5K_LOW_ID(bssid); 296 ath5k_hw_reg_write(ah,
319 high_id = AR5K_HIGH_ID(bssid); 297 get_unaligned_le32(common->curbssid),
320 ath5k_hw_reg_write(ah, low_id, AR5K_BSS_ID0); 298 AR5K_BSS_ID0);
321 ath5k_hw_reg_write(ah, high_id | ((assoc_id & 0x3fff) << 299 ath5k_hw_reg_write(ah,
322 AR5K_BSS_ID1_AID_S), AR5K_BSS_ID1); 300 get_unaligned_le16(common->curbssid + 4) |
323 301 ((common->curaid & 0x3fff) << AR5K_BSS_ID1_AID_S),
324 if (assoc_id == 0) { 302 AR5K_BSS_ID1);
303
304 if (common->curaid == 0) {
325 ath5k_hw_disable_pspoll(ah); 305 ath5k_hw_disable_pspoll(ah);
326 return; 306 return;
327 } 307 }
328 308
329 AR5K_REG_WRITE_BITS(ah, AR5K_BEACON, AR5K_BEACON_TIM, 309 AR5K_REG_WRITE_BITS(ah, AR5K_BEACON, AR5K_BEACON_TIM,
330 tim_offset ? tim_offset + 4 : 0); 310 tim_offset ? tim_offset + 4 : 0);
331 311
332 ath5k_hw_enable_pspoll(ah, NULL, 0); 312 ath5k_hw_enable_pspoll(ah, NULL, 0);
333} 313}
334 314
335/** 315void ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
336 * ath5k_hw_set_bssid_mask - filter out bssids we listen
337 *
338 * @ah: the &struct ath5k_hw
339 * @mask: the bssid_mask, a u8 array of size ETH_ALEN
340 *
341 * BSSID masking is a method used by AR5212 and newer hardware to inform PCU
342 * which bits of the interface's MAC address should be looked at when trying
343 * to decide which packets to ACK. In station mode and AP mode with a single
344 * BSS every bit matters since we lock to only one BSS. In AP mode with
345 * multiple BSSes (virtual interfaces) not every bit matters because hw must
346 * accept frames for all BSSes and so we tweak some bits of our mac address
347 * in order to have multiple BSSes.
348 *
349 * NOTE: This is a simple filter and does *not* filter out all
350 * relevant frames. Some frames that are not for us might get ACKed from us
351 * by PCU because they just match the mask.
352 *
353 * When handling multiple BSSes you can get the BSSID mask by computing the
354 * set of ~ ( MAC XOR BSSID ) for all bssids we handle.
355 *
356 * When you do this you are essentially computing the common bits of all your
357 * BSSes. Later it is assumed the harware will "and" (&) the BSSID mask with
358 * the MAC address to obtain the relevant bits and compare the result with
359 * (frame's BSSID & mask) to see if they match.
360 */
361/*
362 * Simple example: on your card you have have two BSSes you have created with
363 * BSSID-01 and BSSID-02. Lets assume BSSID-01 will not use the MAC address.
364 * There is another BSSID-03 but you are not part of it. For simplicity's sake,
365 * assuming only 4 bits for a mac address and for BSSIDs you can then have:
366 *
367 * \
368 * MAC: 0001 |
369 * BSSID-01: 0100 | --> Belongs to us
370 * BSSID-02: 1001 |
371 * /
372 * -------------------
373 * BSSID-03: 0110 | --> External
374 * -------------------
375 *
376 * Our bssid_mask would then be:
377 *
378 * On loop iteration for BSSID-01:
379 * ~(0001 ^ 0100) -> ~(0101)
380 * -> 1010
381 * bssid_mask = 1010
382 *
383 * On loop iteration for BSSID-02:
384 * bssid_mask &= ~(0001 ^ 1001)
385 * bssid_mask = (1010) & ~(0001 ^ 1001)
386 * bssid_mask = (1010) & ~(1001)
387 * bssid_mask = (1010) & (0110)
388 * bssid_mask = 0010
389 *
390 * A bssid_mask of 0010 means "only pay attention to the second least
391 * significant bit". This is because its the only bit common
392 * amongst the MAC and all BSSIDs we support. To findout what the real
393 * common bit is we can simply "&" the bssid_mask now with any BSSID we have
394 * or our MAC address (we assume the hardware uses the MAC address).
395 *
396 * Now, suppose there's an incoming frame for BSSID-03:
397 *
398 * IFRAME-01: 0110
399 *
400 * An easy eye-inspeciton of this already should tell you that this frame
401 * will not pass our check. This is beacuse the bssid_mask tells the
402 * hardware to only look at the second least significant bit and the
403 * common bit amongst the MAC and BSSIDs is 0, this frame has the 2nd LSB
404 * as 1, which does not match 0.
405 *
406 * So with IFRAME-01 we *assume* the hardware will do:
407 *
408 * allow = (IFRAME-01 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
409 * --> allow = (0110 & 0010) == (0010 & 0001) ? 1 : 0;
410 * --> allow = (0010) == 0000 ? 1 : 0;
411 * --> allow = 0
412 *
413 * Lets now test a frame that should work:
414 *
415 * IFRAME-02: 0001 (we should allow)
416 *
417 * allow = (0001 & 1010) == 1010
418 *
419 * allow = (IFRAME-02 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
420 * --> allow = (0001 & 0010) == (0010 & 0001) ? 1 :0;
421 * --> allow = (0010) == (0010)
422 * --> allow = 1
423 *
424 * Other examples:
425 *
426 * IFRAME-03: 0100 --> allowed
427 * IFRAME-04: 1001 --> allowed
428 * IFRAME-05: 1101 --> allowed but its not for us!!!
429 *
430 */
431int ath5k_hw_set_bssid_mask(struct ath5k_hw *ah, const u8 *mask)
432{ 316{
433 u32 low_id, high_id; 317 struct ath_common *common = ath5k_hw_common(ah);
434 ATH5K_TRACE(ah->ah_sc); 318 ATH5K_TRACE(ah->ah_sc);
435 319
436 /* Cache bssid mask so that we can restore it 320 /* Cache bssid mask so that we can restore it
437 * on reset */ 321 * on reset */
438 memcpy(ah->ah_bssid_mask, mask, ETH_ALEN); 322 memcpy(common->bssidmask, mask, ETH_ALEN);
439 if (ah->ah_version == AR5K_AR5212) { 323 if (ah->ah_version == AR5K_AR5212)
440 low_id = AR5K_LOW_ID(mask); 324 ath_hw_setbssidmask(common);
441 high_id = AR5K_HIGH_ID(mask);
442
443 ath5k_hw_reg_write(ah, low_id, AR5K_BSS_IDM0);
444 ath5k_hw_reg_write(ah, high_id, AR5K_BSS_IDM1);
445
446 return 0;
447 }
448
449 return -EIO;
450} 325}
451 326
452
453/************\ 327/************\
454* RX Control * 328* RX Control *
455\************/ 329\************/
@@ -1157,14 +1031,17 @@ int ath5k_hw_set_key_lladdr(struct ath5k_hw *ah, u16 entry, const u8 *mac)
1157 /* Invalid entry (key table overflow) */ 1031 /* Invalid entry (key table overflow) */
1158 AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE); 1032 AR5K_ASSERT_ENTRY(entry, AR5K_KEYTABLE_SIZE);
1159 1033
1160 /* MAC may be NULL if it's a broadcast key. In this case no need to 1034 /*
1161 * to compute AR5K_LOW_ID and AR5K_HIGH_ID as we already know it. */ 1035 * MAC may be NULL if it's a broadcast key. In this case no need to
1036 * to compute get_unaligned_le32 and get_unaligned_le16 as we
1037 * already know it.
1038 */
1162 if (!mac) { 1039 if (!mac) {
1163 low_id = 0xffffffff; 1040 low_id = 0xffffffff;
1164 high_id = 0xffff | AR5K_KEYTABLE_VALID; 1041 high_id = 0xffff | AR5K_KEYTABLE_VALID;
1165 } else { 1042 } else {
1166 low_id = AR5K_LOW_ID(mac); 1043 low_id = get_unaligned_le32(mac);
1167 high_id = AR5K_HIGH_ID(mac) | AR5K_KEYTABLE_VALID; 1044 high_id = get_unaligned_le16(mac + 4) | AR5K_KEYTABLE_VALID;
1168 } 1045 }
1169 1046
1170 ath5k_hw_reg_write(ah, low_id, AR5K_KEYTABLE_MAC0(entry)); 1047 ath5k_hw_reg_write(ah, low_id, AR5K_KEYTABLE_MAC0(entry));
diff --git a/drivers/net/wireless/ath/ath5k/phy.c b/drivers/net/wireless/ath/ath5k/phy.c
index 1a039f2bd732..895990751d36 100644
--- a/drivers/net/wireless/ath/ath5k/phy.c
+++ b/drivers/net/wireless/ath/ath5k/phy.c
@@ -1124,77 +1124,148 @@ ath5k_hw_calibration_poll(struct ath5k_hw *ah)
1124 ah->ah_swi_mask = AR5K_SWI_FULL_CALIBRATION; 1124 ah->ah_swi_mask = AR5K_SWI_FULL_CALIBRATION;
1125 AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI); 1125 AR5K_REG_ENABLE_BITS(ah, AR5K_CR, AR5K_CR_SWI);
1126 } 1126 }
1127}
1127 1128
1129static int sign_extend(int val, const int nbits)
1130{
1131 int order = BIT(nbits-1);
1132 return (val ^ order) - order;
1128} 1133}
1129 1134
1130/** 1135static s32 ath5k_hw_read_measured_noise_floor(struct ath5k_hw *ah)
1131 * ath5k_hw_noise_floor_calibration - perform PHY noise floor calibration 1136{
1132 * 1137 s32 val;
1133 * @ah: struct ath5k_hw pointer we are operating on 1138
1134 * @freq: the channel frequency, just used for error logging 1139 val = ath5k_hw_reg_read(ah, AR5K_PHY_NF);
1135 * 1140 return sign_extend(AR5K_REG_MS(val, AR5K_PHY_NF_MINCCA_PWR), 9);
1136 * This function performs a noise floor calibration of the PHY and waits for 1141}
1137 * it to complete. Then the noise floor value is compared to some maximum 1142
1138 * noise floor we consider valid. 1143void ath5k_hw_init_nfcal_hist(struct ath5k_hw *ah)
1139 * 1144{
1140 * Note that this is different from what the madwifi HAL does: it reads the 1145 int i;
1141 * noise floor and afterwards initiates the calibration. Since the noise floor 1146
1142 * calibration can take some time to finish, depending on the current channel 1147 ah->ah_nfcal_hist.index = 0;
1143 * use, that avoids the occasional timeout warnings we are seeing now. 1148 for (i = 0; i < ATH5K_NF_CAL_HIST_MAX; i++)
1144 * 1149 ah->ah_nfcal_hist.nfval[i] = AR5K_TUNE_CCA_MAX_GOOD_VALUE;
1145 * See the following link for an Atheros patent on noise floor calibration: 1150}
1146 * http://patft.uspto.gov/netacgi/nph-Parser?Sect1=PTO1&Sect2=HITOFF&d=PALL \ 1151
1147 * &p=1&u=%2Fnetahtml%2FPTO%2Fsrchnum.htm&r=1&f=G&l=50&s1=7245893.PN.&OS=PN/7 1152static void ath5k_hw_update_nfcal_hist(struct ath5k_hw *ah, s16 noise_floor)
1153{
1154 struct ath5k_nfcal_hist *hist = &ah->ah_nfcal_hist;
1155 hist->index = (hist->index + 1) & (ATH5K_NF_CAL_HIST_MAX-1);
1156 hist->nfval[hist->index] = noise_floor;
1157}
1158
1159static s16 ath5k_hw_get_median_noise_floor(struct ath5k_hw *ah)
1160{
1161 s16 sort[ATH5K_NF_CAL_HIST_MAX];
1162 s16 tmp;
1163 int i, j;
1164
1165 memcpy(sort, ah->ah_nfcal_hist.nfval, sizeof(sort));
1166 for (i = 0; i < ATH5K_NF_CAL_HIST_MAX - 1; i++) {
1167 for (j = 1; j < ATH5K_NF_CAL_HIST_MAX - i; j++) {
1168 if (sort[j] > sort[j-1]) {
1169 tmp = sort[j];
1170 sort[j] = sort[j-1];
1171 sort[j-1] = tmp;
1172 }
1173 }
1174 }
1175 for (i = 0; i < ATH5K_NF_CAL_HIST_MAX; i++) {
1176 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
1177 "cal %d:%d\n", i, sort[i]);
1178 }
1179 return sort[(ATH5K_NF_CAL_HIST_MAX-1) / 2];
1180}
1181
1182/*
1183 * When we tell the hardware to perform a noise floor calibration
1184 * by setting the AR5K_PHY_AGCCTL_NF bit, it will periodically
1185 * sample-and-hold the minimum noise level seen at the antennas.
1186 * This value is then stored in a ring buffer of recently measured
1187 * noise floor values so we have a moving window of the last few
1188 * samples.
1148 * 1189 *
1149 * XXX: Since during noise floor calibration antennas are detached according to 1190 * The median of the values in the history is then loaded into the
1150 * the patent, we should stop tx queues here. 1191 * hardware for its own use for RSSI and CCA measurements.
1151 */ 1192 */
1152int 1193void ath5k_hw_update_noise_floor(struct ath5k_hw *ah)
1153ath5k_hw_noise_floor_calibration(struct ath5k_hw *ah, short freq)
1154{ 1194{
1155 int ret; 1195 struct ath5k_eeprom_info *ee = &ah->ah_capabilities.cap_eeprom;
1156 unsigned int i; 1196 u32 val;
1157 s32 noise_floor; 1197 s16 nf, threshold;
1198 u8 ee_mode;
1158 1199
1159 /* 1200 /* keep last value if calibration hasn't completed */
1160 * Enable noise floor calibration 1201 if (ath5k_hw_reg_read(ah, AR5K_PHY_AGCCTL) & AR5K_PHY_AGCCTL_NF) {
1161 */ 1202 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
1162 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, 1203 "NF did not complete in calibration window\n");
1163 AR5K_PHY_AGCCTL_NF);
1164 1204
1165 ret = ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL, 1205 return;
1166 AR5K_PHY_AGCCTL_NF, 0, false);
1167 if (ret) {
1168 ATH5K_ERR(ah->ah_sc,
1169 "noise floor calibration timeout (%uMHz)\n", freq);
1170 return -EAGAIN;
1171 } 1206 }
1172 1207
1173 /* Wait until the noise floor is calibrated and read the value */ 1208 switch (ah->ah_current_channel->hw_value & CHANNEL_MODES) {
1174 for (i = 20; i > 0; i--) { 1209 case CHANNEL_A:
1175 mdelay(1); 1210 case CHANNEL_T:
1176 noise_floor = ath5k_hw_reg_read(ah, AR5K_PHY_NF); 1211 case CHANNEL_XR:
1177 noise_floor = AR5K_PHY_NF_RVAL(noise_floor); 1212 ee_mode = AR5K_EEPROM_MODE_11A;
1178 if (noise_floor & AR5K_PHY_NF_ACTIVE) { 1213 break;
1179 noise_floor = AR5K_PHY_NF_AVAL(noise_floor); 1214 case CHANNEL_G:
1180 1215 case CHANNEL_TG:
1181 if (noise_floor <= AR5K_TUNE_NOISE_FLOOR) 1216 ee_mode = AR5K_EEPROM_MODE_11G;
1182 break; 1217 break;
1183 } 1218 default:
1219 case CHANNEL_B:
1220 ee_mode = AR5K_EEPROM_MODE_11B;
1221 break;
1184 } 1222 }
1185 1223
1186 ATH5K_DBG_UNLIMIT(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
1187 "noise floor %d\n", noise_floor);
1188 1224
1189 if (noise_floor > AR5K_TUNE_NOISE_FLOOR) { 1225 /* completed NF calibration, test threshold */
1190 ATH5K_ERR(ah->ah_sc, 1226 nf = ath5k_hw_read_measured_noise_floor(ah);
1191 "noise floor calibration failed (%uMHz)\n", freq); 1227 threshold = ee->ee_noise_floor_thr[ee_mode];
1192 return -EAGAIN; 1228
1229 if (nf > threshold) {
1230 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
1231 "noise floor failure detected; "
1232 "read %d, threshold %d\n",
1233 nf, threshold);
1234
1235 nf = AR5K_TUNE_CCA_MAX_GOOD_VALUE;
1193 } 1236 }
1194 1237
1195 ah->ah_noise_floor = noise_floor; 1238 ath5k_hw_update_nfcal_hist(ah, nf);
1239 nf = ath5k_hw_get_median_noise_floor(ah);
1196 1240
1197 return 0; 1241 /* load noise floor (in .5 dBm) so the hardware will use it */
1242 val = ath5k_hw_reg_read(ah, AR5K_PHY_NF) & ~AR5K_PHY_NF_M;
1243 val |= (nf * 2) & AR5K_PHY_NF_M;
1244 ath5k_hw_reg_write(ah, val, AR5K_PHY_NF);
1245
1246 AR5K_REG_MASKED_BITS(ah, AR5K_PHY_AGCCTL, AR5K_PHY_AGCCTL_NF,
1247 ~(AR5K_PHY_AGCCTL_NF_EN | AR5K_PHY_AGCCTL_NF_NOUPDATE));
1248
1249 ath5k_hw_register_timeout(ah, AR5K_PHY_AGCCTL, AR5K_PHY_AGCCTL_NF,
1250 0, false);
1251
1252 /*
1253 * Load a high max CCA Power value (-50 dBm in .5 dBm units)
1254 * so that we're not capped by the median we just loaded.
1255 * This will be used as the initial value for the next noise
1256 * floor calibration.
1257 */
1258 val = (val & ~AR5K_PHY_NF_M) | ((-50 * 2) & AR5K_PHY_NF_M);
1259 ath5k_hw_reg_write(ah, val, AR5K_PHY_NF);
1260 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
1261 AR5K_PHY_AGCCTL_NF_EN |
1262 AR5K_PHY_AGCCTL_NF_NOUPDATE |
1263 AR5K_PHY_AGCCTL_NF);
1264
1265 ah->ah_noise_floor = nf;
1266
1267 ATH5K_DBG(ah->ah_sc, ATH5K_DEBUG_CALIBRATE,
1268 "noise floor calibrated: %d\n", nf);
1198} 1269}
1199 1270
1200/* 1271/*
@@ -1287,7 +1358,7 @@ static int ath5k_hw_rf5110_calibrate(struct ath5k_hw *ah,
1287 return ret; 1358 return ret;
1288 } 1359 }
1289 1360
1290 ath5k_hw_noise_floor_calibration(ah, channel->center_freq); 1361 ath5k_hw_update_noise_floor(ah);
1291 1362
1292 /* 1363 /*
1293 * Re-enable RX/TX and beacons 1364 * Re-enable RX/TX and beacons
@@ -1360,7 +1431,7 @@ done:
1360 * since noise floor calibration interrupts rx path while I/Q 1431 * since noise floor calibration interrupts rx path while I/Q
1361 * calibration doesn't. We don't need to run noise floor calibration 1432 * calibration doesn't. We don't need to run noise floor calibration
1362 * as often as I/Q calibration.*/ 1433 * as often as I/Q calibration.*/
1363 ath5k_hw_noise_floor_calibration(ah, channel->center_freq); 1434 ath5k_hw_update_noise_floor(ah);
1364 1435
1365 /* Initiate a gain_F calibration */ 1436 /* Initiate a gain_F calibration */
1366 ath5k_hw_request_rfgain_probe(ah); 1437 ath5k_hw_request_rfgain_probe(ah);
diff --git a/drivers/net/wireless/ath/ath5k/reg.h b/drivers/net/wireless/ath/ath5k/reg.h
index c63ea6afd96f..4cb9c5df9f46 100644
--- a/drivers/net/wireless/ath/ath5k/reg.h
+++ b/drivers/net/wireless/ath/ath5k/reg.h
@@ -35,7 +35,7 @@
35 * released by Atheros and on various debug messages found on the net. 35 * released by Atheros and on various debug messages found on the net.
36 */ 36 */
37 37
38 38#include "../reg.h"
39 39
40/*====MAC DMA REGISTERS====*/ 40/*====MAC DMA REGISTERS====*/
41 41
@@ -1650,12 +1650,6 @@
1650#define AR5K_SLEEP2_DTIM_PER_S 16 1650#define AR5K_SLEEP2_DTIM_PER_S 16
1651 1651
1652/* 1652/*
1653 * BSSID mask registers
1654 */
1655#define AR5K_BSS_IDM0 0x80e0 /* Upper bits */
1656#define AR5K_BSS_IDM1 0x80e4 /* Lower bits */
1657
1658/*
1659 * TX power control (TPC) register 1653 * TX power control (TPC) register
1660 * 1654 *
1661 * XXX: PCDAC steps (0.5dbm) or DBM ? 1655 * XXX: PCDAC steps (0.5dbm) or DBM ?
@@ -2039,17 +2033,14 @@
2039#define AR5K_PHY_AGCCTL_NF_NOUPDATE 0x00020000 /* Don't update nf automaticaly */ 2033#define AR5K_PHY_AGCCTL_NF_NOUPDATE 0x00020000 /* Don't update nf automaticaly */
2040 2034
2041/* 2035/*
2042 * PHY noise floor status register 2036 * PHY noise floor status register (CCA = Clear Channel Assessment)
2043 */ 2037 */
2044#define AR5K_PHY_NF 0x9864 /* Register address */ 2038#define AR5K_PHY_NF 0x9864 /* Register address */
2045#define AR5K_PHY_NF_M 0x000001ff /* Noise floor mask */ 2039#define AR5K_PHY_NF_M 0x000001ff /* Noise floor, written to hardware in 1/2 dBm units */
2046#define AR5K_PHY_NF_ACTIVE 0x00000100 /* Noise floor calibration still active */ 2040#define AR5K_PHY_NF_SVAL(_n) (((_n) & AR5K_PHY_NF_M) | (1 << 9))
2047#define AR5K_PHY_NF_RVAL(_n) (((_n) >> 19) & AR5K_PHY_NF_M)
2048#define AR5K_PHY_NF_AVAL(_n) (-((_n) ^ AR5K_PHY_NF_M) + 1)
2049#define AR5K_PHY_NF_SVAL(_n) (((_n) & AR5K_PHY_NF_M) | (1 << 9))
2050#define AR5K_PHY_NF_THRESH62 0x0007f000 /* Thresh62 -check ANI patent- (field) */ 2041#define AR5K_PHY_NF_THRESH62 0x0007f000 /* Thresh62 -check ANI patent- (field) */
2051#define AR5K_PHY_NF_THRESH62_S 12 2042#define AR5K_PHY_NF_THRESH62_S 12
2052#define AR5K_PHY_NF_MINCCA_PWR 0x0ff80000 /* ??? */ 2043#define AR5K_PHY_NF_MINCCA_PWR 0x0ff80000 /* Minimum measured noise level, read from hardware in 1 dBm units */
2053#define AR5K_PHY_NF_MINCCA_PWR_S 19 2044#define AR5K_PHY_NF_MINCCA_PWR_S 19
2054 2045
2055/* 2046/*
diff --git a/drivers/net/wireless/ath/ath5k/reset.c b/drivers/net/wireless/ath/ath5k/reset.c
index 34e13c700849..62954fc77869 100644
--- a/drivers/net/wireless/ath/ath5k/reset.c
+++ b/drivers/net/wireless/ath/ath5k/reset.c
@@ -25,6 +25,8 @@
25 Reset functions and helpers 25 Reset functions and helpers
26\*****************************/ 26\*****************************/
27 27
28#include <asm/unaligned.h>
29
28#include <linux/pci.h> /* To determine if a card is pci-e */ 30#include <linux/pci.h> /* To determine if a card is pci-e */
29#include <linux/log2.h> 31#include <linux/log2.h>
30#include "ath5k.h" 32#include "ath5k.h"
@@ -870,6 +872,7 @@ static void ath5k_hw_commit_eeprom_settings(struct ath5k_hw *ah,
870int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode, 872int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
871 struct ieee80211_channel *channel, bool change_channel) 873 struct ieee80211_channel *channel, bool change_channel)
872{ 874{
875 struct ath_common *common = ath5k_hw_common(ah);
873 u32 s_seq[10], s_ant, s_led[3], staid1_flags, tsf_up, tsf_lo; 876 u32 s_seq[10], s_ant, s_led[3], staid1_flags, tsf_up, tsf_lo;
874 u32 phy_tst1; 877 u32 phy_tst1;
875 u8 mode, freq, ee_mode, ant[2]; 878 u8 mode, freq, ee_mode, ant[2];
@@ -1171,10 +1174,12 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1171 ath5k_hw_reg_write(ah, s_led[2], AR5K_GPIODO); 1174 ath5k_hw_reg_write(ah, s_led[2], AR5K_GPIODO);
1172 1175
1173 /* Restore sta_id flags and preserve our mac address*/ 1176 /* Restore sta_id flags and preserve our mac address*/
1174 ath5k_hw_reg_write(ah, AR5K_LOW_ID(ah->ah_sta_id), 1177 ath5k_hw_reg_write(ah,
1175 AR5K_STA_ID0); 1178 get_unaligned_le32(common->macaddr),
1176 ath5k_hw_reg_write(ah, staid1_flags | AR5K_HIGH_ID(ah->ah_sta_id), 1179 AR5K_STA_ID0);
1177 AR5K_STA_ID1); 1180 ath5k_hw_reg_write(ah,
1181 staid1_flags | get_unaligned_le16(common->macaddr + 4),
1182 AR5K_STA_ID1);
1178 1183
1179 1184
1180 /* 1185 /*
@@ -1182,8 +1187,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1182 */ 1187 */
1183 1188
1184 /* Restore bssid and bssid mask */ 1189 /* Restore bssid and bssid mask */
1185 /* XXX: add ah->aid once mac80211 gives this to us */ 1190 ath5k_hw_set_associd(ah);
1186 ath5k_hw_set_associd(ah, ah->ah_bssid, 0);
1187 1191
1188 /* Set PCU config */ 1192 /* Set PCU config */
1189 ath5k_hw_set_opmode(ah); 1193 ath5k_hw_set_opmode(ah);
@@ -1289,7 +1293,7 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1289 * out and/or noise floor calibration might timeout. 1293 * out and/or noise floor calibration might timeout.
1290 */ 1294 */
1291 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL, 1295 AR5K_REG_ENABLE_BITS(ah, AR5K_PHY_AGCCTL,
1292 AR5K_PHY_AGCCTL_CAL); 1296 AR5K_PHY_AGCCTL_CAL | AR5K_PHY_AGCCTL_NF);
1293 1297
1294 /* At the same time start I/Q calibration for QAM constellation 1298 /* At the same time start I/Q calibration for QAM constellation
1295 * -no need for CCK- */ 1299 * -no need for CCK- */
@@ -1310,21 +1314,6 @@ int ath5k_hw_reset(struct ath5k_hw *ah, enum nl80211_iftype op_mode,
1310 channel->center_freq); 1314 channel->center_freq);
1311 } 1315 }
1312 1316
1313 /*
1314 * If we run NF calibration before AGC, it always times out.
1315 * Binary HAL starts NF and AGC calibration at the same time
1316 * and only waits for AGC to finish. Also if AGC or NF cal.
1317 * times out, reset doesn't fail on binary HAL. I believe
1318 * that's wrong because since rx path is routed to a detector,
1319 * if cal. doesn't finish we won't have RX. Sam's HAL for AR5210/5211
1320 * enables noise floor calibration after offset calibration and if noise
1321 * floor calibration fails, reset fails. I believe that's
1322 * a better approach, we just need to find a polling interval
1323 * that suits best, even if reset continues we need to make
1324 * sure that rx path is ready.
1325 */
1326 ath5k_hw_noise_floor_calibration(ah, channel->center_freq);
1327
1328 /* Restore antenna mode */ 1317 /* Restore antenna mode */
1329 ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode); 1318 ath5k_hw_set_antenna_mode(ah, ah->ah_ant_mode);
1330 1319
diff --git a/drivers/net/wireless/ath/ath9k/Kconfig b/drivers/net/wireless/ath/ath9k/Kconfig
index ef5f59c4dd80..b735fb399fb1 100644
--- a/drivers/net/wireless/ath/ath9k/Kconfig
+++ b/drivers/net/wireless/ath/ath9k/Kconfig
@@ -1,6 +1,10 @@
1config ATH9K_HW
2 tristate
3
1config ATH9K 4config ATH9K
2 tristate "Atheros 802.11n wireless cards support" 5 tristate "Atheros 802.11n wireless cards support"
3 depends on PCI && MAC80211 && WLAN_80211 6 depends on PCI && MAC80211
7 select ATH9K_HW
4 select MAC80211_LEDS 8 select MAC80211_LEDS
5 select LEDS_CLASS 9 select LEDS_CLASS
6 select NEW_LEDS 10 select NEW_LEDS
@@ -16,6 +20,8 @@ config ATH9K
16 20
17 If you choose to build a module, it'll be called ath9k. 21 If you choose to build a module, it'll be called ath9k.
18 22
23if ATH_DEBUG
24
19config ATH9K_DEBUG 25config ATH9K_DEBUG
20 bool "Atheros ath9k debugging" 26 bool "Atheros ath9k debugging"
21 depends on ATH9K 27 depends on ATH9K
@@ -26,3 +32,5 @@ config ATH9K_DEBUG
26 modprobe ath9k debug=0x00000200 32 modprobe ath9k debug=0x00000200
27 33
28 Look in ath9k/debug.h for possible debug masks 34 Look in ath9k/debug.h for possible debug masks
35
36endif # ATH_DEBUG
diff --git a/drivers/net/wireless/ath/ath9k/Makefile b/drivers/net/wireless/ath/ath9k/Makefile
index ff2c9a26c10c..8caf2a8f8953 100644
--- a/drivers/net/wireless/ath/ath9k/Makefile
+++ b/drivers/net/wireless/ath/ath9k/Makefile
@@ -1,22 +1,25 @@
1ath9k-y += hw.o \ 1ath9k-y += beacon.o \
2 eeprom.o \
3 eeprom_def.o \
4 eeprom_4k.o \
5 eeprom_9287.o \
6 mac.o \
7 calib.o \
8 ani.o \
9 phy.o \
10 beacon.o \
11 main.o \ 2 main.o \
12 recv.o \ 3 recv.o \
13 xmit.o \ 4 xmit.o \
14 virtual.o \ 5 virtual.o \
15 rc.o \ 6 rc.o
16 btcoex.o
17 7
18ath9k-$(CONFIG_PCI) += pci.o 8ath9k-$(CONFIG_PCI) += pci.o
19ath9k-$(CONFIG_ATHEROS_AR71XX) += ahb.o 9ath9k-$(CONFIG_ATHEROS_AR71XX) += ahb.o
20ath9k-$(CONFIG_ATH9K_DEBUG) += debug.o 10ath9k-$(CONFIG_ATH9K_DEBUG) += debug.o
21 11
22obj-$(CONFIG_ATH9K) += ath9k.o 12obj-$(CONFIG_ATH9K) += ath9k.o
13
14ath9k_hw-y:= hw.o \
15 eeprom.o \
16 eeprom_def.o \
17 eeprom_4k.o \
18 eeprom_9287.o \
19 calib.o \
20 ani.o \
21 phy.o \
22 btcoex.o \
23 mac.o \
24
25obj-$(CONFIG_ATH9K_HW) += ath9k_hw.o
diff --git a/drivers/net/wireless/ath/ath9k/ahb.c b/drivers/net/wireless/ath/ath9k/ahb.c
index 2ad7d0280f7a..329e6bc137ab 100644
--- a/drivers/net/wireless/ath/ath9k/ahb.c
+++ b/drivers/net/wireless/ath/ath9k/ahb.c
@@ -22,27 +22,29 @@
22#include "ath9k.h" 22#include "ath9k.h"
23 23
24/* return bus cachesize in 4B word units */ 24/* return bus cachesize in 4B word units */
25static void ath_ahb_read_cachesize(struct ath_softc *sc, int *csz) 25static void ath_ahb_read_cachesize(struct ath_common *common, int *csz)
26{ 26{
27 *csz = L1_CACHE_BYTES >> 2; 27 *csz = L1_CACHE_BYTES >> 2;
28} 28}
29 29
30static void ath_ahb_cleanup(struct ath_softc *sc) 30static void ath_ahb_cleanup(struct ath_common *common)
31{ 31{
32 struct ath_softc *sc = (struct ath_softc *)common->priv;
32 iounmap(sc->mem); 33 iounmap(sc->mem);
33} 34}
34 35
35static bool ath_ahb_eeprom_read(struct ath_hw *ah, u32 off, u16 *data) 36static bool ath_ahb_eeprom_read(struct ath_common *common, u32 off, u16 *data)
36{ 37{
37 struct ath_softc *sc = ah->ah_sc; 38 struct ath_softc *sc = (struct ath_softc *)common->priv;
38 struct platform_device *pdev = to_platform_device(sc->dev); 39 struct platform_device *pdev = to_platform_device(sc->dev);
39 struct ath9k_platform_data *pdata; 40 struct ath9k_platform_data *pdata;
40 41
41 pdata = (struct ath9k_platform_data *) pdev->dev.platform_data; 42 pdata = (struct ath9k_platform_data *) pdev->dev.platform_data;
42 if (off >= (ARRAY_SIZE(pdata->eeprom_data))) { 43 if (off >= (ARRAY_SIZE(pdata->eeprom_data))) {
43 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 44 ath_print(common, ATH_DBG_FATAL,
44 "%s: flash read failed, offset %08x is out of range\n", 45 "%s: flash read failed, offset %08x "
45 __func__, off); 46 "is out of range\n",
47 __func__, off);
46 return false; 48 return false;
47 } 49 }
48 50
@@ -67,6 +69,7 @@ static int ath_ahb_probe(struct platform_device *pdev)
67 int irq; 69 int irq;
68 int ret = 0; 70 int ret = 0;
69 struct ath_hw *ah; 71 struct ath_hw *ah;
72 char hw_name[64];
70 73
71 if (!pdev->dev.platform_data) { 74 if (!pdev->dev.platform_data) {
72 dev_err(&pdev->dev, "no platform data specified\n"); 75 dev_err(&pdev->dev, "no platform data specified\n");
@@ -116,10 +119,9 @@ static int ath_ahb_probe(struct platform_device *pdev)
116 sc->hw = hw; 119 sc->hw = hw;
117 sc->dev = &pdev->dev; 120 sc->dev = &pdev->dev;
118 sc->mem = mem; 121 sc->mem = mem;
119 sc->bus_ops = &ath_ahb_bus_ops;
120 sc->irq = irq; 122 sc->irq = irq;
121 123
122 ret = ath_init_device(AR5416_AR9100_DEVID, sc, 0x0); 124 ret = ath_init_device(AR5416_AR9100_DEVID, sc, 0x0, &ath_ahb_bus_ops);
123 if (ret) { 125 if (ret) {
124 dev_err(&pdev->dev, "failed to initialize device\n"); 126 dev_err(&pdev->dev, "failed to initialize device\n");
125 goto err_free_hw; 127 goto err_free_hw;
@@ -132,14 +134,11 @@ static int ath_ahb_probe(struct platform_device *pdev)
132 } 134 }
133 135
134 ah = sc->sc_ah; 136 ah = sc->sc_ah;
137 ath9k_hw_name(ah, hw_name, sizeof(hw_name));
135 printk(KERN_INFO 138 printk(KERN_INFO
136 "%s: Atheros AR%s MAC/BB Rev:%x, " 139 "%s: %s mem=0x%lx, irq=%d\n",
137 "AR%s RF Rev:%x, mem=0x%lx, irq=%d\n",
138 wiphy_name(hw->wiphy), 140 wiphy_name(hw->wiphy),
139 ath_mac_bb_name(ah->hw_version.macVersion), 141 hw_name,
140 ah->hw_version.macRev,
141 ath_rf_name((ah->hw_version.analog5GhzRev & AR_RADIO_SREV_MAJOR)),
142 ah->hw_version.phyRev,
143 (unsigned long)mem, irq); 142 (unsigned long)mem, irq);
144 143
145 return 0; 144 return 0;
diff --git a/drivers/net/wireless/ath/ath9k/ani.c b/drivers/net/wireless/ath/ath9k/ani.c
index 2b493742ef10..2a0cd64c2bfb 100644
--- a/drivers/net/wireless/ath/ath9k/ani.c
+++ b/drivers/net/wireless/ath/ath9k/ani.c
@@ -14,7 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "ath9k.h" 17#include "hw.h"
18 18
19static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah, 19static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
20 struct ath9k_channel *chan) 20 struct ath9k_channel *chan)
@@ -31,8 +31,8 @@ static int ath9k_hw_get_ani_channel_idx(struct ath_hw *ah,
31 } 31 }
32 } 32 }
33 33
34 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 34 ath_print(ath9k_hw_common(ah), ATH_DBG_ANI,
35 "No more channel states left. Using channel 0\n"); 35 "No more channel states left. Using channel 0\n");
36 36
37 return 0; 37 return 0;
38} 38}
@@ -41,16 +41,17 @@ static bool ath9k_hw_ani_control(struct ath_hw *ah,
41 enum ath9k_ani_cmd cmd, int param) 41 enum ath9k_ani_cmd cmd, int param)
42{ 42{
43 struct ar5416AniState *aniState = ah->curani; 43 struct ar5416AniState *aniState = ah->curani;
44 struct ath_common *common = ath9k_hw_common(ah);
44 45
45 switch (cmd & ah->ani_function) { 46 switch (cmd & ah->ani_function) {
46 case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{ 47 case ATH9K_ANI_NOISE_IMMUNITY_LEVEL:{
47 u32 level = param; 48 u32 level = param;
48 49
49 if (level >= ARRAY_SIZE(ah->totalSizeDesired)) { 50 if (level >= ARRAY_SIZE(ah->totalSizeDesired)) {
50 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 51 ath_print(common, ATH_DBG_ANI,
51 "level out of range (%u > %u)\n", 52 "level out of range (%u > %u)\n",
52 level, 53 level,
53 (unsigned)ARRAY_SIZE(ah->totalSizeDesired)); 54 (unsigned)ARRAY_SIZE(ah->totalSizeDesired));
54 return false; 55 return false;
55 } 56 }
56 57
@@ -152,10 +153,10 @@ static bool ath9k_hw_ani_control(struct ath_hw *ah,
152 u32 level = param; 153 u32 level = param;
153 154
154 if (level >= ARRAY_SIZE(firstep)) { 155 if (level >= ARRAY_SIZE(firstep)) {
155 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 156 ath_print(common, ATH_DBG_ANI,
156 "level out of range (%u > %u)\n", 157 "level out of range (%u > %u)\n",
157 level, 158 level,
158 (unsigned) ARRAY_SIZE(firstep)); 159 (unsigned) ARRAY_SIZE(firstep));
159 return false; 160 return false;
160 } 161 }
161 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG, 162 REG_RMW_FIELD(ah, AR_PHY_FIND_SIG,
@@ -174,11 +175,10 @@ static bool ath9k_hw_ani_control(struct ath_hw *ah,
174 u32 level = param; 175 u32 level = param;
175 176
176 if (level >= ARRAY_SIZE(cycpwrThr1)) { 177 if (level >= ARRAY_SIZE(cycpwrThr1)) {
177 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 178 ath_print(common, ATH_DBG_ANI,
178 "level out of range (%u > %u)\n", 179 "level out of range (%u > %u)\n",
179 level, 180 level,
180 (unsigned) 181 (unsigned) ARRAY_SIZE(cycpwrThr1));
181 ARRAY_SIZE(cycpwrThr1));
182 return false; 182 return false;
183 } 183 }
184 REG_RMW_FIELD(ah, AR_PHY_TIMING5, 184 REG_RMW_FIELD(ah, AR_PHY_TIMING5,
@@ -194,25 +194,28 @@ static bool ath9k_hw_ani_control(struct ath_hw *ah,
194 case ATH9K_ANI_PRESENT: 194 case ATH9K_ANI_PRESENT:
195 break; 195 break;
196 default: 196 default:
197 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 197 ath_print(common, ATH_DBG_ANI,
198 "invalid cmd %u\n", cmd); 198 "invalid cmd %u\n", cmd);
199 return false; 199 return false;
200 } 200 }
201 201
202 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "ANI parameters:\n"); 202 ath_print(common, ATH_DBG_ANI, "ANI parameters:\n");
203 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 203 ath_print(common, ATH_DBG_ANI,
204 "noiseImmunityLevel=%d, spurImmunityLevel=%d, " 204 "noiseImmunityLevel=%d, spurImmunityLevel=%d, "
205 "ofdmWeakSigDetectOff=%d\n", 205 "ofdmWeakSigDetectOff=%d\n",
206 aniState->noiseImmunityLevel, aniState->spurImmunityLevel, 206 aniState->noiseImmunityLevel,
207 !aniState->ofdmWeakSigDetectOff); 207 aniState->spurImmunityLevel,
208 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 208 !aniState->ofdmWeakSigDetectOff);
209 "cckWeakSigThreshold=%d, " 209 ath_print(common, ATH_DBG_ANI,
210 "firstepLevel=%d, listenTime=%d\n", 210 "cckWeakSigThreshold=%d, "
211 aniState->cckWeakSigThreshold, aniState->firstepLevel, 211 "firstepLevel=%d, listenTime=%d\n",
212 aniState->listenTime); 212 aniState->cckWeakSigThreshold,
213 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 213 aniState->firstepLevel,
214 aniState->listenTime);
215 ath_print(common, ATH_DBG_ANI,
214 "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n", 216 "cycleCount=%d, ofdmPhyErrCount=%d, cckPhyErrCount=%d\n\n",
215 aniState->cycleCount, aniState->ofdmPhyErrCount, 217 aniState->cycleCount,
218 aniState->ofdmPhyErrCount,
216 aniState->cckPhyErrCount); 219 aniState->cckPhyErrCount);
217 220
218 return true; 221 return true;
@@ -231,6 +234,7 @@ static void ath9k_hw_update_mibstats(struct ath_hw *ah,
231static void ath9k_ani_restart(struct ath_hw *ah) 234static void ath9k_ani_restart(struct ath_hw *ah)
232{ 235{
233 struct ar5416AniState *aniState; 236 struct ar5416AniState *aniState;
237 struct ath_common *common = ath9k_hw_common(ah);
234 238
235 if (!DO_ANI(ah)) 239 if (!DO_ANI(ah))
236 return; 240 return;
@@ -240,24 +244,24 @@ static void ath9k_ani_restart(struct ath_hw *ah)
240 244
241 if (aniState->ofdmTrigHigh > AR_PHY_COUNTMAX) { 245 if (aniState->ofdmTrigHigh > AR_PHY_COUNTMAX) {
242 aniState->ofdmPhyErrBase = 0; 246 aniState->ofdmPhyErrBase = 0;
243 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 247 ath_print(common, ATH_DBG_ANI,
244 "OFDM Trigger is too high for hw counters\n"); 248 "OFDM Trigger is too high for hw counters\n");
245 } else { 249 } else {
246 aniState->ofdmPhyErrBase = 250 aniState->ofdmPhyErrBase =
247 AR_PHY_COUNTMAX - aniState->ofdmTrigHigh; 251 AR_PHY_COUNTMAX - aniState->ofdmTrigHigh;
248 } 252 }
249 if (aniState->cckTrigHigh > AR_PHY_COUNTMAX) { 253 if (aniState->cckTrigHigh > AR_PHY_COUNTMAX) {
250 aniState->cckPhyErrBase = 0; 254 aniState->cckPhyErrBase = 0;
251 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 255 ath_print(common, ATH_DBG_ANI,
252 "CCK Trigger is too high for hw counters\n"); 256 "CCK Trigger is too high for hw counters\n");
253 } else { 257 } else {
254 aniState->cckPhyErrBase = 258 aniState->cckPhyErrBase =
255 AR_PHY_COUNTMAX - aniState->cckTrigHigh; 259 AR_PHY_COUNTMAX - aniState->cckTrigHigh;
256 } 260 }
257 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 261 ath_print(common, ATH_DBG_ANI,
258 "Writing ofdmbase=%u cckbase=%u\n", 262 "Writing ofdmbase=%u cckbase=%u\n",
259 aniState->ofdmPhyErrBase, 263 aniState->ofdmPhyErrBase,
260 aniState->cckPhyErrBase); 264 aniState->cckPhyErrBase);
261 REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase); 265 REG_WRITE(ah, AR_PHY_ERR_1, aniState->ofdmPhyErrBase);
262 REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase); 266 REG_WRITE(ah, AR_PHY_ERR_2, aniState->cckPhyErrBase);
263 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING); 267 REG_WRITE(ah, AR_PHY_ERR_MASK_1, AR_PHY_ERR_OFDM_TIMING);
@@ -271,7 +275,7 @@ static void ath9k_ani_restart(struct ath_hw *ah)
271 275
272static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah) 276static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
273{ 277{
274 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf; 278 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
275 struct ar5416AniState *aniState; 279 struct ar5416AniState *aniState;
276 int32_t rssi; 280 int32_t rssi;
277 281
@@ -343,7 +347,7 @@ static void ath9k_hw_ani_ofdm_err_trigger(struct ath_hw *ah)
343 347
344static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah) 348static void ath9k_hw_ani_cck_err_trigger(struct ath_hw *ah)
345{ 349{
346 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf; 350 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
347 struct ar5416AniState *aniState; 351 struct ar5416AniState *aniState;
348 int32_t rssi; 352 int32_t rssi;
349 353
@@ -464,6 +468,7 @@ void ath9k_ani_reset(struct ath_hw *ah)
464{ 468{
465 struct ar5416AniState *aniState; 469 struct ar5416AniState *aniState;
466 struct ath9k_channel *chan = ah->curchan; 470 struct ath9k_channel *chan = ah->curchan;
471 struct ath_common *common = ath9k_hw_common(ah);
467 int index; 472 int index;
468 473
469 if (!DO_ANI(ah)) 474 if (!DO_ANI(ah))
@@ -475,8 +480,8 @@ void ath9k_ani_reset(struct ath_hw *ah)
475 480
476 if (DO_ANI(ah) && ah->opmode != NL80211_IFTYPE_STATION 481 if (DO_ANI(ah) && ah->opmode != NL80211_IFTYPE_STATION
477 && ah->opmode != NL80211_IFTYPE_ADHOC) { 482 && ah->opmode != NL80211_IFTYPE_ADHOC) {
478 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 483 ath_print(common, ATH_DBG_ANI,
479 "Reset ANI state opmode %u\n", ah->opmode); 484 "Reset ANI state opmode %u\n", ah->opmode);
480 ah->stats.ast_ani_reset++; 485 ah->stats.ast_ani_reset++;
481 486
482 if (ah->opmode == NL80211_IFTYPE_AP) { 487 if (ah->opmode == NL80211_IFTYPE_AP) {
@@ -543,6 +548,7 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah,
543 struct ath9k_channel *chan) 548 struct ath9k_channel *chan)
544{ 549{
545 struct ar5416AniState *aniState; 550 struct ar5416AniState *aniState;
551 struct ath_common *common = ath9k_hw_common(ah);
546 int32_t listenTime; 552 int32_t listenTime;
547 u32 phyCnt1, phyCnt2; 553 u32 phyCnt1, phyCnt2;
548 u32 ofdmPhyErrCnt, cckPhyErrCnt; 554 u32 ofdmPhyErrCnt, cckPhyErrCnt;
@@ -569,20 +575,22 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah,
569 if (phyCnt1 < aniState->ofdmPhyErrBase || 575 if (phyCnt1 < aniState->ofdmPhyErrBase ||
570 phyCnt2 < aniState->cckPhyErrBase) { 576 phyCnt2 < aniState->cckPhyErrBase) {
571 if (phyCnt1 < aniState->ofdmPhyErrBase) { 577 if (phyCnt1 < aniState->ofdmPhyErrBase) {
572 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 578 ath_print(common, ATH_DBG_ANI,
573 "phyCnt1 0x%x, resetting " 579 "phyCnt1 0x%x, resetting "
574 "counter value to 0x%x\n", 580 "counter value to 0x%x\n",
575 phyCnt1, aniState->ofdmPhyErrBase); 581 phyCnt1,
582 aniState->ofdmPhyErrBase);
576 REG_WRITE(ah, AR_PHY_ERR_1, 583 REG_WRITE(ah, AR_PHY_ERR_1,
577 aniState->ofdmPhyErrBase); 584 aniState->ofdmPhyErrBase);
578 REG_WRITE(ah, AR_PHY_ERR_MASK_1, 585 REG_WRITE(ah, AR_PHY_ERR_MASK_1,
579 AR_PHY_ERR_OFDM_TIMING); 586 AR_PHY_ERR_OFDM_TIMING);
580 } 587 }
581 if (phyCnt2 < aniState->cckPhyErrBase) { 588 if (phyCnt2 < aniState->cckPhyErrBase) {
582 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 589 ath_print(common, ATH_DBG_ANI,
583 "phyCnt2 0x%x, resetting " 590 "phyCnt2 0x%x, resetting "
584 "counter value to 0x%x\n", 591 "counter value to 0x%x\n",
585 phyCnt2, aniState->cckPhyErrBase); 592 phyCnt2,
593 aniState->cckPhyErrBase);
586 REG_WRITE(ah, AR_PHY_ERR_2, 594 REG_WRITE(ah, AR_PHY_ERR_2,
587 aniState->cckPhyErrBase); 595 aniState->cckPhyErrBase);
588 REG_WRITE(ah, AR_PHY_ERR_MASK_2, 596 REG_WRITE(ah, AR_PHY_ERR_MASK_2,
@@ -621,10 +629,13 @@ void ath9k_hw_ani_monitor(struct ath_hw *ah,
621 } 629 }
622 } 630 }
623} 631}
632EXPORT_SYMBOL(ath9k_hw_ani_monitor);
624 633
625void ath9k_enable_mib_counters(struct ath_hw *ah) 634void ath9k_enable_mib_counters(struct ath_hw *ah)
626{ 635{
627 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Enable MIB counters\n"); 636 struct ath_common *common = ath9k_hw_common(ah);
637
638 ath_print(common, ATH_DBG_ANI, "Enable MIB counters\n");
628 639
629 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); 640 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
630 641
@@ -640,7 +651,10 @@ void ath9k_enable_mib_counters(struct ath_hw *ah)
640/* Freeze the MIB counters, get the stats and then clear them */ 651/* Freeze the MIB counters, get the stats and then clear them */
641void ath9k_hw_disable_mib_counters(struct ath_hw *ah) 652void ath9k_hw_disable_mib_counters(struct ath_hw *ah)
642{ 653{
643 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Disable MIB counters\n"); 654 struct ath_common *common = ath9k_hw_common(ah);
655
656 ath_print(common, ATH_DBG_ANI, "Disable MIB counters\n");
657
644 REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC); 658 REG_WRITE(ah, AR_MIBC, AR_MIBC_FMC);
645 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats); 659 ath9k_hw_update_mibstats(ah, &ah->ah_mibStats);
646 REG_WRITE(ah, AR_MIBC, AR_MIBC_CMC); 660 REG_WRITE(ah, AR_MIBC, AR_MIBC_CMC);
@@ -653,6 +667,7 @@ u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah,
653 u32 *rxf_pcnt, 667 u32 *rxf_pcnt,
654 u32 *txf_pcnt) 668 u32 *txf_pcnt)
655{ 669{
670 struct ath_common *common = ath9k_hw_common(ah);
656 static u32 cycles, rx_clear, rx_frame, tx_frame; 671 static u32 cycles, rx_clear, rx_frame, tx_frame;
657 u32 good = 1; 672 u32 good = 1;
658 673
@@ -662,8 +677,8 @@ u32 ath9k_hw_GetMibCycleCountsPct(struct ath_hw *ah,
662 u32 cc = REG_READ(ah, AR_CCCNT); 677 u32 cc = REG_READ(ah, AR_CCCNT);
663 678
664 if (cycles == 0 || cycles > cc) { 679 if (cycles == 0 || cycles > cc) {
665 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 680 ath_print(common, ATH_DBG_ANI,
666 "cycle counter wrap. ExtBusy = 0\n"); 681 "cycle counter wrap. ExtBusy = 0\n");
667 good = 0; 682 good = 0;
668 } else { 683 } else {
669 u32 cc_d = cc - cycles; 684 u32 cc_d = cc - cycles;
@@ -742,6 +757,7 @@ void ath9k_hw_procmibevent(struct ath_hw *ah)
742 ath9k_ani_restart(ah); 757 ath9k_ani_restart(ah);
743 } 758 }
744} 759}
760EXPORT_SYMBOL(ath9k_hw_procmibevent);
745 761
746void ath9k_hw_ani_setup(struct ath_hw *ah) 762void ath9k_hw_ani_setup(struct ath_hw *ah)
747{ 763{
@@ -762,9 +778,10 @@ void ath9k_hw_ani_setup(struct ath_hw *ah)
762 778
763void ath9k_hw_ani_init(struct ath_hw *ah) 779void ath9k_hw_ani_init(struct ath_hw *ah)
764{ 780{
781 struct ath_common *common = ath9k_hw_common(ah);
765 int i; 782 int i;
766 783
767 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Initialize ANI\n"); 784 ath_print(common, ATH_DBG_ANI, "Initialize ANI\n");
768 785
769 memset(ah->ani, 0, sizeof(ah->ani)); 786 memset(ah->ani, 0, sizeof(ah->ani));
770 for (i = 0; i < ARRAY_SIZE(ah->ani); i++) { 787 for (i = 0; i < ARRAY_SIZE(ah->ani); i++) {
@@ -786,11 +803,11 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
786 AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH; 803 AR_PHY_COUNTMAX - ATH9K_ANI_CCK_TRIG_HIGH;
787 } 804 }
788 805
789 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 806 ath_print(common, ATH_DBG_ANI,
790 "Setting OfdmErrBase = 0x%08x\n", 807 "Setting OfdmErrBase = 0x%08x\n",
791 ah->ani[0].ofdmPhyErrBase); 808 ah->ani[0].ofdmPhyErrBase);
792 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n", 809 ath_print(common, ATH_DBG_ANI, "Setting cckErrBase = 0x%08x\n",
793 ah->ani[0].cckPhyErrBase); 810 ah->ani[0].cckPhyErrBase);
794 811
795 REG_WRITE(ah, AR_PHY_ERR_1, ah->ani[0].ofdmPhyErrBase); 812 REG_WRITE(ah, AR_PHY_ERR_1, ah->ani[0].ofdmPhyErrBase);
796 REG_WRITE(ah, AR_PHY_ERR_2, ah->ani[0].cckPhyErrBase); 813 REG_WRITE(ah, AR_PHY_ERR_2, ah->ani[0].cckPhyErrBase);
@@ -803,7 +820,7 @@ void ath9k_hw_ani_init(struct ath_hw *ah)
803 820
804void ath9k_hw_ani_disable(struct ath_hw *ah) 821void ath9k_hw_ani_disable(struct ath_hw *ah)
805{ 822{
806 DPRINTF(ah->ah_sc, ATH_DBG_ANI, "Disabling ANI\n"); 823 ath_print(ath9k_hw_common(ah), ATH_DBG_ANI, "Disabling ANI\n");
807 824
808 ath9k_hw_disable_mib_counters(ah); 825 ath9k_hw_disable_mib_counters(ah);
809 REG_WRITE(ah, AR_PHY_ERR_1, 0); 826 REG_WRITE(ah, AR_PHY_ERR_1, 0);
diff --git a/drivers/net/wireless/ath/ath9k/ath9k.h b/drivers/net/wireless/ath/ath9k/ath9k.h
index 1d59f10f68da..13dd0202d6b5 100644
--- a/drivers/net/wireless/ath/ath9k/ath9k.h
+++ b/drivers/net/wireless/ath/ath9k/ath9k.h
@@ -26,7 +26,7 @@
26#include "rc.h" 26#include "rc.h"
27#include "debug.h" 27#include "debug.h"
28#include "../ath.h" 28#include "../ath.h"
29#include "btcoex.h" 29#include "../debug.h"
30 30
31struct ath_node; 31struct ath_node;
32 32
@@ -54,15 +54,11 @@ struct ath_node;
54 54
55#define A_MAX(a, b) ((a) > (b) ? (a) : (b)) 55#define A_MAX(a, b) ((a) > (b) ? (a) : (b))
56 56
57#define ASSERT(exp) BUG_ON(!(exp))
58
59#define TSF_TO_TU(_h,_l) \ 57#define TSF_TO_TU(_h,_l) \
60 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10)) 58 ((((u32)(_h)) << 22) | (((u32)(_l)) >> 10))
61 59
62#define ATH_TXQ_SETUP(sc, i) ((sc)->tx.txqsetup & (1<<i)) 60#define ATH_TXQ_SETUP(sc, i) ((sc)->tx.txqsetup & (1<<i))
63 61
64static const u8 ath_bcast_mac[ETH_ALEN] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
65
66struct ath_config { 62struct ath_config {
67 u32 ath_aggr_prot; 63 u32 ath_aggr_prot;
68 u16 txpowlimit; 64 u16 txpowlimit;
@@ -191,7 +187,6 @@ void ath_descdma_cleanup(struct ath_softc *sc, struct ath_descdma *dd,
191/* minimum h/w qdepth to be sustained to maximize aggregation */ 187/* minimum h/w qdepth to be sustained to maximize aggregation */
192#define ATH_AGGR_MIN_QDEPTH 2 188#define ATH_AGGR_MIN_QDEPTH 2
193#define ATH_AMPDU_SUBFRAME_DEFAULT 32 189#define ATH_AMPDU_SUBFRAME_DEFAULT 32
194#define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1)
195 190
196#define IEEE80211_SEQ_SEQ_SHIFT 4 191#define IEEE80211_SEQ_SEQ_SHIFT 4
197#define IEEE80211_SEQ_MAX 4096 192#define IEEE80211_SEQ_MAX 4096
@@ -293,7 +288,6 @@ struct ath_tx_control {
293 288
294#define ATH_RSSI_LPF_LEN 10 289#define ATH_RSSI_LPF_LEN 10
295#define RSSI_LPF_THRESHOLD -20 290#define RSSI_LPF_THRESHOLD -20
296#define ATH9K_RSSI_BAD 0x80
297#define ATH_RSSI_EP_MULTIPLIER (1<<7) 291#define ATH_RSSI_EP_MULTIPLIER (1<<7)
298#define ATH_EP_MUL(x, mul) ((x) * (mul)) 292#define ATH_EP_MUL(x, mul) ((x) * (mul))
299#define ATH_RSSI_IN(x) (ATH_EP_MUL((x), ATH_RSSI_EP_MULTIPLIER)) 293#define ATH_RSSI_IN(x) (ATH_EP_MUL((x), ATH_RSSI_EP_MULTIPLIER))
@@ -427,7 +421,6 @@ struct ath_beacon {
427 421
428void ath_beacon_tasklet(unsigned long data); 422void ath_beacon_tasklet(unsigned long data);
429void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif); 423void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif);
430int ath_beaconq_setup(struct ath_hw *ah);
431int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif); 424int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif);
432void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp); 425void ath_beacon_return(struct ath_softc *sc, struct ath_vif *avp);
433 426
@@ -451,6 +444,26 @@ struct ath_ani {
451 struct timer_list timer; 444 struct timer_list timer;
452}; 445};
453 446
447/* Defines the BT AR_BT_COEX_WGHT used */
448enum ath_stomp_type {
449 ATH_BTCOEX_NO_STOMP,
450 ATH_BTCOEX_STOMP_ALL,
451 ATH_BTCOEX_STOMP_LOW,
452 ATH_BTCOEX_STOMP_NONE
453};
454
455struct ath_btcoex {
456 bool hw_timer_enabled;
457 spinlock_t btcoex_lock;
458 struct timer_list period_timer; /* Timer for BT period */
459 u32 bt_priority_cnt;
460 unsigned long bt_priority_time;
461 int bt_stomp_type; /* Types of BT stomping */
462 u32 btcoex_no_stomp; /* in usec */
463 u32 btcoex_period; /* in usec */
464 struct ath_gen_timer *no_stomp_timer; /* Timer for no BT stomping */
465};
466
454/********************/ 467/********************/
455/* LED Control */ 468/* LED Control */
456/********************/ 469/********************/
@@ -484,7 +497,6 @@ struct ath_led {
484 * Used when PCI device not fully initialized by bootrom/BIOS 497 * Used when PCI device not fully initialized by bootrom/BIOS
485*/ 498*/
486#define DEFAULT_CACHELINE 32 499#define DEFAULT_CACHELINE 32
487#define ATH_DEFAULT_NOISE_FLOOR -95
488#define ATH_REGCLASSIDS_MAX 10 500#define ATH_REGCLASSIDS_MAX 10
489#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */ 501#define ATH_CABQ_READY_TIME 80 /* % of beacon interval */
490#define ATH_MAX_SW_RETRIES 10 502#define ATH_MAX_SW_RETRIES 10
@@ -522,23 +534,14 @@ struct ath_led {
522#define SC_OP_WAIT_FOR_PSPOLL_DATA BIT(17) 534#define SC_OP_WAIT_FOR_PSPOLL_DATA BIT(17)
523#define SC_OP_WAIT_FOR_TX_ACK BIT(18) 535#define SC_OP_WAIT_FOR_TX_ACK BIT(18)
524#define SC_OP_BEACON_SYNC BIT(19) 536#define SC_OP_BEACON_SYNC BIT(19)
525#define SC_OP_BTCOEX_ENABLED BIT(20)
526#define SC_OP_BT_PRIORITY_DETECTED BIT(21) 537#define SC_OP_BT_PRIORITY_DETECTED BIT(21)
527 538
528struct ath_bus_ops {
529 void (*read_cachesize)(struct ath_softc *sc, int *csz);
530 void (*cleanup)(struct ath_softc *sc);
531 bool (*eeprom_read)(struct ath_hw *ah, u32 off, u16 *data);
532};
533
534struct ath_wiphy; 539struct ath_wiphy;
535 540
536struct ath_softc { 541struct ath_softc {
537 struct ieee80211_hw *hw; 542 struct ieee80211_hw *hw;
538 struct device *dev; 543 struct device *dev;
539 544
540 struct ath_common common;
541
542 spinlock_t wiphy_lock; /* spinlock to protect ath_wiphy data */ 545 spinlock_t wiphy_lock; /* spinlock to protect ath_wiphy data */
543 struct ath_wiphy *pri_wiphy; 546 struct ath_wiphy *pri_wiphy;
544 struct ath_wiphy **sec_wiphy; /* secondary wiphys (virtual radios); may 547 struct ath_wiphy **sec_wiphy; /* secondary wiphys (virtual radios); may
@@ -565,24 +568,17 @@ struct ath_softc {
565 spinlock_t sc_pm_lock; 568 spinlock_t sc_pm_lock;
566 struct mutex mutex; 569 struct mutex mutex;
567 570
568 u8 curbssid[ETH_ALEN];
569 u8 bssidmask[ETH_ALEN];
570 u32 intrstatus; 571 u32 intrstatus;
571 u32 sc_flags; /* SC_OP_* */ 572 u32 sc_flags; /* SC_OP_* */
572 u16 curtxpow; 573 u16 curtxpow;
573 u16 curaid;
574 u8 nbcnvifs; 574 u8 nbcnvifs;
575 u16 nvifs; 575 u16 nvifs;
576 u8 tx_chainmask;
577 u8 rx_chainmask;
578 u32 keymax; 576 u32 keymax;
579 DECLARE_BITMAP(keymap, ATH_KEYMAX); 577 DECLARE_BITMAP(keymap, ATH_KEYMAX);
580 u8 splitmic; 578 u8 splitmic;
581 bool ps_enabled; 579 bool ps_enabled;
582 unsigned long ps_usecount; 580 unsigned long ps_usecount;
583 enum ath9k_int imask; 581 enum ath9k_int imask;
584 enum ath9k_ht_extprotspacing ht_extprotspacing;
585 enum ath9k_ht_macmode tx_chan_width;
586 582
587 struct ath_config config; 583 struct ath_config config;
588 struct ath_rx rx; 584 struct ath_rx rx;
@@ -609,10 +605,9 @@ struct ath_softc {
609#ifdef CONFIG_ATH9K_DEBUG 605#ifdef CONFIG_ATH9K_DEBUG
610 struct ath9k_debug debug; 606 struct ath9k_debug debug;
611#endif 607#endif
612 struct ath_bus_ops *bus_ops;
613 struct ath_beacon_config cur_beacon_conf; 608 struct ath_beacon_config cur_beacon_conf;
614 struct delayed_work tx_complete_work; 609 struct delayed_work tx_complete_work;
615 struct ath_btcoex_info btcoex_info; 610 struct ath_btcoex btcoex;
616}; 611};
617 612
618struct ath_wiphy { 613struct ath_wiphy {
@@ -634,31 +629,22 @@ int ath_get_hal_qnum(u16 queue, struct ath_softc *sc);
634int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc); 629int ath_get_mac80211_qnum(u32 queue, struct ath_softc *sc);
635int ath_cabq_update(struct ath_softc *); 630int ath_cabq_update(struct ath_softc *);
636 631
637static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah) 632static inline void ath_read_cachesize(struct ath_common *common, int *csz)
638{
639 return &ah->ah_sc->common;
640}
641
642static inline struct ath_regulatory *ath9k_hw_regulatory(struct ath_hw *ah)
643{ 633{
644 return &(ath9k_hw_common(ah)->regulatory); 634 common->bus_ops->read_cachesize(common, csz);
645} 635}
646 636
647static inline void ath_read_cachesize(struct ath_softc *sc, int *csz) 637static inline void ath_bus_cleanup(struct ath_common *common)
648{ 638{
649 sc->bus_ops->read_cachesize(sc, csz); 639 common->bus_ops->cleanup(common);
650}
651
652static inline void ath_bus_cleanup(struct ath_softc *sc)
653{
654 sc->bus_ops->cleanup(sc);
655} 640}
656 641
657extern struct ieee80211_ops ath9k_ops; 642extern struct ieee80211_ops ath9k_ops;
658 643
659irqreturn_t ath_isr(int irq, void *dev); 644irqreturn_t ath_isr(int irq, void *dev);
660void ath_cleanup(struct ath_softc *sc); 645void ath_cleanup(struct ath_softc *sc);
661int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid); 646int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
647 const struct ath_bus_ops *bus_ops);
662void ath_detach(struct ath_softc *sc); 648void ath_detach(struct ath_softc *sc);
663const char *ath_mac_bb_name(u32 mac_bb_version); 649const char *ath_mac_bb_name(u32 mac_bb_version);
664const char *ath_rf_name(u16 rf_version); 650const char *ath_rf_name(u16 rf_version);
@@ -706,8 +692,5 @@ bool ath9k_wiphy_scanning(struct ath_softc *sc);
706void ath9k_wiphy_work(struct work_struct *work); 692void ath9k_wiphy_work(struct work_struct *work);
707bool ath9k_all_wiphys_idle(struct ath_softc *sc); 693bool ath9k_all_wiphys_idle(struct ath_softc *sc);
708 694
709void ath9k_iowrite32(struct ath_hw *ah, u32 reg_offset, u32 val);
710unsigned int ath9k_ioread32(struct ath_hw *ah, u32 reg_offset);
711
712int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype); 695int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype);
713#endif /* ATH9K_H */ 696#endif /* ATH9K_H */
diff --git a/drivers/net/wireless/ath/ath9k/beacon.c b/drivers/net/wireless/ath/ath9k/beacon.c
index 45c4ea57616b..b10c884f2933 100644
--- a/drivers/net/wireless/ath/ath9k/beacon.c
+++ b/drivers/net/wireless/ath/ath9k/beacon.c
@@ -26,6 +26,7 @@
26static int ath_beaconq_config(struct ath_softc *sc) 26static int ath_beaconq_config(struct ath_softc *sc)
27{ 27{
28 struct ath_hw *ah = sc->sc_ah; 28 struct ath_hw *ah = sc->sc_ah;
29 struct ath_common *common = ath9k_hw_common(ah);
29 struct ath9k_tx_queue_info qi; 30 struct ath9k_tx_queue_info qi;
30 31
31 ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi); 32 ath9k_hw_get_txq_props(ah, sc->beacon.beaconq, &qi);
@@ -42,8 +43,8 @@ static int ath_beaconq_config(struct ath_softc *sc)
42 } 43 }
43 44
44 if (!ath9k_hw_set_txq_props(ah, sc->beacon.beaconq, &qi)) { 45 if (!ath9k_hw_set_txq_props(ah, sc->beacon.beaconq, &qi)) {
45 DPRINTF(sc, ATH_DBG_FATAL, 46 ath_print(common, ATH_DBG_FATAL,
46 "Unable to update h/w beacon queue parameters\n"); 47 "Unable to update h/w beacon queue parameters\n");
47 return 0; 48 return 0;
48 } else { 49 } else {
49 ath9k_hw_resettxqueue(ah, sc->beacon.beaconq); 50 ath9k_hw_resettxqueue(ah, sc->beacon.beaconq);
@@ -61,6 +62,7 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
61{ 62{
62 struct sk_buff *skb = bf->bf_mpdu; 63 struct sk_buff *skb = bf->bf_mpdu;
63 struct ath_hw *ah = sc->sc_ah; 64 struct ath_hw *ah = sc->sc_ah;
65 struct ath_common *common = ath9k_hw_common(ah);
64 struct ath_desc *ds; 66 struct ath_desc *ds;
65 struct ath9k_11n_rate_series series[4]; 67 struct ath9k_11n_rate_series series[4];
66 const struct ath_rate_table *rt; 68 const struct ath_rate_table *rt;
@@ -108,7 +110,7 @@ static void ath_beacon_setup(struct ath_softc *sc, struct ath_vif *avp,
108 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4); 110 memset(series, 0, sizeof(struct ath9k_11n_rate_series) * 4);
109 series[0].Tries = 1; 111 series[0].Tries = 1;
110 series[0].Rate = rate; 112 series[0].Rate = rate;
111 series[0].ChSel = sc->tx_chainmask; 113 series[0].ChSel = common->tx_chainmask;
112 series[0].RateFlags = (ctsrate) ? ATH9K_RATESERIES_RTS_CTS : 0; 114 series[0].RateFlags = (ctsrate) ? ATH9K_RATESERIES_RTS_CTS : 0;
113 ath9k_hw_set11n_ratescenario(ah, ds, ds, 0, ctsrate, ctsduration, 115 ath9k_hw_set11n_ratescenario(ah, ds, ds, 0, ctsrate, ctsduration,
114 series, 4, 0); 116 series, 4, 0);
@@ -119,6 +121,7 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
119{ 121{
120 struct ath_wiphy *aphy = hw->priv; 122 struct ath_wiphy *aphy = hw->priv;
121 struct ath_softc *sc = aphy->sc; 123 struct ath_softc *sc = aphy->sc;
124 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
122 struct ath_buf *bf; 125 struct ath_buf *bf;
123 struct ath_vif *avp; 126 struct ath_vif *avp;
124 struct sk_buff *skb; 127 struct sk_buff *skb;
@@ -172,7 +175,8 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
172 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) { 175 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
173 dev_kfree_skb_any(skb); 176 dev_kfree_skb_any(skb);
174 bf->bf_mpdu = NULL; 177 bf->bf_mpdu = NULL;
175 DPRINTF(sc, ATH_DBG_FATAL, "dma_mapping_error on beaconing\n"); 178 ath_print(common, ATH_DBG_FATAL,
179 "dma_mapping_error on beaconing\n");
176 return NULL; 180 return NULL;
177 } 181 }
178 182
@@ -192,8 +196,8 @@ static struct ath_buf *ath_beacon_generate(struct ieee80211_hw *hw,
192 196
193 if (skb && cabq_depth) { 197 if (skb && cabq_depth) {
194 if (sc->nvifs > 1) { 198 if (sc->nvifs > 1) {
195 DPRINTF(sc, ATH_DBG_BEACON, 199 ath_print(common, ATH_DBG_BEACON,
196 "Flushing previous cabq traffic\n"); 200 "Flushing previous cabq traffic\n");
197 ath_draintxq(sc, cabq, false); 201 ath_draintxq(sc, cabq, false);
198 } 202 }
199 } 203 }
@@ -216,6 +220,7 @@ static void ath_beacon_start_adhoc(struct ath_softc *sc,
216 struct ieee80211_vif *vif) 220 struct ieee80211_vif *vif)
217{ 221{
218 struct ath_hw *ah = sc->sc_ah; 222 struct ath_hw *ah = sc->sc_ah;
223 struct ath_common *common = ath9k_hw_common(ah);
219 struct ath_buf *bf; 224 struct ath_buf *bf;
220 struct ath_vif *avp; 225 struct ath_vif *avp;
221 struct sk_buff *skb; 226 struct sk_buff *skb;
@@ -233,25 +238,14 @@ static void ath_beacon_start_adhoc(struct ath_softc *sc,
233 /* NB: caller is known to have already stopped tx dma */ 238 /* NB: caller is known to have already stopped tx dma */
234 ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bf->bf_daddr); 239 ath9k_hw_puttxbuf(ah, sc->beacon.beaconq, bf->bf_daddr);
235 ath9k_hw_txstart(ah, sc->beacon.beaconq); 240 ath9k_hw_txstart(ah, sc->beacon.beaconq);
236 DPRINTF(sc, ATH_DBG_BEACON, "TXDP%u = %llx (%p)\n", 241 ath_print(common, ATH_DBG_BEACON, "TXDP%u = %llx (%p)\n",
237 sc->beacon.beaconq, ito64(bf->bf_daddr), bf->bf_desc); 242 sc->beacon.beaconq, ito64(bf->bf_daddr), bf->bf_desc);
238}
239
240int ath_beaconq_setup(struct ath_hw *ah)
241{
242 struct ath9k_tx_queue_info qi;
243
244 memset(&qi, 0, sizeof(qi));
245 qi.tqi_aifs = 1;
246 qi.tqi_cwmin = 0;
247 qi.tqi_cwmax = 0;
248 /* NB: don't enable any interrupts */
249 return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
250} 243}
251 244
252int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif) 245int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
253{ 246{
254 struct ath_softc *sc = aphy->sc; 247 struct ath_softc *sc = aphy->sc;
248 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
255 struct ath_vif *avp; 249 struct ath_vif *avp;
256 struct ath_buf *bf; 250 struct ath_buf *bf;
257 struct sk_buff *skb; 251 struct sk_buff *skb;
@@ -309,7 +303,7 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
309 /* NB: the beacon data buffer must be 32-bit aligned. */ 303 /* NB: the beacon data buffer must be 32-bit aligned. */
310 skb = ieee80211_beacon_get(sc->hw, vif); 304 skb = ieee80211_beacon_get(sc->hw, vif);
311 if (skb == NULL) { 305 if (skb == NULL) {
312 DPRINTF(sc, ATH_DBG_BEACON, "cannot get skb\n"); 306 ath_print(common, ATH_DBG_BEACON, "cannot get skb\n");
313 return -ENOMEM; 307 return -ENOMEM;
314 } 308 }
315 309
@@ -333,9 +327,10 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
333 tsfadjust = intval * avp->av_bslot / ATH_BCBUF; 327 tsfadjust = intval * avp->av_bslot / ATH_BCBUF;
334 avp->tsf_adjust = cpu_to_le64(TU_TO_USEC(tsfadjust)); 328 avp->tsf_adjust = cpu_to_le64(TU_TO_USEC(tsfadjust));
335 329
336 DPRINTF(sc, ATH_DBG_BEACON, 330 ath_print(common, ATH_DBG_BEACON,
337 "stagger beacons, bslot %d intval %u tsfadjust %llu\n", 331 "stagger beacons, bslot %d intval "
338 avp->av_bslot, intval, (unsigned long long)tsfadjust); 332 "%u tsfadjust %llu\n",
333 avp->av_bslot, intval, (unsigned long long)tsfadjust);
339 334
340 ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp = 335 ((struct ieee80211_mgmt *)skb->data)->u.beacon.timestamp =
341 avp->tsf_adjust; 336 avp->tsf_adjust;
@@ -349,8 +344,8 @@ int ath_beacon_alloc(struct ath_wiphy *aphy, struct ieee80211_vif *vif)
349 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) { 344 if (unlikely(dma_mapping_error(sc->dev, bf->bf_buf_addr))) {
350 dev_kfree_skb_any(skb); 345 dev_kfree_skb_any(skb);
351 bf->bf_mpdu = NULL; 346 bf->bf_mpdu = NULL;
352 DPRINTF(sc, ATH_DBG_FATAL, 347 ath_print(common, ATH_DBG_FATAL,
353 "dma_mapping_error on beacon alloc\n"); 348 "dma_mapping_error on beacon alloc\n");
354 return -ENOMEM; 349 return -ENOMEM;
355 } 350 }
356 351
@@ -386,6 +381,7 @@ void ath_beacon_tasklet(unsigned long data)
386{ 381{
387 struct ath_softc *sc = (struct ath_softc *)data; 382 struct ath_softc *sc = (struct ath_softc *)data;
388 struct ath_hw *ah = sc->sc_ah; 383 struct ath_hw *ah = sc->sc_ah;
384 struct ath_common *common = ath9k_hw_common(ah);
389 struct ath_buf *bf = NULL; 385 struct ath_buf *bf = NULL;
390 struct ieee80211_vif *vif; 386 struct ieee80211_vif *vif;
391 struct ath_wiphy *aphy; 387 struct ath_wiphy *aphy;
@@ -405,12 +401,12 @@ void ath_beacon_tasklet(unsigned long data)
405 sc->beacon.bmisscnt++; 401 sc->beacon.bmisscnt++;
406 402
407 if (sc->beacon.bmisscnt < BSTUCK_THRESH) { 403 if (sc->beacon.bmisscnt < BSTUCK_THRESH) {
408 DPRINTF(sc, ATH_DBG_BEACON, 404 ath_print(common, ATH_DBG_BEACON,
409 "missed %u consecutive beacons\n", 405 "missed %u consecutive beacons\n",
410 sc->beacon.bmisscnt); 406 sc->beacon.bmisscnt);
411 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) { 407 } else if (sc->beacon.bmisscnt >= BSTUCK_THRESH) {
412 DPRINTF(sc, ATH_DBG_BEACON, 408 ath_print(common, ATH_DBG_BEACON,
413 "beacon is officially stuck\n"); 409 "beacon is officially stuck\n");
414 sc->sc_flags |= SC_OP_TSF_RESET; 410 sc->sc_flags |= SC_OP_TSF_RESET;
415 ath_reset(sc, false); 411 ath_reset(sc, false);
416 } 412 }
@@ -419,9 +415,9 @@ void ath_beacon_tasklet(unsigned long data)
419 } 415 }
420 416
421 if (sc->beacon.bmisscnt != 0) { 417 if (sc->beacon.bmisscnt != 0) {
422 DPRINTF(sc, ATH_DBG_BEACON, 418 ath_print(common, ATH_DBG_BEACON,
423 "resume beacon xmit after %u misses\n", 419 "resume beacon xmit after %u misses\n",
424 sc->beacon.bmisscnt); 420 sc->beacon.bmisscnt);
425 sc->beacon.bmisscnt = 0; 421 sc->beacon.bmisscnt = 0;
426 } 422 }
427 423
@@ -447,9 +443,9 @@ void ath_beacon_tasklet(unsigned long data)
447 vif = sc->beacon.bslot[slot]; 443 vif = sc->beacon.bslot[slot];
448 aphy = sc->beacon.bslot_aphy[slot]; 444 aphy = sc->beacon.bslot_aphy[slot];
449 445
450 DPRINTF(sc, ATH_DBG_BEACON, 446 ath_print(common, ATH_DBG_BEACON,
451 "slot %d [tsf %llu tsftu %u intval %u] vif %p\n", 447 "slot %d [tsf %llu tsftu %u intval %u] vif %p\n",
452 slot, tsf, tsftu, intval, vif); 448 slot, tsf, tsftu, intval, vif);
453 449
454 bfaddr = 0; 450 bfaddr = 0;
455 if (vif) { 451 if (vif) {
@@ -490,7 +486,7 @@ void ath_beacon_tasklet(unsigned long data)
490 * are still pending on the queue. 486 * are still pending on the queue.
491 */ 487 */
492 if (!ath9k_hw_stoptxdma(ah, sc->beacon.beaconq)) { 488 if (!ath9k_hw_stoptxdma(ah, sc->beacon.beaconq)) {
493 DPRINTF(sc, ATH_DBG_FATAL, 489 ath_print(common, ATH_DBG_FATAL,
494 "beacon queue %u did not stop?\n", sc->beacon.beaconq); 490 "beacon queue %u did not stop?\n", sc->beacon.beaconq);
495 } 491 }
496 492
@@ -502,6 +498,19 @@ void ath_beacon_tasklet(unsigned long data)
502 } 498 }
503} 499}
504 500
501static void ath9k_beacon_init(struct ath_softc *sc,
502 u32 next_beacon,
503 u32 beacon_period)
504{
505 if (beacon_period & ATH9K_BEACON_RESET_TSF)
506 ath9k_ps_wakeup(sc);
507
508 ath9k_hw_beaconinit(sc->sc_ah, next_beacon, beacon_period);
509
510 if (beacon_period & ATH9K_BEACON_RESET_TSF)
511 ath9k_ps_restore(sc);
512}
513
505/* 514/*
506 * For multi-bss ap support beacons are either staggered evenly over N slots or 515 * For multi-bss ap support beacons are either staggered evenly over N slots or
507 * burst together. For the former arrange for the SWBA to be delivered for each 516 * burst together. For the former arrange for the SWBA to be delivered for each
@@ -534,7 +543,7 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
534 /* Set the computed AP beacon timers */ 543 /* Set the computed AP beacon timers */
535 544
536 ath9k_hw_set_interrupts(sc->sc_ah, 0); 545 ath9k_hw_set_interrupts(sc->sc_ah, 0);
537 ath9k_hw_beaconinit(sc->sc_ah, nexttbtt, intval); 546 ath9k_beacon_init(sc, nexttbtt, intval);
538 sc->beacon.bmisscnt = 0; 547 sc->beacon.bmisscnt = 0;
539 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); 548 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
540 549
@@ -555,6 +564,7 @@ static void ath_beacon_config_ap(struct ath_softc *sc,
555static void ath_beacon_config_sta(struct ath_softc *sc, 564static void ath_beacon_config_sta(struct ath_softc *sc,
556 struct ath_beacon_config *conf) 565 struct ath_beacon_config *conf)
557{ 566{
567 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
558 struct ath9k_beacon_state bs; 568 struct ath9k_beacon_state bs;
559 int dtimperiod, dtimcount, sleepduration; 569 int dtimperiod, dtimcount, sleepduration;
560 int cfpperiod, cfpcount; 570 int cfpperiod, cfpcount;
@@ -651,11 +661,11 @@ static void ath_beacon_config_sta(struct ath_softc *sc,
651 /* TSF out of range threshold fixed at 1 second */ 661 /* TSF out of range threshold fixed at 1 second */
652 bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD; 662 bs.bs_tsfoor_threshold = ATH9K_TSFOOR_THRESHOLD;
653 663
654 DPRINTF(sc, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu); 664 ath_print(common, ATH_DBG_BEACON, "tsf: %llu tsftu: %u\n", tsf, tsftu);
655 DPRINTF(sc, ATH_DBG_BEACON, 665 ath_print(common, ATH_DBG_BEACON,
656 "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n", 666 "bmiss: %u sleep: %u cfp-period: %u maxdur: %u next: %u\n",
657 bs.bs_bmissthreshold, bs.bs_sleepduration, 667 bs.bs_bmissthreshold, bs.bs_sleepduration,
658 bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext); 668 bs.bs_cfpperiod, bs.bs_cfpmaxduration, bs.bs_cfpnext);
659 669
660 /* Set the computed STA beacon timers */ 670 /* Set the computed STA beacon timers */
661 671
@@ -669,6 +679,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
669 struct ath_beacon_config *conf, 679 struct ath_beacon_config *conf,
670 struct ieee80211_vif *vif) 680 struct ieee80211_vif *vif)
671{ 681{
682 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
672 u64 tsf; 683 u64 tsf;
673 u32 tsftu, intval, nexttbtt; 684 u32 tsftu, intval, nexttbtt;
674 685
@@ -689,9 +700,9 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
689 nexttbtt += intval; 700 nexttbtt += intval;
690 } while (nexttbtt < tsftu); 701 } while (nexttbtt < tsftu);
691 702
692 DPRINTF(sc, ATH_DBG_BEACON, 703 ath_print(common, ATH_DBG_BEACON,
693 "IBSS nexttbtt %u intval %u (%u)\n", 704 "IBSS nexttbtt %u intval %u (%u)\n",
694 nexttbtt, intval, conf->beacon_interval); 705 nexttbtt, intval, conf->beacon_interval);
695 706
696 /* 707 /*
697 * In IBSS mode enable the beacon timers but only enable SWBA interrupts 708 * In IBSS mode enable the beacon timers but only enable SWBA interrupts
@@ -707,7 +718,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
707 /* Set the computed ADHOC beacon timers */ 718 /* Set the computed ADHOC beacon timers */
708 719
709 ath9k_hw_set_interrupts(sc->sc_ah, 0); 720 ath9k_hw_set_interrupts(sc->sc_ah, 0);
710 ath9k_hw_beaconinit(sc->sc_ah, nexttbtt, intval); 721 ath9k_beacon_init(sc, nexttbtt, intval);
711 sc->beacon.bmisscnt = 0; 722 sc->beacon.bmisscnt = 0;
712 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); 723 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask);
713 724
@@ -719,6 +730,7 @@ static void ath_beacon_config_adhoc(struct ath_softc *sc,
719void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif) 730void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
720{ 731{
721 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf; 732 struct ath_beacon_config *cur_conf = &sc->cur_beacon_conf;
733 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
722 enum nl80211_iftype iftype; 734 enum nl80211_iftype iftype;
723 735
724 /* Setup the beacon configuration parameters */ 736 /* Setup the beacon configuration parameters */
@@ -759,8 +771,8 @@ void ath_beacon_config(struct ath_softc *sc, struct ieee80211_vif *vif)
759 ath_beacon_config_sta(sc, cur_conf); 771 ath_beacon_config_sta(sc, cur_conf);
760 break; 772 break;
761 default: 773 default:
762 DPRINTF(sc, ATH_DBG_CONFIG, 774 ath_print(common, ATH_DBG_CONFIG,
763 "Unsupported beaconing mode\n"); 775 "Unsupported beaconing mode\n");
764 return; 776 return;
765 } 777 }
766 778
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.c b/drivers/net/wireless/ath/ath9k/btcoex.c
index 55f607b7699e..fb4ac15f3b93 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.c
+++ b/drivers/net/wireless/ath/ath9k/btcoex.c
@@ -14,10 +14,26 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "ath9k.h" 17#include "hw.h"
18 18
19static const struct ath_btcoex_config ath_bt_config = { 0, true, true, 19enum ath_bt_mode {
20 ATH_BT_COEX_MODE_SLOTTED, true, true, 2, 5, true }; 20 ATH_BT_COEX_MODE_LEGACY, /* legacy rx_clear mode */
21 ATH_BT_COEX_MODE_UNSLOTTED, /* untimed/unslotted mode */
22 ATH_BT_COEX_MODE_SLOTTED, /* slotted mode */
23 ATH_BT_COEX_MODE_DISALBED, /* coexistence disabled */
24};
25
26struct ath_btcoex_config {
27 u8 bt_time_extend;
28 bool bt_txstate_extend;
29 bool bt_txframe_extend;
30 enum ath_bt_mode bt_mode; /* coexistence mode */
31 bool bt_quiet_collision;
32 bool bt_rxclear_polarity; /* invert rx_clear as WLAN_ACTIVE*/
33 u8 bt_priority_time;
34 u8 bt_first_slot_time;
35 bool bt_hold_rx_clear;
36};
21 37
22static const u16 ath_subsysid_tbl[] = { 38static const u16 ath_subsysid_tbl[] = {
23 AR9280_COEX2WIRE_SUBSYSID, 39 AR9280_COEX2WIRE_SUBSYSID,
@@ -29,141 +45,38 @@ static const u16 ath_subsysid_tbl[] = {
29 * Checks the subsystem id of the device to see if it 45 * Checks the subsystem id of the device to see if it
30 * supports btcoex 46 * supports btcoex
31 */ 47 */
32bool ath_btcoex_supported(u16 subsysid) 48bool ath9k_hw_btcoex_supported(struct ath_hw *ah)
33{ 49{
34 int i; 50 int i;
35 51
36 if (!subsysid) 52 if (!ah->hw_version.subsysid)
37 return false; 53 return false;
38 54
39 for (i = 0; i < ARRAY_SIZE(ath_subsysid_tbl); i++) 55 for (i = 0; i < ARRAY_SIZE(ath_subsysid_tbl); i++)
40 if (subsysid == ath_subsysid_tbl[i]) 56 if (ah->hw_version.subsysid == ath_subsysid_tbl[i])
41 return true; 57 return true;
42 58
43 return false; 59 return false;
44} 60}
45 61
46/* 62void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum)
47 * Detects if there is any priority bt traffic
48 */
49static void ath_detect_bt_priority(struct ath_softc *sc)
50{
51 struct ath_btcoex_info *btinfo = &sc->btcoex_info;
52
53 if (ath9k_hw_gpio_get(sc->sc_ah, btinfo->btpriority_gpio))
54 btinfo->bt_priority_cnt++;
55
56 if (time_after(jiffies, btinfo->bt_priority_time +
57 msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
58 if (btinfo->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
59 DPRINTF(sc, ATH_DBG_BTCOEX,
60 "BT priority traffic detected");
61 sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
62 } else {
63 sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
64 }
65
66 btinfo->bt_priority_cnt = 0;
67 btinfo->bt_priority_time = jiffies;
68 }
69}
70
71/*
72 * Configures appropriate weight based on stomp type.
73 */
74static void ath_btcoex_bt_stomp(struct ath_softc *sc,
75 struct ath_btcoex_info *btinfo,
76 int stomp_type)
77{
78
79 switch (stomp_type) {
80 case ATH_BTCOEX_STOMP_ALL:
81 ath_btcoex_set_weight(btinfo, AR_BT_COEX_WGHT,
82 AR_STOMP_ALL_WLAN_WGHT);
83 break;
84 case ATH_BTCOEX_STOMP_LOW:
85 ath_btcoex_set_weight(btinfo, AR_BT_COEX_WGHT,
86 AR_STOMP_LOW_WLAN_WGHT);
87 break;
88 case ATH_BTCOEX_STOMP_NONE:
89 ath_btcoex_set_weight(btinfo, AR_BT_COEX_WGHT,
90 AR_STOMP_NONE_WLAN_WGHT);
91 break;
92 default:
93 DPRINTF(sc, ATH_DBG_BTCOEX, "Invalid Stomptype\n");
94 break;
95 }
96
97 ath9k_hw_btcoex_enable(sc->sc_ah);
98}
99
100/*
101 * This is the master bt coex timer which runs for every
102 * 45ms, bt traffic will be given priority during 55% of this
103 * period while wlan gets remaining 45%
104 */
105
106static void ath_btcoex_period_timer(unsigned long data)
107{
108 struct ath_softc *sc = (struct ath_softc *) data;
109 struct ath_btcoex_info *btinfo = &sc->btcoex_info;
110
111 ath_detect_bt_priority(sc);
112
113 spin_lock_bh(&btinfo->btcoex_lock);
114
115 ath_btcoex_bt_stomp(sc, btinfo, btinfo->bt_stomp_type);
116
117 spin_unlock_bh(&btinfo->btcoex_lock);
118
119 if (btinfo->btcoex_period != btinfo->btcoex_no_stomp) {
120 if (btinfo->hw_timer_enabled)
121 ath_gen_timer_stop(sc->sc_ah, btinfo->no_stomp_timer);
122
123 ath_gen_timer_start(sc->sc_ah,
124 btinfo->no_stomp_timer,
125 (ath9k_hw_gettsf32(sc->sc_ah) +
126 btinfo->btcoex_no_stomp),
127 btinfo->btcoex_no_stomp * 10);
128 btinfo->hw_timer_enabled = true;
129 }
130
131 mod_timer(&btinfo->period_timer, jiffies +
132 msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
133}
134
135/*
136 * Generic tsf based hw timer which configures weight
137 * registers to time slice between wlan and bt traffic
138 */
139
140static void ath_btcoex_no_stomp_timer(void *arg)
141{
142 struct ath_softc *sc = (struct ath_softc *)arg;
143 struct ath_btcoex_info *btinfo = &sc->btcoex_info;
144
145 DPRINTF(sc, ATH_DBG_BTCOEX, "no stomp timer running \n");
146
147 spin_lock_bh(&btinfo->btcoex_lock);
148
149 if (btinfo->bt_stomp_type == ATH_BTCOEX_STOMP_LOW)
150 ath_btcoex_bt_stomp(sc, btinfo, ATH_BTCOEX_STOMP_NONE);
151 else if (btinfo->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
152 ath_btcoex_bt_stomp(sc, btinfo, ATH_BTCOEX_STOMP_LOW);
153
154 spin_unlock_bh(&btinfo->btcoex_lock);
155}
156
157static int ath_init_btcoex_info(struct ath_hw *hw,
158 struct ath_btcoex_info *btcoex_info)
159{ 63{
64 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
65 const struct ath_btcoex_config ath_bt_config = {
66 .bt_time_extend = 0,
67 .bt_txstate_extend = true,
68 .bt_txframe_extend = true,
69 .bt_mode = ATH_BT_COEX_MODE_SLOTTED,
70 .bt_quiet_collision = true,
71 .bt_rxclear_polarity = true,
72 .bt_priority_time = 2,
73 .bt_first_slot_time = 5,
74 .bt_hold_rx_clear = true,
75 };
160 u32 i; 76 u32 i;
161 int qnum;
162 77
163 qnum = ath_tx_get_qnum(hw->ah_sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE); 78 btcoex_hw->bt_coex_mode =
164 79 (btcoex_hw->bt_coex_mode & AR_BT_QCU_THRESH) |
165 btcoex_info->bt_coex_mode =
166 (btcoex_info->bt_coex_mode & AR_BT_QCU_THRESH) |
167 SM(ath_bt_config.bt_time_extend, AR_BT_TIME_EXTEND) | 80 SM(ath_bt_config.bt_time_extend, AR_BT_TIME_EXTEND) |
168 SM(ath_bt_config.bt_txstate_extend, AR_BT_TXSTATE_EXTEND) | 81 SM(ath_bt_config.bt_txstate_extend, AR_BT_TXSTATE_EXTEND) |
169 SM(ath_bt_config.bt_txframe_extend, AR_BT_TX_FRAME_EXTEND) | 82 SM(ath_bt_config.bt_txframe_extend, AR_BT_TX_FRAME_EXTEND) |
@@ -174,167 +87,141 @@ static int ath_init_btcoex_info(struct ath_hw *hw,
174 SM(ath_bt_config.bt_first_slot_time, AR_BT_FIRST_SLOT_TIME) | 87 SM(ath_bt_config.bt_first_slot_time, AR_BT_FIRST_SLOT_TIME) |
175 SM(qnum, AR_BT_QCU_THRESH); 88 SM(qnum, AR_BT_QCU_THRESH);
176 89
177 btcoex_info->bt_coex_mode2 = 90 btcoex_hw->bt_coex_mode2 =
178 SM(ath_bt_config.bt_hold_rx_clear, AR_BT_HOLD_RX_CLEAR) | 91 SM(ath_bt_config.bt_hold_rx_clear, AR_BT_HOLD_RX_CLEAR) |
179 SM(ATH_BTCOEX_BMISS_THRESH, AR_BT_BCN_MISS_THRESH) | 92 SM(ATH_BTCOEX_BMISS_THRESH, AR_BT_BCN_MISS_THRESH) |
180 AR_BT_DISABLE_BT_ANT; 93 AR_BT_DISABLE_BT_ANT;
181 94
182 btcoex_info->bt_stomp_type = ATH_BTCOEX_STOMP_LOW; 95 for (i = 0; i < 32; i++)
96 ah->hw_gen_timers.gen_timer_index[(debruijn32 << i) >> 27] = i;
97}
98EXPORT_SYMBOL(ath9k_hw_init_btcoex_hw);
183 99
184 btcoex_info->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000; 100void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah)
101{
102 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
185 103
186 btcoex_info->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) * 104 /* connect bt_active to baseband */
187 btcoex_info->btcoex_period / 100; 105 REG_CLR_BIT(ah, AR_GPIO_INPUT_EN_VAL,
106 (AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF |
107 AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF));
188 108
189 for (i = 0; i < 32; i++) 109 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
190 hw->hw_gen_timers.gen_timer_index[(debruijn32 << i) >> 27] = i; 110 AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB);
191 111
192 setup_timer(&btcoex_info->period_timer, ath_btcoex_period_timer, 112 /* Set input mux for bt_active to gpio pin */
193 (unsigned long) hw->ah_sc); 113 REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
114 AR_GPIO_INPUT_MUX1_BT_ACTIVE,
115 btcoex_hw->btactive_gpio);
194 116
195 btcoex_info->no_stomp_timer = ath_gen_timer_alloc(hw, 117 /* Configure the desired gpio port for input */
196 ath_btcoex_no_stomp_timer, 118 ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btactive_gpio);
197 ath_btcoex_no_stomp_timer, 119}
198 (void *)hw->ah_sc, AR_FIRST_NDP_TIMER); 120EXPORT_SYMBOL(ath9k_hw_btcoex_init_2wire);
121
122void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah)
123{
124 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
199 125
200 if (btcoex_info->no_stomp_timer == NULL) 126 /* btcoex 3-wire */
201 return -ENOMEM; 127 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
128 (AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB |
129 AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB));
202 130
203 spin_lock_init(&btcoex_info->btcoex_lock); 131 /* Set input mux for bt_prority_async and
132 * bt_active_async to GPIO pins */
133 REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
134 AR_GPIO_INPUT_MUX1_BT_ACTIVE,
135 btcoex_hw->btactive_gpio);
204 136
205 return 0; 137 REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
138 AR_GPIO_INPUT_MUX1_BT_PRIORITY,
139 btcoex_hw->btpriority_gpio);
140
141 /* Configure the desired GPIO ports for input */
142
143 ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btactive_gpio);
144 ath9k_hw_cfg_gpio_input(ah, btcoex_hw->btpriority_gpio);
206} 145}
146EXPORT_SYMBOL(ath9k_hw_btcoex_init_3wire);
207 147
208int ath9k_hw_btcoex_init(struct ath_hw *ah) 148static void ath9k_hw_btcoex_enable_2wire(struct ath_hw *ah)
209{ 149{
210 struct ath_btcoex_info *btcoex_info = &ah->ah_sc->btcoex_info; 150 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
211 int ret = 0;
212
213 if (btcoex_info->btcoex_scheme == ATH_BTCOEX_CFG_2WIRE) {
214 /* connect bt_active to baseband */
215 REG_CLR_BIT(ah, AR_GPIO_INPUT_EN_VAL,
216 (AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_DEF |
217 AR_GPIO_INPUT_EN_VAL_BT_FREQUENCY_DEF));
218
219 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
220 AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB);
221
222 /* Set input mux for bt_active to gpio pin */
223 REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
224 AR_GPIO_INPUT_MUX1_BT_ACTIVE,
225 btcoex_info->btactive_gpio);
226
227 /* Configure the desired gpio port for input */
228 ath9k_hw_cfg_gpio_input(ah, btcoex_info->btactive_gpio);
229 } else {
230 /* btcoex 3-wire */
231 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
232 (AR_GPIO_INPUT_EN_VAL_BT_PRIORITY_BB |
233 AR_GPIO_INPUT_EN_VAL_BT_ACTIVE_BB));
234
235 /* Set input mux for bt_prority_async and
236 * bt_active_async to GPIO pins */
237 REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
238 AR_GPIO_INPUT_MUX1_BT_ACTIVE,
239 btcoex_info->btactive_gpio);
240
241 REG_RMW_FIELD(ah, AR_GPIO_INPUT_MUX1,
242 AR_GPIO_INPUT_MUX1_BT_PRIORITY,
243 btcoex_info->btpriority_gpio);
244
245 /* Configure the desired GPIO ports for input */
246
247 ath9k_hw_cfg_gpio_input(ah, btcoex_info->btactive_gpio);
248 ath9k_hw_cfg_gpio_input(ah, btcoex_info->btpriority_gpio);
249
250 ret = ath_init_btcoex_info(ah, btcoex_info);
251 }
252 151
253 return ret; 152 /* Configure the desired GPIO port for TX_FRAME output */
153 ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
154 AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
254} 155}
255 156
256void ath9k_hw_btcoex_enable(struct ath_hw *ah) 157void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
158 u32 bt_weight,
159 u32 wlan_weight)
257{ 160{
258 struct ath_btcoex_info *btcoex_info = &ah->ah_sc->btcoex_info; 161 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
259
260 if (btcoex_info->btcoex_scheme == ATH_BTCOEX_CFG_2WIRE) {
261 /* Configure the desired GPIO port for TX_FRAME output */
262 ath9k_hw_cfg_output(ah, btcoex_info->wlanactive_gpio,
263 AR_GPIO_OUTPUT_MUX_AS_TX_FRAME);
264 } else {
265 /*
266 * Program coex mode and weight registers to
267 * enable coex 3-wire
268 */
269 REG_WRITE(ah, AR_BT_COEX_MODE, btcoex_info->bt_coex_mode);
270 REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex_info->bt_coex_weights);
271 REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex_info->bt_coex_mode2);
272
273 REG_RMW_FIELD(ah, AR_QUIET1,
274 AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
275 REG_RMW_FIELD(ah, AR_PCU_MISC,
276 AR_PCU_BT_ANT_PREVENT_RX, 0);
277
278 ath9k_hw_cfg_output(ah, btcoex_info->wlanactive_gpio,
279 AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
280 }
281 162
282 REG_RMW(ah, AR_GPIO_PDPU, 163 btcoex_hw->bt_coex_weights = SM(bt_weight, AR_BTCOEX_BT_WGHT) |
283 (0x2 << (btcoex_info->btactive_gpio * 2)), 164 SM(wlan_weight, AR_BTCOEX_WL_WGHT);
284 (0x3 << (btcoex_info->btactive_gpio * 2)));
285
286 ah->ah_sc->sc_flags |= SC_OP_BTCOEX_ENABLED;
287} 165}
166EXPORT_SYMBOL(ath9k_hw_btcoex_set_weight);
288 167
289void ath9k_hw_btcoex_disable(struct ath_hw *ah) 168static void ath9k_hw_btcoex_enable_3wire(struct ath_hw *ah)
290{ 169{
291 struct ath_btcoex_info *btcoex_info = &ah->ah_sc->btcoex_info; 170 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
292 171
293 ath9k_hw_set_gpio(ah, btcoex_info->wlanactive_gpio, 0); 172 /*
173 * Program coex mode and weight registers to
174 * enable coex 3-wire
175 */
176 REG_WRITE(ah, AR_BT_COEX_MODE, btcoex_hw->bt_coex_mode);
177 REG_WRITE(ah, AR_BT_COEX_WEIGHT, btcoex_hw->bt_coex_weights);
178 REG_WRITE(ah, AR_BT_COEX_MODE2, btcoex_hw->bt_coex_mode2);
294 179
295 ath9k_hw_cfg_output(ah, btcoex_info->wlanactive_gpio, 180 REG_RMW_FIELD(ah, AR_QUIET1, AR_QUIET1_QUIET_ACK_CTS_ENABLE, 1);
296 AR_GPIO_OUTPUT_MUX_AS_OUTPUT); 181 REG_RMW_FIELD(ah, AR_PCU_MISC, AR_PCU_BT_ANT_PREVENT_RX, 0);
297 182
298 if (btcoex_info->btcoex_scheme == ATH_BTCOEX_CFG_3WIRE) { 183 ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
299 REG_WRITE(ah, AR_BT_COEX_MODE, AR_BT_QUIET | AR_BT_MODE); 184 AR_GPIO_OUTPUT_MUX_AS_RX_CLEAR_EXTERNAL);
300 REG_WRITE(ah, AR_BT_COEX_WEIGHT, 0);
301 REG_WRITE(ah, AR_BT_COEX_MODE2, 0);
302 }
303
304 ah->ah_sc->sc_flags &= ~SC_OP_BTCOEX_ENABLED;
305} 185}
306 186
307/* 187void ath9k_hw_btcoex_enable(struct ath_hw *ah)
308 * Pause btcoex timer and bt duty cycle timer
309 */
310void ath_btcoex_timer_pause(struct ath_softc *sc,
311 struct ath_btcoex_info *btinfo)
312{ 188{
189 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
313 190
314 del_timer_sync(&btinfo->period_timer); 191 switch (btcoex_hw->scheme) {
192 case ATH_BTCOEX_CFG_NONE:
193 break;
194 case ATH_BTCOEX_CFG_2WIRE:
195 ath9k_hw_btcoex_enable_2wire(ah);
196 break;
197 case ATH_BTCOEX_CFG_3WIRE:
198 ath9k_hw_btcoex_enable_3wire(ah);
199 break;
200 }
315 201
316 if (btinfo->hw_timer_enabled) 202 REG_RMW(ah, AR_GPIO_PDPU,
317 ath_gen_timer_stop(sc->sc_ah, btinfo->no_stomp_timer); 203 (0x2 << (btcoex_hw->btactive_gpio * 2)),
204 (0x3 << (btcoex_hw->btactive_gpio * 2)));
318 205
319 btinfo->hw_timer_enabled = false; 206 ah->btcoex_hw.enabled = true;
320} 207}
208EXPORT_SYMBOL(ath9k_hw_btcoex_enable);
321 209
322/* 210void ath9k_hw_btcoex_disable(struct ath_hw *ah)
323 * (Re)start btcoex timers
324 */
325void ath_btcoex_timer_resume(struct ath_softc *sc,
326 struct ath_btcoex_info *btinfo)
327{ 211{
212 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
328 213
329 DPRINTF(sc, ATH_DBG_BTCOEX, "Starting btcoex timers"); 214 ath9k_hw_set_gpio(ah, btcoex_hw->wlanactive_gpio, 0);
330 215
331 /* make sure duty cycle timer is also stopped when resuming */ 216 ath9k_hw_cfg_output(ah, btcoex_hw->wlanactive_gpio,
332 if (btinfo->hw_timer_enabled) 217 AR_GPIO_OUTPUT_MUX_AS_OUTPUT);
333 ath_gen_timer_stop(sc->sc_ah, btinfo->no_stomp_timer);
334 218
335 btinfo->bt_priority_cnt = 0; 219 if (btcoex_hw->scheme == ATH_BTCOEX_CFG_3WIRE) {
336 btinfo->bt_priority_time = jiffies; 220 REG_WRITE(ah, AR_BT_COEX_MODE, AR_BT_QUIET | AR_BT_MODE);
337 sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED; 221 REG_WRITE(ah, AR_BT_COEX_WEIGHT, 0);
222 REG_WRITE(ah, AR_BT_COEX_MODE2, 0);
223 }
338 224
339 mod_timer(&btinfo->period_timer, jiffies); 225 ah->btcoex_hw.enabled = false;
340} 226}
227EXPORT_SYMBOL(ath9k_hw_btcoex_disable);
diff --git a/drivers/net/wireless/ath/ath9k/btcoex.h b/drivers/net/wireless/ath/ath9k/btcoex.h
index 297b027fd3c3..1ba31a73317c 100644
--- a/drivers/net/wireless/ath/ath9k/btcoex.h
+++ b/drivers/net/wireless/ath/ath9k/btcoex.h
@@ -17,6 +17,8 @@
17#ifndef BTCOEX_H 17#ifndef BTCOEX_H
18#define BTCOEX_H 18#define BTCOEX_H
19 19
20#include "hw.h"
21
20#define ATH_WLANACTIVE_GPIO 5 22#define ATH_WLANACTIVE_GPIO 5
21#define ATH_BTACTIVE_GPIO 6 23#define ATH_BTACTIVE_GPIO 6
22#define ATH_BTPRIORITY_GPIO 7 24#define ATH_BTPRIORITY_GPIO 7
@@ -34,67 +36,25 @@ enum ath_btcoex_scheme {
34 ATH_BTCOEX_CFG_3WIRE, 36 ATH_BTCOEX_CFG_3WIRE,
35}; 37};
36 38
37enum ath_stomp_type { 39struct ath_btcoex_hw {
38 ATH_BTCOEX_NO_STOMP, 40 enum ath_btcoex_scheme scheme;
39 ATH_BTCOEX_STOMP_ALL, 41 bool enabled;
40 ATH_BTCOEX_STOMP_LOW,
41 ATH_BTCOEX_STOMP_NONE
42};
43
44enum ath_bt_mode {
45 ATH_BT_COEX_MODE_LEGACY, /* legacy rx_clear mode */
46 ATH_BT_COEX_MODE_UNSLOTTED, /* untimed/unslotted mode */
47 ATH_BT_COEX_MODE_SLOTTED, /* slotted mode */
48 ATH_BT_COEX_MODE_DISALBED, /* coexistence disabled */
49};
50
51struct ath_btcoex_config {
52 u8 bt_time_extend;
53 bool bt_txstate_extend;
54 bool bt_txframe_extend;
55 enum ath_bt_mode bt_mode; /* coexistence mode */
56 bool bt_quiet_collision;
57 bool bt_rxclear_polarity; /* invert rx_clear as WLAN_ACTIVE*/
58 u8 bt_priority_time;
59 u8 bt_first_slot_time;
60 bool bt_hold_rx_clear;
61};
62
63struct ath_btcoex_info {
64 enum ath_btcoex_scheme btcoex_scheme;
65 u8 wlanactive_gpio; 42 u8 wlanactive_gpio;
66 u8 btactive_gpio; 43 u8 btactive_gpio;
67 u8 btpriority_gpio; 44 u8 btpriority_gpio;
68 u8 bt_duty_cycle; /* BT duty cycle in percentage */
69 int bt_stomp_type; /* Types of BT stomping */
70 u32 bt_coex_mode; /* Register setting for AR_BT_COEX_MODE */ 45 u32 bt_coex_mode; /* Register setting for AR_BT_COEX_MODE */
71 u32 bt_coex_weights; /* Register setting for AR_BT_COEX_WEIGHT */ 46 u32 bt_coex_weights; /* Register setting for AR_BT_COEX_WEIGHT */
72 u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */ 47 u32 bt_coex_mode2; /* Register setting for AR_BT_COEX_MODE2 */
73 u32 btcoex_no_stomp; /* in usec */
74 u32 btcoex_period; /* in usec */
75 u32 bt_priority_cnt;
76 unsigned long bt_priority_time;
77 bool hw_timer_enabled;
78 spinlock_t btcoex_lock;
79 struct timer_list period_timer; /* Timer for BT period */
80 struct ath_gen_timer *no_stomp_timer; /*Timer for no BT stomping*/
81}; 48};
82 49
83bool ath_btcoex_supported(u16 subsysid); 50bool ath9k_hw_btcoex_supported(struct ath_hw *ah);
84int ath9k_hw_btcoex_init(struct ath_hw *ah); 51void ath9k_hw_btcoex_init_2wire(struct ath_hw *ah);
52void ath9k_hw_btcoex_init_3wire(struct ath_hw *ah);
53void ath9k_hw_init_btcoex_hw(struct ath_hw *ah, int qnum);
54void ath9k_hw_btcoex_set_weight(struct ath_hw *ah,
55 u32 bt_weight,
56 u32 wlan_weight);
85void ath9k_hw_btcoex_enable(struct ath_hw *ah); 57void ath9k_hw_btcoex_enable(struct ath_hw *ah);
86void ath9k_hw_btcoex_disable(struct ath_hw *ah); 58void ath9k_hw_btcoex_disable(struct ath_hw *ah);
87void ath_btcoex_timer_resume(struct ath_softc *sc,
88 struct ath_btcoex_info *btinfo);
89void ath_btcoex_timer_pause(struct ath_softc *sc,
90 struct ath_btcoex_info *btinfo);
91
92static inline void ath_btcoex_set_weight(struct ath_btcoex_info *btcoex_info,
93 u32 bt_weight,
94 u32 wlan_weight)
95{
96 btcoex_info->bt_coex_weights = SM(bt_weight, AR_BTCOEX_BT_WGHT) |
97 SM(wlan_weight, AR_BTCOEX_WL_WGHT);
98}
99 59
100#endif 60#endif
diff --git a/drivers/net/wireless/ath/ath9k/calib.c b/drivers/net/wireless/ath/ath9k/calib.c
index 0ad6d0b76e9e..238a5744d8e9 100644
--- a/drivers/net/wireless/ath/ath9k/calib.c
+++ b/drivers/net/wireless/ath/ath9k/calib.c
@@ -14,7 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "ath9k.h" 17#include "hw.h"
18 18
19/* We can tune this as we go by monitoring really low values */ 19/* We can tune this as we go by monitoring really low values */
20#define ATH9K_NF_TOO_LOW -60 20#define ATH9K_NF_TOO_LOW -60
@@ -26,11 +26,11 @@
26static bool ath9k_hw_nf_in_range(struct ath_hw *ah, s16 nf) 26static bool ath9k_hw_nf_in_range(struct ath_hw *ah, s16 nf)
27{ 27{
28 if (nf > ATH9K_NF_TOO_LOW) { 28 if (nf > ATH9K_NF_TOO_LOW) {
29 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 29 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
30 "noise floor value detected (%d) is " 30 "noise floor value detected (%d) is "
31 "lower than what we think is a " 31 "lower than what we think is a "
32 "reasonable value (%d)\n", 32 "reasonable value (%d)\n",
33 nf, ATH9K_NF_TOO_LOW); 33 nf, ATH9K_NF_TOO_LOW);
34 return false; 34 return false;
35 } 35 }
36 return true; 36 return true;
@@ -89,6 +89,7 @@ static void ath9k_hw_update_nfcal_hist_buffer(struct ath9k_nfcal_hist *h,
89static void ath9k_hw_do_getnf(struct ath_hw *ah, 89static void ath9k_hw_do_getnf(struct ath_hw *ah,
90 int16_t nfarray[NUM_NF_READINGS]) 90 int16_t nfarray[NUM_NF_READINGS])
91{ 91{
92 struct ath_common *common = ath9k_hw_common(ah);
92 int16_t nf; 93 int16_t nf;
93 94
94 if (AR_SREV_9280_10_OR_LATER(ah)) 95 if (AR_SREV_9280_10_OR_LATER(ah))
@@ -98,8 +99,8 @@ static void ath9k_hw_do_getnf(struct ath_hw *ah,
98 99
99 if (nf & 0x100) 100 if (nf & 0x100)
100 nf = 0 - ((nf ^ 0x1ff) + 1); 101 nf = 0 - ((nf ^ 0x1ff) + 1);
101 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 102 ath_print(common, ATH_DBG_CALIBRATE,
102 "NF calibrated [ctl] [chain 0] is %d\n", nf); 103 "NF calibrated [ctl] [chain 0] is %d\n", nf);
103 nfarray[0] = nf; 104 nfarray[0] = nf;
104 105
105 if (!AR_SREV_9285(ah)) { 106 if (!AR_SREV_9285(ah)) {
@@ -112,8 +113,8 @@ static void ath9k_hw_do_getnf(struct ath_hw *ah,
112 113
113 if (nf & 0x100) 114 if (nf & 0x100)
114 nf = 0 - ((nf ^ 0x1ff) + 1); 115 nf = 0 - ((nf ^ 0x1ff) + 1);
115 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 116 ath_print(common, ATH_DBG_CALIBRATE,
116 "NF calibrated [ctl] [chain 1] is %d\n", nf); 117 "NF calibrated [ctl] [chain 1] is %d\n", nf);
117 nfarray[1] = nf; 118 nfarray[1] = nf;
118 119
119 if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) { 120 if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) {
@@ -121,8 +122,8 @@ static void ath9k_hw_do_getnf(struct ath_hw *ah,
121 AR_PHY_CH2_MINCCA_PWR); 122 AR_PHY_CH2_MINCCA_PWR);
122 if (nf & 0x100) 123 if (nf & 0x100)
123 nf = 0 - ((nf ^ 0x1ff) + 1); 124 nf = 0 - ((nf ^ 0x1ff) + 1);
124 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 125 ath_print(common, ATH_DBG_CALIBRATE,
125 "NF calibrated [ctl] [chain 2] is %d\n", nf); 126 "NF calibrated [ctl] [chain 2] is %d\n", nf);
126 nfarray[2] = nf; 127 nfarray[2] = nf;
127 } 128 }
128 } 129 }
@@ -136,8 +137,8 @@ static void ath9k_hw_do_getnf(struct ath_hw *ah,
136 137
137 if (nf & 0x100) 138 if (nf & 0x100)
138 nf = 0 - ((nf ^ 0x1ff) + 1); 139 nf = 0 - ((nf ^ 0x1ff) + 1);
139 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 140 ath_print(common, ATH_DBG_CALIBRATE,
140 "NF calibrated [ext] [chain 0] is %d\n", nf); 141 "NF calibrated [ext] [chain 0] is %d\n", nf);
141 nfarray[3] = nf; 142 nfarray[3] = nf;
142 143
143 if (!AR_SREV_9285(ah)) { 144 if (!AR_SREV_9285(ah)) {
@@ -150,8 +151,8 @@ static void ath9k_hw_do_getnf(struct ath_hw *ah,
150 151
151 if (nf & 0x100) 152 if (nf & 0x100)
152 nf = 0 - ((nf ^ 0x1ff) + 1); 153 nf = 0 - ((nf ^ 0x1ff) + 1);
153 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 154 ath_print(common, ATH_DBG_CALIBRATE,
154 "NF calibrated [ext] [chain 1] is %d\n", nf); 155 "NF calibrated [ext] [chain 1] is %d\n", nf);
155 nfarray[4] = nf; 156 nfarray[4] = nf;
156 157
157 if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) { 158 if (!AR_SREV_9280(ah) && !AR_SREV_9287(ah)) {
@@ -159,8 +160,8 @@ static void ath9k_hw_do_getnf(struct ath_hw *ah,
159 AR_PHY_CH2_EXT_MINCCA_PWR); 160 AR_PHY_CH2_EXT_MINCCA_PWR);
160 if (nf & 0x100) 161 if (nf & 0x100)
161 nf = 0 - ((nf ^ 0x1ff) + 1); 162 nf = 0 - ((nf ^ 0x1ff) + 1);
162 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 163 ath_print(common, ATH_DBG_CALIBRATE,
163 "NF calibrated [ext] [chain 2] is %d\n", nf); 164 "NF calibrated [ext] [chain 2] is %d\n", nf);
164 nfarray[5] = nf; 165 nfarray[5] = nf;
165 } 166 }
166 } 167 }
@@ -188,6 +189,8 @@ static bool getNoiseFloorThresh(struct ath_hw *ah,
188static void ath9k_hw_setup_calibration(struct ath_hw *ah, 189static void ath9k_hw_setup_calibration(struct ath_hw *ah,
189 struct ath9k_cal_list *currCal) 190 struct ath9k_cal_list *currCal)
190{ 191{
192 struct ath_common *common = ath9k_hw_common(ah);
193
191 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0), 194 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(0),
192 AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX, 195 AR_PHY_TIMING_CTRL4_IQCAL_LOG_COUNT_MAX,
193 currCal->calData->calCountMax); 196 currCal->calData->calCountMax);
@@ -195,23 +198,23 @@ static void ath9k_hw_setup_calibration(struct ath_hw *ah,
195 switch (currCal->calData->calType) { 198 switch (currCal->calData->calType) {
196 case IQ_MISMATCH_CAL: 199 case IQ_MISMATCH_CAL:
197 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ); 200 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_IQ);
198 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 201 ath_print(common, ATH_DBG_CALIBRATE,
199 "starting IQ Mismatch Calibration\n"); 202 "starting IQ Mismatch Calibration\n");
200 break; 203 break;
201 case ADC_GAIN_CAL: 204 case ADC_GAIN_CAL:
202 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN); 205 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_GAIN);
203 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 206 ath_print(common, ATH_DBG_CALIBRATE,
204 "starting ADC Gain Calibration\n"); 207 "starting ADC Gain Calibration\n");
205 break; 208 break;
206 case ADC_DC_CAL: 209 case ADC_DC_CAL:
207 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER); 210 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_PER);
208 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 211 ath_print(common, ATH_DBG_CALIBRATE,
209 "starting ADC DC Calibration\n"); 212 "starting ADC DC Calibration\n");
210 break; 213 break;
211 case ADC_DC_INIT_CAL: 214 case ADC_DC_INIT_CAL:
212 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT); 215 REG_WRITE(ah, AR_PHY_CALMODE, AR_PHY_CALMODE_ADC_DC_INIT);
213 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 216 ath_print(common, ATH_DBG_CALIBRATE,
214 "starting Init ADC DC Calibration\n"); 217 "starting Init ADC DC Calibration\n");
215 break; 218 break;
216 } 219 }
217 220
@@ -278,7 +281,7 @@ static bool ath9k_hw_per_calibration(struct ath_hw *ah,
278static bool ath9k_hw_iscal_supported(struct ath_hw *ah, 281static bool ath9k_hw_iscal_supported(struct ath_hw *ah,
279 enum ath9k_cal_types calType) 282 enum ath9k_cal_types calType)
280{ 283{
281 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf; 284 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
282 285
283 switch (calType & ah->supp_cals) { 286 switch (calType & ah->supp_cals) {
284 case IQ_MISMATCH_CAL: /* Both 2 GHz and 5 GHz support OFDM */ 287 case IQ_MISMATCH_CAL: /* Both 2 GHz and 5 GHz support OFDM */
@@ -304,11 +307,11 @@ static void ath9k_hw_iqcal_collect(struct ath_hw *ah)
304 REG_READ(ah, AR_PHY_CAL_MEAS_1(i)); 307 REG_READ(ah, AR_PHY_CAL_MEAS_1(i));
305 ah->totalIqCorrMeas[i] += 308 ah->totalIqCorrMeas[i] +=
306 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i)); 309 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_2(i));
307 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 310 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
308 "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n", 311 "%d: Chn %d pmi=0x%08x;pmq=0x%08x;iqcm=0x%08x;\n",
309 ah->cal_samples, i, ah->totalPowerMeasI[i], 312 ah->cal_samples, i, ah->totalPowerMeasI[i],
310 ah->totalPowerMeasQ[i], 313 ah->totalPowerMeasQ[i],
311 ah->totalIqCorrMeas[i]); 314 ah->totalIqCorrMeas[i]);
312 } 315 }
313} 316}
314 317
@@ -326,14 +329,14 @@ static void ath9k_hw_adc_gaincal_collect(struct ath_hw *ah)
326 ah->totalAdcQEvenPhase[i] += 329 ah->totalAdcQEvenPhase[i] +=
327 REG_READ(ah, AR_PHY_CAL_MEAS_3(i)); 330 REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
328 331
329 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 332 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
330 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; " 333 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
331 "oddq=0x%08x; evenq=0x%08x;\n", 334 "oddq=0x%08x; evenq=0x%08x;\n",
332 ah->cal_samples, i, 335 ah->cal_samples, i,
333 ah->totalAdcIOddPhase[i], 336 ah->totalAdcIOddPhase[i],
334 ah->totalAdcIEvenPhase[i], 337 ah->totalAdcIEvenPhase[i],
335 ah->totalAdcQOddPhase[i], 338 ah->totalAdcQOddPhase[i],
336 ah->totalAdcQEvenPhase[i]); 339 ah->totalAdcQEvenPhase[i]);
337 } 340 }
338} 341}
339 342
@@ -351,19 +354,20 @@ static void ath9k_hw_adc_dccal_collect(struct ath_hw *ah)
351 ah->totalAdcDcOffsetQEvenPhase[i] += 354 ah->totalAdcDcOffsetQEvenPhase[i] +=
352 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i)); 355 (int32_t) REG_READ(ah, AR_PHY_CAL_MEAS_3(i));
353 356
354 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 357 ath_print(ath9k_hw_common(ah), ATH_DBG_CALIBRATE,
355 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; " 358 "%d: Chn %d oddi=0x%08x; eveni=0x%08x; "
356 "oddq=0x%08x; evenq=0x%08x;\n", 359 "oddq=0x%08x; evenq=0x%08x;\n",
357 ah->cal_samples, i, 360 ah->cal_samples, i,
358 ah->totalAdcDcOffsetIOddPhase[i], 361 ah->totalAdcDcOffsetIOddPhase[i],
359 ah->totalAdcDcOffsetIEvenPhase[i], 362 ah->totalAdcDcOffsetIEvenPhase[i],
360 ah->totalAdcDcOffsetQOddPhase[i], 363 ah->totalAdcDcOffsetQOddPhase[i],
361 ah->totalAdcDcOffsetQEvenPhase[i]); 364 ah->totalAdcDcOffsetQEvenPhase[i]);
362 } 365 }
363} 366}
364 367
365static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains) 368static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
366{ 369{
370 struct ath_common *common = ath9k_hw_common(ah);
367 u32 powerMeasQ, powerMeasI, iqCorrMeas; 371 u32 powerMeasQ, powerMeasI, iqCorrMeas;
368 u32 qCoffDenom, iCoffDenom; 372 u32 qCoffDenom, iCoffDenom;
369 int32_t qCoff, iCoff; 373 int32_t qCoff, iCoff;
@@ -374,13 +378,13 @@ static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
374 powerMeasQ = ah->totalPowerMeasQ[i]; 378 powerMeasQ = ah->totalPowerMeasQ[i];
375 iqCorrMeas = ah->totalIqCorrMeas[i]; 379 iqCorrMeas = ah->totalIqCorrMeas[i];
376 380
377 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 381 ath_print(common, ATH_DBG_CALIBRATE,
378 "Starting IQ Cal and Correction for Chain %d\n", 382 "Starting IQ Cal and Correction for Chain %d\n",
379 i); 383 i);
380 384
381 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 385 ath_print(common, ATH_DBG_CALIBRATE,
382 "Orignal: Chn %diq_corr_meas = 0x%08x\n", 386 "Orignal: Chn %diq_corr_meas = 0x%08x\n",
383 i, ah->totalIqCorrMeas[i]); 387 i, ah->totalIqCorrMeas[i]);
384 388
385 iqCorrNeg = 0; 389 iqCorrNeg = 0;
386 390
@@ -389,27 +393,28 @@ static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
389 iqCorrNeg = 1; 393 iqCorrNeg = 1;
390 } 394 }
391 395
392 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 396 ath_print(common, ATH_DBG_CALIBRATE,
393 "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI); 397 "Chn %d pwr_meas_i = 0x%08x\n", i, powerMeasI);
394 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 398 ath_print(common, ATH_DBG_CALIBRATE,
395 "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ); 399 "Chn %d pwr_meas_q = 0x%08x\n", i, powerMeasQ);
396 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n", 400 ath_print(common, ATH_DBG_CALIBRATE, "iqCorrNeg is 0x%08x\n",
397 iqCorrNeg); 401 iqCorrNeg);
398 402
399 iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128; 403 iCoffDenom = (powerMeasI / 2 + powerMeasQ / 2) / 128;
400 qCoffDenom = powerMeasQ / 64; 404 qCoffDenom = powerMeasQ / 64;
401 405
402 if (powerMeasQ != 0) { 406 if ((powerMeasQ != 0) && (iCoffDenom != 0) &&
407 (qCoffDenom != 0)) {
403 iCoff = iqCorrMeas / iCoffDenom; 408 iCoff = iqCorrMeas / iCoffDenom;
404 qCoff = powerMeasI / qCoffDenom - 64; 409 qCoff = powerMeasI / qCoffDenom - 64;
405 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 410 ath_print(common, ATH_DBG_CALIBRATE,
406 "Chn %d iCoff = 0x%08x\n", i, iCoff); 411 "Chn %d iCoff = 0x%08x\n", i, iCoff);
407 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 412 ath_print(common, ATH_DBG_CALIBRATE,
408 "Chn %d qCoff = 0x%08x\n", i, qCoff); 413 "Chn %d qCoff = 0x%08x\n", i, qCoff);
409 414
410 iCoff = iCoff & 0x3f; 415 iCoff = iCoff & 0x3f;
411 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 416 ath_print(common, ATH_DBG_CALIBRATE,
412 "New: Chn %d iCoff = 0x%08x\n", i, iCoff); 417 "New: Chn %d iCoff = 0x%08x\n", i, iCoff);
413 if (iqCorrNeg == 0x0) 418 if (iqCorrNeg == 0x0)
414 iCoff = 0x40 - iCoff; 419 iCoff = 0x40 - iCoff;
415 420
@@ -418,9 +423,9 @@ static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
418 else if (qCoff <= -16) 423 else if (qCoff <= -16)
419 qCoff = 16; 424 qCoff = 16;
420 425
421 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 426 ath_print(common, ATH_DBG_CALIBRATE,
422 "Chn %d : iCoff = 0x%x qCoff = 0x%x\n", 427 "Chn %d : iCoff = 0x%x qCoff = 0x%x\n",
423 i, iCoff, qCoff); 428 i, iCoff, qCoff);
424 429
425 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i), 430 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
426 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF, 431 AR_PHY_TIMING_CTRL4_IQCORR_Q_I_COFF,
@@ -428,9 +433,9 @@ static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
428 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i), 433 REG_RMW_FIELD(ah, AR_PHY_TIMING_CTRL4(i),
429 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF, 434 AR_PHY_TIMING_CTRL4_IQCORR_Q_Q_COFF,
430 qCoff); 435 qCoff);
431 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 436 ath_print(common, ATH_DBG_CALIBRATE,
432 "IQ Cal and Correction done for Chain %d\n", 437 "IQ Cal and Correction done for Chain %d\n",
433 i); 438 i);
434 } 439 }
435 } 440 }
436 441
@@ -440,6 +445,7 @@ static void ath9k_hw_iqcalibrate(struct ath_hw *ah, u8 numChains)
440 445
441static void ath9k_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains) 446static void ath9k_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
442{ 447{
448 struct ath_common *common = ath9k_hw_common(ah);
443 u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset, qEvenMeasOffset; 449 u32 iOddMeasOffset, iEvenMeasOffset, qOddMeasOffset, qEvenMeasOffset;
444 u32 qGainMismatch, iGainMismatch, val, i; 450 u32 qGainMismatch, iGainMismatch, val, i;
445 451
@@ -449,21 +455,21 @@ static void ath9k_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
449 qOddMeasOffset = ah->totalAdcQOddPhase[i]; 455 qOddMeasOffset = ah->totalAdcQOddPhase[i];
450 qEvenMeasOffset = ah->totalAdcQEvenPhase[i]; 456 qEvenMeasOffset = ah->totalAdcQEvenPhase[i];
451 457
452 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 458 ath_print(common, ATH_DBG_CALIBRATE,
453 "Starting ADC Gain Cal for Chain %d\n", i); 459 "Starting ADC Gain Cal for Chain %d\n", i);
454 460
455 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 461 ath_print(common, ATH_DBG_CALIBRATE,
456 "Chn %d pwr_meas_odd_i = 0x%08x\n", i, 462 "Chn %d pwr_meas_odd_i = 0x%08x\n", i,
457 iOddMeasOffset); 463 iOddMeasOffset);
458 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 464 ath_print(common, ATH_DBG_CALIBRATE,
459 "Chn %d pwr_meas_even_i = 0x%08x\n", i, 465 "Chn %d pwr_meas_even_i = 0x%08x\n", i,
460 iEvenMeasOffset); 466 iEvenMeasOffset);
461 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 467 ath_print(common, ATH_DBG_CALIBRATE,
462 "Chn %d pwr_meas_odd_q = 0x%08x\n", i, 468 "Chn %d pwr_meas_odd_q = 0x%08x\n", i,
463 qOddMeasOffset); 469 qOddMeasOffset);
464 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 470 ath_print(common, ATH_DBG_CALIBRATE,
465 "Chn %d pwr_meas_even_q = 0x%08x\n", i, 471 "Chn %d pwr_meas_even_q = 0x%08x\n", i,
466 qEvenMeasOffset); 472 qEvenMeasOffset);
467 473
468 if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) { 474 if (iOddMeasOffset != 0 && qEvenMeasOffset != 0) {
469 iGainMismatch = 475 iGainMismatch =
@@ -473,20 +479,20 @@ static void ath9k_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
473 ((qOddMeasOffset * 32) / 479 ((qOddMeasOffset * 32) /
474 qEvenMeasOffset) & 0x3f; 480 qEvenMeasOffset) & 0x3f;
475 481
476 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 482 ath_print(common, ATH_DBG_CALIBRATE,
477 "Chn %d gain_mismatch_i = 0x%08x\n", i, 483 "Chn %d gain_mismatch_i = 0x%08x\n", i,
478 iGainMismatch); 484 iGainMismatch);
479 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 485 ath_print(common, ATH_DBG_CALIBRATE,
480 "Chn %d gain_mismatch_q = 0x%08x\n", i, 486 "Chn %d gain_mismatch_q = 0x%08x\n", i,
481 qGainMismatch); 487 qGainMismatch);
482 488
483 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i)); 489 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
484 val &= 0xfffff000; 490 val &= 0xfffff000;
485 val |= (qGainMismatch) | (iGainMismatch << 6); 491 val |= (qGainMismatch) | (iGainMismatch << 6);
486 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val); 492 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
487 493
488 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 494 ath_print(common, ATH_DBG_CALIBRATE,
489 "ADC Gain Cal done for Chain %d\n", i); 495 "ADC Gain Cal done for Chain %d\n", i);
490 } 496 }
491 } 497 }
492 498
@@ -497,6 +503,7 @@ static void ath9k_hw_adc_gaincal_calibrate(struct ath_hw *ah, u8 numChains)
497 503
498static void ath9k_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains) 504static void ath9k_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
499{ 505{
506 struct ath_common *common = ath9k_hw_common(ah);
500 u32 iOddMeasOffset, iEvenMeasOffset, val, i; 507 u32 iOddMeasOffset, iEvenMeasOffset, val, i;
501 int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch; 508 int32_t qOddMeasOffset, qEvenMeasOffset, qDcMismatch, iDcMismatch;
502 const struct ath9k_percal_data *calData = 509 const struct ath9k_percal_data *calData =
@@ -510,41 +517,41 @@ static void ath9k_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
510 qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i]; 517 qOddMeasOffset = ah->totalAdcDcOffsetQOddPhase[i];
511 qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i]; 518 qEvenMeasOffset = ah->totalAdcDcOffsetQEvenPhase[i];
512 519
513 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 520 ath_print(common, ATH_DBG_CALIBRATE,
514 "Starting ADC DC Offset Cal for Chain %d\n", i); 521 "Starting ADC DC Offset Cal for Chain %d\n", i);
515 522
516 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 523 ath_print(common, ATH_DBG_CALIBRATE,
517 "Chn %d pwr_meas_odd_i = %d\n", i, 524 "Chn %d pwr_meas_odd_i = %d\n", i,
518 iOddMeasOffset); 525 iOddMeasOffset);
519 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 526 ath_print(common, ATH_DBG_CALIBRATE,
520 "Chn %d pwr_meas_even_i = %d\n", i, 527 "Chn %d pwr_meas_even_i = %d\n", i,
521 iEvenMeasOffset); 528 iEvenMeasOffset);
522 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 529 ath_print(common, ATH_DBG_CALIBRATE,
523 "Chn %d pwr_meas_odd_q = %d\n", i, 530 "Chn %d pwr_meas_odd_q = %d\n", i,
524 qOddMeasOffset); 531 qOddMeasOffset);
525 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 532 ath_print(common, ATH_DBG_CALIBRATE,
526 "Chn %d pwr_meas_even_q = %d\n", i, 533 "Chn %d pwr_meas_even_q = %d\n", i,
527 qEvenMeasOffset); 534 qEvenMeasOffset);
528 535
529 iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) / 536 iDcMismatch = (((iEvenMeasOffset - iOddMeasOffset) * 2) /
530 numSamples) & 0x1ff; 537 numSamples) & 0x1ff;
531 qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) / 538 qDcMismatch = (((qOddMeasOffset - qEvenMeasOffset) * 2) /
532 numSamples) & 0x1ff; 539 numSamples) & 0x1ff;
533 540
534 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 541 ath_print(common, ATH_DBG_CALIBRATE,
535 "Chn %d dc_offset_mismatch_i = 0x%08x\n", i, 542 "Chn %d dc_offset_mismatch_i = 0x%08x\n", i,
536 iDcMismatch); 543 iDcMismatch);
537 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 544 ath_print(common, ATH_DBG_CALIBRATE,
538 "Chn %d dc_offset_mismatch_q = 0x%08x\n", i, 545 "Chn %d dc_offset_mismatch_q = 0x%08x\n", i,
539 qDcMismatch); 546 qDcMismatch);
540 547
541 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i)); 548 val = REG_READ(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i));
542 val &= 0xc0000fff; 549 val &= 0xc0000fff;
543 val |= (qDcMismatch << 12) | (iDcMismatch << 21); 550 val |= (qDcMismatch << 12) | (iDcMismatch << 21);
544 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val); 551 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(i), val);
545 552
546 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 553 ath_print(common, ATH_DBG_CALIBRATE,
547 "ADC DC Offset Cal done for Chain %d\n", i); 554 "ADC DC Offset Cal done for Chain %d\n", i);
548 } 555 }
549 556
550 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0), 557 REG_WRITE(ah, AR_PHY_NEW_ADC_DC_GAIN_CORR(0),
@@ -555,7 +562,8 @@ static void ath9k_hw_adc_dccal_calibrate(struct ath_hw *ah, u8 numChains)
555/* This is done for the currently configured channel */ 562/* This is done for the currently configured channel */
556bool ath9k_hw_reset_calvalid(struct ath_hw *ah) 563bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
557{ 564{
558 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf; 565 struct ath_common *common = ath9k_hw_common(ah);
566 struct ieee80211_conf *conf = &common->hw->conf;
559 struct ath9k_cal_list *currCal = ah->cal_list_curr; 567 struct ath9k_cal_list *currCal = ah->cal_list_curr;
560 568
561 if (!ah->curchan) 569 if (!ah->curchan)
@@ -568,24 +576,25 @@ bool ath9k_hw_reset_calvalid(struct ath_hw *ah)
568 return true; 576 return true;
569 577
570 if (currCal->calState != CAL_DONE) { 578 if (currCal->calState != CAL_DONE) {
571 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 579 ath_print(common, ATH_DBG_CALIBRATE,
572 "Calibration state incorrect, %d\n", 580 "Calibration state incorrect, %d\n",
573 currCal->calState); 581 currCal->calState);
574 return true; 582 return true;
575 } 583 }
576 584
577 if (!ath9k_hw_iscal_supported(ah, currCal->calData->calType)) 585 if (!ath9k_hw_iscal_supported(ah, currCal->calData->calType))
578 return true; 586 return true;
579 587
580 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 588 ath_print(common, ATH_DBG_CALIBRATE,
581 "Resetting Cal %d state for channel %u\n", 589 "Resetting Cal %d state for channel %u\n",
582 currCal->calData->calType, conf->channel->center_freq); 590 currCal->calData->calType, conf->channel->center_freq);
583 591
584 ah->curchan->CalValid &= ~currCal->calData->calType; 592 ah->curchan->CalValid &= ~currCal->calData->calType;
585 currCal->calState = CAL_WAITING; 593 currCal->calState = CAL_WAITING;
586 594
587 return false; 595 return false;
588} 596}
597EXPORT_SYMBOL(ath9k_hw_reset_calvalid);
589 598
590void ath9k_hw_start_nfcal(struct ath_hw *ah) 599void ath9k_hw_start_nfcal(struct ath_hw *ah)
591{ 600{
@@ -645,11 +654,11 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
645 AR_PHY_AGC_CONTROL_NO_UPDATE_NF); 654 AR_PHY_AGC_CONTROL_NO_UPDATE_NF);
646 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF); 655 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_NF);
647 656
648 for (j = 0; j < 1000; j++) { 657 for (j = 0; j < 5; j++) {
649 if ((REG_READ(ah, AR_PHY_AGC_CONTROL) & 658 if ((REG_READ(ah, AR_PHY_AGC_CONTROL) &
650 AR_PHY_AGC_CONTROL_NF) == 0) 659 AR_PHY_AGC_CONTROL_NF) == 0)
651 break; 660 break;
652 udelay(10); 661 udelay(50);
653 } 662 }
654 663
655 for (i = 0; i < NUM_NF_READINGS; i++) { 664 for (i = 0; i < NUM_NF_READINGS; i++) {
@@ -665,6 +674,7 @@ void ath9k_hw_loadnf(struct ath_hw *ah, struct ath9k_channel *chan)
665int16_t ath9k_hw_getnf(struct ath_hw *ah, 674int16_t ath9k_hw_getnf(struct ath_hw *ah,
666 struct ath9k_channel *chan) 675 struct ath9k_channel *chan)
667{ 676{
677 struct ath_common *common = ath9k_hw_common(ah);
668 int16_t nf, nfThresh; 678 int16_t nf, nfThresh;
669 int16_t nfarray[NUM_NF_READINGS] = { 0 }; 679 int16_t nfarray[NUM_NF_READINGS] = { 0 };
670 struct ath9k_nfcal_hist *h; 680 struct ath9k_nfcal_hist *h;
@@ -672,8 +682,8 @@ int16_t ath9k_hw_getnf(struct ath_hw *ah,
672 682
673 chan->channelFlags &= (~CHANNEL_CW_INT); 683 chan->channelFlags &= (~CHANNEL_CW_INT);
674 if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) { 684 if (REG_READ(ah, AR_PHY_AGC_CONTROL) & AR_PHY_AGC_CONTROL_NF) {
675 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 685 ath_print(common, ATH_DBG_CALIBRATE,
676 "NF did not complete in calibration window\n"); 686 "NF did not complete in calibration window\n");
677 nf = 0; 687 nf = 0;
678 chan->rawNoiseFloor = nf; 688 chan->rawNoiseFloor = nf;
679 return chan->rawNoiseFloor; 689 return chan->rawNoiseFloor;
@@ -682,10 +692,10 @@ int16_t ath9k_hw_getnf(struct ath_hw *ah,
682 nf = nfarray[0]; 692 nf = nfarray[0];
683 if (getNoiseFloorThresh(ah, c->band, &nfThresh) 693 if (getNoiseFloorThresh(ah, c->band, &nfThresh)
684 && nf > nfThresh) { 694 && nf > nfThresh) {
685 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 695 ath_print(common, ATH_DBG_CALIBRATE,
686 "noise floor failed detected; " 696 "noise floor failed detected; "
687 "detected %d, threshold %d\n", 697 "detected %d, threshold %d\n",
688 nf, nfThresh); 698 nf, nfThresh);
689 chan->channelFlags |= CHANNEL_CW_INT; 699 chan->channelFlags |= CHANNEL_CW_INT;
690 } 700 }
691 } 701 }
@@ -737,51 +747,73 @@ s16 ath9k_hw_getchan_noise(struct ath_hw *ah, struct ath9k_channel *chan)
737 747
738 return nf; 748 return nf;
739} 749}
750EXPORT_SYMBOL(ath9k_hw_getchan_noise);
740 751
741static void ath9k_olc_temp_compensation(struct ath_hw *ah) 752static void ath9k_olc_temp_compensation_9287(struct ath_hw *ah)
742{ 753{
743 u32 rddata, i; 754 u32 rddata;
744 int delta, currPDADC, regval, slope; 755 int32_t delta, currPDADC, slope;
745 756
746 rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4); 757 rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
747 currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT); 758 currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
748 759
760 if (ah->initPDADC == 0 || currPDADC == 0) {
761 /*
762 * Zero value indicates that no frames have been transmitted yet,
763 * can't do temperature compensation until frames are transmitted.
764 */
765 return;
766 } else {
767 slope = ah->eep_ops->get_eeprom(ah, EEP_TEMPSENSE_SLOPE);
768
769 if (slope == 0) { /* to avoid divide by zero case */
770 delta = 0;
771 } else {
772 delta = ((currPDADC - ah->initPDADC)*4) / slope;
773 }
774 REG_RMW_FIELD(ah, AR_PHY_CH0_TX_PWRCTRL11,
775 AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
776 REG_RMW_FIELD(ah, AR_PHY_CH1_TX_PWRCTRL11,
777 AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta);
778 }
779}
780
781static void ath9k_olc_temp_compensation(struct ath_hw *ah)
782{
783 u32 rddata, i;
784 int delta, currPDADC, regval;
749 785
750 if (OLC_FOR_AR9287_10_LATER) { 786 if (OLC_FOR_AR9287_10_LATER) {
787 ath9k_olc_temp_compensation_9287(ah);
788 } else {
789 rddata = REG_READ(ah, AR_PHY_TX_PWRCTRL4);
790 currPDADC = MS(rddata, AR_PHY_TX_PWRCTRL_PD_AVG_OUT);
791
751 if (ah->initPDADC == 0 || currPDADC == 0) { 792 if (ah->initPDADC == 0 || currPDADC == 0) {
752 return; 793 return;
753 } else { 794 } else {
754 slope = ah->eep_ops->get_eeprom(ah, EEP_TEMPSENSE_SLOPE); 795 if (ah->eep_ops->get_eeprom(ah, EEP_DAC_HPWR_5G))
755 if (slope == 0) 796 delta = (currPDADC - ah->initPDADC + 4) / 8;
756 delta = 0;
757 else 797 else
758 delta = ((currPDADC - ah->initPDADC)*4) / slope; 798 delta = (currPDADC - ah->initPDADC + 5) / 10;
759 REG_RMW_FIELD(ah, AR_PHY_CH0_TX_PWRCTRL11, 799
760 AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta); 800 if (delta != ah->PDADCdelta) {
761 REG_RMW_FIELD(ah, AR_PHY_CH1_TX_PWRCTRL11, 801 ah->PDADCdelta = delta;
762 AR_PHY_TX_PWRCTRL_OLPC_TEMP_COMP, delta); 802 for (i = 1; i < AR9280_TX_GAIN_TABLE_SIZE; i++) {
763 } 803 regval = ah->originalGain[i] - delta;
764 } else { 804 if (regval < 0)
765 if (ah->eep_ops->get_eeprom(ah, EEP_DAC_HPWR_5G)) 805 regval = 0;
766 delta = (currPDADC - ah->initPDADC + 4) / 8; 806
767 else 807 REG_RMW_FIELD(ah,
768 delta = (currPDADC - ah->initPDADC + 5) / 10; 808 AR_PHY_TX_GAIN_TBL1 + i * 4,
769 809 AR_PHY_TX_GAIN, regval);
770 if (delta != ah->PDADCdelta) { 810 }
771 ah->PDADCdelta = delta;
772 for (i = 1; i < AR9280_TX_GAIN_TABLE_SIZE; i++) {
773 regval = ah->originalGain[i] - delta;
774 if (regval < 0)
775 regval = 0;
776
777 REG_RMW_FIELD(ah, AR_PHY_TX_GAIN_TBL1 + i * 4,
778 AR_PHY_TX_GAIN, regval);
779 } 811 }
780 } 812 }
781 } 813 }
782} 814}
783 815
784static void ath9k_hw_9271_pa_cal(struct ath_hw *ah) 816static void ath9k_hw_9271_pa_cal(struct ath_hw *ah, bool is_reset)
785{ 817{
786 u32 regVal; 818 u32 regVal;
787 unsigned int i; 819 unsigned int i;
@@ -845,7 +877,7 @@ static void ath9k_hw_9271_pa_cal(struct ath_hw *ah)
845 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0); 877 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0);
846 878
847 /* find off_6_1; */ 879 /* find off_6_1; */
848 for (i = 6; i >= 0; i--) { 880 for (i = 6; i > 0; i--) {
849 regVal = REG_READ(ah, 0x7834); 881 regVal = REG_READ(ah, 0x7834);
850 regVal |= (1 << (20 + i)); 882 regVal |= (1 << (20 + i));
851 REG_WRITE(ah, 0x7834, regVal); 883 REG_WRITE(ah, 0x7834, regVal);
@@ -857,10 +889,19 @@ static void ath9k_hw_9271_pa_cal(struct ath_hw *ah)
857 REG_WRITE(ah, 0x7834, regVal); 889 REG_WRITE(ah, 0x7834, regVal);
858 } 890 }
859 891
860 /* Empirical offset correction */ 892 regVal = (regVal >>20) & 0x7f;
861#if 0 893
862 REG_RMW_FIELD(ah, AR9285_AN_RF2G6, AR9271_AN_RF2G6_OFFS, 0x20); 894 /* Update PA cal info */
863#endif 895 if ((!is_reset) && (ah->pacal_info.prev_offset == regVal)) {
896 if (ah->pacal_info.max_skipcount < MAX_PACAL_SKIPCOUNT)
897 ah->pacal_info.max_skipcount =
898 2 * ah->pacal_info.max_skipcount;
899 ah->pacal_info.skipcount = ah->pacal_info.max_skipcount;
900 } else {
901 ah->pacal_info.max_skipcount = 1;
902 ah->pacal_info.skipcount = 0;
903 ah->pacal_info.prev_offset = regVal;
904 }
864 905
865 regVal = REG_READ(ah, 0x7834); 906 regVal = REG_READ(ah, 0x7834);
866 regVal |= 0x1; 907 regVal |= 0x1;
@@ -875,7 +916,7 @@ static void ath9k_hw_9271_pa_cal(struct ath_hw *ah)
875 916
876static inline void ath9k_hw_9285_pa_cal(struct ath_hw *ah, bool is_reset) 917static inline void ath9k_hw_9285_pa_cal(struct ath_hw *ah, bool is_reset)
877{ 918{
878 919 struct ath_common *common = ath9k_hw_common(ah);
879 u32 regVal; 920 u32 regVal;
880 int i, offset, offs_6_1, offs_0; 921 int i, offset, offs_6_1, offs_0;
881 u32 ccomp_org, reg_field; 922 u32 ccomp_org, reg_field;
@@ -889,7 +930,7 @@ static inline void ath9k_hw_9285_pa_cal(struct ath_hw *ah, bool is_reset)
889 { 0x7838, 0 }, 930 { 0x7838, 0 },
890 }; 931 };
891 932
892 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, "Running PA Calibration\n"); 933 ath_print(common, ATH_DBG_CALIBRATE, "Running PA Calibration\n");
893 934
894 /* PA CAL is not needed for high power solution */ 935 /* PA CAL is not needed for high power solution */
895 if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) == 936 if (ah->eep_ops->get_eeprom(ah, EEP_TXGAIN_TYPE) ==
@@ -1011,7 +1052,7 @@ bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
1011 if (longcal) { 1052 if (longcal) {
1012 /* Do periodic PAOffset Cal */ 1053 /* Do periodic PAOffset Cal */
1013 if (AR_SREV_9271(ah)) 1054 if (AR_SREV_9271(ah))
1014 ath9k_hw_9271_pa_cal(ah); 1055 ath9k_hw_9271_pa_cal(ah, false);
1015 else if (AR_SREV_9285_11_OR_LATER(ah)) { 1056 else if (AR_SREV_9285_11_OR_LATER(ah)) {
1016 if (!ah->pacal_info.skipcount) 1057 if (!ah->pacal_info.skipcount)
1017 ath9k_hw_9285_pa_cal(ah, false); 1058 ath9k_hw_9285_pa_cal(ah, false);
@@ -1036,9 +1077,13 @@ bool ath9k_hw_calibrate(struct ath_hw *ah, struct ath9k_channel *chan,
1036 1077
1037 return iscaldone; 1078 return iscaldone;
1038} 1079}
1080EXPORT_SYMBOL(ath9k_hw_calibrate);
1039 1081
1082/* Carrier leakage Calibration fix */
1040static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan) 1083static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
1041{ 1084{
1085 struct ath_common *common = ath9k_hw_common(ah);
1086
1042 REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE); 1087 REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_CL_CAL_ENABLE);
1043 if (IS_CHAN_HT20(chan)) { 1088 if (IS_CHAN_HT20(chan)) {
1044 REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE); 1089 REG_SET_BIT(ah, AR_PHY_CL_CAL_CTL, AR_PHY_PARALLEL_CAL_ENABLE);
@@ -1049,9 +1094,9 @@ static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
1049 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL); 1094 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
1050 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, 1095 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL,
1051 AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) { 1096 AR_PHY_AGC_CONTROL_CAL, 0, AH_WAIT_TIMEOUT)) {
1052 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, "offset " 1097 ath_print(common, ATH_DBG_CALIBRATE, "offset "
1053 "calibration failed to complete in " 1098 "calibration failed to complete in "
1054 "1ms; noisy ??\n"); 1099 "1ms; noisy ??\n");
1055 return false; 1100 return false;
1056 } 1101 }
1057 REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN); 1102 REG_CLR_BIT(ah, AR_PHY_TURBO, AR_PHY_FC_DYN2040_EN);
@@ -1064,8 +1109,8 @@ static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
1064 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL); 1109 REG_SET_BIT(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL);
1065 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL, 1110 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
1066 0, AH_WAIT_TIMEOUT)) { 1111 0, AH_WAIT_TIMEOUT)) {
1067 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, "offset calibration " 1112 ath_print(common, ATH_DBG_CALIBRATE, "offset calibration "
1068 "failed to complete in 1ms; noisy ??\n"); 1113 "failed to complete in 1ms; noisy ??\n");
1069 return false; 1114 return false;
1070 } 1115 }
1071 1116
@@ -1078,7 +1123,9 @@ static bool ar9285_clc(struct ath_hw *ah, struct ath9k_channel *chan)
1078 1123
1079bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan) 1124bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
1080{ 1125{
1081 if (AR_SREV_9285_12_OR_LATER(ah)) { 1126 struct ath_common *common = ath9k_hw_common(ah);
1127
1128 if (AR_SREV_9271(ah) || AR_SREV_9285_12_OR_LATER(ah)) {
1082 if (!ar9285_clc(ah, chan)) 1129 if (!ar9285_clc(ah, chan))
1083 return false; 1130 return false;
1084 } else { 1131 } else {
@@ -1098,9 +1145,9 @@ bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
1098 /* Poll for offset calibration complete */ 1145 /* Poll for offset calibration complete */
1099 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL, 1146 if (!ath9k_hw_wait(ah, AR_PHY_AGC_CONTROL, AR_PHY_AGC_CONTROL_CAL,
1100 0, AH_WAIT_TIMEOUT)) { 1147 0, AH_WAIT_TIMEOUT)) {
1101 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 1148 ath_print(common, ATH_DBG_CALIBRATE,
1102 "offset calibration failed to complete in 1ms; " 1149 "offset calibration failed to "
1103 "noisy environment?\n"); 1150 "complete in 1ms; noisy environment?\n");
1104 return false; 1151 return false;
1105 } 1152 }
1106 1153
@@ -1114,7 +1161,9 @@ bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
1114 } 1161 }
1115 1162
1116 /* Do PA Calibration */ 1163 /* Do PA Calibration */
1117 if (AR_SREV_9285_11_OR_LATER(ah)) 1164 if (AR_SREV_9271(ah))
1165 ath9k_hw_9271_pa_cal(ah, true);
1166 else if (AR_SREV_9285_11_OR_LATER(ah))
1118 ath9k_hw_9285_pa_cal(ah, true); 1167 ath9k_hw_9285_pa_cal(ah, true);
1119 1168
1120 /* Do NF Calibration after DC offset and other calibrations */ 1169 /* Do NF Calibration after DC offset and other calibrations */
@@ -1128,20 +1177,20 @@ bool ath9k_hw_init_cal(struct ath_hw *ah, struct ath9k_channel *chan)
1128 if (ath9k_hw_iscal_supported(ah, ADC_GAIN_CAL)) { 1177 if (ath9k_hw_iscal_supported(ah, ADC_GAIN_CAL)) {
1129 INIT_CAL(&ah->adcgain_caldata); 1178 INIT_CAL(&ah->adcgain_caldata);
1130 INSERT_CAL(ah, &ah->adcgain_caldata); 1179 INSERT_CAL(ah, &ah->adcgain_caldata);
1131 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 1180 ath_print(common, ATH_DBG_CALIBRATE,
1132 "enabling ADC Gain Calibration.\n"); 1181 "enabling ADC Gain Calibration.\n");
1133 } 1182 }
1134 if (ath9k_hw_iscal_supported(ah, ADC_DC_CAL)) { 1183 if (ath9k_hw_iscal_supported(ah, ADC_DC_CAL)) {
1135 INIT_CAL(&ah->adcdc_caldata); 1184 INIT_CAL(&ah->adcdc_caldata);
1136 INSERT_CAL(ah, &ah->adcdc_caldata); 1185 INSERT_CAL(ah, &ah->adcdc_caldata);
1137 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 1186 ath_print(common, ATH_DBG_CALIBRATE,
1138 "enabling ADC DC Calibration.\n"); 1187 "enabling ADC DC Calibration.\n");
1139 } 1188 }
1140 if (ath9k_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) { 1189 if (ath9k_hw_iscal_supported(ah, IQ_MISMATCH_CAL)) {
1141 INIT_CAL(&ah->iq_caldata); 1190 INIT_CAL(&ah->iq_caldata);
1142 INSERT_CAL(ah, &ah->iq_caldata); 1191 INSERT_CAL(ah, &ah->iq_caldata);
1143 DPRINTF(ah->ah_sc, ATH_DBG_CALIBRATE, 1192 ath_print(common, ATH_DBG_CALIBRATE,
1144 "enabling IQ Calibration.\n"); 1193 "enabling IQ Calibration.\n");
1145 } 1194 }
1146 1195
1147 ah->cal_list_curr = ah->cal_list; 1196 ah->cal_list_curr = ah->cal_list;
diff --git a/drivers/net/wireless/ath/ath9k/calib.h b/drivers/net/wireless/ath/ath9k/calib.h
index 9028ab193e42..b2c873e97485 100644
--- a/drivers/net/wireless/ath/ath9k/calib.h
+++ b/drivers/net/wireless/ath/ath9k/calib.h
@@ -17,6 +17,8 @@
17#ifndef CALIB_H 17#ifndef CALIB_H
18#define CALIB_H 18#define CALIB_H
19 19
20#include "hw.h"
21
20extern const struct ath9k_percal_data iq_cal_multi_sample; 22extern const struct ath9k_percal_data iq_cal_multi_sample;
21extern const struct ath9k_percal_data iq_cal_single_sample; 23extern const struct ath9k_percal_data iq_cal_single_sample;
22extern const struct ath9k_percal_data adc_gain_cal_multi_sample; 24extern const struct ath9k_percal_data adc_gain_cal_multi_sample;
diff --git a/drivers/net/wireless/ath/ath9k/debug.c b/drivers/net/wireless/ath/ath9k/debug.c
index 2be4c2252047..84f44269de47 100644
--- a/drivers/net/wireless/ath/ath9k/debug.c
+++ b/drivers/net/wireless/ath/ath9k/debug.c
@@ -18,26 +18,13 @@
18 18
19#include "ath9k.h" 19#include "ath9k.h"
20 20
21static unsigned int ath9k_debug = DBG_DEFAULT; 21#define REG_WRITE_D(_ah, _reg, _val) \
22module_param_named(debug, ath9k_debug, uint, 0); 22 ath9k_hw_common(_ah)->ops->write((_ah), (_val), (_reg))
23#define REG_READ_D(_ah, _reg) \
24 ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
23 25
24static struct dentry *ath9k_debugfs_root; 26static struct dentry *ath9k_debugfs_root;
25 27
26void DPRINTF(struct ath_softc *sc, int dbg_mask, const char *fmt, ...)
27{
28 if (!sc)
29 return;
30
31 if (sc->debug.debug_mask & dbg_mask) {
32 va_list args;
33
34 va_start(args, fmt);
35 printk(KERN_DEBUG "ath9k: ");
36 vprintk(fmt, args);
37 va_end(args);
38 }
39}
40
41static int ath9k_debugfs_open(struct inode *inode, struct file *file) 28static int ath9k_debugfs_open(struct inode *inode, struct file *file)
42{ 29{
43 file->private_data = inode->i_private; 30 file->private_data = inode->i_private;
@@ -48,10 +35,11 @@ static ssize_t read_file_debug(struct file *file, char __user *user_buf,
48 size_t count, loff_t *ppos) 35 size_t count, loff_t *ppos)
49{ 36{
50 struct ath_softc *sc = file->private_data; 37 struct ath_softc *sc = file->private_data;
38 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
51 char buf[32]; 39 char buf[32];
52 unsigned int len; 40 unsigned int len;
53 41
54 len = snprintf(buf, sizeof(buf), "0x%08x\n", sc->debug.debug_mask); 42 len = snprintf(buf, sizeof(buf), "0x%08x\n", common->debug_mask);
55 return simple_read_from_buffer(user_buf, count, ppos, buf, len); 43 return simple_read_from_buffer(user_buf, count, ppos, buf, len);
56} 44}
57 45
@@ -59,6 +47,7 @@ static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
59 size_t count, loff_t *ppos) 47 size_t count, loff_t *ppos)
60{ 48{
61 struct ath_softc *sc = file->private_data; 49 struct ath_softc *sc = file->private_data;
50 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
62 unsigned long mask; 51 unsigned long mask;
63 char buf[32]; 52 char buf[32];
64 ssize_t len; 53 ssize_t len;
@@ -71,7 +60,7 @@ static ssize_t write_file_debug(struct file *file, const char __user *user_buf,
71 if (strict_strtoul(buf, 0, &mask)) 60 if (strict_strtoul(buf, 0, &mask))
72 return -EINVAL; 61 return -EINVAL;
73 62
74 sc->debug.debug_mask = mask; 63 common->debug_mask = mask;
75 return count; 64 return count;
76} 65}
77 66
@@ -95,7 +84,7 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
95 84
96 ath9k_ps_wakeup(sc); 85 ath9k_ps_wakeup(sc);
97 86
98 REG_WRITE(ah, AR_MACMISC, 87 REG_WRITE_D(ah, AR_MACMISC,
99 ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) | 88 ((AR_MACMISC_DMA_OBS_LINE_8 << AR_MACMISC_DMA_OBS_S) |
100 (AR_MACMISC_MISC_OBS_BUS_1 << 89 (AR_MACMISC_MISC_OBS_BUS_1 <<
101 AR_MACMISC_MISC_OBS_BUS_MSB_S))); 90 AR_MACMISC_MISC_OBS_BUS_MSB_S)));
@@ -107,7 +96,7 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
107 if (i % 4 == 0) 96 if (i % 4 == 0)
108 len += snprintf(buf + len, sizeof(buf) - len, "\n"); 97 len += snprintf(buf + len, sizeof(buf) - len, "\n");
109 98
110 val[i] = REG_READ(ah, AR_DMADBG_0 + (i * sizeof(u32))); 99 val[i] = REG_READ_D(ah, AR_DMADBG_0 + (i * sizeof(u32)));
111 len += snprintf(buf + len, sizeof(buf) - len, "%d: %08x ", 100 len += snprintf(buf + len, sizeof(buf) - len, "%d: %08x ",
112 i, val[i]); 101 i, val[i]);
113 } 102 }
@@ -157,9 +146,9 @@ static ssize_t read_file_dma(struct file *file, char __user *user_buf,
157 (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17); 146 (val[6] & 0x0001e000) >> 13, (val[6] & 0x001e0000) >> 17);
158 147
159 len += snprintf(buf + len, sizeof(buf) - len, "pcu observe: 0x%x \n", 148 len += snprintf(buf + len, sizeof(buf) - len, "pcu observe: 0x%x \n",
160 REG_READ(ah, AR_OBS_BUS_1)); 149 REG_READ_D(ah, AR_OBS_BUS_1));
161 len += snprintf(buf + len, sizeof(buf) - len, 150 len += snprintf(buf + len, sizeof(buf) - len,
162 "AR_CR: 0x%x \n", REG_READ(ah, AR_CR)); 151 "AR_CR: 0x%x \n", REG_READ_D(ah, AR_CR));
163 152
164 ath9k_ps_restore(sc); 153 ath9k_ps_restore(sc);
165 154
@@ -376,12 +365,12 @@ static ssize_t read_file_wiphy(struct file *file, char __user *user_buf,
376 aphy->chan_idx, aphy->chan_is_ht); 365 aphy->chan_idx, aphy->chan_is_ht);
377 } 366 }
378 367
379 put_unaligned_le32(REG_READ(sc->sc_ah, AR_STA_ID0), addr); 368 put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_STA_ID0), addr);
380 put_unaligned_le16(REG_READ(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4); 369 put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_STA_ID1) & 0xffff, addr + 4);
381 len += snprintf(buf + len, sizeof(buf) - len, 370 len += snprintf(buf + len, sizeof(buf) - len,
382 "addr: %pM\n", addr); 371 "addr: %pM\n", addr);
383 put_unaligned_le32(REG_READ(sc->sc_ah, AR_BSSMSKL), addr); 372 put_unaligned_le32(REG_READ_D(sc->sc_ah, AR_BSSMSKL), addr);
384 put_unaligned_le16(REG_READ(sc->sc_ah, AR_BSSMSKU) & 0xffff, addr + 4); 373 put_unaligned_le16(REG_READ_D(sc->sc_ah, AR_BSSMSKU) & 0xffff, addr + 4);
385 len += snprintf(buf + len, sizeof(buf) - len, 374 len += snprintf(buf + len, sizeof(buf) - len,
386 "addrmask: %pM\n", addr); 375 "addrmask: %pM\n", addr);
387 376
@@ -568,9 +557,10 @@ static const struct file_operations fops_xmit = {
568 .owner = THIS_MODULE 557 .owner = THIS_MODULE
569}; 558};
570 559
571int ath9k_init_debug(struct ath_softc *sc) 560int ath9k_init_debug(struct ath_hw *ah)
572{ 561{
573 sc->debug.debug_mask = ath9k_debug; 562 struct ath_common *common = ath9k_hw_common(ah);
563 struct ath_softc *sc = (struct ath_softc *) common->priv;
574 564
575 if (!ath9k_debugfs_root) 565 if (!ath9k_debugfs_root)
576 return -ENOENT; 566 return -ENOENT;
@@ -619,12 +609,15 @@ int ath9k_init_debug(struct ath_softc *sc)
619 609
620 return 0; 610 return 0;
621err: 611err:
622 ath9k_exit_debug(sc); 612 ath9k_exit_debug(ah);
623 return -ENOMEM; 613 return -ENOMEM;
624} 614}
625 615
626void ath9k_exit_debug(struct ath_softc *sc) 616void ath9k_exit_debug(struct ath_hw *ah)
627{ 617{
618 struct ath_common *common = ath9k_hw_common(ah);
619 struct ath_softc *sc = (struct ath_softc *) common->priv;
620
628 debugfs_remove(sc->debug.debugfs_xmit); 621 debugfs_remove(sc->debug.debugfs_xmit);
629 debugfs_remove(sc->debug.debugfs_wiphy); 622 debugfs_remove(sc->debug.debugfs_wiphy);
630 debugfs_remove(sc->debug.debugfs_rcstat); 623 debugfs_remove(sc->debug.debugfs_rcstat);
diff --git a/drivers/net/wireless/ath/ath9k/debug.h b/drivers/net/wireless/ath/ath9k/debug.h
index 7241f4748338..749e85d57551 100644
--- a/drivers/net/wireless/ath/ath9k/debug.h
+++ b/drivers/net/wireless/ath/ath9k/debug.h
@@ -17,25 +17,7 @@
17#ifndef DEBUG_H 17#ifndef DEBUG_H
18#define DEBUG_H 18#define DEBUG_H
19 19
20enum ATH_DEBUG { 20#include "hw.h"
21 ATH_DBG_RESET = 0x00000001,
22 ATH_DBG_QUEUE = 0x00000002,
23 ATH_DBG_EEPROM = 0x00000004,
24 ATH_DBG_CALIBRATE = 0x00000008,
25 ATH_DBG_INTERRUPT = 0x00000010,
26 ATH_DBG_REGULATORY = 0x00000020,
27 ATH_DBG_ANI = 0x00000040,
28 ATH_DBG_XMIT = 0x00000080,
29 ATH_DBG_BEACON = 0x00000100,
30 ATH_DBG_CONFIG = 0x00000200,
31 ATH_DBG_FATAL = 0x00000400,
32 ATH_DBG_PS = 0x00000800,
33 ATH_DBG_HWTIMER = 0x00001000,
34 ATH_DBG_BTCOEX = 0x00002000,
35 ATH_DBG_ANY = 0xffffffff
36};
37
38#define DBG_DEFAULT (ATH_DBG_FATAL)
39 21
40struct ath_txq; 22struct ath_txq;
41struct ath_buf; 23struct ath_buf;
@@ -140,7 +122,6 @@ struct ath_stats {
140}; 122};
141 123
142struct ath9k_debug { 124struct ath9k_debug {
143 int debug_mask;
144 struct dentry *debugfs_phy; 125 struct dentry *debugfs_phy;
145 struct dentry *debugfs_debug; 126 struct dentry *debugfs_debug;
146 struct dentry *debugfs_dma; 127 struct dentry *debugfs_dma;
@@ -151,9 +132,9 @@ struct ath9k_debug {
151 struct ath_stats stats; 132 struct ath_stats stats;
152}; 133};
153 134
154void DPRINTF(struct ath_softc *sc, int dbg_mask, const char *fmt, ...); 135int ath9k_init_debug(struct ath_hw *ah);
155int ath9k_init_debug(struct ath_softc *sc); 136void ath9k_exit_debug(struct ath_hw *ah);
156void ath9k_exit_debug(struct ath_softc *sc); 137
157int ath9k_debug_create_root(void); 138int ath9k_debug_create_root(void);
158void ath9k_debug_remove_root(void); 139void ath9k_debug_remove_root(void);
159void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status); 140void ath_debug_stat_interrupt(struct ath_softc *sc, enum ath9k_int status);
@@ -165,17 +146,12 @@ void ath_debug_stat_retries(struct ath_softc *sc, int rix,
165 146
166#else 147#else
167 148
168static inline void DPRINTF(struct ath_softc *sc, int dbg_mask, 149static inline int ath9k_init_debug(struct ath_hw *ah)
169 const char *fmt, ...)
170{
171}
172
173static inline int ath9k_init_debug(struct ath_softc *sc)
174{ 150{
175 return 0; 151 return 0;
176} 152}
177 153
178static inline void ath9k_exit_debug(struct ath_softc *sc) 154static inline void ath9k_exit_debug(struct ath_hw *ah)
179{ 155{
180} 156}
181 157
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.c b/drivers/net/wireless/ath/ath9k/eeprom.c
index b6e52d0f8c48..dacaae934148 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom.c
@@ -14,7 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "ath9k.h" 17#include "hw.h"
18 18
19static inline u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz) 19static inline u16 ath9k_hw_fbin2freq(u8 fbin, bool is2GHz)
20{ 20{
@@ -83,11 +83,9 @@ bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
83 return false; 83 return false;
84} 84}
85 85
86bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data) 86bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data)
87{ 87{
88 struct ath_softc *sc = ah->ah_sc; 88 return common->bus_ops->eeprom_read(common, off, data);
89
90 return sc->bus_ops->eeprom_read(ah, off, data);
91} 89}
92 90
93void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList, 91void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
diff --git a/drivers/net/wireless/ath/ath9k/eeprom.h b/drivers/net/wireless/ath/ath9k/eeprom.h
index 4fe33f7eee9d..2f2993b50e2f 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom.h
+++ b/drivers/net/wireless/ath/ath9k/eeprom.h
@@ -17,6 +17,7 @@
17#ifndef EEPROM_H 17#ifndef EEPROM_H
18#define EEPROM_H 18#define EEPROM_H
19 19
20#include "../ath.h"
20#include <net/cfg80211.h> 21#include <net/cfg80211.h>
21 22
22#define AH_USE_EEPROM 0x1 23#define AH_USE_EEPROM 0x1
@@ -133,6 +134,7 @@
133#define AR5416_EEP_MINOR_VER_17 0x11 134#define AR5416_EEP_MINOR_VER_17 0x11
134#define AR5416_EEP_MINOR_VER_19 0x13 135#define AR5416_EEP_MINOR_VER_19 0x13
135#define AR5416_EEP_MINOR_VER_20 0x14 136#define AR5416_EEP_MINOR_VER_20 0x14
137#define AR5416_EEP_MINOR_VER_21 0x15
136#define AR5416_EEP_MINOR_VER_22 0x16 138#define AR5416_EEP_MINOR_VER_22 0x16
137 139
138#define AR5416_NUM_5G_CAL_PIERS 8 140#define AR5416_NUM_5G_CAL_PIERS 8
@@ -153,7 +155,7 @@
153#define AR5416_BCHAN_UNUSED 0xFF 155#define AR5416_BCHAN_UNUSED 0xFF
154#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64 156#define AR5416_MAX_PWR_RANGE_IN_HALF_DB 64
155#define AR5416_MAX_CHAINS 3 157#define AR5416_MAX_CHAINS 3
156#define AR5416_PWR_TABLE_OFFSET -5 158#define AR5416_PWR_TABLE_OFFSET_DB -5
157 159
158/* Rx gain type values */ 160/* Rx gain type values */
159#define AR5416_EEP_RXGAIN_23DB_BACKOFF 0 161#define AR5416_EEP_RXGAIN_23DB_BACKOFF 0
@@ -301,7 +303,7 @@ struct base_eep_header {
301 u8 txGainType; 303 u8 txGainType;
302 u8 rcChainMask; 304 u8 rcChainMask;
303 u8 desiredScaleCCK; 305 u8 desiredScaleCCK;
304 u8 power_table_offset; 306 u8 pwr_table_offset;
305 u8 frac_n_5g; 307 u8 frac_n_5g;
306 u8 futureBase_3[21]; 308 u8 futureBase_3[21];
307} __packed; 309} __packed;
@@ -638,6 +640,7 @@ struct ar9287_eeprom {
638} __packed; 640} __packed;
639 641
640enum reg_ext_bitmap { 642enum reg_ext_bitmap {
643 REG_EXT_FCC_MIDBAND = 0,
641 REG_EXT_JAPAN_MIDBAND = 1, 644 REG_EXT_JAPAN_MIDBAND = 1,
642 REG_EXT_FCC_DFS_HT40 = 2, 645 REG_EXT_FCC_DFS_HT40 = 2,
643 REG_EXT_JAPAN_NONDFS_HT40 = 3, 646 REG_EXT_JAPAN_NONDFS_HT40 = 3,
@@ -684,7 +687,7 @@ int16_t ath9k_hw_interpolate(u16 target, u16 srcLeft, u16 srcRight,
684 int16_t targetRight); 687 int16_t targetRight);
685bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize, 688bool ath9k_hw_get_lower_upper_index(u8 target, u8 *pList, u16 listSize,
686 u16 *indexL, u16 *indexR); 689 u16 *indexL, u16 *indexR);
687bool ath9k_hw_nvram_read(struct ath_hw *ah, u32 off, u16 *data); 690bool ath9k_hw_nvram_read(struct ath_common *common, u32 off, u16 *data);
688void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList, 691void ath9k_hw_fill_vpd_table(u8 pwrMin, u8 pwrMax, u8 *pPwrList,
689 u8 *pVpdList, u16 numIntercepts, 692 u8 *pVpdList, u16 numIntercepts,
690 u8 *pRetVpdList); 693 u8 *pRetVpdList);
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_4k.c b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
index b8eca7be5f3a..68db16690abf 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_4k.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_4k.c
@@ -14,7 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "ath9k.h" 17#include "hw.h"
18 18
19static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah) 19static int ath9k_hw_4k_get_eeprom_ver(struct ath_hw *ah)
20{ 20{
@@ -29,20 +29,21 @@ static int ath9k_hw_4k_get_eeprom_rev(struct ath_hw *ah)
29static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah) 29static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
30{ 30{
31#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) 31#define SIZE_EEPROM_4K (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
32 struct ath_common *common = ath9k_hw_common(ah);
32 u16 *eep_data = (u16 *)&ah->eeprom.map4k; 33 u16 *eep_data = (u16 *)&ah->eeprom.map4k;
33 int addr, eep_start_loc = 0; 34 int addr, eep_start_loc = 0;
34 35
35 eep_start_loc = 64; 36 eep_start_loc = 64;
36 37
37 if (!ath9k_hw_use_flash(ah)) { 38 if (!ath9k_hw_use_flash(ah)) {
38 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 39 ath_print(common, ATH_DBG_EEPROM,
39 "Reading from EEPROM, not flash\n"); 40 "Reading from EEPROM, not flash\n");
40 } 41 }
41 42
42 for (addr = 0; addr < SIZE_EEPROM_4K; addr++) { 43 for (addr = 0; addr < SIZE_EEPROM_4K; addr++) {
43 if (!ath9k_hw_nvram_read(ah, addr + eep_start_loc, eep_data)) { 44 if (!ath9k_hw_nvram_read(common, addr + eep_start_loc, eep_data)) {
44 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 45 ath_print(common, ATH_DBG_EEPROM,
45 "Unable to read eeprom region \n"); 46 "Unable to read eeprom region \n");
46 return false; 47 return false;
47 } 48 }
48 eep_data++; 49 eep_data++;
@@ -55,6 +56,7 @@ static bool ath9k_hw_4k_fill_eeprom(struct ath_hw *ah)
55static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah) 56static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
56{ 57{
57#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16)) 58#define EEPROM_4K_SIZE (sizeof(struct ar5416_eeprom_4k) / sizeof(u16))
59 struct ath_common *common = ath9k_hw_common(ah);
58 struct ar5416_eeprom_4k *eep = 60 struct ar5416_eeprom_4k *eep =
59 (struct ar5416_eeprom_4k *) &ah->eeprom.map4k; 61 (struct ar5416_eeprom_4k *) &ah->eeprom.map4k;
60 u16 *eepdata, temp, magic, magic2; 62 u16 *eepdata, temp, magic, magic2;
@@ -64,15 +66,15 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
64 66
65 67
66 if (!ath9k_hw_use_flash(ah)) { 68 if (!ath9k_hw_use_flash(ah)) {
67 if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, 69 if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET,
68 &magic)) { 70 &magic)) {
69 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 71 ath_print(common, ATH_DBG_FATAL,
70 "Reading Magic # failed\n"); 72 "Reading Magic # failed\n");
71 return false; 73 return false;
72 } 74 }
73 75
74 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 76 ath_print(common, ATH_DBG_EEPROM,
75 "Read Magic = 0x%04X\n", magic); 77 "Read Magic = 0x%04X\n", magic);
76 78
77 if (magic != AR5416_EEPROM_MAGIC) { 79 if (magic != AR5416_EEPROM_MAGIC) {
78 magic2 = swab16(magic); 80 magic2 = swab16(magic);
@@ -87,16 +89,16 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
87 eepdata++; 89 eepdata++;
88 } 90 }
89 } else { 91 } else {
90 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 92 ath_print(common, ATH_DBG_FATAL,
91 "Invalid EEPROM Magic. " 93 "Invalid EEPROM Magic. "
92 "endianness mismatch.\n"); 94 "endianness mismatch.\n");
93 return -EINVAL; 95 return -EINVAL;
94 } 96 }
95 } 97 }
96 } 98 }
97 99
98 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "need_swap = %s.\n", 100 ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
99 need_swap ? "True" : "False"); 101 need_swap ? "True" : "False");
100 102
101 if (need_swap) 103 if (need_swap)
102 el = swab16(ah->eeprom.map4k.baseEepHeader.length); 104 el = swab16(ah->eeprom.map4k.baseEepHeader.length);
@@ -117,8 +119,8 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
117 u32 integer; 119 u32 integer;
118 u16 word; 120 u16 word;
119 121
120 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 122 ath_print(common, ATH_DBG_EEPROM,
121 "EEPROM Endianness is not native.. Changing\n"); 123 "EEPROM Endianness is not native.. Changing\n");
122 124
123 word = swab16(eep->baseEepHeader.length); 125 word = swab16(eep->baseEepHeader.length);
124 eep->baseEepHeader.length = word; 126 eep->baseEepHeader.length = word;
@@ -160,9 +162,9 @@ static int ath9k_hw_4k_check_eeprom(struct ath_hw *ah)
160 162
161 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER || 163 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER ||
162 ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { 164 ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
163 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 165 ath_print(common, ATH_DBG_FATAL,
164 "Bad EEPROM checksum 0x%x or revision 0x%04x\n", 166 "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
165 sum, ah->eep_ops->get_eeprom_ver(ah)); 167 sum, ah->eep_ops->get_eeprom_ver(ah));
166 return -EINVAL; 168 return -EINVAL;
167 } 169 }
168 170
@@ -208,6 +210,8 @@ static u32 ath9k_hw_4k_get_eeprom(struct ath_hw *ah,
208 return pBase->rxMask; 210 return pBase->rxMask;
209 case EEP_FRAC_N_5G: 211 case EEP_FRAC_N_5G:
210 return 0; 212 return 0;
213 case EEP_PWR_TABLE_OFFSET:
214 return AR5416_PWR_TABLE_OFFSET_DB;
211 default: 215 default:
212 return 0; 216 return 0;
213 } 217 }
@@ -385,6 +389,7 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
385 struct ath9k_channel *chan, 389 struct ath9k_channel *chan,
386 int16_t *pTxPowerIndexOffset) 390 int16_t *pTxPowerIndexOffset)
387{ 391{
392 struct ath_common *common = ath9k_hw_common(ah);
388 struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k; 393 struct ar5416_eeprom_4k *pEepData = &ah->eeprom.map4k;
389 struct cal_data_per_freq_4k *pRawDataset; 394 struct cal_data_per_freq_4k *pRawDataset;
390 u8 *pCalBChans = NULL; 395 u8 *pCalBChans = NULL;
@@ -470,21 +475,21 @@ static void ath9k_hw_set_4k_power_cal_table(struct ath_hw *ah,
470 ((pdadcValues[4 * j + 3] & 0xFF) << 24); 475 ((pdadcValues[4 * j + 3] & 0xFF) << 24);
471 REG_WRITE(ah, regOffset, reg32); 476 REG_WRITE(ah, regOffset, reg32);
472 477
473 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 478 ath_print(common, ATH_DBG_EEPROM,
474 "PDADC (%d,%4x): %4.4x %8.8x\n", 479 "PDADC (%d,%4x): %4.4x %8.8x\n",
475 i, regChainOffset, regOffset, 480 i, regChainOffset, regOffset,
476 reg32); 481 reg32);
477 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 482 ath_print(common, ATH_DBG_EEPROM,
478 "PDADC: Chain %d | " 483 "PDADC: Chain %d | "
479 "PDADC %3d Value %3d | " 484 "PDADC %3d Value %3d | "
480 "PDADC %3d Value %3d | " 485 "PDADC %3d Value %3d | "
481 "PDADC %3d Value %3d | " 486 "PDADC %3d Value %3d | "
482 "PDADC %3d Value %3d |\n", 487 "PDADC %3d Value %3d |\n",
483 i, 4 * j, pdadcValues[4 * j], 488 i, 4 * j, pdadcValues[4 * j],
484 4 * j + 1, pdadcValues[4 * j + 1], 489 4 * j + 1, pdadcValues[4 * j + 1],
485 4 * j + 2, pdadcValues[4 * j + 2], 490 4 * j + 2, pdadcValues[4 * j + 2],
486 4 * j + 3, 491 4 * j + 3,
487 pdadcValues[4 * j + 3]); 492 pdadcValues[4 * j + 3]);
488 493
489 regOffset += 4; 494 regOffset += 4;
490 } 495 }
@@ -750,7 +755,7 @@ static void ath9k_hw_4k_set_txpower(struct ath_hw *ah,
750 755
751 if (AR_SREV_9280_10_OR_LATER(ah)) { 756 if (AR_SREV_9280_10_OR_LATER(ah)) {
752 for (i = 0; i < Ar5416RateSize; i++) 757 for (i = 0; i < Ar5416RateSize; i++)
753 ratesArray[i] -= AR5416_PWR_TABLE_OFFSET * 2; 758 ratesArray[i] -= AR5416_PWR_TABLE_OFFSET_DB * 2;
754 } 759 }
755 760
756 /* OFDM power per rate */ 761 /* OFDM power per rate */
@@ -1107,6 +1112,10 @@ static void ath9k_hw_4k_set_board_values(struct ath_hw *ah,
1107 1112
1108 REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON, 1113 REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
1109 pModal->txEndToRxOn); 1114 pModal->txEndToRxOn);
1115
1116 if (AR_SREV_9271_10(ah))
1117 REG_RMW_FIELD(ah, AR_PHY_RF_CTL3, AR_PHY_TX_END_TO_A2_RX_ON,
1118 pModal->txEndToRxOn);
1110 REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62, 1119 REG_RMW_FIELD(ah, AR_PHY_CCA, AR9280_PHY_CCA_THRESH62,
1111 pModal->thresh62); 1120 pModal->thresh62);
1112 REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0, AR_PHY_EXT_CCA0_THRESH62, 1121 REG_RMW_FIELD(ah, AR_PHY_EXT_CCA0, AR_PHY_EXT_CCA0_THRESH62,
@@ -1148,20 +1157,21 @@ static u16 ath9k_hw_4k_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
1148{ 1157{
1149#define EEP_MAP4K_SPURCHAN \ 1158#define EEP_MAP4K_SPURCHAN \
1150 (ah->eeprom.map4k.modalHeader.spurChans[i].spurChan) 1159 (ah->eeprom.map4k.modalHeader.spurChans[i].spurChan)
1160 struct ath_common *common = ath9k_hw_common(ah);
1151 1161
1152 u16 spur_val = AR_NO_SPUR; 1162 u16 spur_val = AR_NO_SPUR;
1153 1163
1154 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 1164 ath_print(common, ATH_DBG_ANI,
1155 "Getting spur idx %d is2Ghz. %d val %x\n", 1165 "Getting spur idx %d is2Ghz. %d val %x\n",
1156 i, is2GHz, ah->config.spurchans[i][is2GHz]); 1166 i, is2GHz, ah->config.spurchans[i][is2GHz]);
1157 1167
1158 switch (ah->config.spurmode) { 1168 switch (ah->config.spurmode) {
1159 case SPUR_DISABLE: 1169 case SPUR_DISABLE:
1160 break; 1170 break;
1161 case SPUR_ENABLE_IOCTL: 1171 case SPUR_ENABLE_IOCTL:
1162 spur_val = ah->config.spurchans[i][is2GHz]; 1172 spur_val = ah->config.spurchans[i][is2GHz];
1163 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 1173 ath_print(common, ATH_DBG_ANI,
1164 "Getting spur val from new loc. %d\n", spur_val); 1174 "Getting spur val from new loc. %d\n", spur_val);
1165 break; 1175 break;
1166 case SPUR_ENABLE_EEPROM: 1176 case SPUR_ENABLE_EEPROM:
1167 spur_val = EEP_MAP4K_SPURCHAN; 1177 spur_val = EEP_MAP4K_SPURCHAN;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_9287.c b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
index c20c21a79b21..839d05a1df29 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_9287.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_9287.c
@@ -14,7 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "ath9k.h" 17#include "hw.h"
18 18
19static int ath9k_hw_AR9287_get_eeprom_ver(struct ath_hw *ah) 19static int ath9k_hw_AR9287_get_eeprom_ver(struct ath_hw *ah)
20{ 20{
@@ -29,20 +29,22 @@ static int ath9k_hw_AR9287_get_eeprom_rev(struct ath_hw *ah)
29static bool ath9k_hw_AR9287_fill_eeprom(struct ath_hw *ah) 29static bool ath9k_hw_AR9287_fill_eeprom(struct ath_hw *ah)
30{ 30{
31 struct ar9287_eeprom *eep = &ah->eeprom.map9287; 31 struct ar9287_eeprom *eep = &ah->eeprom.map9287;
32 struct ath_common *common = ath9k_hw_common(ah);
32 u16 *eep_data; 33 u16 *eep_data;
33 int addr, eep_start_loc = AR9287_EEP_START_LOC; 34 int addr, eep_start_loc = AR9287_EEP_START_LOC;
34 eep_data = (u16 *)eep; 35 eep_data = (u16 *)eep;
35 36
36 if (!ath9k_hw_use_flash(ah)) { 37 if (!ath9k_hw_use_flash(ah)) {
37 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 38 ath_print(common, ATH_DBG_EEPROM,
38 "Reading from EEPROM, not flash\n"); 39 "Reading from EEPROM, not flash\n");
39 } 40 }
40 41
41 for (addr = 0; addr < sizeof(struct ar9287_eeprom) / sizeof(u16); 42 for (addr = 0; addr < sizeof(struct ar9287_eeprom) / sizeof(u16);
42 addr++) { 43 addr++) {
43 if (!ath9k_hw_nvram_read(ah, addr + eep_start_loc, eep_data)) { 44 if (!ath9k_hw_nvram_read(common,
44 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 45 addr + eep_start_loc, eep_data)) {
45 "Unable to read eeprom region \n"); 46 ath_print(common, ATH_DBG_EEPROM,
47 "Unable to read eeprom region \n");
46 return false; 48 return false;
47 } 49 }
48 eep_data++; 50 eep_data++;
@@ -57,17 +59,18 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah)
57 int i, addr; 59 int i, addr;
58 bool need_swap = false; 60 bool need_swap = false;
59 struct ar9287_eeprom *eep = &ah->eeprom.map9287; 61 struct ar9287_eeprom *eep = &ah->eeprom.map9287;
62 struct ath_common *common = ath9k_hw_common(ah);
60 63
61 if (!ath9k_hw_use_flash(ah)) { 64 if (!ath9k_hw_use_flash(ah)) {
62 if (!ath9k_hw_nvram_read 65 if (!ath9k_hw_nvram_read(common,
63 (ah, AR5416_EEPROM_MAGIC_OFFSET, &magic)) { 66 AR5416_EEPROM_MAGIC_OFFSET, &magic)) {
64 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 67 ath_print(common, ATH_DBG_FATAL,
65 "Reading Magic # failed\n"); 68 "Reading Magic # failed\n");
66 return false; 69 return false;
67 } 70 }
68 71
69 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 72 ath_print(common, ATH_DBG_EEPROM,
70 "Read Magic = 0x%04X\n", magic); 73 "Read Magic = 0x%04X\n", magic);
71 if (magic != AR5416_EEPROM_MAGIC) { 74 if (magic != AR5416_EEPROM_MAGIC) {
72 magic2 = swab16(magic); 75 magic2 = swab16(magic);
73 76
@@ -83,15 +86,15 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah)
83 eepdata++; 86 eepdata++;
84 } 87 }
85 } else { 88 } else {
86 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 89 ath_print(common, ATH_DBG_FATAL,
87 "Invalid EEPROM Magic. " 90 "Invalid EEPROM Magic. "
88 "endianness mismatch.\n"); 91 "endianness mismatch.\n");
89 return -EINVAL; 92 return -EINVAL;
90 } 93 }
91 } 94 }
92 } 95 }
93 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "need_swap = %s.\n", need_swap ? 96 ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n", need_swap ?
94 "True" : "False"); 97 "True" : "False");
95 98
96 if (need_swap) 99 if (need_swap)
97 el = swab16(ah->eeprom.map9287.baseEepHeader.length); 100 el = swab16(ah->eeprom.map9287.baseEepHeader.length);
@@ -148,9 +151,9 @@ static int ath9k_hw_AR9287_check_eeprom(struct ath_hw *ah)
148 151
149 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR9287_EEP_VER 152 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR9287_EEP_VER
150 || ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { 153 || ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
151 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 154 ath_print(common, ATH_DBG_FATAL,
152 "Bad EEPROM checksum 0x%x or revision 0x%04x\n", 155 "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
153 sum, ah->eep_ops->get_eeprom_ver(ah)); 156 sum, ah->eep_ops->get_eeprom_ver(ah));
154 return -EINVAL; 157 return -EINVAL;
155 } 158 }
156 159
@@ -436,6 +439,7 @@ static void ath9k_hw_set_AR9287_power_cal_table(struct ath_hw *ah,
436 struct ath9k_channel *chan, 439 struct ath9k_channel *chan,
437 int16_t *pTxPowerIndexOffset) 440 int16_t *pTxPowerIndexOffset)
438{ 441{
442 struct ath_common *common = ath9k_hw_common(ah);
439 struct cal_data_per_freq_ar9287 *pRawDataset; 443 struct cal_data_per_freq_ar9287 *pRawDataset;
440 struct cal_data_op_loop_ar9287 *pRawDatasetOpenLoop; 444 struct cal_data_op_loop_ar9287 *pRawDatasetOpenLoop;
441 u8 *pCalBChans = NULL; 445 u8 *pCalBChans = NULL;
@@ -564,24 +568,25 @@ static void ath9k_hw_set_AR9287_power_cal_table(struct ath_hw *ah,
564 & 0xFF) << 24) ; 568 & 0xFF) << 24) ;
565 REG_WRITE(ah, regOffset, reg32); 569 REG_WRITE(ah, regOffset, reg32);
566 570
567 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 571 ath_print(common, ATH_DBG_EEPROM,
568 "PDADC (%d,%4x): %4.4x %8.8x\n", 572 "PDADC (%d,%4x): %4.4x "
569 i, regChainOffset, regOffset, 573 "%8.8x\n",
570 reg32); 574 i, regChainOffset, regOffset,
571 575 reg32);
572 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 576
573 "PDADC: Chain %d | " 577 ath_print(common, ATH_DBG_EEPROM,
574 "PDADC %3d Value %3d | " 578 "PDADC: Chain %d | "
575 "PDADC %3d Value %3d | " 579 "PDADC %3d Value %3d | "
576 "PDADC %3d Value %3d | " 580 "PDADC %3d Value %3d | "
577 "PDADC %3d Value %3d |\n", 581 "PDADC %3d Value %3d | "
578 i, 4 * j, pdadcValues[4 * j], 582 "PDADC %3d Value %3d |\n",
579 4 * j + 1, 583 i, 4 * j, pdadcValues[4 * j],
580 pdadcValues[4 * j + 1], 584 4 * j + 1,
581 4 * j + 2, 585 pdadcValues[4 * j + 1],
582 pdadcValues[4 * j + 2], 586 4 * j + 2,
583 4 * j + 3, 587 pdadcValues[4 * j + 2],
584 pdadcValues[4 * j + 3]); 588 4 * j + 3,
589 pdadcValues[4 * j + 3]);
585 590
586 regOffset += 4; 591 regOffset += 4;
587 } 592 }
@@ -831,6 +836,7 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah,
831{ 836{
832#define INCREASE_MAXPOW_BY_TWO_CHAIN 6 837#define INCREASE_MAXPOW_BY_TWO_CHAIN 6
833#define INCREASE_MAXPOW_BY_THREE_CHAIN 10 838#define INCREASE_MAXPOW_BY_THREE_CHAIN 10
839 struct ath_common *common = ath9k_hw_common(ah);
834 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 840 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
835 struct ar9287_eeprom *pEepData = &ah->eeprom.map9287; 841 struct ar9287_eeprom *pEepData = &ah->eeprom.map9287;
836 struct modal_eep_ar9287_header *pModal = &pEepData->modalHeader; 842 struct modal_eep_ar9287_header *pModal = &pEepData->modalHeader;
@@ -966,8 +972,8 @@ static void ath9k_hw_AR9287_set_txpower(struct ath_hw *ah,
966 INCREASE_MAXPOW_BY_THREE_CHAIN; 972 INCREASE_MAXPOW_BY_THREE_CHAIN;
967 break; 973 break;
968 default: 974 default:
969 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 975 ath_print(common, ATH_DBG_EEPROM,
970 "Invalid chainmask configuration\n"); 976 "Invalid chainmask configuration\n");
971 break; 977 break;
972 } 978 }
973} 979}
@@ -1138,19 +1144,20 @@ static u16 ath9k_hw_AR9287_get_spur_channel(struct ath_hw *ah,
1138{ 1144{
1139#define EEP_MAP9287_SPURCHAN \ 1145#define EEP_MAP9287_SPURCHAN \
1140 (ah->eeprom.map9287.modalHeader.spurChans[i].spurChan) 1146 (ah->eeprom.map9287.modalHeader.spurChans[i].spurChan)
1147 struct ath_common *common = ath9k_hw_common(ah);
1141 u16 spur_val = AR_NO_SPUR; 1148 u16 spur_val = AR_NO_SPUR;
1142 1149
1143 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 1150 ath_print(common, ATH_DBG_ANI,
1144 "Getting spur idx %d is2Ghz. %d val %x\n", 1151 "Getting spur idx %d is2Ghz. %d val %x\n",
1145 i, is2GHz, ah->config.spurchans[i][is2GHz]); 1152 i, is2GHz, ah->config.spurchans[i][is2GHz]);
1146 1153
1147 switch (ah->config.spurmode) { 1154 switch (ah->config.spurmode) {
1148 case SPUR_DISABLE: 1155 case SPUR_DISABLE:
1149 break; 1156 break;
1150 case SPUR_ENABLE_IOCTL: 1157 case SPUR_ENABLE_IOCTL:
1151 spur_val = ah->config.spurchans[i][is2GHz]; 1158 spur_val = ah->config.spurchans[i][is2GHz];
1152 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 1159 ath_print(common, ATH_DBG_ANI,
1153 "Getting spur val from new loc. %d\n", spur_val); 1160 "Getting spur val from new loc. %d\n", spur_val);
1154 break; 1161 break;
1155 case SPUR_ENABLE_EEPROM: 1162 case SPUR_ENABLE_EEPROM:
1156 spur_val = EEP_MAP9287_SPURCHAN; 1163 spur_val = EEP_MAP9287_SPURCHAN;
diff --git a/drivers/net/wireless/ath/ath9k/eeprom_def.c b/drivers/net/wireless/ath/ath9k/eeprom_def.c
index 4071fc91da0a..404a0341242c 100644
--- a/drivers/net/wireless/ath/ath9k/eeprom_def.c
+++ b/drivers/net/wireless/ath/ath9k/eeprom_def.c
@@ -14,7 +14,7 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "ath9k.h" 17#include "hw.h"
18 18
19static void ath9k_get_txgain_index(struct ath_hw *ah, 19static void ath9k_get_txgain_index(struct ath_hw *ah,
20 struct ath9k_channel *chan, 20 struct ath9k_channel *chan,
@@ -89,14 +89,15 @@ static int ath9k_hw_def_get_eeprom_rev(struct ath_hw *ah)
89static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah) 89static bool ath9k_hw_def_fill_eeprom(struct ath_hw *ah)
90{ 90{
91#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16)) 91#define SIZE_EEPROM_DEF (sizeof(struct ar5416_eeprom_def) / sizeof(u16))
92 struct ath_common *common = ath9k_hw_common(ah);
92 u16 *eep_data = (u16 *)&ah->eeprom.def; 93 u16 *eep_data = (u16 *)&ah->eeprom.def;
93 int addr, ar5416_eep_start_loc = 0x100; 94 int addr, ar5416_eep_start_loc = 0x100;
94 95
95 for (addr = 0; addr < SIZE_EEPROM_DEF; addr++) { 96 for (addr = 0; addr < SIZE_EEPROM_DEF; addr++) {
96 if (!ath9k_hw_nvram_read(ah, addr + ar5416_eep_start_loc, 97 if (!ath9k_hw_nvram_read(common, addr + ar5416_eep_start_loc,
97 eep_data)) { 98 eep_data)) {
98 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 99 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
99 "Unable to read eeprom region\n"); 100 "Unable to read eeprom region\n");
100 return false; 101 return false;
101 } 102 }
102 eep_data++; 103 eep_data++;
@@ -109,19 +110,20 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
109{ 110{
110 struct ar5416_eeprom_def *eep = 111 struct ar5416_eeprom_def *eep =
111 (struct ar5416_eeprom_def *) &ah->eeprom.def; 112 (struct ar5416_eeprom_def *) &ah->eeprom.def;
113 struct ath_common *common = ath9k_hw_common(ah);
112 u16 *eepdata, temp, magic, magic2; 114 u16 *eepdata, temp, magic, magic2;
113 u32 sum = 0, el; 115 u32 sum = 0, el;
114 bool need_swap = false; 116 bool need_swap = false;
115 int i, addr, size; 117 int i, addr, size;
116 118
117 if (!ath9k_hw_nvram_read(ah, AR5416_EEPROM_MAGIC_OFFSET, &magic)) { 119 if (!ath9k_hw_nvram_read(common, AR5416_EEPROM_MAGIC_OFFSET, &magic)) {
118 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Reading Magic # failed\n"); 120 ath_print(common, ATH_DBG_FATAL, "Reading Magic # failed\n");
119 return false; 121 return false;
120 } 122 }
121 123
122 if (!ath9k_hw_use_flash(ah)) { 124 if (!ath9k_hw_use_flash(ah)) {
123 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 125 ath_print(common, ATH_DBG_EEPROM,
124 "Read Magic = 0x%04X\n", magic); 126 "Read Magic = 0x%04X\n", magic);
125 127
126 if (magic != AR5416_EEPROM_MAGIC) { 128 if (magic != AR5416_EEPROM_MAGIC) {
127 magic2 = swab16(magic); 129 magic2 = swab16(magic);
@@ -137,16 +139,16 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
137 eepdata++; 139 eepdata++;
138 } 140 }
139 } else { 141 } else {
140 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 142 ath_print(common, ATH_DBG_FATAL,
141 "Invalid EEPROM Magic. " 143 "Invalid EEPROM Magic. "
142 "Endianness mismatch.\n"); 144 "Endianness mismatch.\n");
143 return -EINVAL; 145 return -EINVAL;
144 } 146 }
145 } 147 }
146 } 148 }
147 149
148 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, "need_swap = %s.\n", 150 ath_print(common, ATH_DBG_EEPROM, "need_swap = %s.\n",
149 need_swap ? "True" : "False"); 151 need_swap ? "True" : "False");
150 152
151 if (need_swap) 153 if (need_swap)
152 el = swab16(ah->eeprom.def.baseEepHeader.length); 154 el = swab16(ah->eeprom.def.baseEepHeader.length);
@@ -167,8 +169,8 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
167 u32 integer, j; 169 u32 integer, j;
168 u16 word; 170 u16 word;
169 171
170 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 172 ath_print(common, ATH_DBG_EEPROM,
171 "EEPROM Endianness is not native.. Changing.\n"); 173 "EEPROM Endianness is not native.. Changing.\n");
172 174
173 word = swab16(eep->baseEepHeader.length); 175 word = swab16(eep->baseEepHeader.length);
174 eep->baseEepHeader.length = word; 176 eep->baseEepHeader.length = word;
@@ -214,8 +216,8 @@ static int ath9k_hw_def_check_eeprom(struct ath_hw *ah)
214 216
215 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER || 217 if (sum != 0xffff || ah->eep_ops->get_eeprom_ver(ah) != AR5416_EEP_VER ||
216 ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) { 218 ah->eep_ops->get_eeprom_rev(ah) < AR5416_EEP_NO_BACK_VER) {
217 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 219 ath_print(common, ATH_DBG_FATAL,
218 "Bad EEPROM checksum 0x%x or revision 0x%04x\n", 220 "Bad EEPROM checksum 0x%x or revision 0x%04x\n",
219 sum, ah->eep_ops->get_eeprom_ver(ah)); 221 sum, ah->eep_ops->get_eeprom_ver(ah));
220 return -EINVAL; 222 return -EINVAL;
221 } 223 }
@@ -289,6 +291,11 @@ static u32 ath9k_hw_def_get_eeprom(struct ath_hw *ah,
289 return pBase->frac_n_5g; 291 return pBase->frac_n_5g;
290 else 292 else
291 return 0; 293 return 0;
294 case EEP_PWR_TABLE_OFFSET:
295 if (AR5416_VER_MASK >= AR5416_EEP_MINOR_VER_21)
296 return pBase->pwr_table_offset;
297 else
298 return AR5416_PWR_TABLE_OFFSET_DB;
292 default: 299 default:
293 return 0; 300 return 0;
294 } 301 }
@@ -739,6 +746,76 @@ static void ath9k_hw_get_def_gain_boundaries_pdadcs(struct ath_hw *ah,
739 return; 746 return;
740} 747}
741 748
749static int16_t ath9k_change_gain_boundary_setting(struct ath_hw *ah,
750 u16 *gb,
751 u16 numXpdGain,
752 u16 pdGainOverlap_t2,
753 int8_t pwr_table_offset,
754 int16_t *diff)
755
756{
757 u16 k;
758
759 /* Prior to writing the boundaries or the pdadc vs. power table
760 * into the chip registers the default starting point on the pdadc
761 * vs. power table needs to be checked and the curve boundaries
762 * adjusted accordingly
763 */
764 if (AR_SREV_9280_20_OR_LATER(ah)) {
765 u16 gb_limit;
766
767 if (AR5416_PWR_TABLE_OFFSET_DB != pwr_table_offset) {
768 /* get the difference in dB */
769 *diff = (u16)(pwr_table_offset - AR5416_PWR_TABLE_OFFSET_DB);
770 /* get the number of half dB steps */
771 *diff *= 2;
772 /* change the original gain boundary settings
773 * by the number of half dB steps
774 */
775 for (k = 0; k < numXpdGain; k++)
776 gb[k] = (u16)(gb[k] - *diff);
777 }
778 /* Because of a hardware limitation, ensure the gain boundary
779 * is not larger than (63 - overlap)
780 */
781 gb_limit = (u16)(AR5416_MAX_RATE_POWER - pdGainOverlap_t2);
782
783 for (k = 0; k < numXpdGain; k++)
784 gb[k] = (u16)min(gb_limit, gb[k]);
785 }
786
787 return *diff;
788}
789
790static void ath9k_adjust_pdadc_values(struct ath_hw *ah,
791 int8_t pwr_table_offset,
792 int16_t diff,
793 u8 *pdadcValues)
794{
795#define NUM_PDADC(diff) (AR5416_NUM_PDADC_VALUES - diff)
796 u16 k;
797
798 /* If this is a board that has a pwrTableOffset that differs from
799 * the default AR5416_PWR_TABLE_OFFSET_DB then the start of the
800 * pdadc vs pwr table needs to be adjusted prior to writing to the
801 * chip.
802 */
803 if (AR_SREV_9280_20_OR_LATER(ah)) {
804 if (AR5416_PWR_TABLE_OFFSET_DB != pwr_table_offset) {
805 /* shift the table to start at the new offset */
806 for (k = 0; k < (u16)NUM_PDADC(diff); k++ ) {
807 pdadcValues[k] = pdadcValues[k + diff];
808 }
809
810 /* fill the back of the table */
811 for (k = (u16)NUM_PDADC(diff); k < NUM_PDADC(0); k++) {
812 pdadcValues[k] = pdadcValues[NUM_PDADC(diff)];
813 }
814 }
815 }
816#undef NUM_PDADC
817}
818
742static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah, 819static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
743 struct ath9k_channel *chan, 820 struct ath9k_channel *chan,
744 int16_t *pTxPowerIndexOffset) 821 int16_t *pTxPowerIndexOffset)
@@ -746,7 +823,7 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
746#define SM_PD_GAIN(x) SM(0x38, AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_##x) 823#define SM_PD_GAIN(x) SM(0x38, AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_##x)
747#define SM_PDGAIN_B(x, y) \ 824#define SM_PDGAIN_B(x, y) \
748 SM((gainBoundaries[x]), AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_##y) 825 SM((gainBoundaries[x]), AR_PHY_TPCRG5_PD_GAIN_BOUNDARY_##y)
749 826 struct ath_common *common = ath9k_hw_common(ah);
750 struct ar5416_eeprom_def *pEepData = &ah->eeprom.def; 827 struct ar5416_eeprom_def *pEepData = &ah->eeprom.def;
751 struct cal_data_per_freq *pRawDataset; 828 struct cal_data_per_freq *pRawDataset;
752 u8 *pCalBChans = NULL; 829 u8 *pCalBChans = NULL;
@@ -754,15 +831,18 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
754 static u8 pdadcValues[AR5416_NUM_PDADC_VALUES]; 831 static u8 pdadcValues[AR5416_NUM_PDADC_VALUES];
755 u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK]; 832 u16 gainBoundaries[AR5416_PD_GAINS_IN_MASK];
756 u16 numPiers, i, j; 833 u16 numPiers, i, j;
757 int16_t tMinCalPower; 834 int16_t tMinCalPower, diff = 0;
758 u16 numXpdGain, xpdMask; 835 u16 numXpdGain, xpdMask;
759 u16 xpdGainValues[AR5416_NUM_PD_GAINS] = { 0, 0, 0, 0 }; 836 u16 xpdGainValues[AR5416_NUM_PD_GAINS] = { 0, 0, 0, 0 };
760 u32 reg32, regOffset, regChainOffset; 837 u32 reg32, regOffset, regChainOffset;
761 int16_t modalIdx; 838 int16_t modalIdx;
839 int8_t pwr_table_offset;
762 840
763 modalIdx = IS_CHAN_2GHZ(chan) ? 1 : 0; 841 modalIdx = IS_CHAN_2GHZ(chan) ? 1 : 0;
764 xpdMask = pEepData->modalHeader[modalIdx].xpdGain; 842 xpdMask = pEepData->modalHeader[modalIdx].xpdGain;
765 843
844 pwr_table_offset = ah->eep_ops->get_eeprom(ah, EEP_PWR_TABLE_OFFSET);
845
766 if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >= 846 if ((pEepData->baseEepHeader.version & AR5416_EEP_VER_MINOR_MASK) >=
767 AR5416_EEP_MINOR_VER_2) { 847 AR5416_EEP_MINOR_VER_2) {
768 pdGainOverlap_t2 = 848 pdGainOverlap_t2 =
@@ -842,6 +922,13 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
842 numXpdGain); 922 numXpdGain);
843 } 923 }
844 924
925 diff = ath9k_change_gain_boundary_setting(ah,
926 gainBoundaries,
927 numXpdGain,
928 pdGainOverlap_t2,
929 pwr_table_offset,
930 &diff);
931
845 if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) { 932 if ((i == 0) || AR_SREV_5416_20_OR_LATER(ah)) {
846 if (OLC_FOR_AR9280_20_LATER) { 933 if (OLC_FOR_AR9280_20_LATER) {
847 REG_WRITE(ah, 934 REG_WRITE(ah,
@@ -862,6 +949,10 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
862 } 949 }
863 } 950 }
864 951
952
953 ath9k_adjust_pdadc_values(ah, pwr_table_offset,
954 diff, pdadcValues);
955
865 regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset; 956 regOffset = AR_PHY_BASE + (672 << 2) + regChainOffset;
866 for (j = 0; j < 32; j++) { 957 for (j = 0; j < 32; j++) {
867 reg32 = ((pdadcValues[4 * j + 0] & 0xFF) << 0) | 958 reg32 = ((pdadcValues[4 * j + 0] & 0xFF) << 0) |
@@ -870,20 +961,20 @@ static void ath9k_hw_set_def_power_cal_table(struct ath_hw *ah,
870 ((pdadcValues[4 * j + 3] & 0xFF) << 24); 961 ((pdadcValues[4 * j + 3] & 0xFF) << 24);
871 REG_WRITE(ah, regOffset, reg32); 962 REG_WRITE(ah, regOffset, reg32);
872 963
873 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 964 ath_print(common, ATH_DBG_EEPROM,
874 "PDADC (%d,%4x): %4.4x %8.8x\n", 965 "PDADC (%d,%4x): %4.4x %8.8x\n",
875 i, regChainOffset, regOffset, 966 i, regChainOffset, regOffset,
876 reg32); 967 reg32);
877 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 968 ath_print(common, ATH_DBG_EEPROM,
878 "PDADC: Chain %d | PDADC %3d " 969 "PDADC: Chain %d | PDADC %3d "
879 "Value %3d | PDADC %3d Value %3d | " 970 "Value %3d | PDADC %3d Value %3d | "
880 "PDADC %3d Value %3d | PDADC %3d " 971 "PDADC %3d Value %3d | PDADC %3d "
881 "Value %3d |\n", 972 "Value %3d |\n",
882 i, 4 * j, pdadcValues[4 * j], 973 i, 4 * j, pdadcValues[4 * j],
883 4 * j + 1, pdadcValues[4 * j + 1], 974 4 * j + 1, pdadcValues[4 * j + 1],
884 4 * j + 2, pdadcValues[4 * j + 2], 975 4 * j + 2, pdadcValues[4 * j + 2],
885 4 * j + 3, 976 4 * j + 3,
886 pdadcValues[4 * j + 3]); 977 pdadcValues[4 * j + 3]);
887 978
888 regOffset += 4; 979 regOffset += 4;
889 } 980 }
@@ -1197,8 +1288,13 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
1197 } 1288 }
1198 1289
1199 if (AR_SREV_9280_10_OR_LATER(ah)) { 1290 if (AR_SREV_9280_10_OR_LATER(ah)) {
1200 for (i = 0; i < Ar5416RateSize; i++) 1291 for (i = 0; i < Ar5416RateSize; i++) {
1201 ratesArray[i] -= AR5416_PWR_TABLE_OFFSET * 2; 1292 int8_t pwr_table_offset;
1293
1294 pwr_table_offset = ah->eep_ops->get_eeprom(ah,
1295 EEP_PWR_TABLE_OFFSET);
1296 ratesArray[i] -= pwr_table_offset * 2;
1297 }
1202 } 1298 }
1203 1299
1204 REG_WRITE(ah, AR_PHY_POWER_TX_RATE1, 1300 REG_WRITE(ah, AR_PHY_POWER_TX_RATE1,
@@ -1297,7 +1393,7 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
1297 1393
1298 if (AR_SREV_9280_10_OR_LATER(ah)) 1394 if (AR_SREV_9280_10_OR_LATER(ah))
1299 regulatory->max_power_level = 1395 regulatory->max_power_level =
1300 ratesArray[i] + AR5416_PWR_TABLE_OFFSET * 2; 1396 ratesArray[i] + AR5416_PWR_TABLE_OFFSET_DB * 2;
1301 else 1397 else
1302 regulatory->max_power_level = ratesArray[i]; 1398 regulatory->max_power_level = ratesArray[i];
1303 1399
@@ -1311,8 +1407,8 @@ static void ath9k_hw_def_set_txpower(struct ath_hw *ah,
1311 regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN; 1407 regulatory->max_power_level += INCREASE_MAXPOW_BY_THREE_CHAIN;
1312 break; 1408 break;
1313 default: 1409 default:
1314 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1410 ath_print(ath9k_hw_common(ah), ATH_DBG_EEPROM,
1315 "Invalid chainmask configuration\n"); 1411 "Invalid chainmask configuration\n");
1316 break; 1412 break;
1317 } 1413 }
1318} 1414}
@@ -1349,20 +1445,21 @@ static u16 ath9k_hw_def_get_spur_channel(struct ath_hw *ah, u16 i, bool is2GHz)
1349{ 1445{
1350#define EEP_DEF_SPURCHAN \ 1446#define EEP_DEF_SPURCHAN \
1351 (ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan) 1447 (ah->eeprom.def.modalHeader[is2GHz].spurChans[i].spurChan)
1448 struct ath_common *common = ath9k_hw_common(ah);
1352 1449
1353 u16 spur_val = AR_NO_SPUR; 1450 u16 spur_val = AR_NO_SPUR;
1354 1451
1355 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 1452 ath_print(common, ATH_DBG_ANI,
1356 "Getting spur idx %d is2Ghz. %d val %x\n", 1453 "Getting spur idx %d is2Ghz. %d val %x\n",
1357 i, is2GHz, ah->config.spurchans[i][is2GHz]); 1454 i, is2GHz, ah->config.spurchans[i][is2GHz]);
1358 1455
1359 switch (ah->config.spurmode) { 1456 switch (ah->config.spurmode) {
1360 case SPUR_DISABLE: 1457 case SPUR_DISABLE:
1361 break; 1458 break;
1362 case SPUR_ENABLE_IOCTL: 1459 case SPUR_ENABLE_IOCTL:
1363 spur_val = ah->config.spurchans[i][is2GHz]; 1460 spur_val = ah->config.spurchans[i][is2GHz];
1364 DPRINTF(ah->ah_sc, ATH_DBG_ANI, 1461 ath_print(common, ATH_DBG_ANI,
1365 "Getting spur val from new loc. %d\n", spur_val); 1462 "Getting spur val from new loc. %d\n", spur_val);
1366 break; 1463 break;
1367 case SPUR_ENABLE_EEPROM: 1464 case SPUR_ENABLE_EEPROM:
1368 spur_val = EEP_DEF_SPURCHAN; 1465 spur_val = EEP_DEF_SPURCHAN;
diff --git a/drivers/net/wireless/ath/ath9k/hw.c b/drivers/net/wireless/ath/ath9k/hw.c
index ca7694caf364..111ff049f75d 100644
--- a/drivers/net/wireless/ath/ath9k/hw.c
+++ b/drivers/net/wireless/ath/ath9k/hw.c
@@ -16,9 +16,9 @@
16 16
17#include <linux/io.h> 17#include <linux/io.h>
18#include <asm/unaligned.h> 18#include <asm/unaligned.h>
19#include <linux/pci.h>
20 19
21#include "ath9k.h" 20#include "hw.h"
21#include "rc.h"
22#include "initvals.h" 22#include "initvals.h"
23 23
24#define ATH9K_CLOCK_RATE_CCK 22 24#define ATH9K_CLOCK_RATE_CCK 22
@@ -26,13 +26,27 @@
26#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44 26#define ATH9K_CLOCK_RATE_2GHZ_OFDM 44
27 27
28static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type); 28static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type);
29static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan, 29static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan);
30 enum ath9k_ht_macmode macmode);
31static u32 ath9k_hw_ini_fixup(struct ath_hw *ah, 30static u32 ath9k_hw_ini_fixup(struct ath_hw *ah,
32 struct ar5416_eeprom_def *pEepData, 31 struct ar5416_eeprom_def *pEepData,
33 u32 reg, u32 value); 32 u32 reg, u32 value);
34static void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan); 33
35static void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan); 34MODULE_AUTHOR("Atheros Communications");
35MODULE_DESCRIPTION("Support for Atheros 802.11n wireless LAN cards.");
36MODULE_SUPPORTED_DEVICE("Atheros 802.11n WLAN cards");
37MODULE_LICENSE("Dual BSD/GPL");
38
39static int __init ath9k_init(void)
40{
41 return 0;
42}
43module_init(ath9k_init);
44
45static void __exit ath9k_exit(void)
46{
47 return;
48}
49module_exit(ath9k_exit);
36 50
37/********************/ 51/********************/
38/* Helper Functions */ 52/* Helper Functions */
@@ -40,7 +54,7 @@ static void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan
40 54
41static u32 ath9k_hw_mac_usec(struct ath_hw *ah, u32 clks) 55static u32 ath9k_hw_mac_usec(struct ath_hw *ah, u32 clks)
42{ 56{
43 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf; 57 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
44 58
45 if (!ah->curchan) /* should really check for CCK instead */ 59 if (!ah->curchan) /* should really check for CCK instead */
46 return clks / ATH9K_CLOCK_RATE_CCK; 60 return clks / ATH9K_CLOCK_RATE_CCK;
@@ -52,7 +66,7 @@ static u32 ath9k_hw_mac_usec(struct ath_hw *ah, u32 clks)
52 66
53static u32 ath9k_hw_mac_to_usec(struct ath_hw *ah, u32 clks) 67static u32 ath9k_hw_mac_to_usec(struct ath_hw *ah, u32 clks)
54{ 68{
55 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf; 69 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
56 70
57 if (conf_is_ht40(conf)) 71 if (conf_is_ht40(conf))
58 return ath9k_hw_mac_usec(ah, clks) / 2; 72 return ath9k_hw_mac_usec(ah, clks) / 2;
@@ -62,7 +76,7 @@ static u32 ath9k_hw_mac_to_usec(struct ath_hw *ah, u32 clks)
62 76
63static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs) 77static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs)
64{ 78{
65 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf; 79 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
66 80
67 if (!ah->curchan) /* should really check for CCK instead */ 81 if (!ah->curchan) /* should really check for CCK instead */
68 return usecs *ATH9K_CLOCK_RATE_CCK; 82 return usecs *ATH9K_CLOCK_RATE_CCK;
@@ -73,7 +87,7 @@ static u32 ath9k_hw_mac_clks(struct ath_hw *ah, u32 usecs)
73 87
74static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs) 88static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs)
75{ 89{
76 struct ieee80211_conf *conf = &ah->ah_sc->hw->conf; 90 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
77 91
78 if (conf_is_ht40(conf)) 92 if (conf_is_ht40(conf))
79 return ath9k_hw_mac_clks(ah, usecs) * 2; 93 return ath9k_hw_mac_clks(ah, usecs) * 2;
@@ -81,38 +95,6 @@ static u32 ath9k_hw_mac_to_clks(struct ath_hw *ah, u32 usecs)
81 return ath9k_hw_mac_clks(ah, usecs); 95 return ath9k_hw_mac_clks(ah, usecs);
82} 96}
83 97
84/*
85 * Read and write, they both share the same lock. We do this to serialize
86 * reads and writes on Atheros 802.11n PCI devices only. This is required
87 * as the FIFO on these devices can only accept sanely 2 requests. After
88 * that the device goes bananas. Serializing the reads/writes prevents this
89 * from happening.
90 */
91
92void ath9k_iowrite32(struct ath_hw *ah, u32 reg_offset, u32 val)
93{
94 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
95 unsigned long flags;
96 spin_lock_irqsave(&ah->ah_sc->sc_serial_rw, flags);
97 iowrite32(val, ah->ah_sc->mem + reg_offset);
98 spin_unlock_irqrestore(&ah->ah_sc->sc_serial_rw, flags);
99 } else
100 iowrite32(val, ah->ah_sc->mem + reg_offset);
101}
102
103unsigned int ath9k_ioread32(struct ath_hw *ah, u32 reg_offset)
104{
105 u32 val;
106 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
107 unsigned long flags;
108 spin_lock_irqsave(&ah->ah_sc->sc_serial_rw, flags);
109 val = ioread32(ah->ah_sc->mem + reg_offset);
110 spin_unlock_irqrestore(&ah->ah_sc->sc_serial_rw, flags);
111 } else
112 val = ioread32(ah->ah_sc->mem + reg_offset);
113 return val;
114}
115
116bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout) 98bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout)
117{ 99{
118 int i; 100 int i;
@@ -126,12 +108,13 @@ bool ath9k_hw_wait(struct ath_hw *ah, u32 reg, u32 mask, u32 val, u32 timeout)
126 udelay(AH_TIME_QUANTUM); 108 udelay(AH_TIME_QUANTUM);
127 } 109 }
128 110
129 DPRINTF(ah->ah_sc, ATH_DBG_ANY, 111 ath_print(ath9k_hw_common(ah), ATH_DBG_ANY,
130 "timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n", 112 "timeout (%d us) on reg 0x%x: 0x%08x & 0x%08x != 0x%08x\n",
131 timeout, reg, REG_READ(ah, reg), mask, val); 113 timeout, reg, REG_READ(ah, reg), mask, val);
132 114
133 return false; 115 return false;
134} 116}
117EXPORT_SYMBOL(ath9k_hw_wait);
135 118
136u32 ath9k_hw_reverse_bits(u32 val, u32 n) 119u32 ath9k_hw_reverse_bits(u32 val, u32 n)
137{ 120{
@@ -210,15 +193,16 @@ u16 ath9k_hw_computetxtime(struct ath_hw *ah,
210 } 193 }
211 break; 194 break;
212 default: 195 default:
213 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 196 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
214 "Unknown phy %u (rate ix %u)\n", 197 "Unknown phy %u (rate ix %u)\n",
215 rates->info[rateix].phy, rateix); 198 rates->info[rateix].phy, rateix);
216 txTime = 0; 199 txTime = 0;
217 break; 200 break;
218 } 201 }
219 202
220 return txTime; 203 return txTime;
221} 204}
205EXPORT_SYMBOL(ath9k_hw_computetxtime);
222 206
223void ath9k_hw_get_channel_centers(struct ath_hw *ah, 207void ath9k_hw_get_channel_centers(struct ath_hw *ah,
224 struct ath9k_channel *chan, 208 struct ath9k_channel *chan,
@@ -245,10 +229,9 @@ void ath9k_hw_get_channel_centers(struct ath_hw *ah,
245 229
246 centers->ctl_center = 230 centers->ctl_center =
247 centers->synth_center - (extoff * HT40_CHANNEL_CENTER_SHIFT); 231 centers->synth_center - (extoff * HT40_CHANNEL_CENTER_SHIFT);
232 /* 25 MHz spacing is supported by hw but not on upper layers */
248 centers->ext_center = 233 centers->ext_center =
249 centers->synth_center + (extoff * 234 centers->synth_center + (extoff * HT40_CHANNEL_CENTER_SHIFT);
250 ((ah->extprotspacing == ATH9K_HT_EXTPROTSPACING_20) ?
251 HT40_CHANNEL_CENTER_SHIFT : 15));
252} 235}
253 236
254/******************/ 237/******************/
@@ -317,6 +300,7 @@ static void ath9k_hw_disablepcie(struct ath_hw *ah)
317 300
318static bool ath9k_hw_chip_test(struct ath_hw *ah) 301static bool ath9k_hw_chip_test(struct ath_hw *ah)
319{ 302{
303 struct ath_common *common = ath9k_hw_common(ah);
320 u32 regAddr[2] = { AR_STA_ID0, AR_PHY_BASE + (8 << 2) }; 304 u32 regAddr[2] = { AR_STA_ID0, AR_PHY_BASE + (8 << 2) };
321 u32 regHold[2]; 305 u32 regHold[2];
322 u32 patternData[4] = { 0x55555555, 306 u32 patternData[4] = { 0x55555555,
@@ -335,10 +319,11 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
335 REG_WRITE(ah, addr, wrData); 319 REG_WRITE(ah, addr, wrData);
336 rdData = REG_READ(ah, addr); 320 rdData = REG_READ(ah, addr);
337 if (rdData != wrData) { 321 if (rdData != wrData) {
338 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 322 ath_print(common, ATH_DBG_FATAL,
339 "address test failed " 323 "address test failed "
340 "addr: 0x%08x - wr:0x%08x != rd:0x%08x\n", 324 "addr: 0x%08x - wr:0x%08x != "
341 addr, wrData, rdData); 325 "rd:0x%08x\n",
326 addr, wrData, rdData);
342 return false; 327 return false;
343 } 328 }
344 } 329 }
@@ -347,10 +332,11 @@ static bool ath9k_hw_chip_test(struct ath_hw *ah)
347 REG_WRITE(ah, addr, wrData); 332 REG_WRITE(ah, addr, wrData);
348 rdData = REG_READ(ah, addr); 333 rdData = REG_READ(ah, addr);
349 if (wrData != rdData) { 334 if (wrData != rdData) {
350 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 335 ath_print(common, ATH_DBG_FATAL,
351 "address test failed " 336 "address test failed "
352 "addr: 0x%08x - wr:0x%08x != rd:0x%08x\n", 337 "addr: 0x%08x - wr:0x%08x != "
353 addr, wrData, rdData); 338 "rd:0x%08x\n",
339 addr, wrData, rdData);
354 return false; 340 return false;
355 } 341 }
356 } 342 }
@@ -433,6 +419,7 @@ static void ath9k_hw_init_config(struct ath_hw *ah)
433 if (num_possible_cpus() > 1) 419 if (num_possible_cpus() > 1)
434 ah->config.serialize_regmode = SER_REG_MODE_AUTO; 420 ah->config.serialize_regmode = SER_REG_MODE_AUTO;
435} 421}
422EXPORT_SYMBOL(ath9k_hw_init);
436 423
437static void ath9k_hw_init_defaults(struct ath_hw *ah) 424static void ath9k_hw_init_defaults(struct ath_hw *ah)
438{ 425{
@@ -465,21 +452,6 @@ static void ath9k_hw_init_defaults(struct ath_hw *ah)
465 ah->power_mode = ATH9K_PM_UNDEFINED; 452 ah->power_mode = ATH9K_PM_UNDEFINED;
466} 453}
467 454
468static int ath9k_hw_rfattach(struct ath_hw *ah)
469{
470 bool rfStatus = false;
471 int ecode = 0;
472
473 rfStatus = ath9k_hw_init_rf(ah, &ecode);
474 if (!rfStatus) {
475 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
476 "RF setup failed, status: %u\n", ecode);
477 return ecode;
478 }
479
480 return 0;
481}
482
483static int ath9k_hw_rf_claim(struct ath_hw *ah) 455static int ath9k_hw_rf_claim(struct ath_hw *ah)
484{ 456{
485 u32 val; 457 u32 val;
@@ -497,9 +469,9 @@ static int ath9k_hw_rf_claim(struct ath_hw *ah)
497 case AR_RAD2122_SREV_MAJOR: 469 case AR_RAD2122_SREV_MAJOR:
498 break; 470 break;
499 default: 471 default:
500 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 472 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
501 "Radio Chip Rev 0x%02X not supported\n", 473 "Radio Chip Rev 0x%02X not supported\n",
502 val & AR_RADIO_SREV_MAJOR); 474 val & AR_RADIO_SREV_MAJOR);
503 return -EOPNOTSUPP; 475 return -EOPNOTSUPP;
504 } 476 }
505 477
@@ -510,6 +482,7 @@ static int ath9k_hw_rf_claim(struct ath_hw *ah)
510 482
511static int ath9k_hw_init_macaddr(struct ath_hw *ah) 483static int ath9k_hw_init_macaddr(struct ath_hw *ah)
512{ 484{
485 struct ath_common *common = ath9k_hw_common(ah);
513 u32 sum; 486 u32 sum;
514 int i; 487 int i;
515 u16 eeval; 488 u16 eeval;
@@ -518,8 +491,8 @@ static int ath9k_hw_init_macaddr(struct ath_hw *ah)
518 for (i = 0; i < 3; i++) { 491 for (i = 0; i < 3; i++) {
519 eeval = ah->eep_ops->get_eeprom(ah, AR_EEPROM_MAC(i)); 492 eeval = ah->eep_ops->get_eeprom(ah, AR_EEPROM_MAC(i));
520 sum += eeval; 493 sum += eeval;
521 ah->macaddr[2 * i] = eeval >> 8; 494 common->macaddr[2 * i] = eeval >> 8;
522 ah->macaddr[2 * i + 1] = eeval & 0xff; 495 common->macaddr[2 * i + 1] = eeval & 0xff;
523 } 496 }
524 if (sum == 0 || sum == 0xffff * 3) 497 if (sum == 0 || sum == 0xffff * 3)
525 return -EADDRNOTAVAIL; 498 return -EADDRNOTAVAIL;
@@ -590,12 +563,20 @@ static int ath9k_hw_post_init(struct ath_hw *ah)
590 if (ecode != 0) 563 if (ecode != 0)
591 return ecode; 564 return ecode;
592 565
593 DPRINTF(ah->ah_sc, ATH_DBG_CONFIG, "Eeprom VER: %d, REV: %d\n", 566 ath_print(ath9k_hw_common(ah), ATH_DBG_CONFIG,
594 ah->eep_ops->get_eeprom_ver(ah), ah->eep_ops->get_eeprom_rev(ah)); 567 "Eeprom VER: %d, REV: %d\n",
595 568 ah->eep_ops->get_eeprom_ver(ah),
596 ecode = ath9k_hw_rfattach(ah); 569 ah->eep_ops->get_eeprom_rev(ah));
597 if (ecode != 0) 570
598 return ecode; 571 if (!AR_SREV_9280_10_OR_LATER(ah)) {
572 ecode = ath9k_hw_rf_alloc_ext_banks(ah);
573 if (ecode) {
574 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
575 "Failed allocating banks for "
576 "external radio\n");
577 return ecode;
578 }
579 }
599 580
600 if (!AR_SREV_9100(ah)) { 581 if (!AR_SREV_9100(ah)) {
601 ath9k_hw_ani_setup(ah); 582 ath9k_hw_ani_setup(ah);
@@ -617,6 +598,7 @@ static bool ath9k_hw_devid_supported(u16 devid)
617 case AR9285_DEVID_PCIE: 598 case AR9285_DEVID_PCIE:
618 case AR5416_DEVID_AR9287_PCI: 599 case AR5416_DEVID_AR9287_PCI:
619 case AR5416_DEVID_AR9287_PCIE: 600 case AR5416_DEVID_AR9287_PCIE:
601 case AR9271_USB:
620 return true; 602 return true;
621 default: 603 default:
622 break; 604 break;
@@ -634,9 +616,8 @@ static bool ath9k_hw_macversion_supported(u32 macversion)
634 case AR_SREV_VERSION_9280: 616 case AR_SREV_VERSION_9280:
635 case AR_SREV_VERSION_9285: 617 case AR_SREV_VERSION_9285:
636 case AR_SREV_VERSION_9287: 618 case AR_SREV_VERSION_9287:
637 return true;
638 /* Not yet */
639 case AR_SREV_VERSION_9271: 619 case AR_SREV_VERSION_9271:
620 return true;
640 default: 621 default:
641 break; 622 break;
642 } 623 }
@@ -670,10 +651,13 @@ static void ath9k_hw_init_cal_settings(struct ath_hw *ah)
670static void ath9k_hw_init_mode_regs(struct ath_hw *ah) 651static void ath9k_hw_init_mode_regs(struct ath_hw *ah)
671{ 652{
672 if (AR_SREV_9271(ah)) { 653 if (AR_SREV_9271(ah)) {
673 INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271_1_0, 654 INIT_INI_ARRAY(&ah->iniModes, ar9271Modes_9271,
674 ARRAY_SIZE(ar9271Modes_9271_1_0), 6); 655 ARRAY_SIZE(ar9271Modes_9271), 6);
675 INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271_1_0, 656 INIT_INI_ARRAY(&ah->iniCommon, ar9271Common_9271,
676 ARRAY_SIZE(ar9271Common_9271_1_0), 2); 657 ARRAY_SIZE(ar9271Common_9271), 2);
658 INIT_INI_ARRAY(&ah->iniModes_9271_1_0_only,
659 ar9271Modes_9271_1_0_only,
660 ARRAY_SIZE(ar9271Modes_9271_1_0_only), 6);
677 return; 661 return;
678 } 662 }
679 663
@@ -905,21 +889,27 @@ static void ath9k_hw_init_11a_eeprom_fix(struct ath_hw *ah)
905 889
906int ath9k_hw_init(struct ath_hw *ah) 890int ath9k_hw_init(struct ath_hw *ah)
907{ 891{
892 struct ath_common *common = ath9k_hw_common(ah);
908 int r = 0; 893 int r = 0;
909 894
910 if (!ath9k_hw_devid_supported(ah->hw_version.devid)) 895 if (!ath9k_hw_devid_supported(ah->hw_version.devid)) {
896 ath_print(common, ATH_DBG_FATAL,
897 "Unsupported device ID: 0x%0x\n",
898 ah->hw_version.devid);
911 return -EOPNOTSUPP; 899 return -EOPNOTSUPP;
900 }
912 901
913 ath9k_hw_init_defaults(ah); 902 ath9k_hw_init_defaults(ah);
914 ath9k_hw_init_config(ah); 903 ath9k_hw_init_config(ah);
915 904
916 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) { 905 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_POWER_ON)) {
917 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Couldn't reset chip\n"); 906 ath_print(common, ATH_DBG_FATAL,
907 "Couldn't reset chip\n");
918 return -EIO; 908 return -EIO;
919 } 909 }
920 910
921 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) { 911 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) {
922 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Couldn't wakeup chip\n"); 912 ath_print(common, ATH_DBG_FATAL, "Couldn't wakeup chip\n");
923 return -EIO; 913 return -EIO;
924 } 914 }
925 915
@@ -934,14 +924,14 @@ int ath9k_hw_init(struct ath_hw *ah)
934 } 924 }
935 } 925 }
936 926
937 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "serialize_regmode is %d\n", 927 ath_print(common, ATH_DBG_RESET, "serialize_regmode is %d\n",
938 ah->config.serialize_regmode); 928 ah->config.serialize_regmode);
939 929
940 if (!ath9k_hw_macversion_supported(ah->hw_version.macVersion)) { 930 if (!ath9k_hw_macversion_supported(ah->hw_version.macVersion)) {
941 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 931 ath_print(common, ATH_DBG_FATAL,
942 "Mac Chip Rev 0x%02x.%x is not supported by " 932 "Mac Chip Rev 0x%02x.%x is not supported by "
943 "this driver\n", ah->hw_version.macVersion, 933 "this driver\n", ah->hw_version.macVersion,
944 ah->hw_version.macRev); 934 ah->hw_version.macRev);
945 return -EOPNOTSUPP; 935 return -EOPNOTSUPP;
946 } 936 }
947 937
@@ -959,8 +949,14 @@ int ath9k_hw_init(struct ath_hw *ah)
959 ath9k_hw_init_cal_settings(ah); 949 ath9k_hw_init_cal_settings(ah);
960 950
961 ah->ani_function = ATH9K_ANI_ALL; 951 ah->ani_function = ATH9K_ANI_ALL;
962 if (AR_SREV_9280_10_OR_LATER(ah)) 952 if (AR_SREV_9280_10_OR_LATER(ah)) {
963 ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL; 953 ah->ani_function &= ~ATH9K_ANI_NOISE_IMMUNITY_LEVEL;
954 ah->ath9k_hw_rf_set_freq = &ath9k_hw_ar9280_set_channel;
955 ah->ath9k_hw_spur_mitigate_freq = &ath9k_hw_9280_spur_mitigate;
956 } else {
957 ah->ath9k_hw_rf_set_freq = &ath9k_hw_set_channel;
958 ah->ath9k_hw_spur_mitigate_freq = &ath9k_hw_spur_mitigate;
959 }
964 960
965 ath9k_hw_init_mode_regs(ah); 961 ath9k_hw_init_mode_regs(ah);
966 962
@@ -969,6 +965,16 @@ int ath9k_hw_init(struct ath_hw *ah)
969 else 965 else
970 ath9k_hw_disablepcie(ah); 966 ath9k_hw_disablepcie(ah);
971 967
968 /* Support for Japan ch.14 (2484) spread */
969 if (AR_SREV_9287_11_OR_LATER(ah)) {
970 INIT_INI_ARRAY(&ah->iniCckfirNormal,
971 ar9287Common_normal_cck_fir_coeff_92871_1,
972 ARRAY_SIZE(ar9287Common_normal_cck_fir_coeff_92871_1), 2);
973 INIT_INI_ARRAY(&ah->iniCckfirJapan2484,
974 ar9287Common_japan_2484_cck_fir_coeff_92871_1,
975 ARRAY_SIZE(ar9287Common_japan_2484_cck_fir_coeff_92871_1), 2);
976 }
977
972 r = ath9k_hw_post_init(ah); 978 r = ath9k_hw_post_init(ah);
973 if (r) 979 if (r)
974 return r; 980 return r;
@@ -979,8 +985,8 @@ int ath9k_hw_init(struct ath_hw *ah)
979 985
980 r = ath9k_hw_init_macaddr(ah); 986 r = ath9k_hw_init_macaddr(ah);
981 if (r) { 987 if (r) {
982 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 988 ath_print(common, ATH_DBG_FATAL,
983 "Failed to initialize MAC address\n"); 989 "Failed to initialize MAC address\n");
984 return r; 990 return r;
985 } 991 }
986 992
@@ -991,6 +997,8 @@ int ath9k_hw_init(struct ath_hw *ah)
991 997
992 ath9k_init_nfcal_hist_buffer(ah); 998 ath9k_init_nfcal_hist_buffer(ah);
993 999
1000 common->state = ATH_HW_INITIALIZED;
1001
994 return 0; 1002 return 0;
995} 1003}
996 1004
@@ -1027,6 +1035,22 @@ static void ath9k_hw_init_qos(struct ath_hw *ah)
1027 REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF); 1035 REG_WRITE(ah, AR_TXOP_12_15, 0xFFFFFFFF);
1028} 1036}
1029 1037
1038static void ath9k_hw_change_target_baud(struct ath_hw *ah, u32 freq, u32 baud)
1039{
1040 u32 lcr;
1041 u32 baud_divider = freq * 1000 * 1000 / 16 / baud;
1042
1043 lcr = REG_READ(ah , 0x5100c);
1044 lcr |= 0x80;
1045
1046 REG_WRITE(ah, 0x5100c, lcr);
1047 REG_WRITE(ah, 0x51004, (baud_divider >> 8));
1048 REG_WRITE(ah, 0x51000, (baud_divider & 0xff));
1049
1050 lcr &= ~0x80;
1051 REG_WRITE(ah, 0x5100c, lcr);
1052}
1053
1030static void ath9k_hw_init_pll(struct ath_hw *ah, 1054static void ath9k_hw_init_pll(struct ath_hw *ah,
1031 struct ath9k_channel *chan) 1055 struct ath9k_channel *chan)
1032{ 1056{
@@ -1090,6 +1114,26 @@ static void ath9k_hw_init_pll(struct ath_hw *ah,
1090 } 1114 }
1091 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll); 1115 REG_WRITE(ah, AR_RTC_PLL_CONTROL, pll);
1092 1116
1117 /* Switch the core clock for ar9271 to 117Mhz */
1118 if (AR_SREV_9271(ah)) {
1119 if ((pll == 0x142c) || (pll == 0x2850) ) {
1120 udelay(500);
1121 /* set CLKOBS to output AHB clock */
1122 REG_WRITE(ah, 0x7020, 0xe);
1123 /*
1124 * 0x304: 117Mhz, ahb_ratio: 1x1
1125 * 0x306: 40Mhz, ahb_ratio: 1x1
1126 */
1127 REG_WRITE(ah, 0x50040, 0x304);
1128 /*
1129 * makes adjustments for the baud dividor to keep the
1130 * targetted baud rate based on the used core clock.
1131 */
1132 ath9k_hw_change_target_baud(ah, AR9271_CORE_CLOCK,
1133 AR9271_TARGET_BAUD_RATE);
1134 }
1135 }
1136
1093 udelay(RTC_PLL_SETTLE_DELAY); 1137 udelay(RTC_PLL_SETTLE_DELAY);
1094 1138
1095 REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK); 1139 REG_WRITE(ah, AR_RTC_SLEEP_CLK, AR_RTC_FORCE_DERIVED_CLK);
@@ -1164,7 +1208,8 @@ static void ath9k_hw_init_interrupt_masks(struct ath_hw *ah,
1164static bool ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us) 1208static bool ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us)
1165{ 1209{
1166 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_ACK))) { 1210 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_ACK))) {
1167 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "bad ack timeout %u\n", us); 1211 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
1212 "bad ack timeout %u\n", us);
1168 ah->acktimeout = (u32) -1; 1213 ah->acktimeout = (u32) -1;
1169 return false; 1214 return false;
1170 } else { 1215 } else {
@@ -1178,7 +1223,8 @@ static bool ath9k_hw_set_ack_timeout(struct ath_hw *ah, u32 us)
1178static bool ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us) 1223static bool ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
1179{ 1224{
1180 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_CTS))) { 1225 if (us > ath9k_hw_mac_to_usec(ah, MS(0xffffffff, AR_TIME_OUT_CTS))) {
1181 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "bad cts timeout %u\n", us); 1226 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
1227 "bad cts timeout %u\n", us);
1182 ah->ctstimeout = (u32) -1; 1228 ah->ctstimeout = (u32) -1;
1183 return false; 1229 return false;
1184 } else { 1230 } else {
@@ -1192,8 +1238,8 @@ static bool ath9k_hw_set_cts_timeout(struct ath_hw *ah, u32 us)
1192static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu) 1238static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
1193{ 1239{
1194 if (tu > 0xFFFF) { 1240 if (tu > 0xFFFF) {
1195 DPRINTF(ah->ah_sc, ATH_DBG_XMIT, 1241 ath_print(ath9k_hw_common(ah), ATH_DBG_XMIT,
1196 "bad global tx timeout %u\n", tu); 1242 "bad global tx timeout %u\n", tu);
1197 ah->globaltxtimeout = (u32) -1; 1243 ah->globaltxtimeout = (u32) -1;
1198 return false; 1244 return false;
1199 } else { 1245 } else {
@@ -1205,8 +1251,8 @@ static bool ath9k_hw_set_global_txtimeout(struct ath_hw *ah, u32 tu)
1205 1251
1206static void ath9k_hw_init_user_settings(struct ath_hw *ah) 1252static void ath9k_hw_init_user_settings(struct ath_hw *ah)
1207{ 1253{
1208 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "ah->misc_mode 0x%x\n", 1254 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET, "ah->misc_mode 0x%x\n",
1209 ah->misc_mode); 1255 ah->misc_mode);
1210 1256
1211 if (ah->misc_mode != 0) 1257 if (ah->misc_mode != 0)
1212 REG_WRITE(ah, AR_PCU_MISC, 1258 REG_WRITE(ah, AR_PCU_MISC,
@@ -1229,14 +1275,23 @@ const char *ath9k_hw_probe(u16 vendorid, u16 devid)
1229 1275
1230void ath9k_hw_detach(struct ath_hw *ah) 1276void ath9k_hw_detach(struct ath_hw *ah)
1231{ 1277{
1278 struct ath_common *common = ath9k_hw_common(ah);
1279
1280 if (common->state <= ATH_HW_INITIALIZED)
1281 goto free_hw;
1282
1232 if (!AR_SREV_9100(ah)) 1283 if (!AR_SREV_9100(ah))
1233 ath9k_hw_ani_disable(ah); 1284 ath9k_hw_ani_disable(ah);
1234 1285
1235 ath9k_hw_rf_free(ah);
1236 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP); 1286 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP);
1287
1288free_hw:
1289 if (!AR_SREV_9280_10_OR_LATER(ah))
1290 ath9k_hw_rf_free_ext_banks(ah);
1237 kfree(ah); 1291 kfree(ah);
1238 ah = NULL; 1292 ah = NULL;
1239} 1293}
1294EXPORT_SYMBOL(ath9k_hw_detach);
1240 1295
1241/*******/ 1296/*******/
1242/* INI */ 1297/* INI */
@@ -1254,7 +1309,8 @@ static void ath9k_hw_override_ini(struct ath_hw *ah,
1254 * AR9271 1.1 1309 * AR9271 1.1
1255 */ 1310 */
1256 if (AR_SREV_9271_10(ah)) { 1311 if (AR_SREV_9271_10(ah)) {
1257 val = REG_READ(ah, AR_PHY_SPECTRAL_SCAN) | AR_PHY_SPECTRAL_SCAN_ENABLE; 1312 val = REG_READ(ah, AR_PHY_SPECTRAL_SCAN) |
1313 AR_PHY_SPECTRAL_SCAN_ENABLE;
1258 REG_WRITE(ah, AR_PHY_SPECTRAL_SCAN, val); 1314 REG_WRITE(ah, AR_PHY_SPECTRAL_SCAN, val);
1259 } 1315 }
1260 else if (AR_SREV_9271_11(ah)) 1316 else if (AR_SREV_9271_11(ah))
@@ -1298,28 +1354,29 @@ static u32 ath9k_hw_def_ini_fixup(struct ath_hw *ah,
1298 u32 reg, u32 value) 1354 u32 reg, u32 value)
1299{ 1355{
1300 struct base_eep_header *pBase = &(pEepData->baseEepHeader); 1356 struct base_eep_header *pBase = &(pEepData->baseEepHeader);
1357 struct ath_common *common = ath9k_hw_common(ah);
1301 1358
1302 switch (ah->hw_version.devid) { 1359 switch (ah->hw_version.devid) {
1303 case AR9280_DEVID_PCI: 1360 case AR9280_DEVID_PCI:
1304 if (reg == 0x7894) { 1361 if (reg == 0x7894) {
1305 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1362 ath_print(common, ATH_DBG_EEPROM,
1306 "ini VAL: %x EEPROM: %x\n", value, 1363 "ini VAL: %x EEPROM: %x\n", value,
1307 (pBase->version & 0xff)); 1364 (pBase->version & 0xff));
1308 1365
1309 if ((pBase->version & 0xff) > 0x0a) { 1366 if ((pBase->version & 0xff) > 0x0a) {
1310 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1367 ath_print(common, ATH_DBG_EEPROM,
1311 "PWDCLKIND: %d\n", 1368 "PWDCLKIND: %d\n",
1312 pBase->pwdclkind); 1369 pBase->pwdclkind);
1313 value &= ~AR_AN_TOP2_PWDCLKIND; 1370 value &= ~AR_AN_TOP2_PWDCLKIND;
1314 value |= AR_AN_TOP2_PWDCLKIND & 1371 value |= AR_AN_TOP2_PWDCLKIND &
1315 (pBase->pwdclkind << AR_AN_TOP2_PWDCLKIND_S); 1372 (pBase->pwdclkind << AR_AN_TOP2_PWDCLKIND_S);
1316 } else { 1373 } else {
1317 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1374 ath_print(common, ATH_DBG_EEPROM,
1318 "PWDCLKIND Earlier Rev\n"); 1375 "PWDCLKIND Earlier Rev\n");
1319 } 1376 }
1320 1377
1321 DPRINTF(ah->ah_sc, ATH_DBG_EEPROM, 1378 ath_print(common, ATH_DBG_EEPROM,
1322 "final ini VAL: %x\n", value); 1379 "final ini VAL: %x\n", value);
1323 } 1380 }
1324 break; 1381 break;
1325 } 1382 }
@@ -1374,8 +1431,7 @@ static u32 ath9k_regd_get_ctl(struct ath_regulatory *reg,
1374} 1431}
1375 1432
1376static int ath9k_hw_process_ini(struct ath_hw *ah, 1433static int ath9k_hw_process_ini(struct ath_hw *ah,
1377 struct ath9k_channel *chan, 1434 struct ath9k_channel *chan)
1378 enum ath9k_ht_macmode macmode)
1379{ 1435{
1380 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 1436 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
1381 int i, regWrites = 0; 1437 int i, regWrites = 0;
@@ -1469,7 +1525,11 @@ static int ath9k_hw_process_ini(struct ath_hw *ah,
1469 DO_DELAY(regWrites); 1525 DO_DELAY(regWrites);
1470 } 1526 }
1471 1527
1472 ath9k_hw_write_regs(ah, modesIndex, freqIndex, regWrites); 1528 ath9k_hw_write_regs(ah, freqIndex, regWrites);
1529
1530 if (AR_SREV_9271_10(ah))
1531 REG_WRITE_ARRAY(&ah->iniModes_9271_1_0_only,
1532 modesIndex, regWrites);
1473 1533
1474 if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) { 1534 if (AR_SREV_9280_20(ah) && IS_CHAN_A_5MHZ_SPACED(chan)) {
1475 REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex, 1535 REG_WRITE_ARRAY(&ah->iniModesAdditional, modesIndex,
@@ -1477,7 +1537,7 @@ static int ath9k_hw_process_ini(struct ath_hw *ah,
1477 } 1537 }
1478 1538
1479 ath9k_hw_override_ini(ah, chan); 1539 ath9k_hw_override_ini(ah, chan);
1480 ath9k_hw_set_regs(ah, chan, macmode); 1540 ath9k_hw_set_regs(ah, chan);
1481 ath9k_hw_init_chain_masks(ah); 1541 ath9k_hw_init_chain_masks(ah);
1482 1542
1483 if (OLC_FOR_AR9280_20_LATER) 1543 if (OLC_FOR_AR9280_20_LATER)
@@ -1491,8 +1551,8 @@ static int ath9k_hw_process_ini(struct ath_hw *ah,
1491 (u32) regulatory->power_limit)); 1551 (u32) regulatory->power_limit));
1492 1552
1493 if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) { 1553 if (!ath9k_hw_set_rf_regs(ah, chan, freqIndex)) {
1494 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 1554 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
1495 "ar5416SetRfRegs failed\n"); 1555 "ar5416SetRfRegs failed\n");
1496 return -EIO; 1556 return -EIO;
1497 } 1557 }
1498 1558
@@ -1697,16 +1757,14 @@ static bool ath9k_hw_set_reset(struct ath_hw *ah, int type)
1697 1757
1698 REG_WRITE(ah, AR_RTC_RC, 0); 1758 REG_WRITE(ah, AR_RTC_RC, 0);
1699 if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) { 1759 if (!ath9k_hw_wait(ah, AR_RTC_RC, AR_RTC_RC_M, 0, AH_WAIT_TIMEOUT)) {
1700 DPRINTF(ah->ah_sc, ATH_DBG_RESET, 1760 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
1701 "RTC stuck in MAC reset\n"); 1761 "RTC stuck in MAC reset\n");
1702 return false; 1762 return false;
1703 } 1763 }
1704 1764
1705 if (!AR_SREV_9100(ah)) 1765 if (!AR_SREV_9100(ah))
1706 REG_WRITE(ah, AR_RC, 0); 1766 REG_WRITE(ah, AR_RC, 0);
1707 1767
1708 ath9k_hw_init_pll(ah, NULL);
1709
1710 if (AR_SREV_9100(ah)) 1768 if (AR_SREV_9100(ah))
1711 udelay(50); 1769 udelay(50);
1712 1770
@@ -1734,7 +1792,8 @@ static bool ath9k_hw_set_reset_power_on(struct ath_hw *ah)
1734 AR_RTC_STATUS_M, 1792 AR_RTC_STATUS_M,
1735 AR_RTC_STATUS_ON, 1793 AR_RTC_STATUS_ON,
1736 AH_WAIT_TIMEOUT)) { 1794 AH_WAIT_TIMEOUT)) {
1737 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "RTC not waking up\n"); 1795 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
1796 "RTC not waking up\n");
1738 return false; 1797 return false;
1739 } 1798 }
1740 1799
@@ -1759,8 +1818,7 @@ static bool ath9k_hw_set_reset_reg(struct ath_hw *ah, u32 type)
1759 } 1818 }
1760} 1819}
1761 1820
1762static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan, 1821static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan)
1763 enum ath9k_ht_macmode macmode)
1764{ 1822{
1765 u32 phymode; 1823 u32 phymode;
1766 u32 enableDacFifo = 0; 1824 u32 enableDacFifo = 0;
@@ -1779,12 +1837,10 @@ static void ath9k_hw_set_regs(struct ath_hw *ah, struct ath9k_channel *chan,
1779 (chan->chanmode == CHANNEL_G_HT40PLUS)) 1837 (chan->chanmode == CHANNEL_G_HT40PLUS))
1780 phymode |= AR_PHY_FC_DYN2040_PRI_CH; 1838 phymode |= AR_PHY_FC_DYN2040_PRI_CH;
1781 1839
1782 if (ah->extprotspacing == ATH9K_HT_EXTPROTSPACING_25)
1783 phymode |= AR_PHY_FC_DYN2040_EXT_CH;
1784 } 1840 }
1785 REG_WRITE(ah, AR_PHY_TURBO, phymode); 1841 REG_WRITE(ah, AR_PHY_TURBO, phymode);
1786 1842
1787 ath9k_hw_set11nmac2040(ah, macmode); 1843 ath9k_hw_set11nmac2040(ah);
1788 1844
1789 REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S); 1845 REG_WRITE(ah, AR_GTXTO, 25 << AR_GTXTO_TIMEOUT_LIMIT_S);
1790 REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S); 1846 REG_WRITE(ah, AR_CST, 0xF << AR_CST_TIMEOUT_LIMIT_S);
@@ -1810,17 +1866,19 @@ static bool ath9k_hw_chip_reset(struct ath_hw *ah,
1810} 1866}
1811 1867
1812static bool ath9k_hw_channel_change(struct ath_hw *ah, 1868static bool ath9k_hw_channel_change(struct ath_hw *ah,
1813 struct ath9k_channel *chan, 1869 struct ath9k_channel *chan)
1814 enum ath9k_ht_macmode macmode)
1815{ 1870{
1816 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 1871 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
1872 struct ath_common *common = ath9k_hw_common(ah);
1817 struct ieee80211_channel *channel = chan->chan; 1873 struct ieee80211_channel *channel = chan->chan;
1818 u32 synthDelay, qnum; 1874 u32 synthDelay, qnum;
1875 int r;
1819 1876
1820 for (qnum = 0; qnum < AR_NUM_QCU; qnum++) { 1877 for (qnum = 0; qnum < AR_NUM_QCU; qnum++) {
1821 if (ath9k_hw_numtxpending(ah, qnum)) { 1878 if (ath9k_hw_numtxpending(ah, qnum)) {
1822 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, 1879 ath_print(common, ATH_DBG_QUEUE,
1823 "Transmit frames pending on queue %d\n", qnum); 1880 "Transmit frames pending on "
1881 "queue %d\n", qnum);
1824 return false; 1882 return false;
1825 } 1883 }
1826 } 1884 }
@@ -1828,21 +1886,18 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1828 REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN); 1886 REG_WRITE(ah, AR_PHY_RFBUS_REQ, AR_PHY_RFBUS_REQ_EN);
1829 if (!ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN, 1887 if (!ath9k_hw_wait(ah, AR_PHY_RFBUS_GRANT, AR_PHY_RFBUS_GRANT_EN,
1830 AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT)) { 1888 AR_PHY_RFBUS_GRANT_EN, AH_WAIT_TIMEOUT)) {
1831 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 1889 ath_print(common, ATH_DBG_FATAL,
1832 "Could not kill baseband RX\n"); 1890 "Could not kill baseband RX\n");
1833 return false; 1891 return false;
1834 } 1892 }
1835 1893
1836 ath9k_hw_set_regs(ah, chan, macmode); 1894 ath9k_hw_set_regs(ah, chan);
1837 1895
1838 if (AR_SREV_9280_10_OR_LATER(ah)) { 1896 r = ah->ath9k_hw_rf_set_freq(ah, chan);
1839 ath9k_hw_ar9280_set_channel(ah, chan); 1897 if (r) {
1840 } else { 1898 ath_print(common, ATH_DBG_FATAL,
1841 if (!(ath9k_hw_set_channel(ah, chan))) { 1899 "Failed to set channel\n");
1842 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 1900 return false;
1843 "Failed to set channel\n");
1844 return false;
1845 }
1846 } 1901 }
1847 1902
1848 ah->eep_ops->set_txpower(ah, chan, 1903 ah->eep_ops->set_txpower(ah, chan,
@@ -1865,10 +1920,7 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1865 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) 1920 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
1866 ath9k_hw_set_delta_slope(ah, chan); 1921 ath9k_hw_set_delta_slope(ah, chan);
1867 1922
1868 if (AR_SREV_9280_10_OR_LATER(ah)) 1923 ah->ath9k_hw_spur_mitigate_freq(ah, chan);
1869 ath9k_hw_9280_spur_mitigate(ah, chan);
1870 else
1871 ath9k_hw_spur_mitigate(ah, chan);
1872 1924
1873 if (!chan->oneTimeCalsDone) 1925 if (!chan->oneTimeCalsDone)
1874 chan->oneTimeCalsDone = true; 1926 chan->oneTimeCalsDone = true;
@@ -1876,457 +1928,6 @@ static bool ath9k_hw_channel_change(struct ath_hw *ah,
1876 return true; 1928 return true;
1877} 1929}
1878 1930
1879static void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
1880{
1881 int bb_spur = AR_NO_SPUR;
1882 int freq;
1883 int bin, cur_bin;
1884 int bb_spur_off, spur_subchannel_sd;
1885 int spur_freq_sd;
1886 int spur_delta_phase;
1887 int denominator;
1888 int upper, lower, cur_vit_mask;
1889 int tmp, newVal;
1890 int i;
1891 int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
1892 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
1893 };
1894 int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
1895 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
1896 };
1897 int inc[4] = { 0, 100, 0, 0 };
1898 struct chan_centers centers;
1899
1900 int8_t mask_m[123];
1901 int8_t mask_p[123];
1902 int8_t mask_amt;
1903 int tmp_mask;
1904 int cur_bb_spur;
1905 bool is2GHz = IS_CHAN_2GHZ(chan);
1906
1907 memset(&mask_m, 0, sizeof(int8_t) * 123);
1908 memset(&mask_p, 0, sizeof(int8_t) * 123);
1909
1910 ath9k_hw_get_channel_centers(ah, chan, &centers);
1911 freq = centers.synth_center;
1912
1913 ah->config.spurmode = SPUR_ENABLE_EEPROM;
1914 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
1915 cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
1916
1917 if (is2GHz)
1918 cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
1919 else
1920 cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;
1921
1922 if (AR_NO_SPUR == cur_bb_spur)
1923 break;
1924 cur_bb_spur = cur_bb_spur - freq;
1925
1926 if (IS_CHAN_HT40(chan)) {
1927 if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) &&
1928 (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
1929 bb_spur = cur_bb_spur;
1930 break;
1931 }
1932 } else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
1933 (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
1934 bb_spur = cur_bb_spur;
1935 break;
1936 }
1937 }
1938
1939 if (AR_NO_SPUR == bb_spur) {
1940 REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
1941 AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
1942 return;
1943 } else {
1944 REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
1945 AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
1946 }
1947
1948 bin = bb_spur * 320;
1949
1950 tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
1951
1952 newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
1953 AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
1954 AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
1955 AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
1956 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal);
1957
1958 newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
1959 AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
1960 AR_PHY_SPUR_REG_MASK_RATE_SELECT |
1961 AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
1962 SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
1963 REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);
1964
1965 if (IS_CHAN_HT40(chan)) {
1966 if (bb_spur < 0) {
1967 spur_subchannel_sd = 1;
1968 bb_spur_off = bb_spur + 10;
1969 } else {
1970 spur_subchannel_sd = 0;
1971 bb_spur_off = bb_spur - 10;
1972 }
1973 } else {
1974 spur_subchannel_sd = 0;
1975 bb_spur_off = bb_spur;
1976 }
1977
1978 if (IS_CHAN_HT40(chan))
1979 spur_delta_phase =
1980 ((bb_spur * 262144) /
1981 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
1982 else
1983 spur_delta_phase =
1984 ((bb_spur * 524288) /
1985 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
1986
1987 denominator = IS_CHAN_2GHZ(chan) ? 44 : 40;
1988 spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff;
1989
1990 newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
1991 SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
1992 SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
1993 REG_WRITE(ah, AR_PHY_TIMING11, newVal);
1994
1995 newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
1996 REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
1997
1998 cur_bin = -6000;
1999 upper = bin + 100;
2000 lower = bin - 100;
2001
2002 for (i = 0; i < 4; i++) {
2003 int pilot_mask = 0;
2004 int chan_mask = 0;
2005 int bp = 0;
2006 for (bp = 0; bp < 30; bp++) {
2007 if ((cur_bin > lower) && (cur_bin < upper)) {
2008 pilot_mask = pilot_mask | 0x1 << bp;
2009 chan_mask = chan_mask | 0x1 << bp;
2010 }
2011 cur_bin += 100;
2012 }
2013 cur_bin += inc[i];
2014 REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
2015 REG_WRITE(ah, chan_mask_reg[i], chan_mask);
2016 }
2017
2018 cur_vit_mask = 6100;
2019 upper = bin + 120;
2020 lower = bin - 120;
2021
2022 for (i = 0; i < 123; i++) {
2023 if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
2024
2025 /* workaround for gcc bug #37014 */
2026 volatile int tmp_v = abs(cur_vit_mask - bin);
2027
2028 if (tmp_v < 75)
2029 mask_amt = 1;
2030 else
2031 mask_amt = 0;
2032 if (cur_vit_mask < 0)
2033 mask_m[abs(cur_vit_mask / 100)] = mask_amt;
2034 else
2035 mask_p[cur_vit_mask / 100] = mask_amt;
2036 }
2037 cur_vit_mask -= 100;
2038 }
2039
2040 tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
2041 | (mask_m[48] << 26) | (mask_m[49] << 24)
2042 | (mask_m[50] << 22) | (mask_m[51] << 20)
2043 | (mask_m[52] << 18) | (mask_m[53] << 16)
2044 | (mask_m[54] << 14) | (mask_m[55] << 12)
2045 | (mask_m[56] << 10) | (mask_m[57] << 8)
2046 | (mask_m[58] << 6) | (mask_m[59] << 4)
2047 | (mask_m[60] << 2) | (mask_m[61] << 0);
2048 REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
2049 REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
2050
2051 tmp_mask = (mask_m[31] << 28)
2052 | (mask_m[32] << 26) | (mask_m[33] << 24)
2053 | (mask_m[34] << 22) | (mask_m[35] << 20)
2054 | (mask_m[36] << 18) | (mask_m[37] << 16)
2055 | (mask_m[48] << 14) | (mask_m[39] << 12)
2056 | (mask_m[40] << 10) | (mask_m[41] << 8)
2057 | (mask_m[42] << 6) | (mask_m[43] << 4)
2058 | (mask_m[44] << 2) | (mask_m[45] << 0);
2059 REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
2060 REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
2061
2062 tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
2063 | (mask_m[18] << 26) | (mask_m[18] << 24)
2064 | (mask_m[20] << 22) | (mask_m[20] << 20)
2065 | (mask_m[22] << 18) | (mask_m[22] << 16)
2066 | (mask_m[24] << 14) | (mask_m[24] << 12)
2067 | (mask_m[25] << 10) | (mask_m[26] << 8)
2068 | (mask_m[27] << 6) | (mask_m[28] << 4)
2069 | (mask_m[29] << 2) | (mask_m[30] << 0);
2070 REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
2071 REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
2072
2073 tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
2074 | (mask_m[2] << 26) | (mask_m[3] << 24)
2075 | (mask_m[4] << 22) | (mask_m[5] << 20)
2076 | (mask_m[6] << 18) | (mask_m[7] << 16)
2077 | (mask_m[8] << 14) | (mask_m[9] << 12)
2078 | (mask_m[10] << 10) | (mask_m[11] << 8)
2079 | (mask_m[12] << 6) | (mask_m[13] << 4)
2080 | (mask_m[14] << 2) | (mask_m[15] << 0);
2081 REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
2082 REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
2083
2084 tmp_mask = (mask_p[15] << 28)
2085 | (mask_p[14] << 26) | (mask_p[13] << 24)
2086 | (mask_p[12] << 22) | (mask_p[11] << 20)
2087 | (mask_p[10] << 18) | (mask_p[9] << 16)
2088 | (mask_p[8] << 14) | (mask_p[7] << 12)
2089 | (mask_p[6] << 10) | (mask_p[5] << 8)
2090 | (mask_p[4] << 6) | (mask_p[3] << 4)
2091 | (mask_p[2] << 2) | (mask_p[1] << 0);
2092 REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
2093 REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
2094
2095 tmp_mask = (mask_p[30] << 28)
2096 | (mask_p[29] << 26) | (mask_p[28] << 24)
2097 | (mask_p[27] << 22) | (mask_p[26] << 20)
2098 | (mask_p[25] << 18) | (mask_p[24] << 16)
2099 | (mask_p[23] << 14) | (mask_p[22] << 12)
2100 | (mask_p[21] << 10) | (mask_p[20] << 8)
2101 | (mask_p[19] << 6) | (mask_p[18] << 4)
2102 | (mask_p[17] << 2) | (mask_p[16] << 0);
2103 REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
2104 REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
2105
2106 tmp_mask = (mask_p[45] << 28)
2107 | (mask_p[44] << 26) | (mask_p[43] << 24)
2108 | (mask_p[42] << 22) | (mask_p[41] << 20)
2109 | (mask_p[40] << 18) | (mask_p[39] << 16)
2110 | (mask_p[38] << 14) | (mask_p[37] << 12)
2111 | (mask_p[36] << 10) | (mask_p[35] << 8)
2112 | (mask_p[34] << 6) | (mask_p[33] << 4)
2113 | (mask_p[32] << 2) | (mask_p[31] << 0);
2114 REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
2115 REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
2116
2117 tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
2118 | (mask_p[59] << 26) | (mask_p[58] << 24)
2119 | (mask_p[57] << 22) | (mask_p[56] << 20)
2120 | (mask_p[55] << 18) | (mask_p[54] << 16)
2121 | (mask_p[53] << 14) | (mask_p[52] << 12)
2122 | (mask_p[51] << 10) | (mask_p[50] << 8)
2123 | (mask_p[49] << 6) | (mask_p[48] << 4)
2124 | (mask_p[47] << 2) | (mask_p[46] << 0);
2125 REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
2126 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
2127}
2128
2129static void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
2130{
2131 int bb_spur = AR_NO_SPUR;
2132 int bin, cur_bin;
2133 int spur_freq_sd;
2134 int spur_delta_phase;
2135 int denominator;
2136 int upper, lower, cur_vit_mask;
2137 int tmp, new;
2138 int i;
2139 int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
2140 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
2141 };
2142 int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
2143 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
2144 };
2145 int inc[4] = { 0, 100, 0, 0 };
2146
2147 int8_t mask_m[123];
2148 int8_t mask_p[123];
2149 int8_t mask_amt;
2150 int tmp_mask;
2151 int cur_bb_spur;
2152 bool is2GHz = IS_CHAN_2GHZ(chan);
2153
2154 memset(&mask_m, 0, sizeof(int8_t) * 123);
2155 memset(&mask_p, 0, sizeof(int8_t) * 123);
2156
2157 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
2158 cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
2159 if (AR_NO_SPUR == cur_bb_spur)
2160 break;
2161 cur_bb_spur = cur_bb_spur - (chan->channel * 10);
2162 if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
2163 bb_spur = cur_bb_spur;
2164 break;
2165 }
2166 }
2167
2168 if (AR_NO_SPUR == bb_spur)
2169 return;
2170
2171 bin = bb_spur * 32;
2172
2173 tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
2174 new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
2175 AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
2176 AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
2177 AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
2178
2179 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
2180
2181 new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
2182 AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
2183 AR_PHY_SPUR_REG_MASK_RATE_SELECT |
2184 AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
2185 SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
2186 REG_WRITE(ah, AR_PHY_SPUR_REG, new);
2187
2188 spur_delta_phase = ((bb_spur * 524288) / 100) &
2189 AR_PHY_TIMING11_SPUR_DELTA_PHASE;
2190
2191 denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
2192 spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
2193
2194 new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
2195 SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
2196 SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
2197 REG_WRITE(ah, AR_PHY_TIMING11, new);
2198
2199 cur_bin = -6000;
2200 upper = bin + 100;
2201 lower = bin - 100;
2202
2203 for (i = 0; i < 4; i++) {
2204 int pilot_mask = 0;
2205 int chan_mask = 0;
2206 int bp = 0;
2207 for (bp = 0; bp < 30; bp++) {
2208 if ((cur_bin > lower) && (cur_bin < upper)) {
2209 pilot_mask = pilot_mask | 0x1 << bp;
2210 chan_mask = chan_mask | 0x1 << bp;
2211 }
2212 cur_bin += 100;
2213 }
2214 cur_bin += inc[i];
2215 REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
2216 REG_WRITE(ah, chan_mask_reg[i], chan_mask);
2217 }
2218
2219 cur_vit_mask = 6100;
2220 upper = bin + 120;
2221 lower = bin - 120;
2222
2223 for (i = 0; i < 123; i++) {
2224 if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
2225
2226 /* workaround for gcc bug #37014 */
2227 volatile int tmp_v = abs(cur_vit_mask - bin);
2228
2229 if (tmp_v < 75)
2230 mask_amt = 1;
2231 else
2232 mask_amt = 0;
2233 if (cur_vit_mask < 0)
2234 mask_m[abs(cur_vit_mask / 100)] = mask_amt;
2235 else
2236 mask_p[cur_vit_mask / 100] = mask_amt;
2237 }
2238 cur_vit_mask -= 100;
2239 }
2240
2241 tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
2242 | (mask_m[48] << 26) | (mask_m[49] << 24)
2243 | (mask_m[50] << 22) | (mask_m[51] << 20)
2244 | (mask_m[52] << 18) | (mask_m[53] << 16)
2245 | (mask_m[54] << 14) | (mask_m[55] << 12)
2246 | (mask_m[56] << 10) | (mask_m[57] << 8)
2247 | (mask_m[58] << 6) | (mask_m[59] << 4)
2248 | (mask_m[60] << 2) | (mask_m[61] << 0);
2249 REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
2250 REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
2251
2252 tmp_mask = (mask_m[31] << 28)
2253 | (mask_m[32] << 26) | (mask_m[33] << 24)
2254 | (mask_m[34] << 22) | (mask_m[35] << 20)
2255 | (mask_m[36] << 18) | (mask_m[37] << 16)
2256 | (mask_m[48] << 14) | (mask_m[39] << 12)
2257 | (mask_m[40] << 10) | (mask_m[41] << 8)
2258 | (mask_m[42] << 6) | (mask_m[43] << 4)
2259 | (mask_m[44] << 2) | (mask_m[45] << 0);
2260 REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
2261 REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
2262
2263 tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
2264 | (mask_m[18] << 26) | (mask_m[18] << 24)
2265 | (mask_m[20] << 22) | (mask_m[20] << 20)
2266 | (mask_m[22] << 18) | (mask_m[22] << 16)
2267 | (mask_m[24] << 14) | (mask_m[24] << 12)
2268 | (mask_m[25] << 10) | (mask_m[26] << 8)
2269 | (mask_m[27] << 6) | (mask_m[28] << 4)
2270 | (mask_m[29] << 2) | (mask_m[30] << 0);
2271 REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
2272 REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
2273
2274 tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
2275 | (mask_m[2] << 26) | (mask_m[3] << 24)
2276 | (mask_m[4] << 22) | (mask_m[5] << 20)
2277 | (mask_m[6] << 18) | (mask_m[7] << 16)
2278 | (mask_m[8] << 14) | (mask_m[9] << 12)
2279 | (mask_m[10] << 10) | (mask_m[11] << 8)
2280 | (mask_m[12] << 6) | (mask_m[13] << 4)
2281 | (mask_m[14] << 2) | (mask_m[15] << 0);
2282 REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
2283 REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
2284
2285 tmp_mask = (mask_p[15] << 28)
2286 | (mask_p[14] << 26) | (mask_p[13] << 24)
2287 | (mask_p[12] << 22) | (mask_p[11] << 20)
2288 | (mask_p[10] << 18) | (mask_p[9] << 16)
2289 | (mask_p[8] << 14) | (mask_p[7] << 12)
2290 | (mask_p[6] << 10) | (mask_p[5] << 8)
2291 | (mask_p[4] << 6) | (mask_p[3] << 4)
2292 | (mask_p[2] << 2) | (mask_p[1] << 0);
2293 REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
2294 REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
2295
2296 tmp_mask = (mask_p[30] << 28)
2297 | (mask_p[29] << 26) | (mask_p[28] << 24)
2298 | (mask_p[27] << 22) | (mask_p[26] << 20)
2299 | (mask_p[25] << 18) | (mask_p[24] << 16)
2300 | (mask_p[23] << 14) | (mask_p[22] << 12)
2301 | (mask_p[21] << 10) | (mask_p[20] << 8)
2302 | (mask_p[19] << 6) | (mask_p[18] << 4)
2303 | (mask_p[17] << 2) | (mask_p[16] << 0);
2304 REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
2305 REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
2306
2307 tmp_mask = (mask_p[45] << 28)
2308 | (mask_p[44] << 26) | (mask_p[43] << 24)
2309 | (mask_p[42] << 22) | (mask_p[41] << 20)
2310 | (mask_p[40] << 18) | (mask_p[39] << 16)
2311 | (mask_p[38] << 14) | (mask_p[37] << 12)
2312 | (mask_p[36] << 10) | (mask_p[35] << 8)
2313 | (mask_p[34] << 6) | (mask_p[33] << 4)
2314 | (mask_p[32] << 2) | (mask_p[31] << 0);
2315 REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
2316 REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
2317
2318 tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
2319 | (mask_p[59] << 26) | (mask_p[58] << 24)
2320 | (mask_p[57] << 22) | (mask_p[56] << 20)
2321 | (mask_p[55] << 18) | (mask_p[54] << 16)
2322 | (mask_p[53] << 14) | (mask_p[52] << 12)
2323 | (mask_p[51] << 10) | (mask_p[50] << 8)
2324 | (mask_p[49] << 6) | (mask_p[48] << 4)
2325 | (mask_p[47] << 2) | (mask_p[46] << 0);
2326 REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
2327 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
2328}
2329
2330static void ath9k_enable_rfkill(struct ath_hw *ah) 1931static void ath9k_enable_rfkill(struct ath_hw *ah)
2331{ 1932{
2332 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL, 1933 REG_SET_BIT(ah, AR_GPIO_INPUT_EN_VAL,
@@ -2342,17 +1943,16 @@ static void ath9k_enable_rfkill(struct ath_hw *ah)
2342int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, 1943int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2343 bool bChannelChange) 1944 bool bChannelChange)
2344{ 1945{
1946 struct ath_common *common = ath9k_hw_common(ah);
2345 u32 saveLedState; 1947 u32 saveLedState;
2346 struct ath_softc *sc = ah->ah_sc;
2347 struct ath9k_channel *curchan = ah->curchan; 1948 struct ath9k_channel *curchan = ah->curchan;
2348 u32 saveDefAntenna; 1949 u32 saveDefAntenna;
2349 u32 macStaId1; 1950 u32 macStaId1;
2350 u64 tsf = 0; 1951 u64 tsf = 0;
2351 int i, rx_chainmask, r; 1952 int i, rx_chainmask, r;
2352 1953
2353 ah->extprotspacing = sc->ht_extprotspacing; 1954 ah->txchainmask = common->tx_chainmask;
2354 ah->txchainmask = sc->tx_chainmask; 1955 ah->rxchainmask = common->rx_chainmask;
2355 ah->rxchainmask = sc->rx_chainmask;
2356 1956
2357 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) 1957 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
2358 return -EIO; 1958 return -EIO;
@@ -2369,7 +1969,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2369 !(AR_SREV_9280(ah) || IS_CHAN_A_5MHZ_SPACED(chan) || 1969 !(AR_SREV_9280(ah) || IS_CHAN_A_5MHZ_SPACED(chan) ||
2370 IS_CHAN_A_5MHZ_SPACED(ah->curchan))) { 1970 IS_CHAN_A_5MHZ_SPACED(ah->curchan))) {
2371 1971
2372 if (ath9k_hw_channel_change(ah, chan, sc->tx_chan_width)) { 1972 if (ath9k_hw_channel_change(ah, chan)) {
2373 ath9k_hw_loadnf(ah, ah->curchan); 1973 ath9k_hw_loadnf(ah, ah->curchan);
2374 ath9k_hw_start_nfcal(ah); 1974 ath9k_hw_start_nfcal(ah);
2375 return 0; 1975 return 0;
@@ -2400,7 +2000,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2400 } 2000 }
2401 2001
2402 if (!ath9k_hw_chip_reset(ah, chan)) { 2002 if (!ath9k_hw_chip_reset(ah, chan)) {
2403 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Chip reset failed\n"); 2003 ath_print(common, ATH_DBG_FATAL, "Chip reset failed\n");
2404 return -EINVAL; 2004 return -EINVAL;
2405 } 2005 }
2406 2006
@@ -2429,7 +2029,7 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2429 REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3, 2029 REG_SET_BIT(ah, AR_MAC_PCU_ASYNC_FIFO_REG3,
2430 AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET); 2030 AR_MAC_PCU_ASYNC_FIFO_REG3_SOFT_RESET);
2431 } 2031 }
2432 r = ath9k_hw_process_ini(ah, chan, sc->tx_chan_width); 2032 r = ath9k_hw_process_ini(ah, chan);
2433 if (r) 2033 if (r)
2434 return r; 2034 return r;
2435 2035
@@ -2453,17 +2053,14 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2453 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan)) 2053 if (IS_CHAN_OFDM(chan) || IS_CHAN_HT(chan))
2454 ath9k_hw_set_delta_slope(ah, chan); 2054 ath9k_hw_set_delta_slope(ah, chan);
2455 2055
2456 if (AR_SREV_9280_10_OR_LATER(ah)) 2056 ah->ath9k_hw_spur_mitigate_freq(ah, chan);
2457 ath9k_hw_9280_spur_mitigate(ah, chan);
2458 else
2459 ath9k_hw_spur_mitigate(ah, chan);
2460
2461 ah->eep_ops->set_board_values(ah, chan); 2057 ah->eep_ops->set_board_values(ah, chan);
2462 2058
2463 ath9k_hw_decrease_chain_power(ah, chan); 2059 if (AR_SREV_5416(ah))
2060 ath9k_hw_decrease_chain_power(ah, chan);
2464 2061
2465 REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(ah->macaddr)); 2062 REG_WRITE(ah, AR_STA_ID0, get_unaligned_le32(common->macaddr));
2466 REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(ah->macaddr + 4) 2063 REG_WRITE(ah, AR_STA_ID1, get_unaligned_le16(common->macaddr + 4)
2467 | macStaId1 2064 | macStaId1
2468 | AR_STA_ID1_RTS_USE_DEF 2065 | AR_STA_ID1_RTS_USE_DEF
2469 | (ah->config. 2066 | (ah->config.
@@ -2471,24 +2068,19 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2471 | ah->sta_id1_defaults); 2068 | ah->sta_id1_defaults);
2472 ath9k_hw_set_operating_mode(ah, ah->opmode); 2069 ath9k_hw_set_operating_mode(ah, ah->opmode);
2473 2070
2474 REG_WRITE(ah, AR_BSSMSKL, get_unaligned_le32(sc->bssidmask)); 2071 ath_hw_setbssidmask(common);
2475 REG_WRITE(ah, AR_BSSMSKU, get_unaligned_le16(sc->bssidmask + 4));
2476 2072
2477 REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna); 2073 REG_WRITE(ah, AR_DEF_ANTENNA, saveDefAntenna);
2478 2074
2479 REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(sc->curbssid)); 2075 ath9k_hw_write_associd(ah);
2480 REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(sc->curbssid + 4) |
2481 ((sc->curaid & 0x3fff) << AR_BSS_ID1_AID_S));
2482 2076
2483 REG_WRITE(ah, AR_ISR, ~0); 2077 REG_WRITE(ah, AR_ISR, ~0);
2484 2078
2485 REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR); 2079 REG_WRITE(ah, AR_RSSI_THR, INIT_RSSI_THR);
2486 2080
2487 if (AR_SREV_9280_10_OR_LATER(ah)) 2081 r = ah->ath9k_hw_rf_set_freq(ah, chan);
2488 ath9k_hw_ar9280_set_channel(ah, chan); 2082 if (r)
2489 else 2083 return r;
2490 if (!(ath9k_hw_set_channel(ah, chan)))
2491 return -EIO;
2492 2084
2493 for (i = 0; i < AR_NUM_DCU; i++) 2085 for (i = 0; i < AR_NUM_DCU; i++)
2494 REG_WRITE(ah, AR_DQCUMASK(i), 1 << i); 2086 REG_WRITE(ah, AR_DQCUMASK(i), 1 << i);
@@ -2558,13 +2150,13 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2558 u32 mask; 2150 u32 mask;
2559 mask = REG_READ(ah, AR_CFG); 2151 mask = REG_READ(ah, AR_CFG);
2560 if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) { 2152 if (mask & (AR_CFG_SWRB | AR_CFG_SWTB | AR_CFG_SWRG)) {
2561 DPRINTF(ah->ah_sc, ATH_DBG_RESET, 2153 ath_print(common, ATH_DBG_RESET,
2562 "CFG Byte Swap Set 0x%x\n", mask); 2154 "CFG Byte Swap Set 0x%x\n", mask);
2563 } else { 2155 } else {
2564 mask = 2156 mask =
2565 INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB; 2157 INIT_CONFIG_STATUS | AR_CFG_SWRB | AR_CFG_SWTB;
2566 REG_WRITE(ah, AR_CFG, mask); 2158 REG_WRITE(ah, AR_CFG, mask);
2567 DPRINTF(ah->ah_sc, ATH_DBG_RESET, 2159 ath_print(common, ATH_DBG_RESET,
2568 "Setting CFG 0x%x\n", REG_READ(ah, AR_CFG)); 2160 "Setting CFG 0x%x\n", REG_READ(ah, AR_CFG));
2569 } 2161 }
2570 } else { 2162 } else {
@@ -2577,11 +2169,12 @@ int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
2577#endif 2169#endif
2578 } 2170 }
2579 2171
2580 if (ah->ah_sc->sc_flags & SC_OP_BTCOEX_ENABLED) 2172 if (ah->btcoex_hw.enabled)
2581 ath9k_hw_btcoex_enable(ah); 2173 ath9k_hw_btcoex_enable(ah);
2582 2174
2583 return 0; 2175 return 0;
2584} 2176}
2177EXPORT_SYMBOL(ath9k_hw_reset);
2585 2178
2586/************************/ 2179/************************/
2587/* Key Cache Management */ 2180/* Key Cache Management */
@@ -2592,8 +2185,8 @@ bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry)
2592 u32 keyType; 2185 u32 keyType;
2593 2186
2594 if (entry >= ah->caps.keycache_size) { 2187 if (entry >= ah->caps.keycache_size) {
2595 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 2188 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
2596 "keychache entry %u out of range\n", entry); 2189 "keychache entry %u out of range\n", entry);
2597 return false; 2190 return false;
2598 } 2191 }
2599 2192
@@ -2620,14 +2213,15 @@ bool ath9k_hw_keyreset(struct ath_hw *ah, u16 entry)
2620 2213
2621 return true; 2214 return true;
2622} 2215}
2216EXPORT_SYMBOL(ath9k_hw_keyreset);
2623 2217
2624bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac) 2218bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac)
2625{ 2219{
2626 u32 macHi, macLo; 2220 u32 macHi, macLo;
2627 2221
2628 if (entry >= ah->caps.keycache_size) { 2222 if (entry >= ah->caps.keycache_size) {
2629 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 2223 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
2630 "keychache entry %u out of range\n", entry); 2224 "keychache entry %u out of range\n", entry);
2631 return false; 2225 return false;
2632 } 2226 }
2633 2227
@@ -2648,18 +2242,20 @@ bool ath9k_hw_keysetmac(struct ath_hw *ah, u16 entry, const u8 *mac)
2648 2242
2649 return true; 2243 return true;
2650} 2244}
2245EXPORT_SYMBOL(ath9k_hw_keysetmac);
2651 2246
2652bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry, 2247bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
2653 const struct ath9k_keyval *k, 2248 const struct ath9k_keyval *k,
2654 const u8 *mac) 2249 const u8 *mac)
2655{ 2250{
2656 const struct ath9k_hw_capabilities *pCap = &ah->caps; 2251 const struct ath9k_hw_capabilities *pCap = &ah->caps;
2252 struct ath_common *common = ath9k_hw_common(ah);
2657 u32 key0, key1, key2, key3, key4; 2253 u32 key0, key1, key2, key3, key4;
2658 u32 keyType; 2254 u32 keyType;
2659 2255
2660 if (entry >= pCap->keycache_size) { 2256 if (entry >= pCap->keycache_size) {
2661 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 2257 ath_print(common, ATH_DBG_FATAL,
2662 "keycache entry %u out of range\n", entry); 2258 "keycache entry %u out of range\n", entry);
2663 return false; 2259 return false;
2664 } 2260 }
2665 2261
@@ -2669,9 +2265,9 @@ bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
2669 break; 2265 break;
2670 case ATH9K_CIPHER_AES_CCM: 2266 case ATH9K_CIPHER_AES_CCM:
2671 if (!(pCap->hw_caps & ATH9K_HW_CAP_CIPHER_AESCCM)) { 2267 if (!(pCap->hw_caps & ATH9K_HW_CAP_CIPHER_AESCCM)) {
2672 DPRINTF(ah->ah_sc, ATH_DBG_ANY, 2268 ath_print(common, ATH_DBG_ANY,
2673 "AES-CCM not supported by mac rev 0x%x\n", 2269 "AES-CCM not supported by mac rev 0x%x\n",
2674 ah->hw_version.macRev); 2270 ah->hw_version.macRev);
2675 return false; 2271 return false;
2676 } 2272 }
2677 keyType = AR_KEYTABLE_TYPE_CCM; 2273 keyType = AR_KEYTABLE_TYPE_CCM;
@@ -2680,15 +2276,15 @@ bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
2680 keyType = AR_KEYTABLE_TYPE_TKIP; 2276 keyType = AR_KEYTABLE_TYPE_TKIP;
2681 if (ATH9K_IS_MIC_ENABLED(ah) 2277 if (ATH9K_IS_MIC_ENABLED(ah)
2682 && entry + 64 >= pCap->keycache_size) { 2278 && entry + 64 >= pCap->keycache_size) {
2683 DPRINTF(ah->ah_sc, ATH_DBG_ANY, 2279 ath_print(common, ATH_DBG_ANY,
2684 "entry %u inappropriate for TKIP\n", entry); 2280 "entry %u inappropriate for TKIP\n", entry);
2685 return false; 2281 return false;
2686 } 2282 }
2687 break; 2283 break;
2688 case ATH9K_CIPHER_WEP: 2284 case ATH9K_CIPHER_WEP:
2689 if (k->kv_len < WLAN_KEY_LEN_WEP40) { 2285 if (k->kv_len < WLAN_KEY_LEN_WEP40) {
2690 DPRINTF(ah->ah_sc, ATH_DBG_ANY, 2286 ath_print(common, ATH_DBG_ANY,
2691 "WEP key length %u too small\n", k->kv_len); 2287 "WEP key length %u too small\n", k->kv_len);
2692 return false; 2288 return false;
2693 } 2289 }
2694 if (k->kv_len <= WLAN_KEY_LEN_WEP40) 2290 if (k->kv_len <= WLAN_KEY_LEN_WEP40)
@@ -2702,8 +2298,8 @@ bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
2702 keyType = AR_KEYTABLE_TYPE_CLR; 2298 keyType = AR_KEYTABLE_TYPE_CLR;
2703 break; 2299 break;
2704 default: 2300 default:
2705 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 2301 ath_print(common, ATH_DBG_FATAL,
2706 "cipher %u not supported\n", k->kv_type); 2302 "cipher %u not supported\n", k->kv_type);
2707 return false; 2303 return false;
2708 } 2304 }
2709 2305
@@ -2845,6 +2441,7 @@ bool ath9k_hw_set_keycache_entry(struct ath_hw *ah, u16 entry,
2845 2441
2846 return true; 2442 return true;
2847} 2443}
2444EXPORT_SYMBOL(ath9k_hw_set_keycache_entry);
2848 2445
2849bool ath9k_hw_keyisvalid(struct ath_hw *ah, u16 entry) 2446bool ath9k_hw_keyisvalid(struct ath_hw *ah, u16 entry)
2850{ 2447{
@@ -2855,6 +2452,7 @@ bool ath9k_hw_keyisvalid(struct ath_hw *ah, u16 entry)
2855 } 2452 }
2856 return false; 2453 return false;
2857} 2454}
2455EXPORT_SYMBOL(ath9k_hw_keyisvalid);
2858 2456
2859/******************************/ 2457/******************************/
2860/* Power Management (Chipset) */ 2458/* Power Management (Chipset) */
@@ -2869,8 +2467,9 @@ static void ath9k_set_power_sleep(struct ath_hw *ah, int setChip)
2869 if (!AR_SREV_9100(ah)) 2467 if (!AR_SREV_9100(ah))
2870 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF); 2468 REG_WRITE(ah, AR_RC, AR_RC_AHB | AR_RC_HOSTIF);
2871 2469
2872 REG_CLR_BIT(ah, (AR_RTC_RESET), 2470 if(!AR_SREV_5416(ah))
2873 AR_RTC_RESET_EN); 2471 REG_CLR_BIT(ah, (AR_RTC_RESET),
2472 AR_RTC_RESET_EN);
2874 } 2473 }
2875} 2474}
2876 2475
@@ -2902,6 +2501,7 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
2902 ATH9K_RESET_POWER_ON) != true) { 2501 ATH9K_RESET_POWER_ON) != true) {
2903 return false; 2502 return false;
2904 } 2503 }
2504 ath9k_hw_init_pll(ah, NULL);
2905 } 2505 }
2906 if (AR_SREV_9100(ah)) 2506 if (AR_SREV_9100(ah))
2907 REG_SET_BIT(ah, AR_RTC_RESET, 2507 REG_SET_BIT(ah, AR_RTC_RESET,
@@ -2920,8 +2520,9 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
2920 AR_RTC_FORCE_WAKE_EN); 2520 AR_RTC_FORCE_WAKE_EN);
2921 } 2521 }
2922 if (i == 0) { 2522 if (i == 0) {
2923 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 2523 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
2924 "Failed to wakeup in %uus\n", POWER_UP_TIME / 20); 2524 "Failed to wakeup in %uus\n",
2525 POWER_UP_TIME / 20);
2925 return false; 2526 return false;
2926 } 2527 }
2927 } 2528 }
@@ -2931,9 +2532,9 @@ static bool ath9k_hw_set_power_awake(struct ath_hw *ah, int setChip)
2931 return true; 2532 return true;
2932} 2533}
2933 2534
2934static bool ath9k_hw_setpower_nolock(struct ath_hw *ah, 2535bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
2935 enum ath9k_power_mode mode)
2936{ 2536{
2537 struct ath_common *common = ath9k_hw_common(ah);
2937 int status = true, setChip = true; 2538 int status = true, setChip = true;
2938 static const char *modes[] = { 2539 static const char *modes[] = {
2939 "AWAKE", 2540 "AWAKE",
@@ -2945,8 +2546,8 @@ static bool ath9k_hw_setpower_nolock(struct ath_hw *ah,
2945 if (ah->power_mode == mode) 2546 if (ah->power_mode == mode)
2946 return status; 2547 return status;
2947 2548
2948 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "%s -> %s\n", 2549 ath_print(common, ATH_DBG_RESET, "%s -> %s\n",
2949 modes[ah->power_mode], modes[mode]); 2550 modes[ah->power_mode], modes[mode]);
2950 2551
2951 switch (mode) { 2552 switch (mode) {
2952 case ATH9K_PM_AWAKE: 2553 case ATH9K_PM_AWAKE:
@@ -2960,59 +2561,15 @@ static bool ath9k_hw_setpower_nolock(struct ath_hw *ah,
2960 ath9k_set_power_network_sleep(ah, setChip); 2561 ath9k_set_power_network_sleep(ah, setChip);
2961 break; 2562 break;
2962 default: 2563 default:
2963 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 2564 ath_print(common, ATH_DBG_FATAL,
2964 "Unknown power mode %u\n", mode); 2565 "Unknown power mode %u\n", mode);
2965 return false; 2566 return false;
2966 } 2567 }
2967 ah->power_mode = mode; 2568 ah->power_mode = mode;
2968 2569
2969 return status; 2570 return status;
2970} 2571}
2971 2572EXPORT_SYMBOL(ath9k_hw_setpower);
2972bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode)
2973{
2974 unsigned long flags;
2975 bool ret;
2976
2977 spin_lock_irqsave(&ah->ah_sc->sc_pm_lock, flags);
2978 ret = ath9k_hw_setpower_nolock(ah, mode);
2979 spin_unlock_irqrestore(&ah->ah_sc->sc_pm_lock, flags);
2980
2981 return ret;
2982}
2983
2984void ath9k_ps_wakeup(struct ath_softc *sc)
2985{
2986 unsigned long flags;
2987
2988 spin_lock_irqsave(&sc->sc_pm_lock, flags);
2989 if (++sc->ps_usecount != 1)
2990 goto unlock;
2991
2992 ath9k_hw_setpower_nolock(sc->sc_ah, ATH9K_PM_AWAKE);
2993
2994 unlock:
2995 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
2996}
2997
2998void ath9k_ps_restore(struct ath_softc *sc)
2999{
3000 unsigned long flags;
3001
3002 spin_lock_irqsave(&sc->sc_pm_lock, flags);
3003 if (--sc->ps_usecount != 0)
3004 goto unlock;
3005
3006 if (sc->ps_enabled &&
3007 !(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
3008 SC_OP_WAIT_FOR_CAB |
3009 SC_OP_WAIT_FOR_PSPOLL_DATA |
3010 SC_OP_WAIT_FOR_TX_ACK)))
3011 ath9k_hw_setpower_nolock(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
3012
3013 unlock:
3014 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
3015}
3016 2573
3017/* 2574/*
3018 * Helper for ASPM support. 2575 * Helper for ASPM support.
@@ -3145,6 +2702,7 @@ void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off)
3145 } 2702 }
3146 } 2703 }
3147} 2704}
2705EXPORT_SYMBOL(ath9k_hw_configpcipowersave);
3148 2706
3149/**********************/ 2707/**********************/
3150/* Interrupt Handling */ 2708/* Interrupt Handling */
@@ -3168,6 +2726,7 @@ bool ath9k_hw_intrpend(struct ath_hw *ah)
3168 2726
3169 return false; 2727 return false;
3170} 2728}
2729EXPORT_SYMBOL(ath9k_hw_intrpend);
3171 2730
3172bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked) 2731bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
3173{ 2732{
@@ -3176,6 +2735,7 @@ bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
3176 struct ath9k_hw_capabilities *pCap = &ah->caps; 2735 struct ath9k_hw_capabilities *pCap = &ah->caps;
3177 u32 sync_cause = 0; 2736 u32 sync_cause = 0;
3178 bool fatal_int = false; 2737 bool fatal_int = false;
2738 struct ath_common *common = ath9k_hw_common(ah);
3179 2739
3180 if (!AR_SREV_9100(ah)) { 2740 if (!AR_SREV_9100(ah)) {
3181 if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) { 2741 if (REG_READ(ah, AR_INTR_ASYNC_CAUSE) & AR_INTR_MAC_IRQ) {
@@ -3249,8 +2809,8 @@ bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
3249 } 2809 }
3250 2810
3251 if (isr & AR_ISR_RXORN) { 2811 if (isr & AR_ISR_RXORN) {
3252 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, 2812 ath_print(common, ATH_DBG_INTERRUPT,
3253 "receive FIFO overrun interrupt\n"); 2813 "receive FIFO overrun interrupt\n");
3254 } 2814 }
3255 2815
3256 if (!AR_SREV_9100(ah)) { 2816 if (!AR_SREV_9100(ah)) {
@@ -3292,25 +2852,25 @@ bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
3292 2852
3293 if (fatal_int) { 2853 if (fatal_int) {
3294 if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) { 2854 if (sync_cause & AR_INTR_SYNC_HOST1_FATAL) {
3295 DPRINTF(ah->ah_sc, ATH_DBG_ANY, 2855 ath_print(common, ATH_DBG_ANY,
3296 "received PCI FATAL interrupt\n"); 2856 "received PCI FATAL interrupt\n");
3297 } 2857 }
3298 if (sync_cause & AR_INTR_SYNC_HOST1_PERR) { 2858 if (sync_cause & AR_INTR_SYNC_HOST1_PERR) {
3299 DPRINTF(ah->ah_sc, ATH_DBG_ANY, 2859 ath_print(common, ATH_DBG_ANY,
3300 "received PCI PERR interrupt\n"); 2860 "received PCI PERR interrupt\n");
3301 } 2861 }
3302 *masked |= ATH9K_INT_FATAL; 2862 *masked |= ATH9K_INT_FATAL;
3303 } 2863 }
3304 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) { 2864 if (sync_cause & AR_INTR_SYNC_RADM_CPL_TIMEOUT) {
3305 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, 2865 ath_print(common, ATH_DBG_INTERRUPT,
3306 "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n"); 2866 "AR_INTR_SYNC_RADM_CPL_TIMEOUT\n");
3307 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF); 2867 REG_WRITE(ah, AR_RC, AR_RC_HOSTIF);
3308 REG_WRITE(ah, AR_RC, 0); 2868 REG_WRITE(ah, AR_RC, 0);
3309 *masked |= ATH9K_INT_FATAL; 2869 *masked |= ATH9K_INT_FATAL;
3310 } 2870 }
3311 if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) { 2871 if (sync_cause & AR_INTR_SYNC_LOCAL_TIMEOUT) {
3312 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, 2872 ath_print(common, ATH_DBG_INTERRUPT,
3313 "AR_INTR_SYNC_LOCAL_TIMEOUT\n"); 2873 "AR_INTR_SYNC_LOCAL_TIMEOUT\n");
3314 } 2874 }
3315 2875
3316 REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause); 2876 REG_WRITE(ah, AR_INTR_SYNC_CAUSE_CLR, sync_cause);
@@ -3319,17 +2879,19 @@ bool ath9k_hw_getisr(struct ath_hw *ah, enum ath9k_int *masked)
3319 2879
3320 return true; 2880 return true;
3321} 2881}
2882EXPORT_SYMBOL(ath9k_hw_getisr);
3322 2883
3323enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints) 2884enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
3324{ 2885{
3325 u32 omask = ah->mask_reg; 2886 u32 omask = ah->mask_reg;
3326 u32 mask, mask2; 2887 u32 mask, mask2;
3327 struct ath9k_hw_capabilities *pCap = &ah->caps; 2888 struct ath9k_hw_capabilities *pCap = &ah->caps;
2889 struct ath_common *common = ath9k_hw_common(ah);
3328 2890
3329 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints); 2891 ath_print(common, ATH_DBG_INTERRUPT, "0x%x => 0x%x\n", omask, ints);
3330 2892
3331 if (omask & ATH9K_INT_GLOBAL) { 2893 if (omask & ATH9K_INT_GLOBAL) {
3332 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "disable IER\n"); 2894 ath_print(common, ATH_DBG_INTERRUPT, "disable IER\n");
3333 REG_WRITE(ah, AR_IER, AR_IER_DISABLE); 2895 REG_WRITE(ah, AR_IER, AR_IER_DISABLE);
3334 (void) REG_READ(ah, AR_IER); 2896 (void) REG_READ(ah, AR_IER);
3335 if (!AR_SREV_9100(ah)) { 2897 if (!AR_SREV_9100(ah)) {
@@ -3386,7 +2948,7 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
3386 mask2 |= AR_IMR_S2_CST; 2948 mask2 |= AR_IMR_S2_CST;
3387 } 2949 }
3388 2950
3389 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask); 2951 ath_print(common, ATH_DBG_INTERRUPT, "new IMR 0x%x\n", mask);
3390 REG_WRITE(ah, AR_IMR, mask); 2952 REG_WRITE(ah, AR_IMR, mask);
3391 mask = REG_READ(ah, AR_IMR_S2) & ~(AR_IMR_S2_TIM | 2953 mask = REG_READ(ah, AR_IMR_S2) & ~(AR_IMR_S2_TIM |
3392 AR_IMR_S2_DTIM | 2954 AR_IMR_S2_DTIM |
@@ -3406,7 +2968,7 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
3406 } 2968 }
3407 2969
3408 if (ints & ATH9K_INT_GLOBAL) { 2970 if (ints & ATH9K_INT_GLOBAL) {
3409 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "enable IER\n"); 2971 ath_print(common, ATH_DBG_INTERRUPT, "enable IER\n");
3410 REG_WRITE(ah, AR_IER, AR_IER_ENABLE); 2972 REG_WRITE(ah, AR_IER, AR_IER_ENABLE);
3411 if (!AR_SREV_9100(ah)) { 2973 if (!AR_SREV_9100(ah)) {
3412 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE, 2974 REG_WRITE(ah, AR_INTR_ASYNC_ENABLE,
@@ -3419,12 +2981,13 @@ enum ath9k_int ath9k_hw_set_interrupts(struct ath_hw *ah, enum ath9k_int ints)
3419 REG_WRITE(ah, AR_INTR_SYNC_MASK, 2981 REG_WRITE(ah, AR_INTR_SYNC_MASK,
3420 AR_INTR_SYNC_DEFAULT); 2982 AR_INTR_SYNC_DEFAULT);
3421 } 2983 }
3422 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n", 2984 ath_print(common, ATH_DBG_INTERRUPT, "AR_IMR 0x%x IER 0x%x\n",
3423 REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER)); 2985 REG_READ(ah, AR_IMR), REG_READ(ah, AR_IER));
3424 } 2986 }
3425 2987
3426 return omask; 2988 return omask;
3427} 2989}
2990EXPORT_SYMBOL(ath9k_hw_set_interrupts);
3428 2991
3429/*******************/ 2992/*******************/
3430/* Beacon Handling */ 2993/* Beacon Handling */
@@ -3467,9 +3030,9 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
3467 AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN; 3030 AR_TBTT_TIMER_EN | AR_DBA_TIMER_EN | AR_SWBA_TIMER_EN;
3468 break; 3031 break;
3469 default: 3032 default:
3470 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, 3033 ath_print(ath9k_hw_common(ah), ATH_DBG_BEACON,
3471 "%s: unsupported opmode: %d\n", 3034 "%s: unsupported opmode: %d\n",
3472 __func__, ah->opmode); 3035 __func__, ah->opmode);
3473 return; 3036 return;
3474 break; 3037 break;
3475 } 3038 }
@@ -3481,18 +3044,19 @@ void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period)
3481 3044
3482 beacon_period &= ~ATH9K_BEACON_ENA; 3045 beacon_period &= ~ATH9K_BEACON_ENA;
3483 if (beacon_period & ATH9K_BEACON_RESET_TSF) { 3046 if (beacon_period & ATH9K_BEACON_RESET_TSF) {
3484 beacon_period &= ~ATH9K_BEACON_RESET_TSF;
3485 ath9k_hw_reset_tsf(ah); 3047 ath9k_hw_reset_tsf(ah);
3486 } 3048 }
3487 3049
3488 REG_SET_BIT(ah, AR_TIMER_MODE, flags); 3050 REG_SET_BIT(ah, AR_TIMER_MODE, flags);
3489} 3051}
3052EXPORT_SYMBOL(ath9k_hw_beaconinit);
3490 3053
3491void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah, 3054void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
3492 const struct ath9k_beacon_state *bs) 3055 const struct ath9k_beacon_state *bs)
3493{ 3056{
3494 u32 nextTbtt, beaconintval, dtimperiod, beacontimeout; 3057 u32 nextTbtt, beaconintval, dtimperiod, beacontimeout;
3495 struct ath9k_hw_capabilities *pCap = &ah->caps; 3058 struct ath9k_hw_capabilities *pCap = &ah->caps;
3059 struct ath_common *common = ath9k_hw_common(ah);
3496 3060
3497 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt)); 3061 REG_WRITE(ah, AR_NEXT_TBTT_TIMER, TU_TO_USEC(bs->bs_nexttbtt));
3498 3062
@@ -3518,10 +3082,10 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
3518 else 3082 else
3519 nextTbtt = bs->bs_nexttbtt; 3083 nextTbtt = bs->bs_nexttbtt;
3520 3084
3521 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "next DTIM %d\n", bs->bs_nextdtim); 3085 ath_print(common, ATH_DBG_BEACON, "next DTIM %d\n", bs->bs_nextdtim);
3522 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "next beacon %d\n", nextTbtt); 3086 ath_print(common, ATH_DBG_BEACON, "next beacon %d\n", nextTbtt);
3523 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "beacon period %d\n", beaconintval); 3087 ath_print(common, ATH_DBG_BEACON, "beacon period %d\n", beaconintval);
3524 DPRINTF(ah->ah_sc, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod); 3088 ath_print(common, ATH_DBG_BEACON, "DTIM period %d\n", dtimperiod);
3525 3089
3526 REG_WRITE(ah, AR_NEXT_DTIM, 3090 REG_WRITE(ah, AR_NEXT_DTIM,
3527 TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP)); 3091 TU_TO_USEC(bs->bs_nextdtim - SLEEP_SLOP));
@@ -3549,6 +3113,7 @@ void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
3549 /* TSF Out of Range Threshold */ 3113 /* TSF Out of Range Threshold */
3550 REG_WRITE(ah, AR_TSFOOR_THRESHOLD, bs->bs_tsfoor_threshold); 3114 REG_WRITE(ah, AR_TSFOOR_THRESHOLD, bs->bs_tsfoor_threshold);
3551} 3115}
3116EXPORT_SYMBOL(ath9k_hw_set_sta_beacon_timers);
3552 3117
3553/*******************/ 3118/*******************/
3554/* HW Capabilities */ 3119/* HW Capabilities */
@@ -3558,7 +3123,8 @@ void ath9k_hw_fill_cap_info(struct ath_hw *ah)
3558{ 3123{
3559 struct ath9k_hw_capabilities *pCap = &ah->caps; 3124 struct ath9k_hw_capabilities *pCap = &ah->caps;
3560 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah); 3125 struct ath_regulatory *regulatory = ath9k_hw_regulatory(ah);
3561 struct ath_btcoex_info *btcoex_info = &ah->ah_sc->btcoex_info; 3126 struct ath_common *common = ath9k_hw_common(ah);
3127 struct ath_btcoex_hw *btcoex_hw = &ah->btcoex_hw;
3562 3128
3563 u16 capField = 0, eeval; 3129 u16 capField = 0, eeval;
3564 3130
@@ -3579,8 +3145,8 @@ void ath9k_hw_fill_cap_info(struct ath_hw *ah)
3579 regulatory->current_rd += 5; 3145 regulatory->current_rd += 5;
3580 else if (regulatory->current_rd == 0x41) 3146 else if (regulatory->current_rd == 0x41)
3581 regulatory->current_rd = 0x43; 3147 regulatory->current_rd = 0x43;
3582 DPRINTF(ah->ah_sc, ATH_DBG_REGULATORY, 3148 ath_print(common, ATH_DBG_REGULATORY,
3583 "regdomain mapped to 0x%x\n", regulatory->current_rd); 3149 "regdomain mapped to 0x%x\n", regulatory->current_rd);
3584 } 3150 }
3585 3151
3586 eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE); 3152 eeval = ah->eep_ops->get_eeprom(ah, EEP_OP_MODE);
@@ -3719,7 +3285,10 @@ void ath9k_hw_fill_cap_info(struct ath_hw *ah)
3719 AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN; 3285 AR_EEPROM_EEREGCAP_EN_KK_U1_EVEN;
3720 } 3286 }
3721 3287
3722 pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND; 3288 /* Advertise midband for AR5416 with FCC midband set in eeprom */
3289 if (regulatory->current_rd_ext & (1 << REG_EXT_FCC_MIDBAND) &&
3290 AR_SREV_5416(ah))
3291 pCap->reg_cap |= AR_EEPROM_EEREGCAP_EN_FCC_MIDBAND;
3723 3292
3724 pCap->num_antcfg_5ghz = 3293 pCap->num_antcfg_5ghz =
3725 ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_5GHZ); 3294 ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_5GHZ);
@@ -3727,18 +3296,18 @@ void ath9k_hw_fill_cap_info(struct ath_hw *ah)
3727 ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_2GHZ); 3296 ah->eep_ops->get_num_ant_config(ah, ATH9K_HAL_FREQ_BAND_2GHZ);
3728 3297
3729 if (AR_SREV_9280_10_OR_LATER(ah) && 3298 if (AR_SREV_9280_10_OR_LATER(ah) &&
3730 ath_btcoex_supported(ah->hw_version.subsysid)) { 3299 ath9k_hw_btcoex_supported(ah)) {
3731 btcoex_info->btactive_gpio = ATH_BTACTIVE_GPIO; 3300 btcoex_hw->btactive_gpio = ATH_BTACTIVE_GPIO;
3732 btcoex_info->wlanactive_gpio = ATH_WLANACTIVE_GPIO; 3301 btcoex_hw->wlanactive_gpio = ATH_WLANACTIVE_GPIO;
3733 3302
3734 if (AR_SREV_9285(ah)) { 3303 if (AR_SREV_9285(ah)) {
3735 btcoex_info->btcoex_scheme = ATH_BTCOEX_CFG_3WIRE; 3304 btcoex_hw->scheme = ATH_BTCOEX_CFG_3WIRE;
3736 btcoex_info->btpriority_gpio = ATH_BTPRIORITY_GPIO; 3305 btcoex_hw->btpriority_gpio = ATH_BTPRIORITY_GPIO;
3737 } else { 3306 } else {
3738 btcoex_info->btcoex_scheme = ATH_BTCOEX_CFG_2WIRE; 3307 btcoex_hw->scheme = ATH_BTCOEX_CFG_2WIRE;
3739 } 3308 }
3740 } else { 3309 } else {
3741 btcoex_info->btcoex_scheme = ATH_BTCOEX_CFG_NONE; 3310 btcoex_hw->scheme = ATH_BTCOEX_CFG_NONE;
3742 } 3311 }
3743} 3312}
3744 3313
@@ -3812,6 +3381,7 @@ bool ath9k_hw_getcapability(struct ath_hw *ah, enum ath9k_capability_type type,
3812 return false; 3381 return false;
3813 } 3382 }
3814} 3383}
3384EXPORT_SYMBOL(ath9k_hw_getcapability);
3815 3385
3816bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type, 3386bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
3817 u32 capability, u32 setting, int *status) 3387 u32 capability, u32 setting, int *status)
@@ -3845,6 +3415,7 @@ bool ath9k_hw_setcapability(struct ath_hw *ah, enum ath9k_capability_type type,
3845 return false; 3415 return false;
3846 } 3416 }
3847} 3417}
3418EXPORT_SYMBOL(ath9k_hw_setcapability);
3848 3419
3849/****************************/ 3420/****************************/
3850/* GPIO / RFKILL / Antennae */ 3421/* GPIO / RFKILL / Antennae */
@@ -3882,7 +3453,7 @@ void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio)
3882{ 3453{
3883 u32 gpio_shift; 3454 u32 gpio_shift;
3884 3455
3885 ASSERT(gpio < ah->caps.num_gpio_pins); 3456 BUG_ON(gpio >= ah->caps.num_gpio_pins);
3886 3457
3887 gpio_shift = gpio << 1; 3458 gpio_shift = gpio << 1;
3888 3459
@@ -3891,6 +3462,7 @@ void ath9k_hw_cfg_gpio_input(struct ath_hw *ah, u32 gpio)
3891 (AR_GPIO_OE_OUT_DRV_NO << gpio_shift), 3462 (AR_GPIO_OE_OUT_DRV_NO << gpio_shift),
3892 (AR_GPIO_OE_OUT_DRV << gpio_shift)); 3463 (AR_GPIO_OE_OUT_DRV << gpio_shift));
3893} 3464}
3465EXPORT_SYMBOL(ath9k_hw_cfg_gpio_input);
3894 3466
3895u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio) 3467u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
3896{ 3468{
@@ -3909,6 +3481,7 @@ u32 ath9k_hw_gpio_get(struct ath_hw *ah, u32 gpio)
3909 else 3481 else
3910 return MS_REG_READ(AR, gpio) != 0; 3482 return MS_REG_READ(AR, gpio) != 0;
3911} 3483}
3484EXPORT_SYMBOL(ath9k_hw_gpio_get);
3912 3485
3913void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio, 3486void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
3914 u32 ah_signal_type) 3487 u32 ah_signal_type)
@@ -3924,22 +3497,26 @@ void ath9k_hw_cfg_output(struct ath_hw *ah, u32 gpio,
3924 (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift), 3497 (AR_GPIO_OE_OUT_DRV_ALL << gpio_shift),
3925 (AR_GPIO_OE_OUT_DRV << gpio_shift)); 3498 (AR_GPIO_OE_OUT_DRV << gpio_shift));
3926} 3499}
3500EXPORT_SYMBOL(ath9k_hw_cfg_output);
3927 3501
3928void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val) 3502void ath9k_hw_set_gpio(struct ath_hw *ah, u32 gpio, u32 val)
3929{ 3503{
3930 REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio), 3504 REG_RMW(ah, AR_GPIO_IN_OUT, ((val & 1) << gpio),
3931 AR_GPIO_BIT(gpio)); 3505 AR_GPIO_BIT(gpio));
3932} 3506}
3507EXPORT_SYMBOL(ath9k_hw_set_gpio);
3933 3508
3934u32 ath9k_hw_getdefantenna(struct ath_hw *ah) 3509u32 ath9k_hw_getdefantenna(struct ath_hw *ah)
3935{ 3510{
3936 return REG_READ(ah, AR_DEF_ANTENNA) & 0x7; 3511 return REG_READ(ah, AR_DEF_ANTENNA) & 0x7;
3937} 3512}
3513EXPORT_SYMBOL(ath9k_hw_getdefantenna);
3938 3514
3939void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna) 3515void ath9k_hw_setantenna(struct ath_hw *ah, u32 antenna)
3940{ 3516{
3941 REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7)); 3517 REG_WRITE(ah, AR_DEF_ANTENNA, (antenna & 0x7));
3942} 3518}
3519EXPORT_SYMBOL(ath9k_hw_setantenna);
3943 3520
3944bool ath9k_hw_setantennaswitch(struct ath_hw *ah, 3521bool ath9k_hw_setantennaswitch(struct ath_hw *ah,
3945 enum ath9k_ant_setting settings, 3522 enum ath9k_ant_setting settings,
@@ -4002,6 +3579,7 @@ u32 ath9k_hw_getrxfilter(struct ath_hw *ah)
4002 3579
4003 return bits; 3580 return bits;
4004} 3581}
3582EXPORT_SYMBOL(ath9k_hw_getrxfilter);
4005 3583
4006void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits) 3584void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
4007{ 3585{
@@ -4023,19 +3601,30 @@ void ath9k_hw_setrxfilter(struct ath_hw *ah, u32 bits)
4023 REG_WRITE(ah, AR_RXCFG, 3601 REG_WRITE(ah, AR_RXCFG,
4024 REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA); 3602 REG_READ(ah, AR_RXCFG) & ~AR_RXCFG_ZLFDMA);
4025} 3603}
3604EXPORT_SYMBOL(ath9k_hw_setrxfilter);
4026 3605
4027bool ath9k_hw_phy_disable(struct ath_hw *ah) 3606bool ath9k_hw_phy_disable(struct ath_hw *ah)
4028{ 3607{
4029 return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM); 3608 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_WARM))
3609 return false;
3610
3611 ath9k_hw_init_pll(ah, NULL);
3612 return true;
4030} 3613}
3614EXPORT_SYMBOL(ath9k_hw_phy_disable);
4031 3615
4032bool ath9k_hw_disable(struct ath_hw *ah) 3616bool ath9k_hw_disable(struct ath_hw *ah)
4033{ 3617{
4034 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE)) 3618 if (!ath9k_hw_setpower(ah, ATH9K_PM_AWAKE))
4035 return false; 3619 return false;
4036 3620
4037 return ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD); 3621 if (!ath9k_hw_set_reset_reg(ah, ATH9K_RESET_COLD))
3622 return false;
3623
3624 ath9k_hw_init_pll(ah, NULL);
3625 return true;
4038} 3626}
3627EXPORT_SYMBOL(ath9k_hw_disable);
4039 3628
4040void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit) 3629void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit)
4041{ 3630{
@@ -4052,35 +3641,36 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit)
4052 min((u32) MAX_RATE_POWER, 3641 min((u32) MAX_RATE_POWER,
4053 (u32) regulatory->power_limit)); 3642 (u32) regulatory->power_limit));
4054} 3643}
3644EXPORT_SYMBOL(ath9k_hw_set_txpowerlimit);
4055 3645
4056void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac) 3646void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac)
4057{ 3647{
4058 memcpy(ah->macaddr, mac, ETH_ALEN); 3648 memcpy(ath9k_hw_common(ah)->macaddr, mac, ETH_ALEN);
4059} 3649}
3650EXPORT_SYMBOL(ath9k_hw_setmac);
4060 3651
4061void ath9k_hw_setopmode(struct ath_hw *ah) 3652void ath9k_hw_setopmode(struct ath_hw *ah)
4062{ 3653{
4063 ath9k_hw_set_operating_mode(ah, ah->opmode); 3654 ath9k_hw_set_operating_mode(ah, ah->opmode);
4064} 3655}
3656EXPORT_SYMBOL(ath9k_hw_setopmode);
4065 3657
4066void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1) 3658void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1)
4067{ 3659{
4068 REG_WRITE(ah, AR_MCAST_FIL0, filter0); 3660 REG_WRITE(ah, AR_MCAST_FIL0, filter0);
4069 REG_WRITE(ah, AR_MCAST_FIL1, filter1); 3661 REG_WRITE(ah, AR_MCAST_FIL1, filter1);
4070} 3662}
3663EXPORT_SYMBOL(ath9k_hw_setmcastfilter);
4071 3664
4072void ath9k_hw_setbssidmask(struct ath_softc *sc) 3665void ath9k_hw_write_associd(struct ath_hw *ah)
4073{ 3666{
4074 REG_WRITE(sc->sc_ah, AR_BSSMSKL, get_unaligned_le32(sc->bssidmask)); 3667 struct ath_common *common = ath9k_hw_common(ah);
4075 REG_WRITE(sc->sc_ah, AR_BSSMSKU, get_unaligned_le16(sc->bssidmask + 4));
4076}
4077 3668
4078void ath9k_hw_write_associd(struct ath_softc *sc) 3669 REG_WRITE(ah, AR_BSS_ID0, get_unaligned_le32(common->curbssid));
4079{ 3670 REG_WRITE(ah, AR_BSS_ID1, get_unaligned_le16(common->curbssid + 4) |
4080 REG_WRITE(sc->sc_ah, AR_BSS_ID0, get_unaligned_le32(sc->curbssid)); 3671 ((common->curaid & 0x3fff) << AR_BSS_ID1_AID_S));
4081 REG_WRITE(sc->sc_ah, AR_BSS_ID1, get_unaligned_le16(sc->curbssid + 4) |
4082 ((sc->curaid & 0x3fff) << AR_BSS_ID1_AID_S));
4083} 3672}
3673EXPORT_SYMBOL(ath9k_hw_write_associd);
4084 3674
4085u64 ath9k_hw_gettsf64(struct ath_hw *ah) 3675u64 ath9k_hw_gettsf64(struct ath_hw *ah)
4086{ 3676{
@@ -4091,24 +3681,25 @@ u64 ath9k_hw_gettsf64(struct ath_hw *ah)
4091 3681
4092 return tsf; 3682 return tsf;
4093} 3683}
3684EXPORT_SYMBOL(ath9k_hw_gettsf64);
4094 3685
4095void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64) 3686void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64)
4096{ 3687{
4097 REG_WRITE(ah, AR_TSF_L32, tsf64 & 0xffffffff); 3688 REG_WRITE(ah, AR_TSF_L32, tsf64 & 0xffffffff);
4098 REG_WRITE(ah, AR_TSF_U32, (tsf64 >> 32) & 0xffffffff); 3689 REG_WRITE(ah, AR_TSF_U32, (tsf64 >> 32) & 0xffffffff);
4099} 3690}
3691EXPORT_SYMBOL(ath9k_hw_settsf64);
4100 3692
4101void ath9k_hw_reset_tsf(struct ath_hw *ah) 3693void ath9k_hw_reset_tsf(struct ath_hw *ah)
4102{ 3694{
4103 ath9k_ps_wakeup(ah->ah_sc);
4104 if (!ath9k_hw_wait(ah, AR_SLP32_MODE, AR_SLP32_TSF_WRITE_STATUS, 0, 3695 if (!ath9k_hw_wait(ah, AR_SLP32_MODE, AR_SLP32_TSF_WRITE_STATUS, 0,
4105 AH_TSF_WRITE_TIMEOUT)) 3696 AH_TSF_WRITE_TIMEOUT))
4106 DPRINTF(ah->ah_sc, ATH_DBG_RESET, 3697 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
4107 "AR_SLP32_TSF_WRITE_STATUS limit exceeded\n"); 3698 "AR_SLP32_TSF_WRITE_STATUS limit exceeded\n");
4108 3699
4109 REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE); 3700 REG_WRITE(ah, AR_RESET_TSF, AR_RESET_TSF_ONCE);
4110 ath9k_ps_restore(ah->ah_sc);
4111} 3701}
3702EXPORT_SYMBOL(ath9k_hw_reset_tsf);
4112 3703
4113void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting) 3704void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting)
4114{ 3705{
@@ -4117,11 +3708,13 @@ void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting)
4117 else 3708 else
4118 ah->misc_mode &= ~AR_PCU_TX_ADD_TSF; 3709 ah->misc_mode &= ~AR_PCU_TX_ADD_TSF;
4119} 3710}
3711EXPORT_SYMBOL(ath9k_hw_set_tsfadjust);
4120 3712
4121bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us) 3713bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
4122{ 3714{
4123 if (us < ATH9K_SLOT_TIME_9 || us > ath9k_hw_mac_to_usec(ah, 0xffff)) { 3715 if (us < ATH9K_SLOT_TIME_9 || us > ath9k_hw_mac_to_usec(ah, 0xffff)) {
4124 DPRINTF(ah->ah_sc, ATH_DBG_RESET, "bad slot time %u\n", us); 3716 ath_print(ath9k_hw_common(ah), ATH_DBG_RESET,
3717 "bad slot time %u\n", us);
4125 ah->slottime = (u32) -1; 3718 ah->slottime = (u32) -1;
4126 return false; 3719 return false;
4127 } else { 3720 } else {
@@ -4130,13 +3723,14 @@ bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us)
4130 return true; 3723 return true;
4131 } 3724 }
4132} 3725}
3726EXPORT_SYMBOL(ath9k_hw_setslottime);
4133 3727
4134void ath9k_hw_set11nmac2040(struct ath_hw *ah, enum ath9k_ht_macmode mode) 3728void ath9k_hw_set11nmac2040(struct ath_hw *ah)
4135{ 3729{
3730 struct ieee80211_conf *conf = &ath9k_hw_common(ah)->hw->conf;
4136 u32 macmode; 3731 u32 macmode;
4137 3732
4138 if (mode == ATH9K_HT_MACMODE_2040 && 3733 if (conf_is_ht40(conf) && !ah->config.cwm_ignore_extcca)
4139 !ah->config.cwm_ignore_extcca)
4140 macmode = AR_2040_JOINED_RX_CLEAR; 3734 macmode = AR_2040_JOINED_RX_CLEAR;
4141 else 3735 else
4142 macmode = 0; 3736 macmode = 0;
@@ -4193,6 +3787,7 @@ u32 ath9k_hw_gettsf32(struct ath_hw *ah)
4193{ 3787{
4194 return REG_READ(ah, AR_TSF_L32); 3788 return REG_READ(ah, AR_TSF_L32);
4195} 3789}
3790EXPORT_SYMBOL(ath9k_hw_gettsf32);
4196 3791
4197struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah, 3792struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
4198 void (*trigger)(void *), 3793 void (*trigger)(void *),
@@ -4206,8 +3801,9 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
4206 timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL); 3801 timer = kzalloc(sizeof(struct ath_gen_timer), GFP_KERNEL);
4207 3802
4208 if (timer == NULL) { 3803 if (timer == NULL) {
4209 printk(KERN_DEBUG "Failed to allocate memory" 3804 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
4210 "for hw timer[%d]\n", timer_index); 3805 "Failed to allocate memory"
3806 "for hw timer[%d]\n", timer_index);
4211 return NULL; 3807 return NULL;
4212 } 3808 }
4213 3809
@@ -4220,10 +3816,12 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
4220 3816
4221 return timer; 3817 return timer;
4222} 3818}
3819EXPORT_SYMBOL(ath_gen_timer_alloc);
4223 3820
4224void ath_gen_timer_start(struct ath_hw *ah, 3821void ath9k_hw_gen_timer_start(struct ath_hw *ah,
4225 struct ath_gen_timer *timer, 3822 struct ath_gen_timer *timer,
4226 u32 timer_next, u32 timer_period) 3823 u32 timer_next,
3824 u32 timer_period)
4227{ 3825{
4228 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; 3826 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
4229 u32 tsf; 3827 u32 tsf;
@@ -4234,8 +3832,9 @@ void ath_gen_timer_start(struct ath_hw *ah,
4234 3832
4235 tsf = ath9k_hw_gettsf32(ah); 3833 tsf = ath9k_hw_gettsf32(ah);
4236 3834
4237 DPRINTF(ah->ah_sc, ATH_DBG_HWTIMER, "curent tsf %x period %x" 3835 ath_print(ath9k_hw_common(ah), ATH_DBG_HWTIMER,
4238 "timer_next %x\n", tsf, timer_period, timer_next); 3836 "curent tsf %x period %x"
3837 "timer_next %x\n", tsf, timer_period, timer_next);
4239 3838
4240 /* 3839 /*
4241 * Pull timer_next forward if the current TSF already passed it 3840 * Pull timer_next forward if the current TSF already passed it
@@ -4258,15 +3857,10 @@ void ath_gen_timer_start(struct ath_hw *ah,
4258 REG_SET_BIT(ah, AR_IMR_S5, 3857 REG_SET_BIT(ah, AR_IMR_S5,
4259 (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) | 3858 (SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_THRESH) |
4260 SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG))); 3859 SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG)));
4261
4262 if ((ah->ah_sc->imask & ATH9K_INT_GENTIMER) == 0) {
4263 ath9k_hw_set_interrupts(ah, 0);
4264 ah->ah_sc->imask |= ATH9K_INT_GENTIMER;
4265 ath9k_hw_set_interrupts(ah, ah->ah_sc->imask);
4266 }
4267} 3860}
3861EXPORT_SYMBOL(ath9k_hw_gen_timer_start);
4268 3862
4269void ath_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer) 3863void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
4270{ 3864{
4271 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; 3865 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
4272 3866
@@ -4285,14 +3879,8 @@ void ath_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
4285 SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG))); 3879 SM(AR_GENTMR_BIT(timer->index), AR_IMR_S5_GENTIMER_TRIG)));
4286 3880
4287 clear_bit(timer->index, &timer_table->timer_mask.timer_bits); 3881 clear_bit(timer->index, &timer_table->timer_mask.timer_bits);
4288
4289 /* if no timer is enabled, turn off interrupt mask */
4290 if (timer_table->timer_mask.val == 0) {
4291 ath9k_hw_set_interrupts(ah, 0);
4292 ah->ah_sc->imask &= ~ATH9K_INT_GENTIMER;
4293 ath9k_hw_set_interrupts(ah, ah->ah_sc->imask);
4294 }
4295} 3882}
3883EXPORT_SYMBOL(ath9k_hw_gen_timer_stop);
4296 3884
4297void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer) 3885void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer)
4298{ 3886{
@@ -4302,6 +3890,7 @@ void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer)
4302 timer_table->timers[timer->index] = NULL; 3890 timer_table->timers[timer->index] = NULL;
4303 kfree(timer); 3891 kfree(timer);
4304} 3892}
3893EXPORT_SYMBOL(ath_gen_timer_free);
4305 3894
4306/* 3895/*
4307 * Generic Timer Interrupts handling 3896 * Generic Timer Interrupts handling
@@ -4310,6 +3899,7 @@ void ath_gen_timer_isr(struct ath_hw *ah)
4310{ 3899{
4311 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers; 3900 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
4312 struct ath_gen_timer *timer; 3901 struct ath_gen_timer *timer;
3902 struct ath_common *common = ath9k_hw_common(ah);
4313 u32 trigger_mask, thresh_mask, index; 3903 u32 trigger_mask, thresh_mask, index;
4314 3904
4315 /* get hardware generic timer interrupt status */ 3905 /* get hardware generic timer interrupt status */
@@ -4324,8 +3914,8 @@ void ath_gen_timer_isr(struct ath_hw *ah)
4324 index = rightmost_index(timer_table, &thresh_mask); 3914 index = rightmost_index(timer_table, &thresh_mask);
4325 timer = timer_table->timers[index]; 3915 timer = timer_table->timers[index];
4326 BUG_ON(!timer); 3916 BUG_ON(!timer);
4327 DPRINTF(ah->ah_sc, ATH_DBG_HWTIMER, 3917 ath_print(common, ATH_DBG_HWTIMER,
4328 "TSF overflow for Gen timer %d\n", index); 3918 "TSF overflow for Gen timer %d\n", index);
4329 timer->overflow(timer->arg); 3919 timer->overflow(timer->arg);
4330 } 3920 }
4331 3921
@@ -4333,21 +3923,95 @@ void ath_gen_timer_isr(struct ath_hw *ah)
4333 index = rightmost_index(timer_table, &trigger_mask); 3923 index = rightmost_index(timer_table, &trigger_mask);
4334 timer = timer_table->timers[index]; 3924 timer = timer_table->timers[index];
4335 BUG_ON(!timer); 3925 BUG_ON(!timer);
4336 DPRINTF(ah->ah_sc, ATH_DBG_HWTIMER, 3926 ath_print(common, ATH_DBG_HWTIMER,
4337 "Gen timer[%d] trigger\n", index); 3927 "Gen timer[%d] trigger\n", index);
4338 timer->trigger(timer->arg); 3928 timer->trigger(timer->arg);
4339 } 3929 }
4340} 3930}
3931EXPORT_SYMBOL(ath_gen_timer_isr);
3932
3933static struct {
3934 u32 version;
3935 const char * name;
3936} ath_mac_bb_names[] = {
3937 /* Devices with external radios */
3938 { AR_SREV_VERSION_5416_PCI, "5416" },
3939 { AR_SREV_VERSION_5416_PCIE, "5418" },
3940 { AR_SREV_VERSION_9100, "9100" },
3941 { AR_SREV_VERSION_9160, "9160" },
3942 /* Single-chip solutions */
3943 { AR_SREV_VERSION_9280, "9280" },
3944 { AR_SREV_VERSION_9285, "9285" },
3945 { AR_SREV_VERSION_9287, "9287" },
3946 { AR_SREV_VERSION_9271, "9271" },
3947};
3948
3949/* For devices with external radios */
3950static struct {
3951 u16 version;
3952 const char * name;
3953} ath_rf_names[] = {
3954 { 0, "5133" },
3955 { AR_RAD5133_SREV_MAJOR, "5133" },
3956 { AR_RAD5122_SREV_MAJOR, "5122" },
3957 { AR_RAD2133_SREV_MAJOR, "2133" },
3958 { AR_RAD2122_SREV_MAJOR, "2122" }
3959};
4341 3960
4342/* 3961/*
4343 * Primitive to disable ASPM 3962 * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
4344 */ 3963 */
4345void ath_pcie_aspm_disable(struct ath_softc *sc) 3964static const char *ath9k_hw_mac_bb_name(u32 mac_bb_version)
4346{ 3965{
4347 struct pci_dev *pdev = to_pci_dev(sc->dev); 3966 int i;
4348 u8 aspm; 3967
3968 for (i=0; i<ARRAY_SIZE(ath_mac_bb_names); i++) {
3969 if (ath_mac_bb_names[i].version == mac_bb_version) {
3970 return ath_mac_bb_names[i].name;
3971 }
3972 }
3973
3974 return "????";
3975}
3976
3977/*
3978 * Return the RF name. "????" is returned if the RF is unknown.
3979 * Used for devices with external radios.
3980 */
3981static const char *ath9k_hw_rf_name(u16 rf_version)
3982{
3983 int i;
3984
3985 for (i=0; i<ARRAY_SIZE(ath_rf_names); i++) {
3986 if (ath_rf_names[i].version == rf_version) {
3987 return ath_rf_names[i].name;
3988 }
3989 }
3990
3991 return "????";
3992}
3993
3994void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len)
3995{
3996 int used;
3997
3998 /* chipsets >= AR9280 are single-chip */
3999 if (AR_SREV_9280_10_OR_LATER(ah)) {
4000 used = snprintf(hw_name, len,
4001 "Atheros AR%s Rev:%x",
4002 ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
4003 ah->hw_version.macRev);
4004 }
4005 else {
4006 used = snprintf(hw_name, len,
4007 "Atheros AR%s MAC/BB Rev:%x AR%s RF Rev:%x",
4008 ath9k_hw_mac_bb_name(ah->hw_version.macVersion),
4009 ah->hw_version.macRev,
4010 ath9k_hw_rf_name((ah->hw_version.analog5GhzRev &
4011 AR_RADIO_SREV_MAJOR)),
4012 ah->hw_version.phyRev);
4013 }
4349 4014
4350 pci_read_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, &aspm); 4015 hw_name[used] = '\0';
4351 aspm &= ~(ATH_PCIE_CAP_LINK_L0S | ATH_PCIE_CAP_LINK_L1);
4352 pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm);
4353} 4016}
4017EXPORT_SYMBOL(ath9k_hw_name);
diff --git a/drivers/net/wireless/ath/ath9k/hw.h b/drivers/net/wireless/ath/ath9k/hw.h
index b89234571829..c7b0c4d5f75a 100644
--- a/drivers/net/wireless/ath/ath9k/hw.h
+++ b/drivers/net/wireless/ath/ath9k/hw.h
@@ -27,17 +27,24 @@
27#include "calib.h" 27#include "calib.h"
28#include "reg.h" 28#include "reg.h"
29#include "phy.h" 29#include "phy.h"
30#include "btcoex.h"
30 31
31#include "../regd.h" 32#include "../regd.h"
33#include "../debug.h"
32 34
33#define ATHEROS_VENDOR_ID 0x168c 35#define ATHEROS_VENDOR_ID 0x168c
36
34#define AR5416_DEVID_PCI 0x0023 37#define AR5416_DEVID_PCI 0x0023
35#define AR5416_DEVID_PCIE 0x0024 38#define AR5416_DEVID_PCIE 0x0024
36#define AR9160_DEVID_PCI 0x0027 39#define AR9160_DEVID_PCI 0x0027
37#define AR9280_DEVID_PCI 0x0029 40#define AR9280_DEVID_PCI 0x0029
38#define AR9280_DEVID_PCIE 0x002a 41#define AR9280_DEVID_PCIE 0x002a
39#define AR9285_DEVID_PCIE 0x002b 42#define AR9285_DEVID_PCIE 0x002b
43
40#define AR5416_AR9100_DEVID 0x000b 44#define AR5416_AR9100_DEVID 0x000b
45
46#define AR9271_USB 0x9271
47
41#define AR_SUBVENDOR_ID_NOG 0x0e11 48#define AR_SUBVENDOR_ID_NOG 0x0e11
42#define AR_SUBVENDOR_ID_NEW_A 0x7065 49#define AR_SUBVENDOR_ID_NEW_A 0x7065
43#define AR5416_MAGIC 0x19641014 50#define AR5416_MAGIC 0x19641014
@@ -49,9 +56,18 @@
49#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa 56#define AT9285_COEX3WIRE_SA_SUBSYSID 0x30aa
50#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab 57#define AT9285_COEX3WIRE_DA_SUBSYSID 0x30ab
51 58
59#define ATH_AMPDU_LIMIT_MAX (64 * 1024 - 1)
60
61#define ATH_DEFAULT_NOISE_FLOOR -95
62
63#define ATH9K_RSSI_BAD 0x80
64
52/* Register read/write primitives */ 65/* Register read/write primitives */
53#define REG_WRITE(_ah, _reg, _val) ath9k_iowrite32((_ah), (_reg), (_val)) 66#define REG_WRITE(_ah, _reg, _val) \
54#define REG_READ(_ah, _reg) ath9k_ioread32((_ah), (_reg)) 67 ath9k_hw_common(_ah)->ops->write((_ah), (_val), (_reg))
68
69#define REG_READ(_ah, _reg) \
70 ath9k_hw_common(_ah)->ops->read((_ah), (_reg))
55 71
56#define SM(_v, _f) (((_v) << _f##_S) & _f) 72#define SM(_v, _f) (((_v) << _f##_S) & _f)
57#define MS(_v, _f) (((_v) & _f) >> _f##_S) 73#define MS(_v, _f) (((_v) & _f) >> _f##_S)
@@ -91,7 +107,7 @@
91#define AR_GPIO_BIT(_gpio) (1 << (_gpio)) 107#define AR_GPIO_BIT(_gpio) (1 << (_gpio))
92 108
93#define BASE_ACTIVATE_DELAY 100 109#define BASE_ACTIVATE_DELAY 100
94#define RTC_PLL_SETTLE_DELAY 1000 110#define RTC_PLL_SETTLE_DELAY 100
95#define COEF_SCALE_S 24 111#define COEF_SCALE_S 24
96#define HT40_CHANNEL_CENTER_SHIFT 10 112#define HT40_CHANNEL_CENTER_SHIFT 10
97 113
@@ -132,6 +148,15 @@ enum wireless_mode {
132 ATH9K_MODE_MAX, 148 ATH9K_MODE_MAX,
133}; 149};
134 150
151/**
152 * ath9k_ant_setting - transmit antenna settings
153 *
154 * Configures the antenna setting to use for transmit.
155 *
156 * @ATH9K_ANT_VARIABLE: this means transmit on all active antennas
157 * @ATH9K_ANT_FIXED_A: this means transmit on the first antenna only
158 * @ATH9K_ANT_FIXED_B: this means transmit on the second antenna only
159 */
135enum ath9k_ant_setting { 160enum ath9k_ant_setting {
136 ATH9K_ANT_VARIABLE = 0, 161 ATH9K_ANT_VARIABLE = 0,
137 ATH9K_ANT_FIXED_A, 162 ATH9K_ANT_FIXED_A,
@@ -433,7 +458,8 @@ struct ath_gen_timer_table {
433}; 458};
434 459
435struct ath_hw { 460struct ath_hw {
436 struct ath_softc *ah_sc; 461 struct ieee80211_hw *hw;
462 struct ath_common common;
437 struct ath9k_hw_version hw_version; 463 struct ath9k_hw_version hw_version;
438 struct ath9k_ops_config config; 464 struct ath9k_ops_config config;
439 struct ath9k_hw_capabilities caps; 465 struct ath9k_hw_capabilities caps;
@@ -450,7 +476,6 @@ struct ath_hw {
450 476
451 bool sw_mgmt_crypto; 477 bool sw_mgmt_crypto;
452 bool is_pciexpress; 478 bool is_pciexpress;
453 u8 macaddr[ETH_ALEN];
454 u16 tx_trig_level; 479 u16 tx_trig_level;
455 u16 rfsilent; 480 u16 rfsilent;
456 u32 rfkill_gpio; 481 u32 rfkill_gpio;
@@ -523,7 +548,14 @@ struct ath_hw {
523 DONT_USE_32KHZ, 548 DONT_USE_32KHZ,
524 } enable_32kHz_clock; 549 } enable_32kHz_clock;
525 550
526 /* RF */ 551 /* Callback for radio frequency change */
552 int (*ath9k_hw_rf_set_freq)(struct ath_hw *ah, struct ath9k_channel *chan);
553
554 /* Callback for baseband spur frequency */
555 void (*ath9k_hw_spur_mitigate_freq)(struct ath_hw *ah,
556 struct ath9k_channel *chan);
557
558 /* Used to program the radio on non single-chip devices */
527 u32 *analogBank0Data; 559 u32 *analogBank0Data;
528 u32 *analogBank1Data; 560 u32 *analogBank1Data;
529 u32 *analogBank2Data; 561 u32 *analogBank2Data;
@@ -553,8 +585,10 @@ struct ath_hw {
553 int firpwr[5]; 585 int firpwr[5];
554 enum ath9k_ani_cmd ani_function; 586 enum ath9k_ani_cmd ani_function;
555 587
588 /* Bluetooth coexistance */
589 struct ath_btcoex_hw btcoex_hw;
590
556 u32 intr_txqs; 591 u32 intr_txqs;
557 enum ath9k_ht_extprotspacing extprotspacing;
558 u8 txchainmask; 592 u8 txchainmask;
559 u8 rxchainmask; 593 u8 rxchainmask;
560 594
@@ -578,17 +612,29 @@ struct ath_hw {
578 struct ar5416IniArray iniModesAdditional; 612 struct ar5416IniArray iniModesAdditional;
579 struct ar5416IniArray iniModesRxGain; 613 struct ar5416IniArray iniModesRxGain;
580 struct ar5416IniArray iniModesTxGain; 614 struct ar5416IniArray iniModesTxGain;
615 struct ar5416IniArray iniModes_9271_1_0_only;
616 struct ar5416IniArray iniCckfirNormal;
617 struct ar5416IniArray iniCckfirJapan2484;
581 618
582 u32 intr_gen_timer_trigger; 619 u32 intr_gen_timer_trigger;
583 u32 intr_gen_timer_thresh; 620 u32 intr_gen_timer_thresh;
584 struct ath_gen_timer_table hw_gen_timers; 621 struct ath_gen_timer_table hw_gen_timers;
585}; 622};
586 623
624static inline struct ath_common *ath9k_hw_common(struct ath_hw *ah)
625{
626 return &ah->common;
627}
628
629static inline struct ath_regulatory *ath9k_hw_regulatory(struct ath_hw *ah)
630{
631 return &(ath9k_hw_common(ah)->regulatory);
632}
633
587/* Initialization, Detach, Reset */ 634/* Initialization, Detach, Reset */
588const char *ath9k_hw_probe(u16 vendorid, u16 devid); 635const char *ath9k_hw_probe(u16 vendorid, u16 devid);
589void ath9k_hw_detach(struct ath_hw *ah); 636void ath9k_hw_detach(struct ath_hw *ah);
590int ath9k_hw_init(struct ath_hw *ah); 637int ath9k_hw_init(struct ath_hw *ah);
591void ath9k_hw_rf_free(struct ath_hw *ah);
592int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan, 638int ath9k_hw_reset(struct ath_hw *ah, struct ath9k_channel *chan,
593 bool bChannelChange); 639 bool bChannelChange);
594void ath9k_hw_fill_cap_info(struct ath_hw *ah); 640void ath9k_hw_fill_cap_info(struct ath_hw *ah);
@@ -637,19 +683,20 @@ void ath9k_hw_set_txpowerlimit(struct ath_hw *ah, u32 limit);
637void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac); 683void ath9k_hw_setmac(struct ath_hw *ah, const u8 *mac);
638void ath9k_hw_setopmode(struct ath_hw *ah); 684void ath9k_hw_setopmode(struct ath_hw *ah);
639void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1); 685void ath9k_hw_setmcastfilter(struct ath_hw *ah, u32 filter0, u32 filter1);
640void ath9k_hw_setbssidmask(struct ath_softc *sc); 686void ath9k_hw_setbssidmask(struct ath_hw *ah);
641void ath9k_hw_write_associd(struct ath_softc *sc); 687void ath9k_hw_write_associd(struct ath_hw *ah);
642u64 ath9k_hw_gettsf64(struct ath_hw *ah); 688u64 ath9k_hw_gettsf64(struct ath_hw *ah);
643void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64); 689void ath9k_hw_settsf64(struct ath_hw *ah, u64 tsf64);
644void ath9k_hw_reset_tsf(struct ath_hw *ah); 690void ath9k_hw_reset_tsf(struct ath_hw *ah);
645void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting); 691void ath9k_hw_set_tsfadjust(struct ath_hw *ah, u32 setting);
646bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us); 692bool ath9k_hw_setslottime(struct ath_hw *ah, u32 us);
647void ath9k_hw_set11nmac2040(struct ath_hw *ah, enum ath9k_ht_macmode mode); 693void ath9k_hw_set11nmac2040(struct ath_hw *ah);
648void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period); 694void ath9k_hw_beaconinit(struct ath_hw *ah, u32 next_beacon, u32 beacon_period);
649void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah, 695void ath9k_hw_set_sta_beacon_timers(struct ath_hw *ah,
650 const struct ath9k_beacon_state *bs); 696 const struct ath9k_beacon_state *bs);
651bool ath9k_hw_setpower(struct ath_hw *ah, 697
652 enum ath9k_power_mode mode); 698bool ath9k_hw_setpower(struct ath_hw *ah, enum ath9k_power_mode mode);
699
653void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off); 700void ath9k_hw_configpcipowersave(struct ath_hw *ah, int restore, int power_off);
654 701
655/* Interrupt Handling */ 702/* Interrupt Handling */
@@ -663,16 +710,20 @@ struct ath_gen_timer *ath_gen_timer_alloc(struct ath_hw *ah,
663 void (*overflow)(void *), 710 void (*overflow)(void *),
664 void *arg, 711 void *arg,
665 u8 timer_index); 712 u8 timer_index);
666void ath_gen_timer_start(struct ath_hw *ah, struct ath_gen_timer *timer, 713void ath9k_hw_gen_timer_start(struct ath_hw *ah,
667 u32 timer_next, u32 timer_period); 714 struct ath_gen_timer *timer,
668void ath_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer); 715 u32 timer_next,
716 u32 timer_period);
717void ath9k_hw_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer);
718
669void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer); 719void ath_gen_timer_free(struct ath_hw *ah, struct ath_gen_timer *timer);
670void ath_gen_timer_isr(struct ath_hw *hw); 720void ath_gen_timer_isr(struct ath_hw *hw);
671u32 ath9k_hw_gettsf32(struct ath_hw *ah); 721u32 ath9k_hw_gettsf32(struct ath_hw *ah);
672 722
723void ath9k_hw_name(struct ath_hw *ah, char *hw_name, size_t len);
724
673#define ATH_PCIE_CAP_LINK_CTRL 0x70 725#define ATH_PCIE_CAP_LINK_CTRL 0x70
674#define ATH_PCIE_CAP_LINK_L0S 1 726#define ATH_PCIE_CAP_LINK_L0S 1
675#define ATH_PCIE_CAP_LINK_L1 2 727#define ATH_PCIE_CAP_LINK_L1 2
676 728
677void ath_pcie_aspm_disable(struct ath_softc *sc);
678#endif 729#endif
diff --git a/drivers/net/wireless/ath/ath9k/initvals.h b/drivers/net/wireless/ath/ath9k/initvals.h
index 8622265a030a..8a3bf3ab998d 100644
--- a/drivers/net/wireless/ath/ath9k/initvals.h
+++ b/drivers/net/wireless/ath/ath9k/initvals.h
@@ -21,6 +21,8 @@ static const u32 ar5416Modes[][6] = {
21 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 }, 21 { 0x000010f0, 0x0000a000, 0x00014000, 0x00016000, 0x0000b000, 0x00014008 },
22 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 }, 22 { 0x00008014, 0x03e803e8, 0x07d007d0, 0x10801600, 0x08400b00, 0x06e006e0 },
23 { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf }, 23 { 0x0000801c, 0x128d93a7, 0x128d93cf, 0x12e013d7, 0x12e013ab, 0x098813cf },
24 { 0x00008120, 0x08f04800, 0x08f04800, 0x08f04810, 0x08f04810, 0x08f04810 },
25 { 0x000081d0, 0x00003210, 0x00003210, 0x0000320a, 0x0000320a, 0x0000320a },
24 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, 26 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
25 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, 27 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
26 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, 28 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
@@ -31,11 +33,11 @@ static const u32 ar5416Modes[][6] = {
31 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, 33 { 0x00009848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
32 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, 34 { 0x0000a848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
33 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 }, 35 { 0x0000b848, 0x001a6a65, 0x001a6a65, 0x00197a68, 0x00197a68, 0x00197a68 },
34 { 0x00009850, 0x6c48b4e0, 0x6c48b4e0, 0x6c48b0de, 0x6c48b0de, 0x6c48b0de }, 36 { 0x00009850, 0x6c48b4e0, 0x6d48b4e0, 0x6d48b0de, 0x6c48b0de, 0x6c48b0de },
35 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e }, 37 { 0x00009858, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e, 0x7ec82d2e },
36 { 0x0000985c, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e, 0x31395d5e }, 38 { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e },
37 { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 }, 39 { 0x00009860, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18, 0x00049d18 },
38 { 0x0000c864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, 40 { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
39 { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 }, 41 { 0x00009868, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190, 0x409a4190 },
40 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 }, 42 { 0x0000986c, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081, 0x050cb081 },
41 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 }, 43 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
@@ -46,10 +48,10 @@ static const u32 ar5416Modes[][6] = {
46 { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 }, 48 { 0x0000a960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
47 { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 }, 49 { 0x0000b960, 0x00000900, 0x00000900, 0x00012d80, 0x00012d80, 0x00012d80 },
48 { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 }, 50 { 0x00009964, 0x00000000, 0x00000000, 0x00001120, 0x00001120, 0x00001120 },
49 { 0x0000c9bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 }, 51 { 0x000099bc, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00, 0x001a0a00 },
50 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be }, 52 { 0x000099c0, 0x038919be, 0x038919be, 0x038919be, 0x038919be, 0x038919be },
51 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, 53 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
52 { 0x000099c8, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c, 0x60f6532c }, 54 { 0x000099c8, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c, 0x6af6532c },
53 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, 55 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
54 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, 56 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
55 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 57 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
@@ -199,7 +201,6 @@ static const u32 ar5416Common[][2] = {
199 { 0x00008110, 0x00000168 }, 201 { 0x00008110, 0x00000168 },
200 { 0x00008118, 0x000100aa }, 202 { 0x00008118, 0x000100aa },
201 { 0x0000811c, 0x00003210 }, 203 { 0x0000811c, 0x00003210 },
202 { 0x00008120, 0x08f04800 },
203 { 0x00008124, 0x00000000 }, 204 { 0x00008124, 0x00000000 },
204 { 0x00008128, 0x00000000 }, 205 { 0x00008128, 0x00000000 },
205 { 0x0000812c, 0x00000000 }, 206 { 0x0000812c, 0x00000000 },
@@ -215,7 +216,6 @@ static const u32 ar5416Common[][2] = {
215 { 0x00008178, 0x00000100 }, 216 { 0x00008178, 0x00000100 },
216 { 0x0000817c, 0x00000000 }, 217 { 0x0000817c, 0x00000000 },
217 { 0x000081c4, 0x00000000 }, 218 { 0x000081c4, 0x00000000 },
218 { 0x000081d0, 0x00003210 },
219 { 0x000081ec, 0x00000000 }, 219 { 0x000081ec, 0x00000000 },
220 { 0x000081f0, 0x00000000 }, 220 { 0x000081f0, 0x00000000 },
221 { 0x000081f4, 0x00000000 }, 221 { 0x000081f4, 0x00000000 },
@@ -246,6 +246,7 @@ static const u32 ar5416Common[][2] = {
246 { 0x00008258, 0x00000000 }, 246 { 0x00008258, 0x00000000 },
247 { 0x0000825c, 0x400000ff }, 247 { 0x0000825c, 0x400000ff },
248 { 0x00008260, 0x00080922 }, 248 { 0x00008260, 0x00080922 },
249 { 0x00008264, 0xa8000010 },
249 { 0x00008270, 0x00000000 }, 250 { 0x00008270, 0x00000000 },
250 { 0x00008274, 0x40000000 }, 251 { 0x00008274, 0x40000000 },
251 { 0x00008278, 0x003e4180 }, 252 { 0x00008278, 0x003e4180 },
@@ -406,9 +407,9 @@ static const u32 ar5416Common[][2] = {
406 { 0x0000a25c, 0x0f0f0f01 }, 407 { 0x0000a25c, 0x0f0f0f01 },
407 { 0x0000a260, 0xdfa91f01 }, 408 { 0x0000a260, 0xdfa91f01 },
408 { 0x0000a268, 0x00000000 }, 409 { 0x0000a268, 0x00000000 },
409 { 0x0000a26c, 0x0ebae9c6 }, 410 { 0x0000a26c, 0x0e79e5c6 },
410 { 0x0000b26c, 0x0ebae9c6 }, 411 { 0x0000b26c, 0x0e79e5c6 },
411 { 0x0000c26c, 0x0ebae9c6 }, 412 { 0x0000c26c, 0x0e79e5c6 },
412 { 0x0000d270, 0x00820820 }, 413 { 0x0000d270, 0x00820820 },
413 { 0x0000a278, 0x1ce739ce }, 414 { 0x0000a278, 0x1ce739ce },
414 { 0x0000a27c, 0x051701ce }, 415 { 0x0000a27c, 0x051701ce },
@@ -2551,26 +2552,27 @@ static const u32 ar9280Modes_9280_2[][6] = {
2551 { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 }, 2552 { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 },
2552 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, 2553 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
2553 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, 2554 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
2554 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, 2555 { 0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e },
2555 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, 2556 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
2556 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, 2557 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
2557 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, 2558 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
2558 { 0x00009840, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e, 0x206a012e }, 2559 { 0x00009840, 0x206a022e, 0x206a022e, 0x206a012e, 0x206a012e, 0x206a012e },
2559 { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 }, 2560 { 0x00009844, 0x0372161e, 0x0372161e, 0x037216a0, 0x037216a0, 0x037216a0 },
2560 { 0x00009850, 0x6c4000e2, 0x6c4000e2, 0x6d4000e2, 0x6c4000e2, 0x6c4000e2 }, 2561 { 0x00009850, 0x6c4000e2, 0x6d4000e2, 0x6d4000e2, 0x6c4000e2, 0x6c4000e2 },
2561 { 0x00009858, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e }, 2562 { 0x00009858, 0x7ec88d2e, 0x7ec88d2e, 0x7ec84d2e, 0x7ec84d2e, 0x7ec84d2e },
2562 { 0x0000985c, 0x31395d5e, 0x31395d5e, 0x3139605e, 0x31395d5e, 0x31395d5e }, 2563 { 0x0000985c, 0x31395d5e, 0x3139605e, 0x3139605e, 0x31395d5e, 0x31395d5e },
2563 { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 }, 2564 { 0x00009860, 0x00048d18, 0x00048d18, 0x00048d20, 0x00048d20, 0x00048d18 },
2564 { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, 2565 { 0x00009864, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
2565 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, 2566 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 },
2566 { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 }, 2567 { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 },
2567 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 }, 2568 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
2568 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, 2569 { 0x00009918, 0x0000000a, 0x00000014, 0x00000268, 0x0000000b, 0x00000016 },
2569 { 0x00009924, 0xd00a8a0b, 0xd00a8a0b, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d }, 2570 { 0x00009924, 0xd00a8a0b, 0xd00a8a0b, 0xd00a8a0d, 0xd00a8a0d, 0xd00a8a0d },
2570 { 0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010 }, 2571 { 0x00009944, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010, 0xffbc1010 },
2571 { 0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 }, 2572 { 0x00009960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
2572 { 0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 }, 2573 { 0x0000a960, 0x00000010, 0x00000010, 0x00000010, 0x00000010, 0x00000010 },
2573 { 0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210 }, 2574 { 0x00009964, 0x00000210, 0x00000210, 0x00000210, 0x00000210, 0x00000210 },
2575 { 0x0000c968, 0x000003b5, 0x000003b5, 0x000003ce, 0x000003ce, 0x000003ce },
2574 { 0x000099b8, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c }, 2576 { 0x000099b8, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c, 0x0000001c },
2575 { 0x000099bc, 0x00000a00, 0x00000a00, 0x00000c00, 0x00000c00, 0x00000c00 }, 2577 { 0x000099bc, 0x00000a00, 0x00000a00, 0x00000c00, 0x00000c00, 0x00000c00 },
2576 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, 2578 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
@@ -2585,8 +2587,10 @@ static const u32 ar9280Modes_9280_2[][6] = {
2585 { 0x0000b20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 }, 2587 { 0x0000b20c, 0x00000014, 0x00000014, 0x0001f019, 0x0001f019, 0x0001f019 },
2586 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a }, 2588 { 0x0000a21c, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a, 0x1883800a },
2587 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 }, 2589 { 0x0000a230, 0x00000000, 0x00000000, 0x00000210, 0x00000108, 0x00000000 },
2590 { 0x0000a23c, 0x13c88000, 0x13c88000, 0x13c88001, 0x13c88000, 0x13c88000 },
2588 { 0x0000a250, 0x001ff000, 0x001ff000, 0x0004a000, 0x0004a000, 0x0004a000 }, 2591 { 0x0000a250, 0x001ff000, 0x001ff000, 0x0004a000, 0x0004a000, 0x0004a000 },
2589 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, 2592 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
2593 { 0x0000a388, 0x0c000000, 0x0c000000, 0x08000000, 0x0c000000, 0x0c000000 },
2590 { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 2594 { 0x0000a3d8, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
2591 { 0x00007894, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000 }, 2595 { 0x00007894, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000, 0x5a508000 },
2592}; 2596};
@@ -2813,7 +2817,6 @@ static const u32 ar9280Common_9280_2[][2] = {
2813 { 0x00009958, 0x2108ecff }, 2817 { 0x00009958, 0x2108ecff },
2814 { 0x00009940, 0x14750604 }, 2818 { 0x00009940, 0x14750604 },
2815 { 0x0000c95c, 0x004b6a8e }, 2819 { 0x0000c95c, 0x004b6a8e },
2816 { 0x0000c968, 0x000003ce },
2817 { 0x00009970, 0x190fb515 }, 2820 { 0x00009970, 0x190fb515 },
2818 { 0x00009974, 0x00000000 }, 2821 { 0x00009974, 0x00000000 },
2819 { 0x00009978, 0x00000001 }, 2822 { 0x00009978, 0x00000001 },
@@ -2849,7 +2852,6 @@ static const u32 ar9280Common_9280_2[][2] = {
2849 { 0x0000a22c, 0x233f7180 }, 2852 { 0x0000a22c, 0x233f7180 },
2850 { 0x0000a234, 0x20202020 }, 2853 { 0x0000a234, 0x20202020 },
2851 { 0x0000a238, 0x20202020 }, 2854 { 0x0000a238, 0x20202020 },
2852 { 0x0000a23c, 0x13c88000 },
2853 { 0x0000a240, 0x38490a20 }, 2855 { 0x0000a240, 0x38490a20 },
2854 { 0x0000a244, 0x00007bb6 }, 2856 { 0x0000a244, 0x00007bb6 },
2855 { 0x0000a248, 0x0fff3ffc }, 2857 { 0x0000a248, 0x0fff3ffc },
@@ -2859,8 +2861,8 @@ static const u32 ar9280Common_9280_2[][2] = {
2859 { 0x0000a25c, 0x0f0f0f01 }, 2861 { 0x0000a25c, 0x0f0f0f01 },
2860 { 0x0000a260, 0xdfa91f01 }, 2862 { 0x0000a260, 0xdfa91f01 },
2861 { 0x0000a268, 0x00000000 }, 2863 { 0x0000a268, 0x00000000 },
2862 { 0x0000a26c, 0x0ebae9c6 }, 2864 { 0x0000a26c, 0x0e79e5c6 },
2863 { 0x0000b26c, 0x0ebae9c6 }, 2865 { 0x0000b26c, 0x0e79e5c6 },
2864 { 0x0000d270, 0x00820820 }, 2866 { 0x0000d270, 0x00820820 },
2865 { 0x0000a278, 0x1ce739ce }, 2867 { 0x0000a278, 0x1ce739ce },
2866 { 0x0000d35c, 0x07ffffef }, 2868 { 0x0000d35c, 0x07ffffef },
@@ -2874,7 +2876,6 @@ static const u32 ar9280Common_9280_2[][2] = {
2874 { 0x0000d37c, 0x7fffffe2 }, 2876 { 0x0000d37c, 0x7fffffe2 },
2875 { 0x0000d380, 0x7f3c7bba }, 2877 { 0x0000d380, 0x7f3c7bba },
2876 { 0x0000d384, 0xf3307ff0 }, 2878 { 0x0000d384, 0xf3307ff0 },
2877 { 0x0000a388, 0x0c000000 },
2878 { 0x0000a38c, 0x20202020 }, 2879 { 0x0000a38c, 0x20202020 },
2879 { 0x0000a390, 0x20202020 }, 2880 { 0x0000a390, 0x20202020 },
2880 { 0x0000a394, 0x1ce739ce }, 2881 { 0x0000a394, 0x1ce739ce },
@@ -2940,7 +2941,7 @@ static const u32 ar9280Modes_fast_clock_9280_2[][3] = {
2940 { 0x0000801c, 0x148ec02b, 0x148ec057 }, 2941 { 0x0000801c, 0x148ec02b, 0x148ec057 },
2941 { 0x00008318, 0x000044c0, 0x00008980 }, 2942 { 0x00008318, 0x000044c0, 0x00008980 },
2942 { 0x00009820, 0x02020200, 0x02020200 }, 2943 { 0x00009820, 0x02020200, 0x02020200 },
2943 { 0x00009824, 0x00000f0f, 0x00000f0f }, 2944 { 0x00009824, 0x01000f0f, 0x01000f0f },
2944 { 0x00009828, 0x0b020001, 0x0b020001 }, 2945 { 0x00009828, 0x0b020001, 0x0b020001 },
2945 { 0x00009834, 0x00000f0f, 0x00000f0f }, 2946 { 0x00009834, 0x00000f0f, 0x00000f0f },
2946 { 0x00009844, 0x03721821, 0x03721821 }, 2947 { 0x00009844, 0x03721821, 0x03721821 },
@@ -3348,6 +3349,8 @@ static const u32 ar9280Modes_backoff_13db_rxgain_9280_2[][6] = {
3348}; 3349};
3349 3350
3350static const u32 ar9280Modes_high_power_tx_gain_9280_2[][6] = { 3351static const u32 ar9280Modes_high_power_tx_gain_9280_2[][6] = {
3352 { 0x0000a274, 0x0a19e652, 0x0a19e652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
3353 { 0x0000a27c, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce },
3351 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 3354 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
3352 { 0x0000a304, 0x00003002, 0x00003002, 0x00004002, 0x00004002, 0x00004002 }, 3355 { 0x0000a304, 0x00003002, 0x00003002, 0x00004002, 0x00004002, 0x00004002 },
3353 { 0x0000a308, 0x00006004, 0x00006004, 0x00007008, 0x00007008, 0x00007008 }, 3356 { 0x0000a308, 0x00006004, 0x00006004, 0x00007008, 0x00007008, 0x00007008 },
@@ -3376,11 +3379,11 @@ static const u32 ar9280Modes_high_power_tx_gain_9280_2[][6] = {
3376 { 0x00007840, 0x00172000, 0x00172000, 0x00172000, 0x00172000, 0x00172000 }, 3379 { 0x00007840, 0x00172000, 0x00172000, 0x00172000, 0x00172000, 0x00172000 },
3377 { 0x00007820, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480 }, 3380 { 0x00007820, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480 },
3378 { 0x00007844, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480 }, 3381 { 0x00007844, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480, 0xf258a480 },
3379 { 0x0000a274, 0x0a19e652, 0x0a19e652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
3380 { 0x0000a27c, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce, 0x050739ce },
3381}; 3382};
3382 3383
3383static const u32 ar9280Modes_original_tx_gain_9280_2[][6] = { 3384static const u32 ar9280Modes_original_tx_gain_9280_2[][6] = {
3385 { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
3386 { 0x0000a27c, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce },
3384 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 3387 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
3385 { 0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002 }, 3388 { 0x0000a304, 0x00003002, 0x00003002, 0x00003002, 0x00003002, 0x00003002 },
3386 { 0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009 }, 3389 { 0x0000a308, 0x00006004, 0x00006004, 0x00008009, 0x00008009, 0x00008009 },
@@ -3409,8 +3412,6 @@ static const u32 ar9280Modes_original_tx_gain_9280_2[][6] = {
3409 { 0x00007840, 0x00392000, 0x00392000, 0x00392000, 0x00392000, 0x00392000 }, 3412 { 0x00007840, 0x00392000, 0x00392000, 0x00392000, 0x00392000, 0x00392000 },
3410 { 0x00007820, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480 }, 3413 { 0x00007820, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480 },
3411 { 0x00007844, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480 }, 3414 { 0x00007844, 0x92592480, 0x92592480, 0x92592480, 0x92592480, 0x92592480 },
3412 { 0x0000a274, 0x0a19c652, 0x0a19c652, 0x0a1aa652, 0x0a1aa652, 0x0a1aa652 },
3413 { 0x0000a27c, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce, 0x050701ce },
3414}; 3415};
3415 3416
3416static const u32 ar9280PciePhy_clkreq_off_L1_9280[][2] = { 3417static const u32 ar9280PciePhy_clkreq_off_L1_9280[][2] = {
@@ -5918,9 +5919,6 @@ static const u_int32_t ar9287Common_9287_1_1[][2] = {
5918 { 0x000099ec, 0x0cc80caa }, 5919 { 0x000099ec, 0x0cc80caa },
5919 { 0x000099f0, 0x00000000 }, 5920 { 0x000099f0, 0x00000000 },
5920 { 0x000099fc, 0x00001042 }, 5921 { 0x000099fc, 0x00001042 },
5921 { 0x0000a1f4, 0x00fffeff },
5922 { 0x0000a1f8, 0x00f5f9ff },
5923 { 0x0000a1fc, 0xb79f6427 },
5924 { 0x0000a208, 0x803e4788 }, 5922 { 0x0000a208, 0x803e4788 },
5925 { 0x0000a210, 0x4080a333 }, 5923 { 0x0000a210, 0x4080a333 },
5926 { 0x0000a214, 0x40206c10 }, 5924 { 0x0000a214, 0x40206c10 },
@@ -5980,7 +5978,7 @@ static const u_int32_t ar9287Common_9287_1_1[][2] = {
5980 { 0x0000b3f4, 0x00000000 }, 5978 { 0x0000b3f4, 0x00000000 },
5981 { 0x0000a7d8, 0x000003f1 }, 5979 { 0x0000a7d8, 0x000003f1 },
5982 { 0x00007800, 0x00000800 }, 5980 { 0x00007800, 0x00000800 },
5983 { 0x00007804, 0x6c35ffc2 }, 5981 { 0x00007804, 0x6c35ffd2 },
5984 { 0x00007808, 0x6db6c000 }, 5982 { 0x00007808, 0x6db6c000 },
5985 { 0x0000780c, 0x6db6cb30 }, 5983 { 0x0000780c, 0x6db6cb30 },
5986 { 0x00007810, 0x6db6cb6c }, 5984 { 0x00007810, 0x6db6cb6c },
@@ -6000,7 +5998,7 @@ static const u_int32_t ar9287Common_9287_1_1[][2] = {
6000 { 0x00007848, 0x934934a8 }, 5998 { 0x00007848, 0x934934a8 },
6001 { 0x00007850, 0x00000000 }, 5999 { 0x00007850, 0x00000000 },
6002 { 0x00007854, 0x00000800 }, 6000 { 0x00007854, 0x00000800 },
6003 { 0x00007858, 0x6c35ffc2 }, 6001 { 0x00007858, 0x6c35ffd2 },
6004 { 0x0000785c, 0x6db6c000 }, 6002 { 0x0000785c, 0x6db6c000 },
6005 { 0x00007860, 0x6db6cb30 }, 6003 { 0x00007860, 0x6db6cb30 },
6006 { 0x00007864, 0x6db6cb6c }, 6004 { 0x00007864, 0x6db6cb6c },
@@ -6027,6 +6025,22 @@ static const u_int32_t ar9287Common_9287_1_1[][2] = {
6027 { 0x000078b8, 0x2a850160 }, 6025 { 0x000078b8, 0x2a850160 },
6028}; 6026};
6029 6027
6028/*
6029 * For Japanese regulatory requirements, 2484 MHz requires the following three
6030 * registers be programmed differently from the channel between 2412 and 2472 MHz.
6031 */
6032static const u_int32_t ar9287Common_normal_cck_fir_coeff_92871_1[][2] = {
6033 { 0x0000a1f4, 0x00fffeff },
6034 { 0x0000a1f8, 0x00f5f9ff },
6035 { 0x0000a1fc, 0xb79f6427 },
6036};
6037
6038static const u_int32_t ar9287Common_japan_2484_cck_fir_coeff_92871_1[][2] = {
6039 { 0x0000a1f4, 0x00000000 },
6040 { 0x0000a1f8, 0xefff0301 },
6041 { 0x0000a1fc, 0xca9228ee },
6042};
6043
6030static const u_int32_t ar9287Modes_tx_gain_9287_1_1[][6] = { 6044static const u_int32_t ar9287Modes_tx_gain_9287_1_1[][6] = {
6031 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */ 6045 /* Address 5G-HT20 5G-HT40 2G-HT40 2G-HT20 Turbo */
6032 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 6046 { 0x0000a300, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
@@ -6365,8 +6379,8 @@ static const u_int32_t ar9287PciePhy_clkreq_off_L1_9287_1_1[][2] = {
6365}; 6379};
6366 6380
6367 6381
6368/* AR9271 initialization values automaticaly created: 03/23/09 */ 6382/* AR9271 initialization values automaticaly created: 06/04/09 */
6369static const u_int32_t ar9271Modes_9271_1_0[][6] = { 6383static const u_int32_t ar9271Modes_9271[][6] = {
6370 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 }, 6384 { 0x00001030, 0x00000230, 0x00000460, 0x000002c0, 0x00000160, 0x000001e0 },
6371 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 }, 6385 { 0x00001070, 0x00000168, 0x000002d0, 0x00000318, 0x0000018c, 0x000001e0 },
6372 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 }, 6386 { 0x000010b0, 0x00000e60, 0x00001cc0, 0x00007c70, 0x00003e38, 0x00001180 },
@@ -6376,8 +6390,8 @@ static const u_int32_t ar9271Modes_9271_1_0[][6] = {
6376 { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 }, 6390 { 0x00008318, 0x00003e80, 0x00007d00, 0x00006880, 0x00003440, 0x00006880 },
6377 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 }, 6391 { 0x00009804, 0x00000300, 0x000003c4, 0x000003c4, 0x00000300, 0x00000303 },
6378 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 }, 6392 { 0x00009820, 0x02020200, 0x02020200, 0x02020200, 0x02020200, 0x02020200 },
6379 { 0x00009824, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, 6393 { 0x00009824, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e, 0x01000e0e },
6380 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 }, 6394 { 0x00009828, 0x3a020001, 0x3a020001, 0x3a020001, 0x3a020001, 0x3a020001 },
6381 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e }, 6395 { 0x00009834, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e, 0x00000e0e },
6382 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 }, 6396 { 0x00009838, 0x00000007, 0x00000007, 0x00000007, 0x00000007, 0x00000007 },
6383 { 0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e }, 6397 { 0x00009840, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e, 0x206a012e },
@@ -6391,6 +6405,7 @@ static const u_int32_t ar9271Modes_9271_1_0[][6] = {
6391 { 0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 }, 6405 { 0x00009864, 0x0000fe00, 0x0000fe00, 0x0001ce00, 0x0001ce00, 0x0001ce00 },
6392 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 }, 6406 { 0x00009868, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0, 0x5ac640d0 },
6393 { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 }, 6407 { 0x0000986c, 0x06903081, 0x06903081, 0x06903881, 0x06903881, 0x06903881 },
6408 { 0x00009910, 0x30002310, 0x30002310, 0x30002310, 0x30002310, 0x30002310 },
6394 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 }, 6409 { 0x00009914, 0x000007d0, 0x00000fa0, 0x00001130, 0x00000898, 0x000007d0 },
6395 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 }, 6410 { 0x00009918, 0x0000000a, 0x00000014, 0x00000016, 0x0000000b, 0x00000016 },
6396 { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d }, 6411 { 0x00009924, 0xd00a8007, 0xd00a8007, 0xd00a800d, 0xd00a800d, 0xd00a800d },
@@ -6401,7 +6416,7 @@ static const u_int32_t ar9271Modes_9271_1_0[][6] = {
6401 { 0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 }, 6416 { 0x000099bc, 0x00000600, 0x00000600, 0x00000c00, 0x00000c00, 0x00000c00 },
6402 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 }, 6417 { 0x000099c0, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4, 0x05eea6d4 },
6403 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 }, 6418 { 0x000099c4, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77, 0x06336f77 },
6404 { 0x000099c8, 0x6af65329, 0x6af65329, 0x6af65329, 0x6af65329, 0x6af65329 }, 6419 { 0x000099c8, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f, 0x6af6532f },
6405 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 }, 6420 { 0x000099cc, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8, 0x08f186c8 },
6406 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 }, 6421 { 0x000099d0, 0x00046384, 0x00046384, 0x00046384, 0x00046384, 0x00046384 },
6407 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 }, 6422 { 0x000099d4, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000 },
@@ -6690,7 +6705,7 @@ static const u_int32_t ar9271Modes_9271_1_0[][6] = {
6690 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e }, 6705 { 0x0000a358, 0x7999aa02, 0x7999aa02, 0x7999aa0e, 0x7999aa0e, 0x7999aa0e },
6691}; 6706};
6692 6707
6693static const u_int32_t ar9271Common_9271_1_0[][2] = { 6708static const u_int32_t ar9271Common_9271[][2] = {
6694 { 0x0000000c, 0x00000000 }, 6709 { 0x0000000c, 0x00000000 },
6695 { 0x00000030, 0x00020045 }, 6710 { 0x00000030, 0x00020045 },
6696 { 0x00000034, 0x00000005 }, 6711 { 0x00000034, 0x00000005 },
@@ -6786,7 +6801,7 @@ static const u_int32_t ar9271Common_9271_1_0[][2] = {
6786 { 0x0000803c, 0x00000000 }, 6801 { 0x0000803c, 0x00000000 },
6787 { 0x00008048, 0x00000000 }, 6802 { 0x00008048, 0x00000000 },
6788 { 0x00008054, 0x00000000 }, 6803 { 0x00008054, 0x00000000 },
6789 { 0x00008058, 0x02000000 }, 6804 { 0x00008058, 0x00000000 },
6790 { 0x0000805c, 0x000fc78f }, 6805 { 0x0000805c, 0x000fc78f },
6791 { 0x00008060, 0x0000000f }, 6806 { 0x00008060, 0x0000000f },
6792 { 0x00008064, 0x00000000 }, 6807 { 0x00008064, 0x00000000 },
@@ -6817,7 +6832,7 @@ static const u_int32_t ar9271Common_9271_1_0[][2] = {
6817 { 0x00008110, 0x00000168 }, 6832 { 0x00008110, 0x00000168 },
6818 { 0x00008118, 0x000100aa }, 6833 { 0x00008118, 0x000100aa },
6819 { 0x0000811c, 0x00003210 }, 6834 { 0x0000811c, 0x00003210 },
6820 { 0x00008120, 0x08f04814 }, 6835 { 0x00008120, 0x08f04810 },
6821 { 0x00008124, 0x00000000 }, 6836 { 0x00008124, 0x00000000 },
6822 { 0x00008128, 0x00000000 }, 6837 { 0x00008128, 0x00000000 },
6823 { 0x0000812c, 0x00000000 }, 6838 { 0x0000812c, 0x00000000 },
@@ -6864,7 +6879,7 @@ static const u_int32_t ar9271Common_9271_1_0[][2] = {
6864 { 0x00008258, 0x00000000 }, 6879 { 0x00008258, 0x00000000 },
6865 { 0x0000825c, 0x400000ff }, 6880 { 0x0000825c, 0x400000ff },
6866 { 0x00008260, 0x00080922 }, 6881 { 0x00008260, 0x00080922 },
6867 { 0x00008264, 0xa8a00010 }, 6882 { 0x00008264, 0x88a00010 },
6868 { 0x00008270, 0x00000000 }, 6883 { 0x00008270, 0x00000000 },
6869 { 0x00008274, 0x40000000 }, 6884 { 0x00008274, 0x40000000 },
6870 { 0x00008278, 0x003e4180 }, 6885 { 0x00008278, 0x003e4180 },
@@ -6896,7 +6911,7 @@ static const u_int32_t ar9271Common_9271_1_0[][2] = {
6896 { 0x00007814, 0x924934a8 }, 6911 { 0x00007814, 0x924934a8 },
6897 { 0x0000781c, 0x00000000 }, 6912 { 0x0000781c, 0x00000000 },
6898 { 0x00007820, 0x00000c04 }, 6913 { 0x00007820, 0x00000c04 },
6899 { 0x00007824, 0x00d86bff }, 6914 { 0x00007824, 0x00d8abff },
6900 { 0x00007828, 0x66964300 }, 6915 { 0x00007828, 0x66964300 },
6901 { 0x0000782c, 0x8db6d961 }, 6916 { 0x0000782c, 0x8db6d961 },
6902 { 0x00007830, 0x8db6d96c }, 6917 { 0x00007830, 0x8db6d96c },
@@ -6930,7 +6945,6 @@ static const u_int32_t ar9271Common_9271_1_0[][2] = {
6930 { 0x00009904, 0x00000000 }, 6945 { 0x00009904, 0x00000000 },
6931 { 0x00009908, 0x00000000 }, 6946 { 0x00009908, 0x00000000 },
6932 { 0x0000990c, 0x00000000 }, 6947 { 0x0000990c, 0x00000000 },
6933 { 0x00009910, 0x30002310 },
6934 { 0x0000991c, 0x10000fff }, 6948 { 0x0000991c, 0x10000fff },
6935 { 0x00009920, 0x04900000 }, 6949 { 0x00009920, 0x04900000 },
6936 { 0x00009928, 0x00000001 }, 6950 { 0x00009928, 0x00000001 },
@@ -6944,7 +6958,7 @@ static const u_int32_t ar9271Common_9271_1_0[][2] = {
6944 { 0x00009954, 0x5f3ca3de }, 6958 { 0x00009954, 0x5f3ca3de },
6945 { 0x00009958, 0x0108ecff }, 6959 { 0x00009958, 0x0108ecff },
6946 { 0x00009968, 0x000003ce }, 6960 { 0x00009968, 0x000003ce },
6947 { 0x00009970, 0x192bb515 }, 6961 { 0x00009970, 0x192bb514 },
6948 { 0x00009974, 0x00000000 }, 6962 { 0x00009974, 0x00000000 },
6949 { 0x00009978, 0x00000001 }, 6963 { 0x00009978, 0x00000001 },
6950 { 0x0000997c, 0x00000000 }, 6964 { 0x0000997c, 0x00000000 },
@@ -7031,3 +7045,8 @@ static const u_int32_t ar9271Common_9271_1_0[][2] = {
7031 { 0x0000d380, 0x7f3c7bba }, 7045 { 0x0000d380, 0x7f3c7bba },
7032 { 0x0000d384, 0xf3307ff0 }, 7046 { 0x0000d384, 0xf3307ff0 },
7033}; 7047};
7048
7049static const u_int32_t ar9271Modes_9271_1_0_only[][6] = {
7050 { 0x00009910, 0x30002311, 0x30002311, 0x30002311, 0x30002311, 0x30002311 },
7051 { 0x00009828, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001, 0x0a020001 },
7052};
diff --git a/drivers/net/wireless/ath/ath9k/mac.c b/drivers/net/wireless/ath/ath9k/mac.c
index 800bfab94635..46466ffebcb0 100644
--- a/drivers/net/wireless/ath/ath9k/mac.c
+++ b/drivers/net/wireless/ath/ath9k/mac.c
@@ -14,16 +14,16 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "ath9k.h" 17#include "hw.h"
18 18
19static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah, 19static void ath9k_hw_set_txq_interrupts(struct ath_hw *ah,
20 struct ath9k_tx_queue_info *qi) 20 struct ath9k_tx_queue_info *qi)
21{ 21{
22 DPRINTF(ah->ah_sc, ATH_DBG_INTERRUPT, 22 ath_print(ath9k_hw_common(ah), ATH_DBG_INTERRUPT,
23 "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n", 23 "tx ok 0x%x err 0x%x desc 0x%x eol 0x%x urn 0x%x\n",
24 ah->txok_interrupt_mask, ah->txerr_interrupt_mask, 24 ah->txok_interrupt_mask, ah->txerr_interrupt_mask,
25 ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask, 25 ah->txdesc_interrupt_mask, ah->txeol_interrupt_mask,
26 ah->txurn_interrupt_mask); 26 ah->txurn_interrupt_mask);
27 27
28 REG_WRITE(ah, AR_IMR_S0, 28 REG_WRITE(ah, AR_IMR_S0,
29 SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK) 29 SM(ah->txok_interrupt_mask, AR_IMR_S0_QCU_TXOK)
@@ -39,17 +39,21 @@ u32 ath9k_hw_gettxbuf(struct ath_hw *ah, u32 q)
39{ 39{
40 return REG_READ(ah, AR_QTXDP(q)); 40 return REG_READ(ah, AR_QTXDP(q));
41} 41}
42EXPORT_SYMBOL(ath9k_hw_gettxbuf);
42 43
43void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp) 44void ath9k_hw_puttxbuf(struct ath_hw *ah, u32 q, u32 txdp)
44{ 45{
45 REG_WRITE(ah, AR_QTXDP(q), txdp); 46 REG_WRITE(ah, AR_QTXDP(q), txdp);
46} 47}
48EXPORT_SYMBOL(ath9k_hw_puttxbuf);
47 49
48void ath9k_hw_txstart(struct ath_hw *ah, u32 q) 50void ath9k_hw_txstart(struct ath_hw *ah, u32 q)
49{ 51{
50 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Enable TXE on queue: %u\n", q); 52 ath_print(ath9k_hw_common(ah), ATH_DBG_QUEUE,
53 "Enable TXE on queue: %u\n", q);
51 REG_WRITE(ah, AR_Q_TXE, 1 << q); 54 REG_WRITE(ah, AR_Q_TXE, 1 << q);
52} 55}
56EXPORT_SYMBOL(ath9k_hw_txstart);
53 57
54u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q) 58u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
55{ 59{
@@ -64,6 +68,7 @@ u32 ath9k_hw_numtxpending(struct ath_hw *ah, u32 q)
64 68
65 return npend; 69 return npend;
66} 70}
71EXPORT_SYMBOL(ath9k_hw_numtxpending);
67 72
68bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel) 73bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
69{ 74{
@@ -93,27 +98,28 @@ bool ath9k_hw_updatetxtriglevel(struct ath_hw *ah, bool bIncTrigLevel)
93 98
94 return newLevel != curLevel; 99 return newLevel != curLevel;
95} 100}
101EXPORT_SYMBOL(ath9k_hw_updatetxtriglevel);
96 102
97bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q) 103bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
98{ 104{
99#define ATH9K_TX_STOP_DMA_TIMEOUT 4000 /* usec */ 105#define ATH9K_TX_STOP_DMA_TIMEOUT 4000 /* usec */
100#define ATH9K_TIME_QUANTUM 100 /* usec */ 106#define ATH9K_TIME_QUANTUM 100 /* usec */
101 107 struct ath_common *common = ath9k_hw_common(ah);
102 struct ath9k_hw_capabilities *pCap = &ah->caps; 108 struct ath9k_hw_capabilities *pCap = &ah->caps;
103 struct ath9k_tx_queue_info *qi; 109 struct ath9k_tx_queue_info *qi;
104 u32 tsfLow, j, wait; 110 u32 tsfLow, j, wait;
105 u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM; 111 u32 wait_time = ATH9K_TX_STOP_DMA_TIMEOUT / ATH9K_TIME_QUANTUM;
106 112
107 if (q >= pCap->total_queues) { 113 if (q >= pCap->total_queues) {
108 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Stopping TX DMA, " 114 ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
109 "invalid queue: %u\n", q); 115 "invalid queue: %u\n", q);
110 return false; 116 return false;
111 } 117 }
112 118
113 qi = &ah->txq[q]; 119 qi = &ah->txq[q];
114 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 120 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
115 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Stopping TX DMA, " 121 ath_print(common, ATH_DBG_QUEUE, "Stopping TX DMA, "
116 "inactive queue: %u\n", q); 122 "inactive queue: %u\n", q);
117 return false; 123 return false;
118 } 124 }
119 125
@@ -126,9 +132,9 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
126 } 132 }
127 133
128 if (ath9k_hw_numtxpending(ah, q)) { 134 if (ath9k_hw_numtxpending(ah, q)) {
129 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, 135 ath_print(common, ATH_DBG_QUEUE,
130 "%s: Num of pending TX Frames %d on Q %d\n", 136 "%s: Num of pending TX Frames %d on Q %d\n",
131 __func__, ath9k_hw_numtxpending(ah, q), q); 137 __func__, ath9k_hw_numtxpending(ah, q), q);
132 138
133 for (j = 0; j < 2; j++) { 139 for (j = 0; j < 2; j++) {
134 tsfLow = REG_READ(ah, AR_TSF_L32); 140 tsfLow = REG_READ(ah, AR_TSF_L32);
@@ -142,9 +148,9 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
142 if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10)) 148 if ((REG_READ(ah, AR_TSF_L32) >> 10) == (tsfLow >> 10))
143 break; 149 break;
144 150
145 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, 151 ath_print(common, ATH_DBG_QUEUE,
146 "TSF has moved while trying to set " 152 "TSF has moved while trying to set "
147 "quiet time TSF: 0x%08x\n", tsfLow); 153 "quiet time TSF: 0x%08x\n", tsfLow);
148 } 154 }
149 155
150 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH); 156 REG_SET_BIT(ah, AR_DIAG_SW, AR_DIAG_FORCE_CH_IDLE_HIGH);
@@ -155,9 +161,9 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
155 wait = wait_time; 161 wait = wait_time;
156 while (ath9k_hw_numtxpending(ah, q)) { 162 while (ath9k_hw_numtxpending(ah, q)) {
157 if ((--wait) == 0) { 163 if ((--wait) == 0) {
158 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, 164 ath_print(common, ATH_DBG_QUEUE,
159 "Failed to stop TX DMA in 100 " 165 "Failed to stop TX DMA in 100 "
160 "msec after killing last frame\n"); 166 "msec after killing last frame\n");
161 break; 167 break;
162 } 168 }
163 udelay(ATH9K_TIME_QUANTUM); 169 udelay(ATH9K_TIME_QUANTUM);
@@ -172,6 +178,7 @@ bool ath9k_hw_stoptxdma(struct ath_hw *ah, u32 q)
172#undef ATH9K_TX_STOP_DMA_TIMEOUT 178#undef ATH9K_TX_STOP_DMA_TIMEOUT
173#undef ATH9K_TIME_QUANTUM 179#undef ATH9K_TIME_QUANTUM
174} 180}
181EXPORT_SYMBOL(ath9k_hw_stoptxdma);
175 182
176void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds, 183void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
177 u32 segLen, bool firstSeg, 184 u32 segLen, bool firstSeg,
@@ -198,6 +205,7 @@ void ath9k_hw_filltxdesc(struct ath_hw *ah, struct ath_desc *ds,
198 ads->ds_txstatus6 = ads->ds_txstatus7 = 0; 205 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
199 ads->ds_txstatus8 = ads->ds_txstatus9 = 0; 206 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
200} 207}
208EXPORT_SYMBOL(ath9k_hw_filltxdesc);
201 209
202void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds) 210void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds)
203{ 211{
@@ -209,6 +217,7 @@ void ath9k_hw_cleartxdesc(struct ath_hw *ah, struct ath_desc *ds)
209 ads->ds_txstatus6 = ads->ds_txstatus7 = 0; 217 ads->ds_txstatus6 = ads->ds_txstatus7 = 0;
210 ads->ds_txstatus8 = ads->ds_txstatus9 = 0; 218 ads->ds_txstatus8 = ads->ds_txstatus9 = 0;
211} 219}
220EXPORT_SYMBOL(ath9k_hw_cleartxdesc);
212 221
213int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds) 222int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds)
214{ 223{
@@ -284,6 +293,7 @@ int ath9k_hw_txprocdesc(struct ath_hw *ah, struct ath_desc *ds)
284 293
285 return 0; 294 return 0;
286} 295}
296EXPORT_SYMBOL(ath9k_hw_txprocdesc);
287 297
288void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds, 298void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
289 u32 pktLen, enum ath9k_pkt_type type, u32 txPower, 299 u32 pktLen, enum ath9k_pkt_type type, u32 txPower,
@@ -319,6 +329,7 @@ void ath9k_hw_set11n_txdesc(struct ath_hw *ah, struct ath_desc *ds,
319 ads->ds_ctl11 = 0; 329 ads->ds_ctl11 = 0;
320 } 330 }
321} 331}
332EXPORT_SYMBOL(ath9k_hw_set11n_txdesc);
322 333
323void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds, 334void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
324 struct ath_desc *lastds, 335 struct ath_desc *lastds,
@@ -374,6 +385,7 @@ void ath9k_hw_set11n_ratescenario(struct ath_hw *ah, struct ath_desc *ds,
374 last_ads->ds_ctl2 = ads->ds_ctl2; 385 last_ads->ds_ctl2 = ads->ds_ctl2;
375 last_ads->ds_ctl3 = ads->ds_ctl3; 386 last_ads->ds_ctl3 = ads->ds_ctl3;
376} 387}
388EXPORT_SYMBOL(ath9k_hw_set11n_ratescenario);
377 389
378void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds, 390void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
379 u32 aggrLen) 391 u32 aggrLen)
@@ -384,6 +396,7 @@ void ath9k_hw_set11n_aggr_first(struct ath_hw *ah, struct ath_desc *ds,
384 ads->ds_ctl6 &= ~AR_AggrLen; 396 ads->ds_ctl6 &= ~AR_AggrLen;
385 ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen); 397 ads->ds_ctl6 |= SM(aggrLen, AR_AggrLen);
386} 398}
399EXPORT_SYMBOL(ath9k_hw_set11n_aggr_first);
387 400
388void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds, 401void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
389 u32 numDelims) 402 u32 numDelims)
@@ -398,6 +411,7 @@ void ath9k_hw_set11n_aggr_middle(struct ath_hw *ah, struct ath_desc *ds,
398 ctl6 |= SM(numDelims, AR_PadDelim); 411 ctl6 |= SM(numDelims, AR_PadDelim);
399 ads->ds_ctl6 = ctl6; 412 ads->ds_ctl6 = ctl6;
400} 413}
414EXPORT_SYMBOL(ath9k_hw_set11n_aggr_middle);
401 415
402void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds) 416void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds)
403{ 417{
@@ -407,6 +421,7 @@ void ath9k_hw_set11n_aggr_last(struct ath_hw *ah, struct ath_desc *ds)
407 ads->ds_ctl1 &= ~AR_MoreAggr; 421 ads->ds_ctl1 &= ~AR_MoreAggr;
408 ads->ds_ctl6 &= ~AR_PadDelim; 422 ads->ds_ctl6 &= ~AR_PadDelim;
409} 423}
424EXPORT_SYMBOL(ath9k_hw_set11n_aggr_last);
410 425
411void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds) 426void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds)
412{ 427{
@@ -414,6 +429,7 @@ void ath9k_hw_clr11n_aggr(struct ath_hw *ah, struct ath_desc *ds)
414 429
415 ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr); 430 ads->ds_ctl1 &= (~AR_IsAggr & ~AR_MoreAggr);
416} 431}
432EXPORT_SYMBOL(ath9k_hw_clr11n_aggr);
417 433
418void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds, 434void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
419 u32 burstDuration) 435 u32 burstDuration)
@@ -423,6 +439,7 @@ void ath9k_hw_set11n_burstduration(struct ath_hw *ah, struct ath_desc *ds,
423 ads->ds_ctl2 &= ~AR_BurstDur; 439 ads->ds_ctl2 &= ~AR_BurstDur;
424 ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur); 440 ads->ds_ctl2 |= SM(burstDuration, AR_BurstDur);
425} 441}
442EXPORT_SYMBOL(ath9k_hw_set11n_burstduration);
426 443
427void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds, 444void ath9k_hw_set11n_virtualmorefrag(struct ath_hw *ah, struct ath_desc *ds,
428 u32 vmf) 445 u32 vmf)
@@ -440,28 +457,30 @@ void ath9k_hw_gettxintrtxqs(struct ath_hw *ah, u32 *txqs)
440 *txqs &= ah->intr_txqs; 457 *txqs &= ah->intr_txqs;
441 ah->intr_txqs &= ~(*txqs); 458 ah->intr_txqs &= ~(*txqs);
442} 459}
460EXPORT_SYMBOL(ath9k_hw_gettxintrtxqs);
443 461
444bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q, 462bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
445 const struct ath9k_tx_queue_info *qinfo) 463 const struct ath9k_tx_queue_info *qinfo)
446{ 464{
447 u32 cw; 465 u32 cw;
466 struct ath_common *common = ath9k_hw_common(ah);
448 struct ath9k_hw_capabilities *pCap = &ah->caps; 467 struct ath9k_hw_capabilities *pCap = &ah->caps;
449 struct ath9k_tx_queue_info *qi; 468 struct ath9k_tx_queue_info *qi;
450 469
451 if (q >= pCap->total_queues) { 470 if (q >= pCap->total_queues) {
452 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Set TXQ properties, " 471 ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
453 "invalid queue: %u\n", q); 472 "invalid queue: %u\n", q);
454 return false; 473 return false;
455 } 474 }
456 475
457 qi = &ah->txq[q]; 476 qi = &ah->txq[q];
458 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 477 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
459 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Set TXQ properties, " 478 ath_print(common, ATH_DBG_QUEUE, "Set TXQ properties, "
460 "inactive queue: %u\n", q); 479 "inactive queue: %u\n", q);
461 return false; 480 return false;
462 } 481 }
463 482
464 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q); 483 ath_print(common, ATH_DBG_QUEUE, "Set queue properties for: %u\n", q);
465 484
466 qi->tqi_ver = qinfo->tqi_ver; 485 qi->tqi_ver = qinfo->tqi_ver;
467 qi->tqi_subtype = qinfo->tqi_subtype; 486 qi->tqi_subtype = qinfo->tqi_subtype;
@@ -510,23 +529,25 @@ bool ath9k_hw_set_txq_props(struct ath_hw *ah, int q,
510 529
511 return true; 530 return true;
512} 531}
532EXPORT_SYMBOL(ath9k_hw_set_txq_props);
513 533
514bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q, 534bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
515 struct ath9k_tx_queue_info *qinfo) 535 struct ath9k_tx_queue_info *qinfo)
516{ 536{
537 struct ath_common *common = ath9k_hw_common(ah);
517 struct ath9k_hw_capabilities *pCap = &ah->caps; 538 struct ath9k_hw_capabilities *pCap = &ah->caps;
518 struct ath9k_tx_queue_info *qi; 539 struct ath9k_tx_queue_info *qi;
519 540
520 if (q >= pCap->total_queues) { 541 if (q >= pCap->total_queues) {
521 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Get TXQ properties, " 542 ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
522 "invalid queue: %u\n", q); 543 "invalid queue: %u\n", q);
523 return false; 544 return false;
524 } 545 }
525 546
526 qi = &ah->txq[q]; 547 qi = &ah->txq[q];
527 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 548 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
528 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Get TXQ properties, " 549 ath_print(common, ATH_DBG_QUEUE, "Get TXQ properties, "
529 "inactive queue: %u\n", q); 550 "inactive queue: %u\n", q);
530 return false; 551 return false;
531 } 552 }
532 553
@@ -547,10 +568,12 @@ bool ath9k_hw_get_txq_props(struct ath_hw *ah, int q,
547 568
548 return true; 569 return true;
549} 570}
571EXPORT_SYMBOL(ath9k_hw_get_txq_props);
550 572
551int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type, 573int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
552 const struct ath9k_tx_queue_info *qinfo) 574 const struct ath9k_tx_queue_info *qinfo)
553{ 575{
576 struct ath_common *common = ath9k_hw_common(ah);
554 struct ath9k_tx_queue_info *qi; 577 struct ath9k_tx_queue_info *qi;
555 struct ath9k_hw_capabilities *pCap = &ah->caps; 578 struct ath9k_hw_capabilities *pCap = &ah->caps;
556 int q; 579 int q;
@@ -574,23 +597,23 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
574 ATH9K_TX_QUEUE_INACTIVE) 597 ATH9K_TX_QUEUE_INACTIVE)
575 break; 598 break;
576 if (q == pCap->total_queues) { 599 if (q == pCap->total_queues) {
577 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 600 ath_print(common, ATH_DBG_FATAL,
578 "No available TX queue\n"); 601 "No available TX queue\n");
579 return -1; 602 return -1;
580 } 603 }
581 break; 604 break;
582 default: 605 default:
583 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, "Invalid TX queue type: %u\n", 606 ath_print(common, ATH_DBG_FATAL,
584 type); 607 "Invalid TX queue type: %u\n", type);
585 return -1; 608 return -1;
586 } 609 }
587 610
588 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q); 611 ath_print(common, ATH_DBG_QUEUE, "Setup TX queue: %u\n", q);
589 612
590 qi = &ah->txq[q]; 613 qi = &ah->txq[q];
591 if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) { 614 if (qi->tqi_type != ATH9K_TX_QUEUE_INACTIVE) {
592 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 615 ath_print(common, ATH_DBG_FATAL,
593 "TX queue: %u already active\n", q); 616 "TX queue: %u already active\n", q);
594 return -1; 617 return -1;
595 } 618 }
596 memset(qi, 0, sizeof(struct ath9k_tx_queue_info)); 619 memset(qi, 0, sizeof(struct ath9k_tx_queue_info));
@@ -613,25 +636,27 @@ int ath9k_hw_setuptxqueue(struct ath_hw *ah, enum ath9k_tx_queue type,
613 636
614 return q; 637 return q;
615} 638}
639EXPORT_SYMBOL(ath9k_hw_setuptxqueue);
616 640
617bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q) 641bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
618{ 642{
619 struct ath9k_hw_capabilities *pCap = &ah->caps; 643 struct ath9k_hw_capabilities *pCap = &ah->caps;
644 struct ath_common *common = ath9k_hw_common(ah);
620 struct ath9k_tx_queue_info *qi; 645 struct ath9k_tx_queue_info *qi;
621 646
622 if (q >= pCap->total_queues) { 647 if (q >= pCap->total_queues) {
623 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Release TXQ, " 648 ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
624 "invalid queue: %u\n", q); 649 "invalid queue: %u\n", q);
625 return false; 650 return false;
626 } 651 }
627 qi = &ah->txq[q]; 652 qi = &ah->txq[q];
628 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 653 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
629 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Release TXQ, " 654 ath_print(common, ATH_DBG_QUEUE, "Release TXQ, "
630 "inactive queue: %u\n", q); 655 "inactive queue: %u\n", q);
631 return false; 656 return false;
632 } 657 }
633 658
634 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Release TX queue: %u\n", q); 659 ath_print(common, ATH_DBG_QUEUE, "Release TX queue: %u\n", q);
635 660
636 qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE; 661 qi->tqi_type = ATH9K_TX_QUEUE_INACTIVE;
637 ah->txok_interrupt_mask &= ~(1 << q); 662 ah->txok_interrupt_mask &= ~(1 << q);
@@ -643,28 +668,30 @@ bool ath9k_hw_releasetxqueue(struct ath_hw *ah, u32 q)
643 668
644 return true; 669 return true;
645} 670}
671EXPORT_SYMBOL(ath9k_hw_releasetxqueue);
646 672
647bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q) 673bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
648{ 674{
649 struct ath9k_hw_capabilities *pCap = &ah->caps; 675 struct ath9k_hw_capabilities *pCap = &ah->caps;
676 struct ath_common *common = ath9k_hw_common(ah);
650 struct ath9k_channel *chan = ah->curchan; 677 struct ath9k_channel *chan = ah->curchan;
651 struct ath9k_tx_queue_info *qi; 678 struct ath9k_tx_queue_info *qi;
652 u32 cwMin, chanCwMin, value; 679 u32 cwMin, chanCwMin, value;
653 680
654 if (q >= pCap->total_queues) { 681 if (q >= pCap->total_queues) {
655 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Reset TXQ, " 682 ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
656 "invalid queue: %u\n", q); 683 "invalid queue: %u\n", q);
657 return false; 684 return false;
658 } 685 }
659 686
660 qi = &ah->txq[q]; 687 qi = &ah->txq[q];
661 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) { 688 if (qi->tqi_type == ATH9K_TX_QUEUE_INACTIVE) {
662 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Reset TXQ, " 689 ath_print(common, ATH_DBG_QUEUE, "Reset TXQ, "
663 "inactive queue: %u\n", q); 690 "inactive queue: %u\n", q);
664 return true; 691 return true;
665 } 692 }
666 693
667 DPRINTF(ah->ah_sc, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q); 694 ath_print(common, ATH_DBG_QUEUE, "Reset TX queue: %u\n", q);
668 695
669 if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) { 696 if (qi->tqi_cwmin == ATH9K_TXQ_USEDEFAULT) {
670 if (chan && IS_CHAN_B(chan)) 697 if (chan && IS_CHAN_B(chan))
@@ -799,6 +826,7 @@ bool ath9k_hw_resettxqueue(struct ath_hw *ah, u32 q)
799 826
800 return true; 827 return true;
801} 828}
829EXPORT_SYMBOL(ath9k_hw_resettxqueue);
802 830
803int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds, 831int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
804 u32 pa, struct ath_desc *nds, u64 tsf) 832 u32 pa, struct ath_desc *nds, u64 tsf)
@@ -880,6 +908,7 @@ int ath9k_hw_rxprocdesc(struct ath_hw *ah, struct ath_desc *ds,
880 908
881 return 0; 909 return 0;
882} 910}
911EXPORT_SYMBOL(ath9k_hw_rxprocdesc);
883 912
884void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds, 913void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
885 u32 size, u32 flags) 914 u32 size, u32 flags)
@@ -895,6 +924,7 @@ void ath9k_hw_setuprxdesc(struct ath_hw *ah, struct ath_desc *ds,
895 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP)) 924 if (!(pCap->hw_caps & ATH9K_HW_CAP_AUTOSLEEP))
896 memset(&(ads->u), 0, sizeof(ads->u)); 925 memset(&(ads->u), 0, sizeof(ads->u));
897} 926}
927EXPORT_SYMBOL(ath9k_hw_setuprxdesc);
898 928
899bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set) 929bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
900{ 930{
@@ -911,8 +941,9 @@ bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
911 AR_DIAG_RX_ABORT)); 941 AR_DIAG_RX_ABORT));
912 942
913 reg = REG_READ(ah, AR_OBS_BUS_1); 943 reg = REG_READ(ah, AR_OBS_BUS_1);
914 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 944 ath_print(ath9k_hw_common(ah), ATH_DBG_FATAL,
915 "RX failed to go idle in 10 ms RXSM=0x%x\n", reg); 945 "RX failed to go idle in 10 ms RXSM=0x%x\n",
946 reg);
916 947
917 return false; 948 return false;
918 } 949 }
@@ -923,16 +954,19 @@ bool ath9k_hw_setrxabort(struct ath_hw *ah, bool set)
923 954
924 return true; 955 return true;
925} 956}
957EXPORT_SYMBOL(ath9k_hw_setrxabort);
926 958
927void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp) 959void ath9k_hw_putrxbuf(struct ath_hw *ah, u32 rxdp)
928{ 960{
929 REG_WRITE(ah, AR_RXDP, rxdp); 961 REG_WRITE(ah, AR_RXDP, rxdp);
930} 962}
963EXPORT_SYMBOL(ath9k_hw_putrxbuf);
931 964
932void ath9k_hw_rxena(struct ath_hw *ah) 965void ath9k_hw_rxena(struct ath_hw *ah)
933{ 966{
934 REG_WRITE(ah, AR_CR, AR_CR_RXE); 967 REG_WRITE(ah, AR_CR, AR_CR_RXE);
935} 968}
969EXPORT_SYMBOL(ath9k_hw_rxena);
936 970
937void ath9k_hw_startpcureceive(struct ath_hw *ah) 971void ath9k_hw_startpcureceive(struct ath_hw *ah)
938{ 972{
@@ -942,6 +976,7 @@ void ath9k_hw_startpcureceive(struct ath_hw *ah)
942 976
943 REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT)); 977 REG_CLR_BIT(ah, AR_DIAG_SW, (AR_DIAG_RX_DIS | AR_DIAG_RX_ABORT));
944} 978}
979EXPORT_SYMBOL(ath9k_hw_startpcureceive);
945 980
946void ath9k_hw_stoppcurecv(struct ath_hw *ah) 981void ath9k_hw_stoppcurecv(struct ath_hw *ah)
947{ 982{
@@ -949,12 +984,13 @@ void ath9k_hw_stoppcurecv(struct ath_hw *ah)
949 984
950 ath9k_hw_disable_mib_counters(ah); 985 ath9k_hw_disable_mib_counters(ah);
951} 986}
987EXPORT_SYMBOL(ath9k_hw_stoppcurecv);
952 988
953bool ath9k_hw_stopdmarecv(struct ath_hw *ah) 989bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
954{ 990{
955#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */ 991#define AH_RX_STOP_DMA_TIMEOUT 10000 /* usec */
956#define AH_RX_TIME_QUANTUM 100 /* usec */ 992#define AH_RX_TIME_QUANTUM 100 /* usec */
957 993 struct ath_common *common = ath9k_hw_common(ah);
958 int i; 994 int i;
959 995
960 REG_WRITE(ah, AR_CR, AR_CR_RXD); 996 REG_WRITE(ah, AR_CR, AR_CR_RXD);
@@ -967,12 +1003,12 @@ bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
967 } 1003 }
968 1004
969 if (i == 0) { 1005 if (i == 0) {
970 DPRINTF(ah->ah_sc, ATH_DBG_FATAL, 1006 ath_print(common, ATH_DBG_FATAL,
971 "DMA failed to stop in %d ms " 1007 "DMA failed to stop in %d ms "
972 "AR_CR=0x%08x AR_DIAG_SW=0x%08x\n", 1008 "AR_CR=0x%08x AR_DIAG_SW=0x%08x\n",
973 AH_RX_STOP_DMA_TIMEOUT / 1000, 1009 AH_RX_STOP_DMA_TIMEOUT / 1000,
974 REG_READ(ah, AR_CR), 1010 REG_READ(ah, AR_CR),
975 REG_READ(ah, AR_DIAG_SW)); 1011 REG_READ(ah, AR_DIAG_SW));
976 return false; 1012 return false;
977 } else { 1013 } else {
978 return true; 1014 return true;
@@ -981,3 +1017,17 @@ bool ath9k_hw_stopdmarecv(struct ath_hw *ah)
981#undef AH_RX_TIME_QUANTUM 1017#undef AH_RX_TIME_QUANTUM
982#undef AH_RX_STOP_DMA_TIMEOUT 1018#undef AH_RX_STOP_DMA_TIMEOUT
983} 1019}
1020EXPORT_SYMBOL(ath9k_hw_stopdmarecv);
1021
1022int ath9k_hw_beaconq_setup(struct ath_hw *ah)
1023{
1024 struct ath9k_tx_queue_info qi;
1025
1026 memset(&qi, 0, sizeof(qi));
1027 qi.tqi_aifs = 1;
1028 qi.tqi_cwmin = 0;
1029 qi.tqi_cwmax = 0;
1030 /* NB: don't enable any interrupts */
1031 return ath9k_hw_setuptxqueue(ah, ATH9K_TX_QUEUE_BEACON, &qi);
1032}
1033EXPORT_SYMBOL(ath9k_hw_beaconq_setup);
diff --git a/drivers/net/wireless/ath/ath9k/mac.h b/drivers/net/wireless/ath/ath9k/mac.h
index f56e77da6c3e..fefb65dafb1c 100644
--- a/drivers/net/wireless/ath/ath9k/mac.h
+++ b/drivers/net/wireless/ath/ath9k/mac.h
@@ -614,16 +614,6 @@ enum ath9k_cipher {
614 ATH9K_CIPHER_MIC = 127 614 ATH9K_CIPHER_MIC = 127
615}; 615};
616 616
617enum ath9k_ht_macmode {
618 ATH9K_HT_MACMODE_20 = 0,
619 ATH9K_HT_MACMODE_2040 = 1,
620};
621
622enum ath9k_ht_extprotspacing {
623 ATH9K_HT_EXTPROTSPACING_20 = 0,
624 ATH9K_HT_EXTPROTSPACING_25 = 1,
625};
626
627struct ath_hw; 617struct ath_hw;
628struct ath9k_channel; 618struct ath9k_channel;
629struct ath_rate_table; 619struct ath_rate_table;
@@ -677,5 +667,6 @@ void ath9k_hw_rxena(struct ath_hw *ah);
677void ath9k_hw_startpcureceive(struct ath_hw *ah); 667void ath9k_hw_startpcureceive(struct ath_hw *ah);
678void ath9k_hw_stoppcurecv(struct ath_hw *ah); 668void ath9k_hw_stoppcurecv(struct ath_hw *ah);
679bool ath9k_hw_stopdmarecv(struct ath_hw *ah); 669bool ath9k_hw_stopdmarecv(struct ath_hw *ah);
670int ath9k_hw_beaconq_setup(struct ath_hw *ah);
680 671
681#endif /* MAC_H */ 672#endif /* MAC_H */
diff --git a/drivers/net/wireless/ath/ath9k/main.c b/drivers/net/wireless/ath/ath9k/main.c
index 52bed89063d4..9fefc51aec17 100644
--- a/drivers/net/wireless/ath/ath9k/main.c
+++ b/drivers/net/wireless/ath/ath9k/main.c
@@ -16,6 +16,7 @@
16 16
17#include <linux/nl80211.h> 17#include <linux/nl80211.h>
18#include "ath9k.h" 18#include "ath9k.h"
19#include "btcoex.h"
19 20
20static char *dev_info = "ath9k"; 21static char *dev_info = "ath9k";
21 22
@@ -28,6 +29,10 @@ static int modparam_nohwcrypt;
28module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444); 29module_param_named(nohwcrypt, modparam_nohwcrypt, int, 0444);
29MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption"); 30MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption");
30 31
32static unsigned int ath9k_debug = ATH_DBG_DEFAULT;
33module_param_named(debug, ath9k_debug, uint, 0);
34MODULE_PARM_DESC(debug, "Debugging mask");
35
31/* We use the hw_value as an index into our private channel structure */ 36/* We use the hw_value as an index into our private channel structure */
32 37
33#define CHAN2G(_freq, _idx) { \ 38#define CHAN2G(_freq, _idx) { \
@@ -224,8 +229,9 @@ static void ath_setup_rates(struct ath_softc *sc, enum ieee80211_band band)
224 } 229 }
225 sband->n_bitrates++; 230 sband->n_bitrates++;
226 231
227 DPRINTF(sc, ATH_DBG_CONFIG, "Rate: %2dMbps, ratecode: %2d\n", 232 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
228 rate[i].bitrate / 10, rate[i].hw_value); 233 "Rate: %2dMbps, ratecode: %2d\n",
234 rate[i].bitrate / 10, rate[i].hw_value);
229 } 235 }
230} 236}
231 237
@@ -242,6 +248,51 @@ static struct ath9k_channel *ath_get_curchannel(struct ath_softc *sc,
242 return channel; 248 return channel;
243} 249}
244 250
251static bool ath9k_setpower(struct ath_softc *sc, enum ath9k_power_mode mode)
252{
253 unsigned long flags;
254 bool ret;
255
256 spin_lock_irqsave(&sc->sc_pm_lock, flags);
257 ret = ath9k_hw_setpower(sc->sc_ah, mode);
258 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
259
260 return ret;
261}
262
263void ath9k_ps_wakeup(struct ath_softc *sc)
264{
265 unsigned long flags;
266
267 spin_lock_irqsave(&sc->sc_pm_lock, flags);
268 if (++sc->ps_usecount != 1)
269 goto unlock;
270
271 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE);
272
273 unlock:
274 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
275}
276
277void ath9k_ps_restore(struct ath_softc *sc)
278{
279 unsigned long flags;
280
281 spin_lock_irqsave(&sc->sc_pm_lock, flags);
282 if (--sc->ps_usecount != 0)
283 goto unlock;
284
285 if (sc->ps_enabled &&
286 !(sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
287 SC_OP_WAIT_FOR_CAB |
288 SC_OP_WAIT_FOR_PSPOLL_DATA |
289 SC_OP_WAIT_FOR_TX_ACK)))
290 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_NETWORK_SLEEP);
291
292 unlock:
293 spin_unlock_irqrestore(&sc->sc_pm_lock, flags);
294}
295
245/* 296/*
246 * Set/change channels. If the channel is really being changed, it's done 297 * Set/change channels. If the channel is really being changed, it's done
247 * by reseting the chip. To accomplish this we must first cleanup any pending 298 * by reseting the chip. To accomplish this we must first cleanup any pending
@@ -251,6 +302,8 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
251 struct ath9k_channel *hchan) 302 struct ath9k_channel *hchan)
252{ 303{
253 struct ath_hw *ah = sc->sc_ah; 304 struct ath_hw *ah = sc->sc_ah;
305 struct ath_common *common = ath9k_hw_common(ah);
306 struct ieee80211_conf *conf = &common->hw->conf;
254 bool fastcc = true, stopped; 307 bool fastcc = true, stopped;
255 struct ieee80211_channel *channel = hw->conf.channel; 308 struct ieee80211_channel *channel = hw->conf.channel;
256 int r; 309 int r;
@@ -280,19 +333,19 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
280 if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET)) 333 if (!stopped || (sc->sc_flags & SC_OP_FULL_RESET))
281 fastcc = false; 334 fastcc = false;
282 335
283 DPRINTF(sc, ATH_DBG_CONFIG, 336 ath_print(common, ATH_DBG_CONFIG,
284 "(%u MHz) -> (%u MHz), chanwidth: %d\n", 337 "(%u MHz) -> (%u MHz), conf_is_ht40: %d\n",
285 sc->sc_ah->curchan->channel, 338 sc->sc_ah->curchan->channel,
286 channel->center_freq, sc->tx_chan_width); 339 channel->center_freq, conf_is_ht40(conf));
287 340
288 spin_lock_bh(&sc->sc_resetlock); 341 spin_lock_bh(&sc->sc_resetlock);
289 342
290 r = ath9k_hw_reset(ah, hchan, fastcc); 343 r = ath9k_hw_reset(ah, hchan, fastcc);
291 if (r) { 344 if (r) {
292 DPRINTF(sc, ATH_DBG_FATAL, 345 ath_print(common, ATH_DBG_FATAL,
293 "Unable to reset channel (%u Mhz) " 346 "Unable to reset channel (%u Mhz) "
294 "reset status %d\n", 347 "reset status %d\n",
295 channel->center_freq, r); 348 channel->center_freq, r);
296 spin_unlock_bh(&sc->sc_resetlock); 349 spin_unlock_bh(&sc->sc_resetlock);
297 goto ps_restore; 350 goto ps_restore;
298 } 351 }
@@ -301,8 +354,8 @@ int ath_set_channel(struct ath_softc *sc, struct ieee80211_hw *hw,
301 sc->sc_flags &= ~SC_OP_FULL_RESET; 354 sc->sc_flags &= ~SC_OP_FULL_RESET;
302 355
303 if (ath_startrecv(sc) != 0) { 356 if (ath_startrecv(sc) != 0) {
304 DPRINTF(sc, ATH_DBG_FATAL, 357 ath_print(common, ATH_DBG_FATAL,
305 "Unable to restart recv logic\n"); 358 "Unable to restart recv logic\n");
306 r = -EIO; 359 r = -EIO;
307 goto ps_restore; 360 goto ps_restore;
308 } 361 }
@@ -327,6 +380,7 @@ static void ath_ani_calibrate(unsigned long data)
327{ 380{
328 struct ath_softc *sc = (struct ath_softc *)data; 381 struct ath_softc *sc = (struct ath_softc *)data;
329 struct ath_hw *ah = sc->sc_ah; 382 struct ath_hw *ah = sc->sc_ah;
383 struct ath_common *common = ath9k_hw_common(ah);
330 bool longcal = false; 384 bool longcal = false;
331 bool shortcal = false; 385 bool shortcal = false;
332 bool aniflag = false; 386 bool aniflag = false;
@@ -353,7 +407,7 @@ static void ath_ani_calibrate(unsigned long data)
353 /* Long calibration runs independently of short calibration. */ 407 /* Long calibration runs independently of short calibration. */
354 if ((timestamp - sc->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) { 408 if ((timestamp - sc->ani.longcal_timer) >= ATH_LONG_CALINTERVAL) {
355 longcal = true; 409 longcal = true;
356 DPRINTF(sc, ATH_DBG_ANI, "longcal @%lu\n", jiffies); 410 ath_print(common, ATH_DBG_ANI, "longcal @%lu\n", jiffies);
357 sc->ani.longcal_timer = timestamp; 411 sc->ani.longcal_timer = timestamp;
358 } 412 }
359 413
@@ -361,7 +415,8 @@ static void ath_ani_calibrate(unsigned long data)
361 if (!sc->ani.caldone) { 415 if (!sc->ani.caldone) {
362 if ((timestamp - sc->ani.shortcal_timer) >= short_cal_interval) { 416 if ((timestamp - sc->ani.shortcal_timer) >= short_cal_interval) {
363 shortcal = true; 417 shortcal = true;
364 DPRINTF(sc, ATH_DBG_ANI, "shortcal @%lu\n", jiffies); 418 ath_print(common, ATH_DBG_ANI,
419 "shortcal @%lu\n", jiffies);
365 sc->ani.shortcal_timer = timestamp; 420 sc->ani.shortcal_timer = timestamp;
366 sc->ani.resetcal_timer = timestamp; 421 sc->ani.resetcal_timer = timestamp;
367 } 422 }
@@ -388,16 +443,21 @@ static void ath_ani_calibrate(unsigned long data)
388 443
389 /* Perform calibration if necessary */ 444 /* Perform calibration if necessary */
390 if (longcal || shortcal) { 445 if (longcal || shortcal) {
391 sc->ani.caldone = ath9k_hw_calibrate(ah, ah->curchan, 446 sc->ani.caldone =
392 sc->rx_chainmask, longcal); 447 ath9k_hw_calibrate(ah,
448 ah->curchan,
449 common->rx_chainmask,
450 longcal);
393 451
394 if (longcal) 452 if (longcal)
395 sc->ani.noise_floor = ath9k_hw_getchan_noise(ah, 453 sc->ani.noise_floor = ath9k_hw_getchan_noise(ah,
396 ah->curchan); 454 ah->curchan);
397 455
398 DPRINTF(sc, ATH_DBG_ANI," calibrate chan %u/%x nf: %d\n", 456 ath_print(common, ATH_DBG_ANI,
399 ah->curchan->channel, ah->curchan->channelFlags, 457 " calibrate chan %u/%x nf: %d\n",
400 sc->ani.noise_floor); 458 ah->curchan->channel,
459 ah->curchan->channelFlags,
460 sc->ani.noise_floor);
401 } 461 }
402 } 462 }
403 463
@@ -439,17 +499,22 @@ static void ath_start_ani(struct ath_softc *sc)
439 */ 499 */
440void ath_update_chainmask(struct ath_softc *sc, int is_ht) 500void ath_update_chainmask(struct ath_softc *sc, int is_ht)
441{ 501{
502 struct ath_hw *ah = sc->sc_ah;
503 struct ath_common *common = ath9k_hw_common(ah);
504
442 if ((sc->sc_flags & SC_OP_SCANNING) || is_ht || 505 if ((sc->sc_flags & SC_OP_SCANNING) || is_ht ||
443 (sc->btcoex_info.btcoex_scheme != ATH_BTCOEX_CFG_NONE)) { 506 (ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE)) {
444 sc->tx_chainmask = sc->sc_ah->caps.tx_chainmask; 507 common->tx_chainmask = ah->caps.tx_chainmask;
445 sc->rx_chainmask = sc->sc_ah->caps.rx_chainmask; 508 common->rx_chainmask = ah->caps.rx_chainmask;
446 } else { 509 } else {
447 sc->tx_chainmask = 1; 510 common->tx_chainmask = 1;
448 sc->rx_chainmask = 1; 511 common->rx_chainmask = 1;
449 } 512 }
450 513
451 DPRINTF(sc, ATH_DBG_CONFIG, "tx chmask: %d, rx chmask: %d\n", 514 ath_print(common, ATH_DBG_CONFIG,
452 sc->tx_chainmask, sc->rx_chainmask); 515 "tx chmask: %d, rx chmask: %d\n",
516 common->tx_chainmask,
517 common->rx_chainmask);
453} 518}
454 519
455static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta) 520static void ath_node_attach(struct ath_softc *sc, struct ieee80211_sta *sta)
@@ -478,6 +543,9 @@ static void ath_node_detach(struct ath_softc *sc, struct ieee80211_sta *sta)
478static void ath9k_tasklet(unsigned long data) 543static void ath9k_tasklet(unsigned long data)
479{ 544{
480 struct ath_softc *sc = (struct ath_softc *)data; 545 struct ath_softc *sc = (struct ath_softc *)data;
546 struct ath_hw *ah = sc->sc_ah;
547 struct ath_common *common = ath9k_hw_common(ah);
548
481 u32 status = sc->intrstatus; 549 u32 status = sc->intrstatus;
482 550
483 ath9k_ps_wakeup(sc); 551 ath9k_ps_wakeup(sc);
@@ -502,16 +570,17 @@ static void ath9k_tasklet(unsigned long data)
502 * TSF sync does not look correct; remain awake to sync with 570 * TSF sync does not look correct; remain awake to sync with
503 * the next Beacon. 571 * the next Beacon.
504 */ 572 */
505 DPRINTF(sc, ATH_DBG_PS, "TSFOOR - Sync with next Beacon\n"); 573 ath_print(common, ATH_DBG_PS,
574 "TSFOOR - Sync with next Beacon\n");
506 sc->sc_flags |= SC_OP_WAIT_FOR_BEACON | SC_OP_BEACON_SYNC; 575 sc->sc_flags |= SC_OP_WAIT_FOR_BEACON | SC_OP_BEACON_SYNC;
507 } 576 }
508 577
509 if (sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE) 578 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
510 if (status & ATH9K_INT_GENTIMER) 579 if (status & ATH9K_INT_GENTIMER)
511 ath_gen_timer_isr(sc->sc_ah); 580 ath_gen_timer_isr(sc->sc_ah);
512 581
513 /* re-enable hardware interrupt */ 582 /* re-enable hardware interrupt */
514 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); 583 ath9k_hw_set_interrupts(ah, sc->imask);
515 ath9k_ps_restore(sc); 584 ath9k_ps_restore(sc);
516} 585}
517 586
@@ -602,7 +671,7 @@ irqreturn_t ath_isr(int irq, void *dev)
602 if (status & ATH9K_INT_TIM_TIMER) { 671 if (status & ATH9K_INT_TIM_TIMER) {
603 /* Clear RxAbort bit so that we can 672 /* Clear RxAbort bit so that we can
604 * receive frames */ 673 * receive frames */
605 ath9k_hw_setpower(ah, ATH9K_PM_AWAKE); 674 ath9k_setpower(sc, ATH9K_PM_AWAKE);
606 ath9k_hw_setrxabort(sc->sc_ah, 0); 675 ath9k_hw_setrxabort(sc->sc_ah, 0);
607 sc->sc_flags |= SC_OP_WAIT_FOR_BEACON; 676 sc->sc_flags |= SC_OP_WAIT_FOR_BEACON;
608 } 677 }
@@ -702,8 +771,8 @@ static int ath_setkey_tkip(struct ath_softc *sc, u16 keyix, const u8 *key,
702 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic)); 771 memcpy(hk->kv_mic, key_txmic, sizeof(hk->kv_mic));
703 if (!ath9k_hw_set_keycache_entry(sc->sc_ah, keyix, hk, NULL)) { 772 if (!ath9k_hw_set_keycache_entry(sc->sc_ah, keyix, hk, NULL)) {
704 /* TX MIC entry failed. No need to proceed further */ 773 /* TX MIC entry failed. No need to proceed further */
705 DPRINTF(sc, ATH_DBG_FATAL, 774 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
706 "Setting TX MIC Key Failed\n"); 775 "Setting TX MIC Key Failed\n");
707 return 0; 776 return 0;
708 } 777 }
709 778
@@ -890,6 +959,7 @@ static void ath_key_delete(struct ath_softc *sc, struct ieee80211_key_conf *key)
890static void setup_ht_cap(struct ath_softc *sc, 959static void setup_ht_cap(struct ath_softc *sc,
891 struct ieee80211_sta_ht_cap *ht_info) 960 struct ieee80211_sta_ht_cap *ht_info)
892{ 961{
962 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
893 u8 tx_streams, rx_streams; 963 u8 tx_streams, rx_streams;
894 964
895 ht_info->ht_supported = true; 965 ht_info->ht_supported = true;
@@ -903,12 +973,15 @@ static void setup_ht_cap(struct ath_softc *sc,
903 973
904 /* set up supported mcs set */ 974 /* set up supported mcs set */
905 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs)); 975 memset(&ht_info->mcs, 0, sizeof(ht_info->mcs));
906 tx_streams = !(sc->tx_chainmask & (sc->tx_chainmask - 1)) ? 1 : 2; 976 tx_streams = !(common->tx_chainmask & (common->tx_chainmask - 1)) ?
907 rx_streams = !(sc->rx_chainmask & (sc->rx_chainmask - 1)) ? 1 : 2; 977 1 : 2;
978 rx_streams = !(common->rx_chainmask & (common->rx_chainmask - 1)) ?
979 1 : 2;
908 980
909 if (tx_streams != rx_streams) { 981 if (tx_streams != rx_streams) {
910 DPRINTF(sc, ATH_DBG_CONFIG, "TX streams %d, RX streams: %d\n", 982 ath_print(common, ATH_DBG_CONFIG,
911 tx_streams, rx_streams); 983 "TX streams %d, RX streams: %d\n",
984 tx_streams, rx_streams);
912 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF; 985 ht_info->mcs.tx_params |= IEEE80211_HT_MCS_TX_RX_DIFF;
913 ht_info->mcs.tx_params |= ((tx_streams - 1) << 986 ht_info->mcs.tx_params |= ((tx_streams - 1) <<
914 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT); 987 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
@@ -925,14 +998,17 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
925 struct ieee80211_vif *vif, 998 struct ieee80211_vif *vif,
926 struct ieee80211_bss_conf *bss_conf) 999 struct ieee80211_bss_conf *bss_conf)
927{ 1000{
1001 struct ath_hw *ah = sc->sc_ah;
1002 struct ath_common *common = ath9k_hw_common(ah);
928 1003
929 if (bss_conf->assoc) { 1004 if (bss_conf->assoc) {
930 DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info ASSOC %d, bssid: %pM\n", 1005 ath_print(common, ATH_DBG_CONFIG,
931 bss_conf->aid, sc->curbssid); 1006 "Bss Info ASSOC %d, bssid: %pM\n",
1007 bss_conf->aid, common->curbssid);
932 1008
933 /* New association, store aid */ 1009 /* New association, store aid */
934 sc->curaid = bss_conf->aid; 1010 common->curaid = bss_conf->aid;
935 ath9k_hw_write_associd(sc); 1011 ath9k_hw_write_associd(ah);
936 1012
937 /* 1013 /*
938 * Request a re-configuration of Beacon related timers 1014 * Request a re-configuration of Beacon related timers
@@ -949,8 +1025,8 @@ static void ath9k_bss_assoc_info(struct ath_softc *sc,
949 1025
950 ath_start_ani(sc); 1026 ath_start_ani(sc);
951 } else { 1027 } else {
952 DPRINTF(sc, ATH_DBG_CONFIG, "Bss Info DISASSOC\n"); 1028 ath_print(common, ATH_DBG_CONFIG, "Bss Info DISASSOC\n");
953 sc->curaid = 0; 1029 common->curaid = 0;
954 /* Stop ANI */ 1030 /* Stop ANI */
955 del_timer_sync(&sc->ani.timer); 1031 del_timer_sync(&sc->ani.timer);
956 } 1032 }
@@ -1042,8 +1118,8 @@ static int ath_register_led(struct ath_softc *sc, struct ath_led *led,
1042 1118
1043 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev); 1119 ret = led_classdev_register(wiphy_dev(sc->hw->wiphy), &led->led_cdev);
1044 if (ret) 1120 if (ret)
1045 DPRINTF(sc, ATH_DBG_FATAL, 1121 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1046 "Failed to register led:%s", led->name); 1122 "Failed to register led:%s", led->name);
1047 else 1123 else
1048 led->registered = 1; 1124 led->registered = 1;
1049 return ret; 1125 return ret;
@@ -1127,6 +1203,7 @@ fail:
1127void ath_radio_enable(struct ath_softc *sc) 1203void ath_radio_enable(struct ath_softc *sc)
1128{ 1204{
1129 struct ath_hw *ah = sc->sc_ah; 1205 struct ath_hw *ah = sc->sc_ah;
1206 struct ath_common *common = ath9k_hw_common(ah);
1130 struct ieee80211_channel *channel = sc->hw->conf.channel; 1207 struct ieee80211_channel *channel = sc->hw->conf.channel;
1131 int r; 1208 int r;
1132 1209
@@ -1139,17 +1216,17 @@ void ath_radio_enable(struct ath_softc *sc)
1139 spin_lock_bh(&sc->sc_resetlock); 1216 spin_lock_bh(&sc->sc_resetlock);
1140 r = ath9k_hw_reset(ah, ah->curchan, false); 1217 r = ath9k_hw_reset(ah, ah->curchan, false);
1141 if (r) { 1218 if (r) {
1142 DPRINTF(sc, ATH_DBG_FATAL, 1219 ath_print(common, ATH_DBG_FATAL,
1143 "Unable to reset channel %u (%uMhz) ", 1220 "Unable to reset channel %u (%uMhz) ",
1144 "reset status %d\n", 1221 "reset status %d\n",
1145 channel->center_freq, r); 1222 channel->center_freq, r);
1146 } 1223 }
1147 spin_unlock_bh(&sc->sc_resetlock); 1224 spin_unlock_bh(&sc->sc_resetlock);
1148 1225
1149 ath_update_txpow(sc); 1226 ath_update_txpow(sc);
1150 if (ath_startrecv(sc) != 0) { 1227 if (ath_startrecv(sc) != 0) {
1151 DPRINTF(sc, ATH_DBG_FATAL, 1228 ath_print(common, ATH_DBG_FATAL,
1152 "Unable to restart recv logic\n"); 1229 "Unable to restart recv logic\n");
1153 return; 1230 return;
1154 } 1231 }
1155 1232
@@ -1194,17 +1271,17 @@ void ath_radio_disable(struct ath_softc *sc)
1194 spin_lock_bh(&sc->sc_resetlock); 1271 spin_lock_bh(&sc->sc_resetlock);
1195 r = ath9k_hw_reset(ah, ah->curchan, false); 1272 r = ath9k_hw_reset(ah, ah->curchan, false);
1196 if (r) { 1273 if (r) {
1197 DPRINTF(sc, ATH_DBG_FATAL, 1274 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1198 "Unable to reset channel %u (%uMhz) " 1275 "Unable to reset channel %u (%uMhz) "
1199 "reset status %d\n", 1276 "reset status %d\n",
1200 channel->center_freq, r); 1277 channel->center_freq, r);
1201 } 1278 }
1202 spin_unlock_bh(&sc->sc_resetlock); 1279 spin_unlock_bh(&sc->sc_resetlock);
1203 1280
1204 ath9k_hw_phy_disable(ah); 1281 ath9k_hw_phy_disable(ah);
1205 ath9k_hw_configpcipowersave(ah, 1, 1); 1282 ath9k_hw_configpcipowersave(ah, 1, 1);
1206 ath9k_ps_restore(sc); 1283 ath9k_ps_restore(sc);
1207 ath9k_hw_setpower(ah, ATH9K_PM_FULL_SLEEP); 1284 ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
1208} 1285}
1209 1286
1210/*******************/ 1287/*******************/
@@ -1236,23 +1313,26 @@ static void ath_start_rfkill_poll(struct ath_softc *sc)
1236 wiphy_rfkill_start_polling(sc->hw->wiphy); 1313 wiphy_rfkill_start_polling(sc->hw->wiphy);
1237} 1314}
1238 1315
1239void ath_cleanup(struct ath_softc *sc) 1316static void ath9k_uninit_hw(struct ath_softc *sc)
1240{ 1317{
1241 ath_detach(sc); 1318 struct ath_hw *ah = sc->sc_ah;
1242 free_irq(sc->irq, sc); 1319
1243 ath_bus_cleanup(sc); 1320 BUG_ON(!ah);
1244 kfree(sc->sec_wiphy); 1321
1245 ieee80211_free_hw(sc->hw); 1322 ath9k_exit_debug(ah);
1323 ath9k_hw_detach(ah);
1324 sc->sc_ah = NULL;
1246} 1325}
1247 1326
1248void ath_detach(struct ath_softc *sc) 1327static void ath_clean_core(struct ath_softc *sc)
1249{ 1328{
1250 struct ieee80211_hw *hw = sc->hw; 1329 struct ieee80211_hw *hw = sc->hw;
1330 struct ath_hw *ah = sc->sc_ah;
1251 int i = 0; 1331 int i = 0;
1252 1332
1253 ath9k_ps_wakeup(sc); 1333 ath9k_ps_wakeup(sc);
1254 1334
1255 DPRINTF(sc, ATH_DBG_CONFIG, "Detach ATH hw\n"); 1335 dev_dbg(sc->dev, "Detach ATH hw\n");
1256 1336
1257 ath_deinit_leds(sc); 1337 ath_deinit_leds(sc);
1258 wiphy_rfkill_stop_polling(sc->hw->wiphy); 1338 wiphy_rfkill_stop_polling(sc->hw->wiphy);
@@ -1273,20 +1353,36 @@ void ath_detach(struct ath_softc *sc)
1273 tasklet_kill(&sc->bcon_tasklet); 1353 tasklet_kill(&sc->bcon_tasklet);
1274 1354
1275 if (!(sc->sc_flags & SC_OP_INVALID)) 1355 if (!(sc->sc_flags & SC_OP_INVALID))
1276 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); 1356 ath9k_setpower(sc, ATH9K_PM_AWAKE);
1277 1357
1278 /* cleanup tx queues */ 1358 /* cleanup tx queues */
1279 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) 1359 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1280 if (ATH_TXQ_SETUP(sc, i)) 1360 if (ATH_TXQ_SETUP(sc, i))
1281 ath_tx_cleanupq(sc, &sc->tx.txq[i]); 1361 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1282 1362
1283 if ((sc->btcoex_info.no_stomp_timer) && 1363 if ((sc->btcoex.no_stomp_timer) &&
1284 sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE) 1364 ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
1285 ath_gen_timer_free(sc->sc_ah, sc->btcoex_info.no_stomp_timer); 1365 ath_gen_timer_free(ah, sc->btcoex.no_stomp_timer);
1366}
1286 1367
1287 ath9k_hw_detach(sc->sc_ah); 1368void ath_detach(struct ath_softc *sc)
1288 sc->sc_ah = NULL; 1369{
1289 ath9k_exit_debug(sc); 1370 ath_clean_core(sc);
1371 ath9k_uninit_hw(sc);
1372}
1373
1374void ath_cleanup(struct ath_softc *sc)
1375{
1376 struct ath_hw *ah = sc->sc_ah;
1377 struct ath_common *common = ath9k_hw_common(ah);
1378
1379 ath_clean_core(sc);
1380 free_irq(sc->irq, sc);
1381 ath_bus_cleanup(common);
1382 kfree(sc->sec_wiphy);
1383 ieee80211_free_hw(sc->hw);
1384
1385 ath9k_uninit_hw(sc);
1290} 1386}
1291 1387
1292static int ath9k_reg_notifier(struct wiphy *wiphy, 1388static int ath9k_reg_notifier(struct wiphy *wiphy,
@@ -1295,29 +1391,245 @@ static int ath9k_reg_notifier(struct wiphy *wiphy,
1295 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 1391 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
1296 struct ath_wiphy *aphy = hw->priv; 1392 struct ath_wiphy *aphy = hw->priv;
1297 struct ath_softc *sc = aphy->sc; 1393 struct ath_softc *sc = aphy->sc;
1298 struct ath_regulatory *reg = &sc->common.regulatory; 1394 struct ath_regulatory *reg = ath9k_hw_regulatory(sc->sc_ah);
1299 1395
1300 return ath_reg_notifier_apply(wiphy, request, reg); 1396 return ath_reg_notifier_apply(wiphy, request, reg);
1301} 1397}
1302 1398
1303/* 1399/*
1400 * Detects if there is any priority bt traffic
1401 */
1402static void ath_detect_bt_priority(struct ath_softc *sc)
1403{
1404 struct ath_btcoex *btcoex = &sc->btcoex;
1405 struct ath_hw *ah = sc->sc_ah;
1406
1407 if (ath9k_hw_gpio_get(sc->sc_ah, ah->btcoex_hw.btpriority_gpio))
1408 btcoex->bt_priority_cnt++;
1409
1410 if (time_after(jiffies, btcoex->bt_priority_time +
1411 msecs_to_jiffies(ATH_BT_PRIORITY_TIME_THRESHOLD))) {
1412 if (btcoex->bt_priority_cnt >= ATH_BT_CNT_THRESHOLD) {
1413 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_BTCOEX,
1414 "BT priority traffic detected");
1415 sc->sc_flags |= SC_OP_BT_PRIORITY_DETECTED;
1416 } else {
1417 sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
1418 }
1419
1420 btcoex->bt_priority_cnt = 0;
1421 btcoex->bt_priority_time = jiffies;
1422 }
1423}
1424
1425/*
1426 * Configures appropriate weight based on stomp type.
1427 */
1428static void ath9k_btcoex_bt_stomp(struct ath_softc *sc,
1429 enum ath_stomp_type stomp_type)
1430{
1431 struct ath_hw *ah = sc->sc_ah;
1432
1433 switch (stomp_type) {
1434 case ATH_BTCOEX_STOMP_ALL:
1435 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
1436 AR_STOMP_ALL_WLAN_WGHT);
1437 break;
1438 case ATH_BTCOEX_STOMP_LOW:
1439 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
1440 AR_STOMP_LOW_WLAN_WGHT);
1441 break;
1442 case ATH_BTCOEX_STOMP_NONE:
1443 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
1444 AR_STOMP_NONE_WLAN_WGHT);
1445 break;
1446 default:
1447 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
1448 "Invalid Stomptype\n");
1449 break;
1450 }
1451
1452 ath9k_hw_btcoex_enable(ah);
1453}
1454
1455static void ath9k_gen_timer_start(struct ath_hw *ah,
1456 struct ath_gen_timer *timer,
1457 u32 timer_next,
1458 u32 timer_period)
1459{
1460 struct ath_common *common = ath9k_hw_common(ah);
1461 struct ath_softc *sc = (struct ath_softc *) common->priv;
1462
1463 ath9k_hw_gen_timer_start(ah, timer, timer_next, timer_period);
1464
1465 if ((sc->imask & ATH9K_INT_GENTIMER) == 0) {
1466 ath9k_hw_set_interrupts(ah, 0);
1467 sc->imask |= ATH9K_INT_GENTIMER;
1468 ath9k_hw_set_interrupts(ah, sc->imask);
1469 }
1470}
1471
1472static void ath9k_gen_timer_stop(struct ath_hw *ah, struct ath_gen_timer *timer)
1473{
1474 struct ath_common *common = ath9k_hw_common(ah);
1475 struct ath_softc *sc = (struct ath_softc *) common->priv;
1476 struct ath_gen_timer_table *timer_table = &ah->hw_gen_timers;
1477
1478 ath9k_hw_gen_timer_stop(ah, timer);
1479
1480 /* if no timer is enabled, turn off interrupt mask */
1481 if (timer_table->timer_mask.val == 0) {
1482 ath9k_hw_set_interrupts(ah, 0);
1483 sc->imask &= ~ATH9K_INT_GENTIMER;
1484 ath9k_hw_set_interrupts(ah, sc->imask);
1485 }
1486}
1487
1488/*
1489 * This is the master bt coex timer which runs for every
1490 * 45ms, bt traffic will be given priority during 55% of this
1491 * period while wlan gets remaining 45%
1492 */
1493static void ath_btcoex_period_timer(unsigned long data)
1494{
1495 struct ath_softc *sc = (struct ath_softc *) data;
1496 struct ath_hw *ah = sc->sc_ah;
1497 struct ath_btcoex *btcoex = &sc->btcoex;
1498
1499 ath_detect_bt_priority(sc);
1500
1501 spin_lock_bh(&btcoex->btcoex_lock);
1502
1503 ath9k_btcoex_bt_stomp(sc, btcoex->bt_stomp_type);
1504
1505 spin_unlock_bh(&btcoex->btcoex_lock);
1506
1507 if (btcoex->btcoex_period != btcoex->btcoex_no_stomp) {
1508 if (btcoex->hw_timer_enabled)
1509 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
1510
1511 ath9k_gen_timer_start(ah,
1512 btcoex->no_stomp_timer,
1513 (ath9k_hw_gettsf32(ah) +
1514 btcoex->btcoex_no_stomp),
1515 btcoex->btcoex_no_stomp * 10);
1516 btcoex->hw_timer_enabled = true;
1517 }
1518
1519 mod_timer(&btcoex->period_timer, jiffies +
1520 msecs_to_jiffies(ATH_BTCOEX_DEF_BT_PERIOD));
1521}
1522
1523/*
1524 * Generic tsf based hw timer which configures weight
1525 * registers to time slice between wlan and bt traffic
1526 */
1527static void ath_btcoex_no_stomp_timer(void *arg)
1528{
1529 struct ath_softc *sc = (struct ath_softc *)arg;
1530 struct ath_hw *ah = sc->sc_ah;
1531 struct ath_btcoex *btcoex = &sc->btcoex;
1532
1533 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
1534 "no stomp timer running \n");
1535
1536 spin_lock_bh(&btcoex->btcoex_lock);
1537
1538 if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_LOW)
1539 ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_NONE);
1540 else if (btcoex->bt_stomp_type == ATH_BTCOEX_STOMP_ALL)
1541 ath9k_btcoex_bt_stomp(sc, ATH_BTCOEX_STOMP_LOW);
1542
1543 spin_unlock_bh(&btcoex->btcoex_lock);
1544}
1545
1546static int ath_init_btcoex_timer(struct ath_softc *sc)
1547{
1548 struct ath_btcoex *btcoex = &sc->btcoex;
1549
1550 btcoex->btcoex_period = ATH_BTCOEX_DEF_BT_PERIOD * 1000;
1551 btcoex->btcoex_no_stomp = (100 - ATH_BTCOEX_DEF_DUTY_CYCLE) *
1552 btcoex->btcoex_period / 100;
1553
1554 setup_timer(&btcoex->period_timer, ath_btcoex_period_timer,
1555 (unsigned long) sc);
1556
1557 spin_lock_init(&btcoex->btcoex_lock);
1558
1559 btcoex->no_stomp_timer = ath_gen_timer_alloc(sc->sc_ah,
1560 ath_btcoex_no_stomp_timer,
1561 ath_btcoex_no_stomp_timer,
1562 (void *) sc, AR_FIRST_NDP_TIMER);
1563
1564 if (!btcoex->no_stomp_timer)
1565 return -ENOMEM;
1566
1567 return 0;
1568}
1569
1570/*
1571 * Read and write, they both share the same lock. We do this to serialize
1572 * reads and writes on Atheros 802.11n PCI devices only. This is required
1573 * as the FIFO on these devices can only accept sanely 2 requests. After
1574 * that the device goes bananas. Serializing the reads/writes prevents this
1575 * from happening.
1576 */
1577
1578static void ath9k_iowrite32(void *hw_priv, u32 val, u32 reg_offset)
1579{
1580 struct ath_hw *ah = (struct ath_hw *) hw_priv;
1581 struct ath_common *common = ath9k_hw_common(ah);
1582 struct ath_softc *sc = (struct ath_softc *) common->priv;
1583
1584 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
1585 unsigned long flags;
1586 spin_lock_irqsave(&sc->sc_serial_rw, flags);
1587 iowrite32(val, sc->mem + reg_offset);
1588 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
1589 } else
1590 iowrite32(val, sc->mem + reg_offset);
1591}
1592
1593static unsigned int ath9k_ioread32(void *hw_priv, u32 reg_offset)
1594{
1595 struct ath_hw *ah = (struct ath_hw *) hw_priv;
1596 struct ath_common *common = ath9k_hw_common(ah);
1597 struct ath_softc *sc = (struct ath_softc *) common->priv;
1598 u32 val;
1599
1600 if (ah->config.serialize_regmode == SER_REG_MODE_ON) {
1601 unsigned long flags;
1602 spin_lock_irqsave(&sc->sc_serial_rw, flags);
1603 val = ioread32(sc->mem + reg_offset);
1604 spin_unlock_irqrestore(&sc->sc_serial_rw, flags);
1605 } else
1606 val = ioread32(sc->mem + reg_offset);
1607 return val;
1608}
1609
1610static const struct ath_ops ath9k_common_ops = {
1611 .read = ath9k_ioread32,
1612 .write = ath9k_iowrite32,
1613};
1614
1615/*
1304 * Initialize and fill ath_softc, ath_sofct is the 1616 * Initialize and fill ath_softc, ath_sofct is the
1305 * "Software Carrier" struct. Historically it has existed 1617 * "Software Carrier" struct. Historically it has existed
1306 * to allow the separation between hardware specific 1618 * to allow the separation between hardware specific
1307 * variables (now in ath_hw) and driver specific variables. 1619 * variables (now in ath_hw) and driver specific variables.
1308 */ 1620 */
1309static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid) 1621static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid,
1622 const struct ath_bus_ops *bus_ops)
1310{ 1623{
1311 struct ath_hw *ah = NULL; 1624 struct ath_hw *ah = NULL;
1625 struct ath_common *common;
1312 int r = 0, i; 1626 int r = 0, i;
1313 int csz = 0; 1627 int csz = 0;
1628 int qnum;
1314 1629
1315 /* XXX: hardware will not be ready until ath_open() being called */ 1630 /* XXX: hardware will not be ready until ath_open() being called */
1316 sc->sc_flags |= SC_OP_INVALID; 1631 sc->sc_flags |= SC_OP_INVALID;
1317 1632
1318 if (ath9k_init_debug(sc) < 0)
1319 printk(KERN_ERR "Unable to create debugfs files\n");
1320
1321 spin_lock_init(&sc->wiphy_lock); 1633 spin_lock_init(&sc->wiphy_lock);
1322 spin_lock_init(&sc->sc_resetlock); 1634 spin_lock_init(&sc->sc_resetlock);
1323 spin_lock_init(&sc->sc_serial_rw); 1635 spin_lock_init(&sc->sc_serial_rw);
@@ -1328,39 +1640,50 @@ static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid)
1328 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet, 1640 tasklet_init(&sc->bcon_tasklet, ath_beacon_tasklet,
1329 (unsigned long)sc); 1641 (unsigned long)sc);
1330 1642
1331 /*
1332 * Cache line size is used to size and align various
1333 * structures used to communicate with the hardware.
1334 */
1335 ath_read_cachesize(sc, &csz);
1336 /* XXX assert csz is non-zero */
1337 sc->common.cachelsz = csz << 2; /* convert to bytes */
1338
1339 ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL); 1643 ah = kzalloc(sizeof(struct ath_hw), GFP_KERNEL);
1340 if (!ah) { 1644 if (!ah)
1341 r = -ENOMEM; 1645 return -ENOMEM;
1342 goto bad_no_ah;
1343 }
1344 1646
1345 ah->ah_sc = sc;
1346 ah->hw_version.devid = devid; 1647 ah->hw_version.devid = devid;
1347 ah->hw_version.subsysid = subsysid; 1648 ah->hw_version.subsysid = subsysid;
1348 sc->sc_ah = ah; 1649 sc->sc_ah = ah;
1349 1650
1651 common = ath9k_hw_common(ah);
1652 common->ops = &ath9k_common_ops;
1653 common->bus_ops = bus_ops;
1654 common->ah = ah;
1655 common->hw = sc->hw;
1656 common->priv = sc;
1657 common->debug_mask = ath9k_debug;
1658
1659 /*
1660 * Cache line size is used to size and align various
1661 * structures used to communicate with the hardware.
1662 */
1663 ath_read_cachesize(common, &csz);
1664 /* XXX assert csz is non-zero */
1665 common->cachelsz = csz << 2; /* convert to bytes */
1666
1350 r = ath9k_hw_init(ah); 1667 r = ath9k_hw_init(ah);
1351 if (r) { 1668 if (r) {
1352 DPRINTF(sc, ATH_DBG_FATAL, 1669 ath_print(common, ATH_DBG_FATAL,
1353 "Unable to initialize hardware; " 1670 "Unable to initialize hardware; "
1354 "initialization status: %d\n", r); 1671 "initialization status: %d\n", r);
1355 goto bad; 1672 goto bad_free_hw;
1673 }
1674
1675 if (ath9k_init_debug(ah) < 0) {
1676 ath_print(common, ATH_DBG_FATAL,
1677 "Unable to create debugfs files\n");
1678 goto bad_free_hw;
1356 } 1679 }
1357 1680
1358 /* Get the hardware key cache size. */ 1681 /* Get the hardware key cache size. */
1359 sc->keymax = ah->caps.keycache_size; 1682 sc->keymax = ah->caps.keycache_size;
1360 if (sc->keymax > ATH_KEYMAX) { 1683 if (sc->keymax > ATH_KEYMAX) {
1361 DPRINTF(sc, ATH_DBG_ANY, 1684 ath_print(common, ATH_DBG_ANY,
1362 "Warning, using only %u entries in %u key cache\n", 1685 "Warning, using only %u entries in %u key cache\n",
1363 ATH_KEYMAX, sc->keymax); 1686 ATH_KEYMAX, sc->keymax);
1364 sc->keymax = ATH_KEYMAX; 1687 sc->keymax = ATH_KEYMAX;
1365 } 1688 }
1366 1689
@@ -1386,17 +1709,17 @@ static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid)
1386 * priority. Note that the hal handles reseting 1709 * priority. Note that the hal handles reseting
1387 * these queues at the needed time. 1710 * these queues at the needed time.
1388 */ 1711 */
1389 sc->beacon.beaconq = ath_beaconq_setup(ah); 1712 sc->beacon.beaconq = ath9k_hw_beaconq_setup(ah);
1390 if (sc->beacon.beaconq == -1) { 1713 if (sc->beacon.beaconq == -1) {
1391 DPRINTF(sc, ATH_DBG_FATAL, 1714 ath_print(common, ATH_DBG_FATAL,
1392 "Unable to setup a beacon xmit queue\n"); 1715 "Unable to setup a beacon xmit queue\n");
1393 r = -EIO; 1716 r = -EIO;
1394 goto bad2; 1717 goto bad2;
1395 } 1718 }
1396 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0); 1719 sc->beacon.cabq = ath_txq_setup(sc, ATH9K_TX_QUEUE_CAB, 0);
1397 if (sc->beacon.cabq == NULL) { 1720 if (sc->beacon.cabq == NULL) {
1398 DPRINTF(sc, ATH_DBG_FATAL, 1721 ath_print(common, ATH_DBG_FATAL,
1399 "Unable to setup CAB xmit queue\n"); 1722 "Unable to setup CAB xmit queue\n");
1400 r = -EIO; 1723 r = -EIO;
1401 goto bad2; 1724 goto bad2;
1402 } 1725 }
@@ -1410,27 +1733,27 @@ static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid)
1410 /* Setup data queues */ 1733 /* Setup data queues */
1411 /* NB: ensure BK queue is the lowest priority h/w queue */ 1734 /* NB: ensure BK queue is the lowest priority h/w queue */
1412 if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) { 1735 if (!ath_tx_setup(sc, ATH9K_WME_AC_BK)) {
1413 DPRINTF(sc, ATH_DBG_FATAL, 1736 ath_print(common, ATH_DBG_FATAL,
1414 "Unable to setup xmit queue for BK traffic\n"); 1737 "Unable to setup xmit queue for BK traffic\n");
1415 r = -EIO; 1738 r = -EIO;
1416 goto bad2; 1739 goto bad2;
1417 } 1740 }
1418 1741
1419 if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) { 1742 if (!ath_tx_setup(sc, ATH9K_WME_AC_BE)) {
1420 DPRINTF(sc, ATH_DBG_FATAL, 1743 ath_print(common, ATH_DBG_FATAL,
1421 "Unable to setup xmit queue for BE traffic\n"); 1744 "Unable to setup xmit queue for BE traffic\n");
1422 r = -EIO; 1745 r = -EIO;
1423 goto bad2; 1746 goto bad2;
1424 } 1747 }
1425 if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) { 1748 if (!ath_tx_setup(sc, ATH9K_WME_AC_VI)) {
1426 DPRINTF(sc, ATH_DBG_FATAL, 1749 ath_print(common, ATH_DBG_FATAL,
1427 "Unable to setup xmit queue for VI traffic\n"); 1750 "Unable to setup xmit queue for VI traffic\n");
1428 r = -EIO; 1751 r = -EIO;
1429 goto bad2; 1752 goto bad2;
1430 } 1753 }
1431 if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) { 1754 if (!ath_tx_setup(sc, ATH9K_WME_AC_VO)) {
1432 DPRINTF(sc, ATH_DBG_FATAL, 1755 ath_print(common, ATH_DBG_FATAL,
1433 "Unable to setup xmit queue for VO traffic\n"); 1756 "Unable to setup xmit queue for VO traffic\n");
1434 r = -EIO; 1757 r = -EIO;
1435 goto bad2; 1758 goto bad2;
1436 } 1759 }
@@ -1480,14 +1803,14 @@ static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid)
1480 sc->sc_flags |= SC_OP_RXAGGR; 1803 sc->sc_flags |= SC_OP_RXAGGR;
1481 } 1804 }
1482 1805
1483 sc->tx_chainmask = ah->caps.tx_chainmask; 1806 common->tx_chainmask = ah->caps.tx_chainmask;
1484 sc->rx_chainmask = ah->caps.rx_chainmask; 1807 common->rx_chainmask = ah->caps.rx_chainmask;
1485 1808
1486 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL); 1809 ath9k_hw_setcapability(ah, ATH9K_CAP_DIVERSITY, 1, true, NULL);
1487 sc->rx.defant = ath9k_hw_getdefantenna(ah); 1810 sc->rx.defant = ath9k_hw_getdefantenna(ah);
1488 1811
1489 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 1812 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
1490 memcpy(sc->bssidmask, ath_bcast_mac, ETH_ALEN); 1813 memcpy(common->bssidmask, ath_bcast_mac, ETH_ALEN);
1491 1814
1492 sc->beacon.slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */ 1815 sc->beacon.slottime = ATH9K_SLOT_TIME_9; /* default to short slot time */
1493 1816
@@ -1515,10 +1838,24 @@ static int ath_init_softc(u16 devid, struct ath_softc *sc, u16 subsysid)
1515 ARRAY_SIZE(ath9k_5ghz_chantable); 1838 ARRAY_SIZE(ath9k_5ghz_chantable);
1516 } 1839 }
1517 1840
1518 if (sc->btcoex_info.btcoex_scheme != ATH_BTCOEX_CFG_NONE) { 1841 switch (ah->btcoex_hw.scheme) {
1519 r = ath9k_hw_btcoex_init(ah); 1842 case ATH_BTCOEX_CFG_NONE:
1843 break;
1844 case ATH_BTCOEX_CFG_2WIRE:
1845 ath9k_hw_btcoex_init_2wire(ah);
1846 break;
1847 case ATH_BTCOEX_CFG_3WIRE:
1848 ath9k_hw_btcoex_init_3wire(ah);
1849 r = ath_init_btcoex_timer(sc);
1520 if (r) 1850 if (r)
1521 goto bad2; 1851 goto bad2;
1852 qnum = ath_tx_get_qnum(sc, ATH9K_TX_QUEUE_DATA, ATH9K_WME_AC_BE);
1853 ath9k_hw_init_btcoex_hw(ah, qnum);
1854 sc->btcoex.bt_stomp_type = ATH_BTCOEX_STOMP_LOW;
1855 break;
1856 default:
1857 WARN_ON(1);
1858 break;
1522 } 1859 }
1523 1860
1524 return 0; 1861 return 0;
@@ -1527,12 +1864,9 @@ bad2:
1527 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++) 1864 for (i = 0; i < ATH9K_NUM_TX_QUEUES; i++)
1528 if (ATH_TXQ_SETUP(sc, i)) 1865 if (ATH_TXQ_SETUP(sc, i))
1529 ath_tx_cleanupq(sc, &sc->tx.txq[i]); 1866 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1530bad:
1531 ath9k_hw_detach(ah);
1532 sc->sc_ah = NULL;
1533bad_no_ah:
1534 ath9k_exit_debug(sc);
1535 1867
1868bad_free_hw:
1869 ath9k_uninit_hw(sc);
1536 return r; 1870 return r;
1537} 1871}
1538 1872
@@ -1574,34 +1908,40 @@ void ath_set_hw_capab(struct ath_softc *sc, struct ieee80211_hw *hw)
1574} 1908}
1575 1909
1576/* Device driver core initialization */ 1910/* Device driver core initialization */
1577int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid) 1911int ath_init_device(u16 devid, struct ath_softc *sc, u16 subsysid,
1912 const struct ath_bus_ops *bus_ops)
1578{ 1913{
1579 struct ieee80211_hw *hw = sc->hw; 1914 struct ieee80211_hw *hw = sc->hw;
1915 struct ath_common *common;
1916 struct ath_hw *ah;
1580 int error = 0, i; 1917 int error = 0, i;
1581 struct ath_regulatory *reg; 1918 struct ath_regulatory *reg;
1582 1919
1583 DPRINTF(sc, ATH_DBG_CONFIG, "Attach ATH hw\n"); 1920 dev_dbg(sc->dev, "Attach ATH hw\n");
1584 1921
1585 error = ath_init_softc(devid, sc, subsysid); 1922 error = ath_init_softc(devid, sc, subsysid, bus_ops);
1586 if (error != 0) 1923 if (error != 0)
1587 return error; 1924 return error;
1588 1925
1926 ah = sc->sc_ah;
1927 common = ath9k_hw_common(ah);
1928
1589 /* get mac address from hardware and set in mac80211 */ 1929 /* get mac address from hardware and set in mac80211 */
1590 1930
1591 SET_IEEE80211_PERM_ADDR(hw, sc->sc_ah->macaddr); 1931 SET_IEEE80211_PERM_ADDR(hw, common->macaddr);
1592 1932
1593 ath_set_hw_capab(sc, hw); 1933 ath_set_hw_capab(sc, hw);
1594 1934
1595 error = ath_regd_init(&sc->common.regulatory, sc->hw->wiphy, 1935 error = ath_regd_init(&common->regulatory, sc->hw->wiphy,
1596 ath9k_reg_notifier); 1936 ath9k_reg_notifier);
1597 if (error) 1937 if (error)
1598 return error; 1938 return error;
1599 1939
1600 reg = &sc->common.regulatory; 1940 reg = &common->regulatory;
1601 1941
1602 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) { 1942 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT) {
1603 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap); 1943 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_2GHZ].ht_cap);
1604 if (test_bit(ATH9K_MODE_11A, sc->sc_ah->caps.wireless_modes)) 1944 if (test_bit(ATH9K_MODE_11A, ah->caps.wireless_modes))
1605 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap); 1945 setup_ht_cap(sc, &sc->sbands[IEEE80211_BAND_5GHZ].ht_cap);
1606 } 1946 }
1607 1947
@@ -1639,9 +1979,7 @@ error_attach:
1639 if (ATH_TXQ_SETUP(sc, i)) 1979 if (ATH_TXQ_SETUP(sc, i))
1640 ath_tx_cleanupq(sc, &sc->tx.txq[i]); 1980 ath_tx_cleanupq(sc, &sc->tx.txq[i]);
1641 1981
1642 ath9k_hw_detach(sc->sc_ah); 1982 ath9k_uninit_hw(sc);
1643 sc->sc_ah = NULL;
1644 ath9k_exit_debug(sc);
1645 1983
1646 return error; 1984 return error;
1647} 1985}
@@ -1649,6 +1987,7 @@ error_attach:
1649int ath_reset(struct ath_softc *sc, bool retry_tx) 1987int ath_reset(struct ath_softc *sc, bool retry_tx)
1650{ 1988{
1651 struct ath_hw *ah = sc->sc_ah; 1989 struct ath_hw *ah = sc->sc_ah;
1990 struct ath_common *common = ath9k_hw_common(ah);
1652 struct ieee80211_hw *hw = sc->hw; 1991 struct ieee80211_hw *hw = sc->hw;
1653 int r; 1992 int r;
1654 1993
@@ -1660,12 +1999,13 @@ int ath_reset(struct ath_softc *sc, bool retry_tx)
1660 spin_lock_bh(&sc->sc_resetlock); 1999 spin_lock_bh(&sc->sc_resetlock);
1661 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false); 2000 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, false);
1662 if (r) 2001 if (r)
1663 DPRINTF(sc, ATH_DBG_FATAL, 2002 ath_print(common, ATH_DBG_FATAL,
1664 "Unable to reset hardware; reset status %d\n", r); 2003 "Unable to reset hardware; reset status %d\n", r);
1665 spin_unlock_bh(&sc->sc_resetlock); 2004 spin_unlock_bh(&sc->sc_resetlock);
1666 2005
1667 if (ath_startrecv(sc) != 0) 2006 if (ath_startrecv(sc) != 0)
1668 DPRINTF(sc, ATH_DBG_FATAL, "Unable to start recv logic\n"); 2007 ath_print(common, ATH_DBG_FATAL,
2008 "Unable to start recv logic\n");
1669 2009
1670 /* 2010 /*
1671 * We may be doing a reset in response to a request 2011 * We may be doing a reset in response to a request
@@ -1708,19 +2048,20 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
1708 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc)) 2048 ((_dd)->dd_desc_paddr + ((caddr_t)(_ds) - (caddr_t)(_dd)->dd_desc))
1709#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0) 2049#define ATH_DESC_4KB_BOUND_CHECK(_daddr) ((((_daddr) & 0xFFF) > 0xF7F) ? 1 : 0)
1710#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096) 2050#define ATH_DESC_4KB_BOUND_NUM_SKIPPED(_len) ((_len) / 4096)
1711 2051 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1712 struct ath_desc *ds; 2052 struct ath_desc *ds;
1713 struct ath_buf *bf; 2053 struct ath_buf *bf;
1714 int i, bsize, error; 2054 int i, bsize, error;
1715 2055
1716 DPRINTF(sc, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n", 2056 ath_print(common, ATH_DBG_CONFIG, "%s DMA: %u buffers %u desc/buf\n",
1717 name, nbuf, ndesc); 2057 name, nbuf, ndesc);
1718 2058
1719 INIT_LIST_HEAD(head); 2059 INIT_LIST_HEAD(head);
1720 /* ath_desc must be a multiple of DWORDs */ 2060 /* ath_desc must be a multiple of DWORDs */
1721 if ((sizeof(struct ath_desc) % 4) != 0) { 2061 if ((sizeof(struct ath_desc) % 4) != 0) {
1722 DPRINTF(sc, ATH_DBG_FATAL, "ath_desc not DWORD aligned\n"); 2062 ath_print(common, ATH_DBG_FATAL,
1723 ASSERT((sizeof(struct ath_desc) % 4) == 0); 2063 "ath_desc not DWORD aligned\n");
2064 BUG_ON((sizeof(struct ath_desc) % 4) != 0);
1724 error = -ENOMEM; 2065 error = -ENOMEM;
1725 goto fail; 2066 goto fail;
1726 } 2067 }
@@ -1753,9 +2094,9 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
1753 goto fail; 2094 goto fail;
1754 } 2095 }
1755 ds = dd->dd_desc; 2096 ds = dd->dd_desc;
1756 DPRINTF(sc, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n", 2097 ath_print(common, ATH_DBG_CONFIG, "%s DMA map: %p (%u) -> %llx (%u)\n",
1757 name, ds, (u32) dd->dd_desc_len, 2098 name, ds, (u32) dd->dd_desc_len,
1758 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len); 2099 ito64(dd->dd_desc_paddr), /*XXX*/(u32) dd->dd_desc_len);
1759 2100
1760 /* allocate buffers */ 2101 /* allocate buffers */
1761 bsize = sizeof(struct ath_buf) * nbuf; 2102 bsize = sizeof(struct ath_buf) * nbuf;
@@ -1778,7 +2119,7 @@ int ath_descdma_setup(struct ath_softc *sc, struct ath_descdma *dd,
1778 * descriptor fetch. 2119 * descriptor fetch.
1779 */ 2120 */
1780 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) { 2121 while (ATH_DESC_4KB_BOUND_CHECK(bf->bf_daddr)) {
1781 ASSERT((caddr_t) bf->bf_desc < 2122 BUG_ON((caddr_t) bf->bf_desc >=
1782 ((caddr_t) dd->dd_desc + 2123 ((caddr_t) dd->dd_desc +
1783 dd->dd_desc_len)); 2124 dd->dd_desc_len));
1784 2125
@@ -1882,31 +2223,50 @@ void ath9k_update_ichannel(struct ath_softc *sc, struct ieee80211_hw *hw,
1882 ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM; 2223 ichan->channelFlags = CHANNEL_5GHZ | CHANNEL_OFDM;
1883 } 2224 }
1884 2225
1885 sc->tx_chan_width = ATH9K_HT_MACMODE_20; 2226 if (conf_is_ht(conf))
1886
1887 if (conf_is_ht(conf)) {
1888 if (conf_is_ht40(conf))
1889 sc->tx_chan_width = ATH9K_HT_MACMODE_2040;
1890
1891 ichan->chanmode = ath_get_extchanmode(sc, chan, 2227 ichan->chanmode = ath_get_extchanmode(sc, chan,
1892 conf->channel_type); 2228 conf->channel_type);
1893 }
1894} 2229}
1895 2230
1896/**********************/ 2231/**********************/
1897/* mac80211 callbacks */ 2232/* mac80211 callbacks */
1898/**********************/ 2233/**********************/
1899 2234
2235/*
2236 * (Re)start btcoex timers
2237 */
2238static void ath9k_btcoex_timer_resume(struct ath_softc *sc)
2239{
2240 struct ath_btcoex *btcoex = &sc->btcoex;
2241 struct ath_hw *ah = sc->sc_ah;
2242
2243 ath_print(ath9k_hw_common(ah), ATH_DBG_BTCOEX,
2244 "Starting btcoex timers");
2245
2246 /* make sure duty cycle timer is also stopped when resuming */
2247 if (btcoex->hw_timer_enabled)
2248 ath9k_gen_timer_stop(sc->sc_ah, btcoex->no_stomp_timer);
2249
2250 btcoex->bt_priority_cnt = 0;
2251 btcoex->bt_priority_time = jiffies;
2252 sc->sc_flags &= ~SC_OP_BT_PRIORITY_DETECTED;
2253
2254 mod_timer(&btcoex->period_timer, jiffies);
2255}
2256
1900static int ath9k_start(struct ieee80211_hw *hw) 2257static int ath9k_start(struct ieee80211_hw *hw)
1901{ 2258{
1902 struct ath_wiphy *aphy = hw->priv; 2259 struct ath_wiphy *aphy = hw->priv;
1903 struct ath_softc *sc = aphy->sc; 2260 struct ath_softc *sc = aphy->sc;
2261 struct ath_hw *ah = sc->sc_ah;
2262 struct ath_common *common = ath9k_hw_common(ah);
1904 struct ieee80211_channel *curchan = hw->conf.channel; 2263 struct ieee80211_channel *curchan = hw->conf.channel;
1905 struct ath9k_channel *init_channel; 2264 struct ath9k_channel *init_channel;
1906 int r; 2265 int r;
1907 2266
1908 DPRINTF(sc, ATH_DBG_CONFIG, "Starting driver with " 2267 ath_print(common, ATH_DBG_CONFIG,
1909 "initial channel: %d MHz\n", curchan->center_freq); 2268 "Starting driver with initial channel: %d MHz\n",
2269 curchan->center_freq);
1910 2270
1911 mutex_lock(&sc->mutex); 2271 mutex_lock(&sc->mutex);
1912 2272
@@ -1938,7 +2298,7 @@ static int ath9k_start(struct ieee80211_hw *hw)
1938 init_channel = ath_get_curchannel(sc, hw); 2298 init_channel = ath_get_curchannel(sc, hw);
1939 2299
1940 /* Reset SERDES registers */ 2300 /* Reset SERDES registers */
1941 ath9k_hw_configpcipowersave(sc->sc_ah, 0, 0); 2301 ath9k_hw_configpcipowersave(ah, 0, 0);
1942 2302
1943 /* 2303 /*
1944 * The basic interface to setting the hardware in a good 2304 * The basic interface to setting the hardware in a good
@@ -1948,12 +2308,12 @@ static int ath9k_start(struct ieee80211_hw *hw)
1948 * and then setup of the interrupt mask. 2308 * and then setup of the interrupt mask.
1949 */ 2309 */
1950 spin_lock_bh(&sc->sc_resetlock); 2310 spin_lock_bh(&sc->sc_resetlock);
1951 r = ath9k_hw_reset(sc->sc_ah, init_channel, false); 2311 r = ath9k_hw_reset(ah, init_channel, false);
1952 if (r) { 2312 if (r) {
1953 DPRINTF(sc, ATH_DBG_FATAL, 2313 ath_print(common, ATH_DBG_FATAL,
1954 "Unable to reset hardware; reset status %d " 2314 "Unable to reset hardware; reset status %d "
1955 "(freq %u MHz)\n", r, 2315 "(freq %u MHz)\n", r,
1956 curchan->center_freq); 2316 curchan->center_freq);
1957 spin_unlock_bh(&sc->sc_resetlock); 2317 spin_unlock_bh(&sc->sc_resetlock);
1958 goto mutex_unlock; 2318 goto mutex_unlock;
1959 } 2319 }
@@ -1973,7 +2333,8 @@ static int ath9k_start(struct ieee80211_hw *hw)
1973 * here except setup the interrupt mask. 2333 * here except setup the interrupt mask.
1974 */ 2334 */
1975 if (ath_startrecv(sc) != 0) { 2335 if (ath_startrecv(sc) != 0) {
1976 DPRINTF(sc, ATH_DBG_FATAL, "Unable to start recv logic\n"); 2336 ath_print(common, ATH_DBG_FATAL,
2337 "Unable to start recv logic\n");
1977 r = -EIO; 2338 r = -EIO;
1978 goto mutex_unlock; 2339 goto mutex_unlock;
1979 } 2340 }
@@ -1983,10 +2344,10 @@ static int ath9k_start(struct ieee80211_hw *hw)
1983 | ATH9K_INT_RXEOL | ATH9K_INT_RXORN 2344 | ATH9K_INT_RXEOL | ATH9K_INT_RXORN
1984 | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL; 2345 | ATH9K_INT_FATAL | ATH9K_INT_GLOBAL;
1985 2346
1986 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_GTT) 2347 if (ah->caps.hw_caps & ATH9K_HW_CAP_GTT)
1987 sc->imask |= ATH9K_INT_GTT; 2348 sc->imask |= ATH9K_INT_GTT;
1988 2349
1989 if (sc->sc_ah->caps.hw_caps & ATH9K_HW_CAP_HT) 2350 if (ah->caps.hw_caps & ATH9K_HW_CAP_HT)
1990 sc->imask |= ATH9K_INT_CST; 2351 sc->imask |= ATH9K_INT_CST;
1991 2352
1992 ath_cache_conf_rate(sc, &hw->conf); 2353 ath_cache_conf_rate(sc, &hw->conf);
@@ -1995,21 +2356,22 @@ static int ath9k_start(struct ieee80211_hw *hw)
1995 2356
1996 /* Disable BMISS interrupt when we're not associated */ 2357 /* Disable BMISS interrupt when we're not associated */
1997 sc->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS); 2358 sc->imask &= ~(ATH9K_INT_SWBA | ATH9K_INT_BMISS);
1998 ath9k_hw_set_interrupts(sc->sc_ah, sc->imask); 2359 ath9k_hw_set_interrupts(ah, sc->imask);
1999 2360
2000 ieee80211_wake_queues(hw); 2361 ieee80211_wake_queues(hw);
2001 2362
2002 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0); 2363 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 0);
2003 2364
2004 if ((sc->btcoex_info.btcoex_scheme != ATH_BTCOEX_CFG_NONE) && 2365 if ((ah->btcoex_hw.scheme != ATH_BTCOEX_CFG_NONE) &&
2005 !(sc->sc_flags & SC_OP_BTCOEX_ENABLED)) { 2366 !ah->btcoex_hw.enabled) {
2006 ath_btcoex_set_weight(&sc->btcoex_info, AR_BT_COEX_WGHT, 2367 ath9k_hw_btcoex_set_weight(ah, AR_BT_COEX_WGHT,
2007 AR_STOMP_LOW_WLAN_WGHT); 2368 AR_STOMP_LOW_WLAN_WGHT);
2008 ath9k_hw_btcoex_enable(sc->sc_ah); 2369 ath9k_hw_btcoex_enable(ah);
2009 2370
2010 ath_pcie_aspm_disable(sc); 2371 if (common->bus_ops->bt_coex_prep)
2011 if (sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE) 2372 common->bus_ops->bt_coex_prep(common);
2012 ath_btcoex_timer_resume(sc, &sc->btcoex_info); 2373 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
2374 ath9k_btcoex_timer_resume(sc);
2013 } 2375 }
2014 2376
2015mutex_unlock: 2377mutex_unlock:
@@ -2024,12 +2386,14 @@ static int ath9k_tx(struct ieee80211_hw *hw,
2024 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 2386 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2025 struct ath_wiphy *aphy = hw->priv; 2387 struct ath_wiphy *aphy = hw->priv;
2026 struct ath_softc *sc = aphy->sc; 2388 struct ath_softc *sc = aphy->sc;
2389 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2027 struct ath_tx_control txctl; 2390 struct ath_tx_control txctl;
2028 int hdrlen, padsize; 2391 int hdrlen, padsize;
2029 2392
2030 if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) { 2393 if (aphy->state != ATH_WIPHY_ACTIVE && aphy->state != ATH_WIPHY_SCAN) {
2031 printk(KERN_DEBUG "ath9k: %s: TX in unexpected wiphy state " 2394 ath_print(common, ATH_DBG_XMIT,
2032 "%d\n", wiphy_name(hw->wiphy), aphy->state); 2395 "ath9k: %s: TX in unexpected wiphy state "
2396 "%d\n", wiphy_name(hw->wiphy), aphy->state);
2033 goto exit; 2397 goto exit;
2034 } 2398 }
2035 2399
@@ -2042,8 +2406,8 @@ static int ath9k_tx(struct ieee80211_hw *hw,
2042 if (ieee80211_is_data(hdr->frame_control) && 2406 if (ieee80211_is_data(hdr->frame_control) &&
2043 !ieee80211_is_nullfunc(hdr->frame_control) && 2407 !ieee80211_is_nullfunc(hdr->frame_control) &&
2044 !ieee80211_has_pm(hdr->frame_control)) { 2408 !ieee80211_has_pm(hdr->frame_control)) {
2045 DPRINTF(sc, ATH_DBG_PS, "Add PM=1 for a TX frame " 2409 ath_print(common, ATH_DBG_PS, "Add PM=1 for a TX frame "
2046 "while in PS mode\n"); 2410 "while in PS mode\n");
2047 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM); 2411 hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_PM);
2048 } 2412 }
2049 } 2413 }
@@ -2058,11 +2422,12 @@ static int ath9k_tx(struct ieee80211_hw *hw,
2058 ath9k_ps_wakeup(sc); 2422 ath9k_ps_wakeup(sc);
2059 ath9k_hw_setrxabort(sc->sc_ah, 0); 2423 ath9k_hw_setrxabort(sc->sc_ah, 0);
2060 if (ieee80211_is_pspoll(hdr->frame_control)) { 2424 if (ieee80211_is_pspoll(hdr->frame_control)) {
2061 DPRINTF(sc, ATH_DBG_PS, "Sending PS-Poll to pick a " 2425 ath_print(common, ATH_DBG_PS,
2062 "buffered frame\n"); 2426 "Sending PS-Poll to pick a buffered frame\n");
2063 sc->sc_flags |= SC_OP_WAIT_FOR_PSPOLL_DATA; 2427 sc->sc_flags |= SC_OP_WAIT_FOR_PSPOLL_DATA;
2064 } else { 2428 } else {
2065 DPRINTF(sc, ATH_DBG_PS, "Wake up to complete TX\n"); 2429 ath_print(common, ATH_DBG_PS,
2430 "Wake up to complete TX\n");
2066 sc->sc_flags |= SC_OP_WAIT_FOR_TX_ACK; 2431 sc->sc_flags |= SC_OP_WAIT_FOR_TX_ACK;
2067 } 2432 }
2068 /* 2433 /*
@@ -2104,10 +2469,10 @@ static int ath9k_tx(struct ieee80211_hw *hw,
2104 if (!txctl.txq) 2469 if (!txctl.txq)
2105 goto exit; 2470 goto exit;
2106 2471
2107 DPRINTF(sc, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb); 2472 ath_print(common, ATH_DBG_XMIT, "transmitting packet, skb: %p\n", skb);
2108 2473
2109 if (ath_tx_start(hw, skb, &txctl) != 0) { 2474 if (ath_tx_start(hw, skb, &txctl) != 0) {
2110 DPRINTF(sc, ATH_DBG_XMIT, "TX failed\n"); 2475 ath_print(common, ATH_DBG_XMIT, "TX failed\n");
2111 goto exit; 2476 goto exit;
2112 } 2477 }
2113 2478
@@ -2117,10 +2482,28 @@ exit:
2117 return 0; 2482 return 0;
2118} 2483}
2119 2484
2485/*
2486 * Pause btcoex timer and bt duty cycle timer
2487 */
2488static void ath9k_btcoex_timer_pause(struct ath_softc *sc)
2489{
2490 struct ath_btcoex *btcoex = &sc->btcoex;
2491 struct ath_hw *ah = sc->sc_ah;
2492
2493 del_timer_sync(&btcoex->period_timer);
2494
2495 if (btcoex->hw_timer_enabled)
2496 ath9k_gen_timer_stop(ah, btcoex->no_stomp_timer);
2497
2498 btcoex->hw_timer_enabled = false;
2499}
2500
2120static void ath9k_stop(struct ieee80211_hw *hw) 2501static void ath9k_stop(struct ieee80211_hw *hw)
2121{ 2502{
2122 struct ath_wiphy *aphy = hw->priv; 2503 struct ath_wiphy *aphy = hw->priv;
2123 struct ath_softc *sc = aphy->sc; 2504 struct ath_softc *sc = aphy->sc;
2505 struct ath_hw *ah = sc->sc_ah;
2506 struct ath_common *common = ath9k_hw_common(ah);
2124 2507
2125 mutex_lock(&sc->mutex); 2508 mutex_lock(&sc->mutex);
2126 2509
@@ -2135,7 +2518,7 @@ static void ath9k_stop(struct ieee80211_hw *hw)
2135 } 2518 }
2136 2519
2137 if (sc->sc_flags & SC_OP_INVALID) { 2520 if (sc->sc_flags & SC_OP_INVALID) {
2138 DPRINTF(sc, ATH_DBG_ANY, "Device not present\n"); 2521 ath_print(common, ATH_DBG_ANY, "Device not present\n");
2139 mutex_unlock(&sc->mutex); 2522 mutex_unlock(&sc->mutex);
2140 return; 2523 return;
2141 } 2524 }
@@ -2145,33 +2528,33 @@ static void ath9k_stop(struct ieee80211_hw *hw)
2145 return; /* another wiphy still in use */ 2528 return; /* another wiphy still in use */
2146 } 2529 }
2147 2530
2148 if (sc->sc_flags & SC_OP_BTCOEX_ENABLED) { 2531 if (ah->btcoex_hw.enabled) {
2149 ath9k_hw_btcoex_disable(sc->sc_ah); 2532 ath9k_hw_btcoex_disable(ah);
2150 if (sc->btcoex_info.btcoex_scheme == ATH_BTCOEX_CFG_3WIRE) 2533 if (ah->btcoex_hw.scheme == ATH_BTCOEX_CFG_3WIRE)
2151 ath_btcoex_timer_pause(sc, &sc->btcoex_info); 2534 ath9k_btcoex_timer_pause(sc);
2152 } 2535 }
2153 2536
2154 /* make sure h/w will not generate any interrupt 2537 /* make sure h/w will not generate any interrupt
2155 * before setting the invalid flag. */ 2538 * before setting the invalid flag. */
2156 ath9k_hw_set_interrupts(sc->sc_ah, 0); 2539 ath9k_hw_set_interrupts(ah, 0);
2157 2540
2158 if (!(sc->sc_flags & SC_OP_INVALID)) { 2541 if (!(sc->sc_flags & SC_OP_INVALID)) {
2159 ath_drain_all_txq(sc, false); 2542 ath_drain_all_txq(sc, false);
2160 ath_stoprecv(sc); 2543 ath_stoprecv(sc);
2161 ath9k_hw_phy_disable(sc->sc_ah); 2544 ath9k_hw_phy_disable(ah);
2162 } else 2545 } else
2163 sc->rx.rxlink = NULL; 2546 sc->rx.rxlink = NULL;
2164 2547
2165 /* disable HAL and put h/w to sleep */ 2548 /* disable HAL and put h/w to sleep */
2166 ath9k_hw_disable(sc->sc_ah); 2549 ath9k_hw_disable(ah);
2167 ath9k_hw_configpcipowersave(sc->sc_ah, 1, 1); 2550 ath9k_hw_configpcipowersave(ah, 1, 1);
2168 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_FULL_SLEEP); 2551 ath9k_setpower(sc, ATH9K_PM_FULL_SLEEP);
2169 2552
2170 sc->sc_flags |= SC_OP_INVALID; 2553 sc->sc_flags |= SC_OP_INVALID;
2171 2554
2172 mutex_unlock(&sc->mutex); 2555 mutex_unlock(&sc->mutex);
2173 2556
2174 DPRINTF(sc, ATH_DBG_CONFIG, "Driver halt\n"); 2557 ath_print(common, ATH_DBG_CONFIG, "Driver halt\n");
2175} 2558}
2176 2559
2177static int ath9k_add_interface(struct ieee80211_hw *hw, 2560static int ath9k_add_interface(struct ieee80211_hw *hw,
@@ -2179,6 +2562,7 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
2179{ 2562{
2180 struct ath_wiphy *aphy = hw->priv; 2563 struct ath_wiphy *aphy = hw->priv;
2181 struct ath_softc *sc = aphy->sc; 2564 struct ath_softc *sc = aphy->sc;
2565 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2182 struct ath_vif *avp = (void *)conf->vif->drv_priv; 2566 struct ath_vif *avp = (void *)conf->vif->drv_priv;
2183 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED; 2567 enum nl80211_iftype ic_opmode = NL80211_IFTYPE_UNSPECIFIED;
2184 int ret = 0; 2568 int ret = 0;
@@ -2205,13 +2589,14 @@ static int ath9k_add_interface(struct ieee80211_hw *hw,
2205 ic_opmode = conf->type; 2589 ic_opmode = conf->type;
2206 break; 2590 break;
2207 default: 2591 default:
2208 DPRINTF(sc, ATH_DBG_FATAL, 2592 ath_print(common, ATH_DBG_FATAL,
2209 "Interface type %d not yet supported\n", conf->type); 2593 "Interface type %d not yet supported\n", conf->type);
2210 ret = -EOPNOTSUPP; 2594 ret = -EOPNOTSUPP;
2211 goto out; 2595 goto out;
2212 } 2596 }
2213 2597
2214 DPRINTF(sc, ATH_DBG_CONFIG, "Attach a VIF of type: %d\n", ic_opmode); 2598 ath_print(common, ATH_DBG_CONFIG,
2599 "Attach a VIF of type: %d\n", ic_opmode);
2215 2600
2216 /* Set the VIF opmode */ 2601 /* Set the VIF opmode */
2217 avp->av_opmode = ic_opmode; 2602 avp->av_opmode = ic_opmode;
@@ -2261,10 +2646,11 @@ static void ath9k_remove_interface(struct ieee80211_hw *hw,
2261{ 2646{
2262 struct ath_wiphy *aphy = hw->priv; 2647 struct ath_wiphy *aphy = hw->priv;
2263 struct ath_softc *sc = aphy->sc; 2648 struct ath_softc *sc = aphy->sc;
2649 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2264 struct ath_vif *avp = (void *)conf->vif->drv_priv; 2650 struct ath_vif *avp = (void *)conf->vif->drv_priv;
2265 int i; 2651 int i;
2266 2652
2267 DPRINTF(sc, ATH_DBG_CONFIG, "Detach Interface\n"); 2653 ath_print(common, ATH_DBG_CONFIG, "Detach Interface\n");
2268 2654
2269 mutex_lock(&sc->mutex); 2655 mutex_lock(&sc->mutex);
2270 2656
@@ -2299,6 +2685,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2299{ 2685{
2300 struct ath_wiphy *aphy = hw->priv; 2686 struct ath_wiphy *aphy = hw->priv;
2301 struct ath_softc *sc = aphy->sc; 2687 struct ath_softc *sc = aphy->sc;
2688 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2302 struct ieee80211_conf *conf = &hw->conf; 2689 struct ieee80211_conf *conf = &hw->conf;
2303 struct ath_hw *ah = sc->sc_ah; 2690 struct ath_hw *ah = sc->sc_ah;
2304 bool all_wiphys_idle = false, disable_radio = false; 2691 bool all_wiphys_idle = false, disable_radio = false;
@@ -2318,8 +2705,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2318 } 2705 }
2319 else if (all_wiphys_idle) { 2706 else if (all_wiphys_idle) {
2320 ath_radio_enable(sc); 2707 ath_radio_enable(sc);
2321 DPRINTF(sc, ATH_DBG_CONFIG, 2708 ath_print(common, ATH_DBG_CONFIG,
2322 "not-idle: enabling radio\n"); 2709 "not-idle: enabling radio\n");
2323 } 2710 }
2324 } 2711 }
2325 2712
@@ -2337,7 +2724,7 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2337 sc->ps_enabled = true; 2724 sc->ps_enabled = true;
2338 } else { 2725 } else {
2339 sc->ps_enabled = false; 2726 sc->ps_enabled = false;
2340 ath9k_hw_setpower(sc->sc_ah, ATH9K_PM_AWAKE); 2727 ath9k_setpower(sc, ATH9K_PM_AWAKE);
2341 if (!(ah->caps.hw_caps & 2728 if (!(ah->caps.hw_caps &
2342 ATH9K_HW_CAP_AUTOSLEEP)) { 2729 ATH9K_HW_CAP_AUTOSLEEP)) {
2343 ath9k_hw_setrxabort(sc->sc_ah, 0); 2730 ath9k_hw_setrxabort(sc->sc_ah, 0);
@@ -2372,8 +2759,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2372 goto skip_chan_change; 2759 goto skip_chan_change;
2373 } 2760 }
2374 2761
2375 DPRINTF(sc, ATH_DBG_CONFIG, "Set channel: %d MHz\n", 2762 ath_print(common, ATH_DBG_CONFIG, "Set channel: %d MHz\n",
2376 curchan->center_freq); 2763 curchan->center_freq);
2377 2764
2378 /* XXX: remove me eventualy */ 2765 /* XXX: remove me eventualy */
2379 ath9k_update_ichannel(sc, hw, &sc->sc_ah->channels[pos]); 2766 ath9k_update_ichannel(sc, hw, &sc->sc_ah->channels[pos]);
@@ -2381,7 +2768,8 @@ static int ath9k_config(struct ieee80211_hw *hw, u32 changed)
2381 ath_update_chainmask(sc, conf_is_ht(conf)); 2768 ath_update_chainmask(sc, conf_is_ht(conf));
2382 2769
2383 if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) { 2770 if (ath_set_channel(sc, hw, &sc->sc_ah->channels[pos]) < 0) {
2384 DPRINTF(sc, ATH_DBG_FATAL, "Unable to set channel\n"); 2771 ath_print(common, ATH_DBG_FATAL,
2772 "Unable to set channel\n");
2385 mutex_unlock(&sc->mutex); 2773 mutex_unlock(&sc->mutex);
2386 return -EINVAL; 2774 return -EINVAL;
2387 } 2775 }
@@ -2392,7 +2780,7 @@ skip_chan_change:
2392 sc->config.txpowlimit = 2 * conf->power_level; 2780 sc->config.txpowlimit = 2 * conf->power_level;
2393 2781
2394 if (disable_radio) { 2782 if (disable_radio) {
2395 DPRINTF(sc, ATH_DBG_CONFIG, "idle: disabling radio\n"); 2783 ath_print(common, ATH_DBG_CONFIG, "idle: disabling radio\n");
2396 ath_radio_disable(sc); 2784 ath_radio_disable(sc);
2397 } 2785 }
2398 2786
@@ -2429,7 +2817,8 @@ static void ath9k_configure_filter(struct ieee80211_hw *hw,
2429 ath9k_hw_setrxfilter(sc->sc_ah, rfilt); 2817 ath9k_hw_setrxfilter(sc->sc_ah, rfilt);
2430 ath9k_ps_restore(sc); 2818 ath9k_ps_restore(sc);
2431 2819
2432 DPRINTF(sc, ATH_DBG_CONFIG, "Set HW RX filter: 0x%x\n", rfilt); 2820 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
2821 "Set HW RX filter: 0x%x\n", rfilt);
2433} 2822}
2434 2823
2435static void ath9k_sta_notify(struct ieee80211_hw *hw, 2824static void ath9k_sta_notify(struct ieee80211_hw *hw,
@@ -2457,6 +2846,7 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
2457{ 2846{
2458 struct ath_wiphy *aphy = hw->priv; 2847 struct ath_wiphy *aphy = hw->priv;
2459 struct ath_softc *sc = aphy->sc; 2848 struct ath_softc *sc = aphy->sc;
2849 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2460 struct ath9k_tx_queue_info qi; 2850 struct ath9k_tx_queue_info qi;
2461 int ret = 0, qnum; 2851 int ret = 0, qnum;
2462 2852
@@ -2473,15 +2863,15 @@ static int ath9k_conf_tx(struct ieee80211_hw *hw, u16 queue,
2473 qi.tqi_burstTime = params->txop; 2863 qi.tqi_burstTime = params->txop;
2474 qnum = ath_get_hal_qnum(queue, sc); 2864 qnum = ath_get_hal_qnum(queue, sc);
2475 2865
2476 DPRINTF(sc, ATH_DBG_CONFIG, 2866 ath_print(common, ATH_DBG_CONFIG,
2477 "Configure tx [queue/halq] [%d/%d], " 2867 "Configure tx [queue/halq] [%d/%d], "
2478 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n", 2868 "aifs: %d, cw_min: %d, cw_max: %d, txop: %d\n",
2479 queue, qnum, params->aifs, params->cw_min, 2869 queue, qnum, params->aifs, params->cw_min,
2480 params->cw_max, params->txop); 2870 params->cw_max, params->txop);
2481 2871
2482 ret = ath_txq_update(sc, qnum, &qi); 2872 ret = ath_txq_update(sc, qnum, &qi);
2483 if (ret) 2873 if (ret)
2484 DPRINTF(sc, ATH_DBG_FATAL, "TXQ Update failed\n"); 2874 ath_print(common, ATH_DBG_FATAL, "TXQ Update failed\n");
2485 2875
2486 mutex_unlock(&sc->mutex); 2876 mutex_unlock(&sc->mutex);
2487 2877
@@ -2496,6 +2886,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
2496{ 2886{
2497 struct ath_wiphy *aphy = hw->priv; 2887 struct ath_wiphy *aphy = hw->priv;
2498 struct ath_softc *sc = aphy->sc; 2888 struct ath_softc *sc = aphy->sc;
2889 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2499 int ret = 0; 2890 int ret = 0;
2500 2891
2501 if (modparam_nohwcrypt) 2892 if (modparam_nohwcrypt)
@@ -2503,7 +2894,7 @@ static int ath9k_set_key(struct ieee80211_hw *hw,
2503 2894
2504 mutex_lock(&sc->mutex); 2895 mutex_lock(&sc->mutex);
2505 ath9k_ps_wakeup(sc); 2896 ath9k_ps_wakeup(sc);
2506 DPRINTF(sc, ATH_DBG_CONFIG, "Set HW Key\n"); 2897 ath_print(common, ATH_DBG_CONFIG, "Set HW Key\n");
2507 2898
2508 switch (cmd) { 2899 switch (cmd) {
2509 case SET_KEY: 2900 case SET_KEY:
@@ -2540,6 +2931,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2540 struct ath_wiphy *aphy = hw->priv; 2931 struct ath_wiphy *aphy = hw->priv;
2541 struct ath_softc *sc = aphy->sc; 2932 struct ath_softc *sc = aphy->sc;
2542 struct ath_hw *ah = sc->sc_ah; 2933 struct ath_hw *ah = sc->sc_ah;
2934 struct ath_common *common = ath9k_hw_common(ah);
2543 struct ath_vif *avp = (void *)vif->drv_priv; 2935 struct ath_vif *avp = (void *)vif->drv_priv;
2544 u32 rfilt = 0; 2936 u32 rfilt = 0;
2545 int error, i; 2937 int error, i;
@@ -2555,9 +2947,9 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2555 ah->opmode != NL80211_IFTYPE_AP) { 2947 ah->opmode != NL80211_IFTYPE_AP) {
2556 ah->opmode = NL80211_IFTYPE_STATION; 2948 ah->opmode = NL80211_IFTYPE_STATION;
2557 ath9k_hw_setopmode(ah); 2949 ath9k_hw_setopmode(ah);
2558 memcpy(sc->curbssid, sc->sc_ah->macaddr, ETH_ALEN); 2950 memcpy(common->curbssid, common->macaddr, ETH_ALEN);
2559 sc->curaid = 0; 2951 common->curaid = 0;
2560 ath9k_hw_write_associd(sc); 2952 ath9k_hw_write_associd(ah);
2561 /* Request full reset to get hw opmode changed properly */ 2953 /* Request full reset to get hw opmode changed properly */
2562 sc->sc_flags |= SC_OP_FULL_RESET; 2954 sc->sc_flags |= SC_OP_FULL_RESET;
2563 } 2955 }
@@ -2569,17 +2961,17 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2569 case NL80211_IFTYPE_ADHOC: 2961 case NL80211_IFTYPE_ADHOC:
2570 case NL80211_IFTYPE_MESH_POINT: 2962 case NL80211_IFTYPE_MESH_POINT:
2571 /* Set BSSID */ 2963 /* Set BSSID */
2572 memcpy(sc->curbssid, bss_conf->bssid, ETH_ALEN); 2964 memcpy(common->curbssid, bss_conf->bssid, ETH_ALEN);
2573 memcpy(avp->bssid, bss_conf->bssid, ETH_ALEN); 2965 memcpy(avp->bssid, bss_conf->bssid, ETH_ALEN);
2574 sc->curaid = 0; 2966 common->curaid = 0;
2575 ath9k_hw_write_associd(sc); 2967 ath9k_hw_write_associd(ah);
2576 2968
2577 /* Set aggregation protection mode parameters */ 2969 /* Set aggregation protection mode parameters */
2578 sc->config.ath_aggr_prot = 0; 2970 sc->config.ath_aggr_prot = 0;
2579 2971
2580 DPRINTF(sc, ATH_DBG_CONFIG, 2972 ath_print(common, ATH_DBG_CONFIG,
2581 "RX filter 0x%x bssid %pM aid 0x%x\n", 2973 "RX filter 0x%x bssid %pM aid 0x%x\n",
2582 rfilt, sc->curbssid, sc->curaid); 2974 rfilt, common->curbssid, common->curaid);
2583 2975
2584 /* need to reconfigure the beacon */ 2976 /* need to reconfigure the beacon */
2585 sc->sc_flags &= ~SC_OP_BEACONS ; 2977 sc->sc_flags &= ~SC_OP_BEACONS ;
@@ -2618,7 +3010,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2618 if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i)) 3010 if (ath9k_hw_keyisvalid(sc->sc_ah, (u16)i))
2619 ath9k_hw_keysetmac(sc->sc_ah, 3011 ath9k_hw_keysetmac(sc->sc_ah,
2620 (u16)i, 3012 (u16)i,
2621 sc->curbssid); 3013 common->curbssid);
2622 } 3014 }
2623 3015
2624 /* Only legacy IBSS for now */ 3016 /* Only legacy IBSS for now */
@@ -2626,8 +3018,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2626 ath_update_chainmask(sc, 0); 3018 ath_update_chainmask(sc, 0);
2627 3019
2628 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 3020 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
2629 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n", 3021 ath_print(common, ATH_DBG_CONFIG, "BSS Changed PREAMBLE %d\n",
2630 bss_conf->use_short_preamble); 3022 bss_conf->use_short_preamble);
2631 if (bss_conf->use_short_preamble) 3023 if (bss_conf->use_short_preamble)
2632 sc->sc_flags |= SC_OP_PREAMBLE_SHORT; 3024 sc->sc_flags |= SC_OP_PREAMBLE_SHORT;
2633 else 3025 else
@@ -2635,8 +3027,8 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2635 } 3027 }
2636 3028
2637 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 3029 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
2638 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n", 3030 ath_print(common, ATH_DBG_CONFIG, "BSS Changed CTS PROT %d\n",
2639 bss_conf->use_cts_prot); 3031 bss_conf->use_cts_prot);
2640 if (bss_conf->use_cts_prot && 3032 if (bss_conf->use_cts_prot &&
2641 hw->conf.channel->band != IEEE80211_BAND_5GHZ) 3033 hw->conf.channel->band != IEEE80211_BAND_5GHZ)
2642 sc->sc_flags |= SC_OP_PROTECT_ENABLE; 3034 sc->sc_flags |= SC_OP_PROTECT_ENABLE;
@@ -2645,7 +3037,7 @@ static void ath9k_bss_info_changed(struct ieee80211_hw *hw,
2645 } 3037 }
2646 3038
2647 if (changed & BSS_CHANGED_ASSOC) { 3039 if (changed & BSS_CHANGED_ASSOC) {
2648 DPRINTF(sc, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n", 3040 ath_print(common, ATH_DBG_CONFIG, "BSS Changed ASSOC %d\n",
2649 bss_conf->assoc); 3041 bss_conf->assoc);
2650 ath9k_bss_assoc_info(sc, vif, bss_conf); 3042 ath9k_bss_assoc_info(sc, vif, bss_conf);
2651 } 3043 }
@@ -2694,7 +3086,11 @@ static void ath9k_reset_tsf(struct ieee80211_hw *hw)
2694 struct ath_softc *sc = aphy->sc; 3086 struct ath_softc *sc = aphy->sc;
2695 3087
2696 mutex_lock(&sc->mutex); 3088 mutex_lock(&sc->mutex);
3089
3090 ath9k_ps_wakeup(sc);
2697 ath9k_hw_reset_tsf(sc->sc_ah); 3091 ath9k_hw_reset_tsf(sc->sc_ah);
3092 ath9k_ps_restore(sc);
3093
2698 mutex_unlock(&sc->mutex); 3094 mutex_unlock(&sc->mutex);
2699} 3095}
2700 3096
@@ -2726,7 +3122,8 @@ static int ath9k_ampdu_action(struct ieee80211_hw *hw,
2726 ath_tx_aggr_resume(sc, sta, tid); 3122 ath_tx_aggr_resume(sc, sta, tid);
2727 break; 3123 break;
2728 default: 3124 default:
2729 DPRINTF(sc, ATH_DBG_FATAL, "Unknown AMPDU action\n"); 3125 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
3126 "Unknown AMPDU action\n");
2730 } 3127 }
2731 3128
2732 return ret; 3129 return ret;
@@ -2794,64 +3191,6 @@ struct ieee80211_ops ath9k_ops = {
2794 .rfkill_poll = ath9k_rfkill_poll_state, 3191 .rfkill_poll = ath9k_rfkill_poll_state,
2795}; 3192};
2796 3193
2797static struct {
2798 u32 version;
2799 const char * name;
2800} ath_mac_bb_names[] = {
2801 { AR_SREV_VERSION_5416_PCI, "5416" },
2802 { AR_SREV_VERSION_5416_PCIE, "5418" },
2803 { AR_SREV_VERSION_9100, "9100" },
2804 { AR_SREV_VERSION_9160, "9160" },
2805 { AR_SREV_VERSION_9280, "9280" },
2806 { AR_SREV_VERSION_9285, "9285" },
2807 { AR_SREV_VERSION_9287, "9287" }
2808};
2809
2810static struct {
2811 u16 version;
2812 const char * name;
2813} ath_rf_names[] = {
2814 { 0, "5133" },
2815 { AR_RAD5133_SREV_MAJOR, "5133" },
2816 { AR_RAD5122_SREV_MAJOR, "5122" },
2817 { AR_RAD2133_SREV_MAJOR, "2133" },
2818 { AR_RAD2122_SREV_MAJOR, "2122" }
2819};
2820
2821/*
2822 * Return the MAC/BB name. "????" is returned if the MAC/BB is unknown.
2823 */
2824const char *
2825ath_mac_bb_name(u32 mac_bb_version)
2826{
2827 int i;
2828
2829 for (i=0; i<ARRAY_SIZE(ath_mac_bb_names); i++) {
2830 if (ath_mac_bb_names[i].version == mac_bb_version) {
2831 return ath_mac_bb_names[i].name;
2832 }
2833 }
2834
2835 return "????";
2836}
2837
2838/*
2839 * Return the RF name. "????" is returned if the RF is unknown.
2840 */
2841const char *
2842ath_rf_name(u16 rf_version)
2843{
2844 int i;
2845
2846 for (i=0; i<ARRAY_SIZE(ath_rf_names); i++) {
2847 if (ath_rf_names[i].version == rf_version) {
2848 return ath_rf_names[i].name;
2849 }
2850 }
2851
2852 return "????";
2853}
2854
2855static int __init ath9k_init(void) 3194static int __init ath9k_init(void)
2856{ 3195{
2857 int error; 3196 int error;
diff --git a/drivers/net/wireless/ath/ath9k/pci.c b/drivers/net/wireless/ath/ath9k/pci.c
index 903dd8ad9d43..5321f735e5a0 100644
--- a/drivers/net/wireless/ath/ath9k/pci.c
+++ b/drivers/net/wireless/ath/ath9k/pci.c
@@ -31,8 +31,9 @@ static struct pci_device_id ath_pci_id_table[] __devinitdata = {
31}; 31};
32 32
33/* return bus cachesize in 4B word units */ 33/* return bus cachesize in 4B word units */
34static void ath_pci_read_cachesize(struct ath_softc *sc, int *csz) 34static void ath_pci_read_cachesize(struct ath_common *common, int *csz)
35{ 35{
36 struct ath_softc *sc = (struct ath_softc *) common->priv;
36 u8 u8tmp; 37 u8 u8tmp;
37 38
38 pci_read_config_byte(to_pci_dev(sc->dev), PCI_CACHE_LINE_SIZE, &u8tmp); 39 pci_read_config_byte(to_pci_dev(sc->dev), PCI_CACHE_LINE_SIZE, &u8tmp);
@@ -48,8 +49,9 @@ static void ath_pci_read_cachesize(struct ath_softc *sc, int *csz)
48 *csz = DEFAULT_CACHELINE >> 2; /* Use the default size */ 49 *csz = DEFAULT_CACHELINE >> 2; /* Use the default size */
49} 50}
50 51
51static void ath_pci_cleanup(struct ath_softc *sc) 52static void ath_pci_cleanup(struct ath_common *common)
52{ 53{
54 struct ath_softc *sc = (struct ath_softc *) common->priv;
53 struct pci_dev *pdev = to_pci_dev(sc->dev); 55 struct pci_dev *pdev = to_pci_dev(sc->dev);
54 56
55 pci_iounmap(pdev, sc->mem); 57 pci_iounmap(pdev, sc->mem);
@@ -57,9 +59,11 @@ static void ath_pci_cleanup(struct ath_softc *sc)
57 pci_release_region(pdev, 0); 59 pci_release_region(pdev, 0);
58} 60}
59 61
60static bool ath_pci_eeprom_read(struct ath_hw *ah, u32 off, u16 *data) 62static bool ath_pci_eeprom_read(struct ath_common *common, u32 off, u16 *data)
61{ 63{
62 (void)REG_READ(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S)); 64 struct ath_hw *ah = (struct ath_hw *) common->ah;
65
66 common->ops->read(ah, AR5416_EEPROM_OFFSET + (off << AR5416_EEPROM_S));
63 67
64 if (!ath9k_hw_wait(ah, 68 if (!ath9k_hw_wait(ah,
65 AR_EEPROM_STATUS_DATA, 69 AR_EEPROM_STATUS_DATA,
@@ -69,16 +73,34 @@ static bool ath_pci_eeprom_read(struct ath_hw *ah, u32 off, u16 *data)
69 return false; 73 return false;
70 } 74 }
71 75
72 *data = MS(REG_READ(ah, AR_EEPROM_STATUS_DATA), 76 *data = MS(common->ops->read(ah, AR_EEPROM_STATUS_DATA),
73 AR_EEPROM_STATUS_DATA_VAL); 77 AR_EEPROM_STATUS_DATA_VAL);
74 78
75 return true; 79 return true;
76} 80}
77 81
78static struct ath_bus_ops ath_pci_bus_ops = { 82/*
83 * Bluetooth coexistance requires disabling ASPM.
84 */
85static void ath_pci_bt_coex_prep(struct ath_common *common)
86{
87 struct ath_softc *sc = (struct ath_softc *) common->priv;
88 struct pci_dev *pdev = to_pci_dev(sc->dev);
89 u8 aspm;
90
91 if (!pdev->is_pcie)
92 return;
93
94 pci_read_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, &aspm);
95 aspm &= ~(ATH_PCIE_CAP_LINK_L0S | ATH_PCIE_CAP_LINK_L1);
96 pci_write_config_byte(pdev, ATH_PCIE_CAP_LINK_CTRL, aspm);
97}
98
99const static struct ath_bus_ops ath_pci_bus_ops = {
79 .read_cachesize = ath_pci_read_cachesize, 100 .read_cachesize = ath_pci_read_cachesize,
80 .cleanup = ath_pci_cleanup, 101 .cleanup = ath_pci_cleanup,
81 .eeprom_read = ath_pci_eeprom_read, 102 .eeprom_read = ath_pci_eeprom_read,
103 .bt_coex_prep = ath_pci_bt_coex_prep,
82}; 104};
83 105
84static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) 106static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
@@ -92,6 +114,7 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
92 u32 val; 114 u32 val;
93 int ret = 0; 115 int ret = 0;
94 struct ath_hw *ah; 116 struct ath_hw *ah;
117 char hw_name[64];
95 118
96 if (pci_enable_device(pdev)) 119 if (pci_enable_device(pdev))
97 return -EIO; 120 return -EIO;
@@ -177,10 +200,9 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
177 sc->hw = hw; 200 sc->hw = hw;
178 sc->dev = &pdev->dev; 201 sc->dev = &pdev->dev;
179 sc->mem = mem; 202 sc->mem = mem;
180 sc->bus_ops = &ath_pci_bus_ops;
181 203
182 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid); 204 pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &subsysid);
183 ret = ath_init_device(id->device, sc, subsysid); 205 ret = ath_init_device(id->device, sc, subsysid, &ath_pci_bus_ops);
184 if (ret) { 206 if (ret) {
185 dev_err(&pdev->dev, "failed to initialize device\n"); 207 dev_err(&pdev->dev, "failed to initialize device\n");
186 goto bad3; 208 goto bad3;
@@ -197,14 +219,11 @@ static int ath_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
197 sc->irq = pdev->irq; 219 sc->irq = pdev->irq;
198 220
199 ah = sc->sc_ah; 221 ah = sc->sc_ah;
222 ath9k_hw_name(ah, hw_name, sizeof(hw_name));
200 printk(KERN_INFO 223 printk(KERN_INFO
201 "%s: Atheros AR%s MAC/BB Rev:%x " 224 "%s: %s mem=0x%lx, irq=%d\n",
202 "AR%s RF Rev:%x: mem=0x%lx, irq=%d\n",
203 wiphy_name(hw->wiphy), 225 wiphy_name(hw->wiphy),
204 ath_mac_bb_name(ah->hw_version.macVersion), 226 hw_name,
205 ah->hw_version.macRev,
206 ath_rf_name((ah->hw_version.analog5GhzRev & AR_RADIO_SREV_MAJOR)),
207 ah->hw_version.phyRev,
208 (unsigned long)mem, pdev->irq); 227 (unsigned long)mem, pdev->irq);
209 228
210 return 0; 229 return 0;
diff --git a/drivers/net/wireless/ath/ath9k/phy.c b/drivers/net/wireless/ath/ath9k/phy.c
index 63bf9a307c6a..13ab4d7eb7aa 100644
--- a/drivers/net/wireless/ath/ath9k/phy.c
+++ b/drivers/net/wireless/ath/ath9k/phy.c
@@ -14,90 +14,70 @@
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */ 15 */
16 16
17#include "ath9k.h" 17/**
18 * DOC: Programming Atheros 802.11n analog front end radios
19 *
20 * AR5416 MAC based PCI devices and AR518 MAC based PCI-Express
21 * devices have either an external AR2133 analog front end radio for single
22 * band 2.4 GHz communication or an AR5133 analog front end radio for dual
23 * band 2.4 GHz / 5 GHz communication.
24 *
25 * All devices after the AR5416 and AR5418 family starting with the AR9280
26 * have their analog front radios, MAC/BB and host PCIe/USB interface embedded
27 * into a single-chip and require less programming.
28 *
29 * The following single-chips exist with a respective embedded radio:
30 *
31 * AR9280 - 11n dual-band 2x2 MIMO for PCIe
32 * AR9281 - 11n single-band 1x2 MIMO for PCIe
33 * AR9285 - 11n single-band 1x1 for PCIe
34 * AR9287 - 11n single-band 2x2 MIMO for PCIe
35 *
36 * AR9220 - 11n dual-band 2x2 MIMO for PCI
37 * AR9223 - 11n single-band 2x2 MIMO for PCI
38 *
39 * AR9287 - 11n single-band 1x1 MIMO for USB
40 */
18 41
19void 42#include "hw.h"
20ath9k_hw_write_regs(struct ath_hw *ah, u32 modesIndex, u32 freqIndex,
21 int regWrites)
22{
23 REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites);
24}
25 43
26bool 44/**
27ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan) 45 * ath9k_hw_write_regs - ??
46 *
47 * @ah: atheros hardware structure
48 * @freqIndex:
49 * @regWrites:
50 *
51 * Used for both the chipsets with an external AR2133/AR5133 radios and
52 * single-chip devices.
53 */
54void ath9k_hw_write_regs(struct ath_hw *ah, u32 freqIndex, int regWrites)
28{ 55{
29 u32 channelSel = 0; 56 REG_WRITE_ARRAY(&ah->iniBB_RfGain, freqIndex, regWrites);
30 u32 bModeSynth = 0;
31 u32 aModeRefSel = 0;
32 u32 reg32 = 0;
33 u16 freq;
34 struct chan_centers centers;
35
36 ath9k_hw_get_channel_centers(ah, chan, &centers);
37 freq = centers.synth_center;
38
39 if (freq < 4800) {
40 u32 txctl;
41
42 if (((freq - 2192) % 5) == 0) {
43 channelSel = ((freq - 672) * 2 - 3040) / 10;
44 bModeSynth = 0;
45 } else if (((freq - 2224) % 5) == 0) {
46 channelSel = ((freq - 704) * 2 - 3040) / 10;
47 bModeSynth = 1;
48 } else {
49 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
50 "Invalid channel %u MHz\n", freq);
51 return false;
52 }
53
54 channelSel = (channelSel << 2) & 0xff;
55 channelSel = ath9k_hw_reverse_bits(channelSel, 8);
56
57 txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
58 if (freq == 2484) {
59
60 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
61 txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
62 } else {
63 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
64 txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
65 }
66
67 } else if ((freq % 20) == 0 && freq >= 5120) {
68 channelSel =
69 ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
70 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
71 } else if ((freq % 10) == 0) {
72 channelSel =
73 ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
74 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
75 aModeRefSel = ath9k_hw_reverse_bits(2, 2);
76 else
77 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
78 } else if ((freq % 5) == 0) {
79 channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
80 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
81 } else {
82 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
83 "Invalid channel %u MHz\n", freq);
84 return false;
85 }
86
87 reg32 =
88 (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
89 (1 << 5) | 0x1;
90
91 REG_WRITE(ah, AR_PHY(0x37), reg32);
92
93 ah->curchan = chan;
94 ah->curchan_rad_index = -1;
95
96 return true;
97} 57}
98 58
99void ath9k_hw_ar9280_set_channel(struct ath_hw *ah, 59/**
100 struct ath9k_channel *chan) 60 * ath9k_hw_ar9280_set_channel - set channel on single-chip device
61 * @ah: atheros hardware structure
62 * @chan:
63 *
64 * This is the function to change channel on single-chip devices, that is
65 * all devices after ar9280.
66 *
67 * This function takes the channel value in MHz and sets
68 * hardware channel value. Assumes writes have been enabled to analog bus.
69 *
70 * Actual Expression,
71 *
72 * For 2GHz channel,
73 * Channel Frequency = (3/4) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^17)
74 * (freq_ref = 40MHz)
75 *
76 * For 5GHz channel,
77 * Channel Frequency = (3/2) * freq_ref * (chansel[8:0] + chanfrac[16:0]/2^10)
78 * (freq_ref = 40MHz/(24>>amodeRefSel))
79 */
80int ath9k_hw_ar9280_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
101{ 81{
102 u16 bMode, fracMode, aModeRefSel = 0; 82 u16 bMode, fracMode, aModeRefSel = 0;
103 u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0; 83 u32 freq, ndiv, channelSel = 0, channelFrac = 0, reg32 = 0;
@@ -110,22 +90,34 @@ void ath9k_hw_ar9280_set_channel(struct ath_hw *ah,
110 reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL); 90 reg32 = REG_READ(ah, AR_PHY_SYNTH_CONTROL);
111 reg32 &= 0xc0000000; 91 reg32 &= 0xc0000000;
112 92
113 if (freq < 4800) { 93 if (freq < 4800) { /* 2 GHz, fractional mode */
114 u32 txctl; 94 u32 txctl;
95 int regWrites = 0;
115 96
116 bMode = 1; 97 bMode = 1;
117 fracMode = 1; 98 fracMode = 1;
118 aModeRefSel = 0; 99 aModeRefSel = 0;
119 channelSel = (freq * 0x10000) / 15; 100 channelSel = (freq * 0x10000) / 15;
120 101
121 txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL); 102 if (AR_SREV_9287_11_OR_LATER(ah)) {
122 if (freq == 2484) { 103 if (freq == 2484) {
123 104 /* Enable channel spreading for channel 14 */
124 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL, 105 REG_WRITE_ARRAY(&ah->iniCckfirJapan2484,
125 txctl | AR_PHY_CCK_TX_CTRL_JAPAN); 106 1, regWrites);
107 } else {
108 REG_WRITE_ARRAY(&ah->iniCckfirNormal,
109 1, regWrites);
110 }
126 } else { 111 } else {
127 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL, 112 txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
128 txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN); 113 if (freq == 2484) {
114 /* Enable channel spreading for channel 14 */
115 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
116 txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
117 } else {
118 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
119 txctl &~ AR_PHY_CCK_TX_CTRL_JAPAN);
120 }
129 } 121 }
130 } else { 122 } else {
131 bMode = 0; 123 bMode = 0;
@@ -143,10 +135,15 @@ void ath9k_hw_ar9280_set_channel(struct ath_hw *ah,
143 case 1: 135 case 1:
144 default: 136 default:
145 aModeRefSel = 0; 137 aModeRefSel = 0;
138 /*
139 * Enable 2G (fractional) mode for channels
140 * which are 5MHz spaced.
141 */
146 fracMode = 1; 142 fracMode = 1;
147 refDivA = 1; 143 refDivA = 1;
148 channelSel = (freq * 0x8000) / 15; 144 channelSel = (freq * 0x8000) / 15;
149 145
146 /* RefDivA setting */
150 REG_RMW_FIELD(ah, AR_AN_SYNTH9, 147 REG_RMW_FIELD(ah, AR_AN_SYNTH9,
151 AR_AN_SYNTH9_REFDIVA, refDivA); 148 AR_AN_SYNTH9_REFDIVA, refDivA);
152 149
@@ -168,12 +165,284 @@ void ath9k_hw_ar9280_set_channel(struct ath_hw *ah,
168 165
169 ah->curchan = chan; 166 ah->curchan = chan;
170 ah->curchan_rad_index = -1; 167 ah->curchan_rad_index = -1;
168
169 return 0;
171} 170}
172 171
173static void 172/**
174ath9k_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32, 173 * ath9k_hw_9280_spur_mitigate - convert baseband spur frequency
175 u32 numBits, u32 firstBit, 174 * @ah: atheros hardware structure
176 u32 column) 175 * @chan:
176 *
177 * For single-chip solutions. Converts to baseband spur frequency given the
178 * input channel frequency and compute register settings below.
179 */
180void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
181{
182 int bb_spur = AR_NO_SPUR;
183 int freq;
184 int bin, cur_bin;
185 int bb_spur_off, spur_subchannel_sd;
186 int spur_freq_sd;
187 int spur_delta_phase;
188 int denominator;
189 int upper, lower, cur_vit_mask;
190 int tmp, newVal;
191 int i;
192 int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
193 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
194 };
195 int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
196 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
197 };
198 int inc[4] = { 0, 100, 0, 0 };
199 struct chan_centers centers;
200
201 int8_t mask_m[123];
202 int8_t mask_p[123];
203 int8_t mask_amt;
204 int tmp_mask;
205 int cur_bb_spur;
206 bool is2GHz = IS_CHAN_2GHZ(chan);
207
208 memset(&mask_m, 0, sizeof(int8_t) * 123);
209 memset(&mask_p, 0, sizeof(int8_t) * 123);
210
211 ath9k_hw_get_channel_centers(ah, chan, &centers);
212 freq = centers.synth_center;
213
214 ah->config.spurmode = SPUR_ENABLE_EEPROM;
215 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
216 cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
217
218 if (is2GHz)
219 cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_2GHZ;
220 else
221 cur_bb_spur = (cur_bb_spur / 10) + AR_BASE_FREQ_5GHZ;
222
223 if (AR_NO_SPUR == cur_bb_spur)
224 break;
225 cur_bb_spur = cur_bb_spur - freq;
226
227 if (IS_CHAN_HT40(chan)) {
228 if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT40) &&
229 (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT40)) {
230 bb_spur = cur_bb_spur;
231 break;
232 }
233 } else if ((cur_bb_spur > -AR_SPUR_FEEQ_BOUND_HT20) &&
234 (cur_bb_spur < AR_SPUR_FEEQ_BOUND_HT20)) {
235 bb_spur = cur_bb_spur;
236 break;
237 }
238 }
239
240 if (AR_NO_SPUR == bb_spur) {
241 REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
242 AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
243 return;
244 } else {
245 REG_CLR_BIT(ah, AR_PHY_FORCE_CLKEN_CCK,
246 AR_PHY_FORCE_CLKEN_CCK_MRC_MUX);
247 }
248
249 bin = bb_spur * 320;
250
251 tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
252
253 newVal = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
254 AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
255 AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
256 AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
257 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), newVal);
258
259 newVal = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
260 AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
261 AR_PHY_SPUR_REG_MASK_RATE_SELECT |
262 AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
263 SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
264 REG_WRITE(ah, AR_PHY_SPUR_REG, newVal);
265
266 if (IS_CHAN_HT40(chan)) {
267 if (bb_spur < 0) {
268 spur_subchannel_sd = 1;
269 bb_spur_off = bb_spur + 10;
270 } else {
271 spur_subchannel_sd = 0;
272 bb_spur_off = bb_spur - 10;
273 }
274 } else {
275 spur_subchannel_sd = 0;
276 bb_spur_off = bb_spur;
277 }
278
279 if (IS_CHAN_HT40(chan))
280 spur_delta_phase =
281 ((bb_spur * 262144) /
282 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
283 else
284 spur_delta_phase =
285 ((bb_spur * 524288) /
286 10) & AR_PHY_TIMING11_SPUR_DELTA_PHASE;
287
288 denominator = IS_CHAN_2GHZ(chan) ? 44 : 40;
289 spur_freq_sd = ((bb_spur_off * 2048) / denominator) & 0x3ff;
290
291 newVal = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
292 SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
293 SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
294 REG_WRITE(ah, AR_PHY_TIMING11, newVal);
295
296 newVal = spur_subchannel_sd << AR_PHY_SFCORR_SPUR_SUBCHNL_SD_S;
297 REG_WRITE(ah, AR_PHY_SFCORR_EXT, newVal);
298
299 cur_bin = -6000;
300 upper = bin + 100;
301 lower = bin - 100;
302
303 for (i = 0; i < 4; i++) {
304 int pilot_mask = 0;
305 int chan_mask = 0;
306 int bp = 0;
307 for (bp = 0; bp < 30; bp++) {
308 if ((cur_bin > lower) && (cur_bin < upper)) {
309 pilot_mask = pilot_mask | 0x1 << bp;
310 chan_mask = chan_mask | 0x1 << bp;
311 }
312 cur_bin += 100;
313 }
314 cur_bin += inc[i];
315 REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
316 REG_WRITE(ah, chan_mask_reg[i], chan_mask);
317 }
318
319 cur_vit_mask = 6100;
320 upper = bin + 120;
321 lower = bin - 120;
322
323 for (i = 0; i < 123; i++) {
324 if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
325
326 /* workaround for gcc bug #37014 */
327 volatile int tmp_v = abs(cur_vit_mask - bin);
328
329 if (tmp_v < 75)
330 mask_amt = 1;
331 else
332 mask_amt = 0;
333 if (cur_vit_mask < 0)
334 mask_m[abs(cur_vit_mask / 100)] = mask_amt;
335 else
336 mask_p[cur_vit_mask / 100] = mask_amt;
337 }
338 cur_vit_mask -= 100;
339 }
340
341 tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
342 | (mask_m[48] << 26) | (mask_m[49] << 24)
343 | (mask_m[50] << 22) | (mask_m[51] << 20)
344 | (mask_m[52] << 18) | (mask_m[53] << 16)
345 | (mask_m[54] << 14) | (mask_m[55] << 12)
346 | (mask_m[56] << 10) | (mask_m[57] << 8)
347 | (mask_m[58] << 6) | (mask_m[59] << 4)
348 | (mask_m[60] << 2) | (mask_m[61] << 0);
349 REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
350 REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
351
352 tmp_mask = (mask_m[31] << 28)
353 | (mask_m[32] << 26) | (mask_m[33] << 24)
354 | (mask_m[34] << 22) | (mask_m[35] << 20)
355 | (mask_m[36] << 18) | (mask_m[37] << 16)
356 | (mask_m[48] << 14) | (mask_m[39] << 12)
357 | (mask_m[40] << 10) | (mask_m[41] << 8)
358 | (mask_m[42] << 6) | (mask_m[43] << 4)
359 | (mask_m[44] << 2) | (mask_m[45] << 0);
360 REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
361 REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
362
363 tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
364 | (mask_m[18] << 26) | (mask_m[18] << 24)
365 | (mask_m[20] << 22) | (mask_m[20] << 20)
366 | (mask_m[22] << 18) | (mask_m[22] << 16)
367 | (mask_m[24] << 14) | (mask_m[24] << 12)
368 | (mask_m[25] << 10) | (mask_m[26] << 8)
369 | (mask_m[27] << 6) | (mask_m[28] << 4)
370 | (mask_m[29] << 2) | (mask_m[30] << 0);
371 REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
372 REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
373
374 tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
375 | (mask_m[2] << 26) | (mask_m[3] << 24)
376 | (mask_m[4] << 22) | (mask_m[5] << 20)
377 | (mask_m[6] << 18) | (mask_m[7] << 16)
378 | (mask_m[8] << 14) | (mask_m[9] << 12)
379 | (mask_m[10] << 10) | (mask_m[11] << 8)
380 | (mask_m[12] << 6) | (mask_m[13] << 4)
381 | (mask_m[14] << 2) | (mask_m[15] << 0);
382 REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
383 REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
384
385 tmp_mask = (mask_p[15] << 28)
386 | (mask_p[14] << 26) | (mask_p[13] << 24)
387 | (mask_p[12] << 22) | (mask_p[11] << 20)
388 | (mask_p[10] << 18) | (mask_p[9] << 16)
389 | (mask_p[8] << 14) | (mask_p[7] << 12)
390 | (mask_p[6] << 10) | (mask_p[5] << 8)
391 | (mask_p[4] << 6) | (mask_p[3] << 4)
392 | (mask_p[2] << 2) | (mask_p[1] << 0);
393 REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
394 REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
395
396 tmp_mask = (mask_p[30] << 28)
397 | (mask_p[29] << 26) | (mask_p[28] << 24)
398 | (mask_p[27] << 22) | (mask_p[26] << 20)
399 | (mask_p[25] << 18) | (mask_p[24] << 16)
400 | (mask_p[23] << 14) | (mask_p[22] << 12)
401 | (mask_p[21] << 10) | (mask_p[20] << 8)
402 | (mask_p[19] << 6) | (mask_p[18] << 4)
403 | (mask_p[17] << 2) | (mask_p[16] << 0);
404 REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
405 REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
406
407 tmp_mask = (mask_p[45] << 28)
408 | (mask_p[44] << 26) | (mask_p[43] << 24)
409 | (mask_p[42] << 22) | (mask_p[41] << 20)
410 | (mask_p[40] << 18) | (mask_p[39] << 16)
411 | (mask_p[38] << 14) | (mask_p[37] << 12)
412 | (mask_p[36] << 10) | (mask_p[35] << 8)
413 | (mask_p[34] << 6) | (mask_p[33] << 4)
414 | (mask_p[32] << 2) | (mask_p[31] << 0);
415 REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
416 REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
417
418 tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
419 | (mask_p[59] << 26) | (mask_p[58] << 24)
420 | (mask_p[57] << 22) | (mask_p[56] << 20)
421 | (mask_p[55] << 18) | (mask_p[54] << 16)
422 | (mask_p[53] << 14) | (mask_p[52] << 12)
423 | (mask_p[51] << 10) | (mask_p[50] << 8)
424 | (mask_p[49] << 6) | (mask_p[48] << 4)
425 | (mask_p[47] << 2) | (mask_p[46] << 0);
426 REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
427 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
428}
429
430/* All code below is for non single-chip solutions */
431
432/**
433 * ath9k_phy_modify_rx_buffer() - perform analog swizzling of parameters
434 * @rfbuf:
435 * @reg32:
436 * @numBits:
437 * @firstBit:
438 * @column:
439 *
440 * Performs analog "swizzling" of parameters into their location.
441 * Used on external AR2133/AR5133 radios.
442 */
443static void ath9k_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
444 u32 numBits, u32 firstBit,
445 u32 column)
177{ 446{
178 u32 tmp32, mask, arrayEntry, lastBit; 447 u32 tmp32, mask, arrayEntry, lastBit;
179 int32_t bitPosition, bitsLeft; 448 int32_t bitPosition, bitsLeft;
@@ -197,26 +466,556 @@ ath9k_phy_modify_rx_buffer(u32 *rfBuf, u32 reg32,
197 } 466 }
198} 467}
199 468
200bool 469/*
201ath9k_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan, 470 * Fix on 2.4 GHz band for orientation sensitivity issue by increasing
202 u16 modesIndex) 471 * rf_pwd_icsyndiv.
472 *
473 * Theoretical Rules:
474 * if 2 GHz band
475 * if forceBiasAuto
476 * if synth_freq < 2412
477 * bias = 0
478 * else if 2412 <= synth_freq <= 2422
479 * bias = 1
480 * else // synth_freq > 2422
481 * bias = 2
482 * else if forceBias > 0
483 * bias = forceBias & 7
484 * else
485 * no change, use value from ini file
486 * else
487 * no change, invalid band
488 *
489 * 1st Mod:
490 * 2422 also uses value of 2
491 * <approved>
492 *
493 * 2nd Mod:
494 * Less than 2412 uses value of 0, 2412 and above uses value of 2
495 */
496static void ath9k_hw_force_bias(struct ath_hw *ah, u16 synth_freq)
497{
498 struct ath_common *common = ath9k_hw_common(ah);
499 u32 tmp_reg;
500 int reg_writes = 0;
501 u32 new_bias = 0;
502
503 if (!AR_SREV_5416(ah) || synth_freq >= 3000) {
504 return;
505 }
506
507 BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
508
509 if (synth_freq < 2412)
510 new_bias = 0;
511 else if (synth_freq < 2422)
512 new_bias = 1;
513 else
514 new_bias = 2;
515
516 /* pre-reverse this field */
517 tmp_reg = ath9k_hw_reverse_bits(new_bias, 3);
518
519 ath_print(common, ATH_DBG_CONFIG,
520 "Force rf_pwd_icsyndiv to %1d on %4d\n",
521 new_bias, synth_freq);
522
523 /* swizzle rf_pwd_icsyndiv */
524 ath9k_phy_modify_rx_buffer(ah->analogBank6Data, tmp_reg, 3, 181, 3);
525
526 /* write Bank 6 with new params */
527 REG_WRITE_RF_ARRAY(&ah->iniBank6, ah->analogBank6Data, reg_writes);
528}
529
530/**
531 * ath9k_hw_decrease_chain_power()
532 *
533 * @ah: atheros hardware structure
534 * @chan:
535 *
536 * Only used on the AR5416 and AR5418 with the external AR2133/AR5133 radios.
537 *
538 * Sets a chain internal RF path to the lowest output power. Any
539 * further writes to bank6 after this setting will override these
540 * changes. Thus this function must be the last function in the
541 * sequence to modify bank 6.
542 *
543 * This function must be called after ar5416SetRfRegs() which is
544 * called from ath9k_hw_process_ini() due to swizzling of bank 6.
545 * Depends on ah->analogBank6Data being initialized by
546 * ath9k_hw_set_rf_regs()
547 *
548 * Additional additive reduction in power -
549 * change chain's switch table so chain's tx state is actually the rx
550 * state value. May produce different results in 2GHz/5GHz as well as
551 * board to board but in general should be a reduction.
552 *
553 * Activated by #ifdef ALTER_SWITCH. Not tried yet. If so, must be
554 * called after ah->eep_ops->set_board_values() due to RMW of
555 * PHY_SWITCH_CHAIN_0.
556 */
557void ath9k_hw_decrease_chain_power(struct ath_hw *ah,
558 struct ath9k_channel *chan)
559{
560 int i, regWrites = 0;
561 u32 bank6SelMask;
562 u32 *bank6Temp = ah->bank6Temp;
563
564 BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
565
566 switch (ah->config.diversity_control) {
567 case ATH9K_ANT_FIXED_A:
568 bank6SelMask =
569 (ah->config.antenna_switch_swap & ANTSWAP_AB) ?
570 REDUCE_CHAIN_0 : /* swapped, reduce chain 0 */
571 REDUCE_CHAIN_1; /* normal, select chain 1/2 to reduce */
572 break;
573 case ATH9K_ANT_FIXED_B:
574 bank6SelMask =
575 (ah->config.antenna_switch_swap & ANTSWAP_AB) ?
576 REDUCE_CHAIN_1 : /* swapped, reduce chain 1/2 */
577 REDUCE_CHAIN_0; /* normal, select chain 0 to reduce */
578 break;
579 case ATH9K_ANT_VARIABLE:
580 return; /* do not change anything */
581 break;
582 default:
583 return; /* do not change anything */
584 break;
585 }
586
587 for (i = 0; i < ah->iniBank6.ia_rows; i++)
588 bank6Temp[i] = ah->analogBank6Data[i];
589
590 /* Write Bank 5 to switch Bank 6 write to selected chain only */
591 REG_WRITE(ah, AR_PHY_BASE + 0xD8, bank6SelMask);
592
593 /*
594 * Modify Bank6 selected chain to use lowest amplification.
595 * Modifies the parameters to a value of 1.
596 * Depends on existing bank 6 values to be cached in
597 * ah->analogBank6Data
598 */
599 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 189, 0);
600 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 190, 0);
601 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 191, 0);
602 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 192, 0);
603 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 193, 0);
604 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 222, 0);
605 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 245, 0);
606 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 246, 0);
607 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 247, 0);
608
609 REG_WRITE_RF_ARRAY(&ah->iniBank6, bank6Temp, regWrites);
610
611 REG_WRITE(ah, AR_PHY_BASE + 0xD8, 0x00000053);
612#ifdef ALTER_SWITCH
613 REG_WRITE(ah, PHY_SWITCH_CHAIN_0,
614 (REG_READ(ah, PHY_SWITCH_CHAIN_0) & ~0x38)
615 | ((REG_READ(ah, PHY_SWITCH_CHAIN_0) >> 3) & 0x38));
616#endif
617}
618
619/**
620 * ath9k_hw_set_channel - tune to a channel on the external AR2133/AR5133 radios
621 * @ah: atheros hardware stucture
622 * @chan:
623 *
624 * For the external AR2133/AR5133 radios, takes the MHz channel value and set
625 * the channel value. Assumes writes enabled to analog bus and bank6 register
626 * cache in ah->analogBank6Data.
627 */
628int ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan)
629{
630 struct ath_common *common = ath9k_hw_common(ah);
631 u32 channelSel = 0;
632 u32 bModeSynth = 0;
633 u32 aModeRefSel = 0;
634 u32 reg32 = 0;
635 u16 freq;
636 struct chan_centers centers;
637
638 ath9k_hw_get_channel_centers(ah, chan, &centers);
639 freq = centers.synth_center;
640
641 if (freq < 4800) {
642 u32 txctl;
643
644 if (((freq - 2192) % 5) == 0) {
645 channelSel = ((freq - 672) * 2 - 3040) / 10;
646 bModeSynth = 0;
647 } else if (((freq - 2224) % 5) == 0) {
648 channelSel = ((freq - 704) * 2 - 3040) / 10;
649 bModeSynth = 1;
650 } else {
651 ath_print(common, ATH_DBG_FATAL,
652 "Invalid channel %u MHz\n", freq);
653 return -EINVAL;
654 }
655
656 channelSel = (channelSel << 2) & 0xff;
657 channelSel = ath9k_hw_reverse_bits(channelSel, 8);
658
659 txctl = REG_READ(ah, AR_PHY_CCK_TX_CTRL);
660 if (freq == 2484) {
661
662 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
663 txctl | AR_PHY_CCK_TX_CTRL_JAPAN);
664 } else {
665 REG_WRITE(ah, AR_PHY_CCK_TX_CTRL,
666 txctl & ~AR_PHY_CCK_TX_CTRL_JAPAN);
667 }
668
669 } else if ((freq % 20) == 0 && freq >= 5120) {
670 channelSel =
671 ath9k_hw_reverse_bits(((freq - 4800) / 20 << 2), 8);
672 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
673 } else if ((freq % 10) == 0) {
674 channelSel =
675 ath9k_hw_reverse_bits(((freq - 4800) / 10 << 1), 8);
676 if (AR_SREV_9100(ah) || AR_SREV_9160_10_OR_LATER(ah))
677 aModeRefSel = ath9k_hw_reverse_bits(2, 2);
678 else
679 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
680 } else if ((freq % 5) == 0) {
681 channelSel = ath9k_hw_reverse_bits((freq - 4800) / 5, 8);
682 aModeRefSel = ath9k_hw_reverse_bits(1, 2);
683 } else {
684 ath_print(common, ATH_DBG_FATAL,
685 "Invalid channel %u MHz\n", freq);
686 return -EINVAL;
687 }
688
689 ath9k_hw_force_bias(ah, freq);
690 ath9k_hw_decrease_chain_power(ah, chan);
691
692 reg32 =
693 (channelSel << 8) | (aModeRefSel << 2) | (bModeSynth << 1) |
694 (1 << 5) | 0x1;
695
696 REG_WRITE(ah, AR_PHY(0x37), reg32);
697
698 ah->curchan = chan;
699 ah->curchan_rad_index = -1;
700
701 return 0;
702}
703
704/**
705 * ath9k_hw_spur_mitigate - convert baseband spur frequency for external radios
706 * @ah: atheros hardware structure
707 * @chan:
708 *
709 * For non single-chip solutions. Converts to baseband spur frequency given the
710 * input channel frequency and compute register settings below.
711 */
712void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan)
713{
714 int bb_spur = AR_NO_SPUR;
715 int bin, cur_bin;
716 int spur_freq_sd;
717 int spur_delta_phase;
718 int denominator;
719 int upper, lower, cur_vit_mask;
720 int tmp, new;
721 int i;
722 int pilot_mask_reg[4] = { AR_PHY_TIMING7, AR_PHY_TIMING8,
723 AR_PHY_PILOT_MASK_01_30, AR_PHY_PILOT_MASK_31_60
724 };
725 int chan_mask_reg[4] = { AR_PHY_TIMING9, AR_PHY_TIMING10,
726 AR_PHY_CHANNEL_MASK_01_30, AR_PHY_CHANNEL_MASK_31_60
727 };
728 int inc[4] = { 0, 100, 0, 0 };
729
730 int8_t mask_m[123];
731 int8_t mask_p[123];
732 int8_t mask_amt;
733 int tmp_mask;
734 int cur_bb_spur;
735 bool is2GHz = IS_CHAN_2GHZ(chan);
736
737 memset(&mask_m, 0, sizeof(int8_t) * 123);
738 memset(&mask_p, 0, sizeof(int8_t) * 123);
739
740 for (i = 0; i < AR_EEPROM_MODAL_SPURS; i++) {
741 cur_bb_spur = ah->eep_ops->get_spur_channel(ah, i, is2GHz);
742 if (AR_NO_SPUR == cur_bb_spur)
743 break;
744 cur_bb_spur = cur_bb_spur - (chan->channel * 10);
745 if ((cur_bb_spur > -95) && (cur_bb_spur < 95)) {
746 bb_spur = cur_bb_spur;
747 break;
748 }
749 }
750
751 if (AR_NO_SPUR == bb_spur)
752 return;
753
754 bin = bb_spur * 32;
755
756 tmp = REG_READ(ah, AR_PHY_TIMING_CTRL4(0));
757 new = tmp | (AR_PHY_TIMING_CTRL4_ENABLE_SPUR_RSSI |
758 AR_PHY_TIMING_CTRL4_ENABLE_SPUR_FILTER |
759 AR_PHY_TIMING_CTRL4_ENABLE_CHAN_MASK |
760 AR_PHY_TIMING_CTRL4_ENABLE_PILOT_MASK);
761
762 REG_WRITE(ah, AR_PHY_TIMING_CTRL4(0), new);
763
764 new = (AR_PHY_SPUR_REG_MASK_RATE_CNTL |
765 AR_PHY_SPUR_REG_ENABLE_MASK_PPM |
766 AR_PHY_SPUR_REG_MASK_RATE_SELECT |
767 AR_PHY_SPUR_REG_ENABLE_VIT_SPUR_RSSI |
768 SM(SPUR_RSSI_THRESH, AR_PHY_SPUR_REG_SPUR_RSSI_THRESH));
769 REG_WRITE(ah, AR_PHY_SPUR_REG, new);
770
771 spur_delta_phase = ((bb_spur * 524288) / 100) &
772 AR_PHY_TIMING11_SPUR_DELTA_PHASE;
773
774 denominator = IS_CHAN_2GHZ(chan) ? 440 : 400;
775 spur_freq_sd = ((bb_spur * 2048) / denominator) & 0x3ff;
776
777 new = (AR_PHY_TIMING11_USE_SPUR_IN_AGC |
778 SM(spur_freq_sd, AR_PHY_TIMING11_SPUR_FREQ_SD) |
779 SM(spur_delta_phase, AR_PHY_TIMING11_SPUR_DELTA_PHASE));
780 REG_WRITE(ah, AR_PHY_TIMING11, new);
781
782 cur_bin = -6000;
783 upper = bin + 100;
784 lower = bin - 100;
785
786 for (i = 0; i < 4; i++) {
787 int pilot_mask = 0;
788 int chan_mask = 0;
789 int bp = 0;
790 for (bp = 0; bp < 30; bp++) {
791 if ((cur_bin > lower) && (cur_bin < upper)) {
792 pilot_mask = pilot_mask | 0x1 << bp;
793 chan_mask = chan_mask | 0x1 << bp;
794 }
795 cur_bin += 100;
796 }
797 cur_bin += inc[i];
798 REG_WRITE(ah, pilot_mask_reg[i], pilot_mask);
799 REG_WRITE(ah, chan_mask_reg[i], chan_mask);
800 }
801
802 cur_vit_mask = 6100;
803 upper = bin + 120;
804 lower = bin - 120;
805
806 for (i = 0; i < 123; i++) {
807 if ((cur_vit_mask > lower) && (cur_vit_mask < upper)) {
808
809 /* workaround for gcc bug #37014 */
810 volatile int tmp_v = abs(cur_vit_mask - bin);
811
812 if (tmp_v < 75)
813 mask_amt = 1;
814 else
815 mask_amt = 0;
816 if (cur_vit_mask < 0)
817 mask_m[abs(cur_vit_mask / 100)] = mask_amt;
818 else
819 mask_p[cur_vit_mask / 100] = mask_amt;
820 }
821 cur_vit_mask -= 100;
822 }
823
824 tmp_mask = (mask_m[46] << 30) | (mask_m[47] << 28)
825 | (mask_m[48] << 26) | (mask_m[49] << 24)
826 | (mask_m[50] << 22) | (mask_m[51] << 20)
827 | (mask_m[52] << 18) | (mask_m[53] << 16)
828 | (mask_m[54] << 14) | (mask_m[55] << 12)
829 | (mask_m[56] << 10) | (mask_m[57] << 8)
830 | (mask_m[58] << 6) | (mask_m[59] << 4)
831 | (mask_m[60] << 2) | (mask_m[61] << 0);
832 REG_WRITE(ah, AR_PHY_BIN_MASK_1, tmp_mask);
833 REG_WRITE(ah, AR_PHY_VIT_MASK2_M_46_61, tmp_mask);
834
835 tmp_mask = (mask_m[31] << 28)
836 | (mask_m[32] << 26) | (mask_m[33] << 24)
837 | (mask_m[34] << 22) | (mask_m[35] << 20)
838 | (mask_m[36] << 18) | (mask_m[37] << 16)
839 | (mask_m[48] << 14) | (mask_m[39] << 12)
840 | (mask_m[40] << 10) | (mask_m[41] << 8)
841 | (mask_m[42] << 6) | (mask_m[43] << 4)
842 | (mask_m[44] << 2) | (mask_m[45] << 0);
843 REG_WRITE(ah, AR_PHY_BIN_MASK_2, tmp_mask);
844 REG_WRITE(ah, AR_PHY_MASK2_M_31_45, tmp_mask);
845
846 tmp_mask = (mask_m[16] << 30) | (mask_m[16] << 28)
847 | (mask_m[18] << 26) | (mask_m[18] << 24)
848 | (mask_m[20] << 22) | (mask_m[20] << 20)
849 | (mask_m[22] << 18) | (mask_m[22] << 16)
850 | (mask_m[24] << 14) | (mask_m[24] << 12)
851 | (mask_m[25] << 10) | (mask_m[26] << 8)
852 | (mask_m[27] << 6) | (mask_m[28] << 4)
853 | (mask_m[29] << 2) | (mask_m[30] << 0);
854 REG_WRITE(ah, AR_PHY_BIN_MASK_3, tmp_mask);
855 REG_WRITE(ah, AR_PHY_MASK2_M_16_30, tmp_mask);
856
857 tmp_mask = (mask_m[0] << 30) | (mask_m[1] << 28)
858 | (mask_m[2] << 26) | (mask_m[3] << 24)
859 | (mask_m[4] << 22) | (mask_m[5] << 20)
860 | (mask_m[6] << 18) | (mask_m[7] << 16)
861 | (mask_m[8] << 14) | (mask_m[9] << 12)
862 | (mask_m[10] << 10) | (mask_m[11] << 8)
863 | (mask_m[12] << 6) | (mask_m[13] << 4)
864 | (mask_m[14] << 2) | (mask_m[15] << 0);
865 REG_WRITE(ah, AR_PHY_MASK_CTL, tmp_mask);
866 REG_WRITE(ah, AR_PHY_MASK2_M_00_15, tmp_mask);
867
868 tmp_mask = (mask_p[15] << 28)
869 | (mask_p[14] << 26) | (mask_p[13] << 24)
870 | (mask_p[12] << 22) | (mask_p[11] << 20)
871 | (mask_p[10] << 18) | (mask_p[9] << 16)
872 | (mask_p[8] << 14) | (mask_p[7] << 12)
873 | (mask_p[6] << 10) | (mask_p[5] << 8)
874 | (mask_p[4] << 6) | (mask_p[3] << 4)
875 | (mask_p[2] << 2) | (mask_p[1] << 0);
876 REG_WRITE(ah, AR_PHY_BIN_MASK2_1, tmp_mask);
877 REG_WRITE(ah, AR_PHY_MASK2_P_15_01, tmp_mask);
878
879 tmp_mask = (mask_p[30] << 28)
880 | (mask_p[29] << 26) | (mask_p[28] << 24)
881 | (mask_p[27] << 22) | (mask_p[26] << 20)
882 | (mask_p[25] << 18) | (mask_p[24] << 16)
883 | (mask_p[23] << 14) | (mask_p[22] << 12)
884 | (mask_p[21] << 10) | (mask_p[20] << 8)
885 | (mask_p[19] << 6) | (mask_p[18] << 4)
886 | (mask_p[17] << 2) | (mask_p[16] << 0);
887 REG_WRITE(ah, AR_PHY_BIN_MASK2_2, tmp_mask);
888 REG_WRITE(ah, AR_PHY_MASK2_P_30_16, tmp_mask);
889
890 tmp_mask = (mask_p[45] << 28)
891 | (mask_p[44] << 26) | (mask_p[43] << 24)
892 | (mask_p[42] << 22) | (mask_p[41] << 20)
893 | (mask_p[40] << 18) | (mask_p[39] << 16)
894 | (mask_p[38] << 14) | (mask_p[37] << 12)
895 | (mask_p[36] << 10) | (mask_p[35] << 8)
896 | (mask_p[34] << 6) | (mask_p[33] << 4)
897 | (mask_p[32] << 2) | (mask_p[31] << 0);
898 REG_WRITE(ah, AR_PHY_BIN_MASK2_3, tmp_mask);
899 REG_WRITE(ah, AR_PHY_MASK2_P_45_31, tmp_mask);
900
901 tmp_mask = (mask_p[61] << 30) | (mask_p[60] << 28)
902 | (mask_p[59] << 26) | (mask_p[58] << 24)
903 | (mask_p[57] << 22) | (mask_p[56] << 20)
904 | (mask_p[55] << 18) | (mask_p[54] << 16)
905 | (mask_p[53] << 14) | (mask_p[52] << 12)
906 | (mask_p[51] << 10) | (mask_p[50] << 8)
907 | (mask_p[49] << 6) | (mask_p[48] << 4)
908 | (mask_p[47] << 2) | (mask_p[46] << 0);
909 REG_WRITE(ah, AR_PHY_BIN_MASK2_4, tmp_mask);
910 REG_WRITE(ah, AR_PHY_MASK2_P_61_45, tmp_mask);
911}
912
913/**
914 * ath9k_hw_rf_alloc_ext_banks - allocates banks for external radio programming
915 * @ah: atheros hardware structure
916 *
917 * Only required for older devices with external AR2133/AR5133 radios.
918 */
919int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah)
920{
921#define ATH_ALLOC_BANK(bank, size) do { \
922 bank = kzalloc((sizeof(u32) * size), GFP_KERNEL); \
923 if (!bank) { \
924 ath_print(common, ATH_DBG_FATAL, \
925 "Cannot allocate RF banks\n"); \
926 return -ENOMEM; \
927 } \
928 } while (0);
929
930 struct ath_common *common = ath9k_hw_common(ah);
931
932 BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
933
934 ATH_ALLOC_BANK(ah->analogBank0Data, ah->iniBank0.ia_rows);
935 ATH_ALLOC_BANK(ah->analogBank1Data, ah->iniBank1.ia_rows);
936 ATH_ALLOC_BANK(ah->analogBank2Data, ah->iniBank2.ia_rows);
937 ATH_ALLOC_BANK(ah->analogBank3Data, ah->iniBank3.ia_rows);
938 ATH_ALLOC_BANK(ah->analogBank6Data, ah->iniBank6.ia_rows);
939 ATH_ALLOC_BANK(ah->analogBank6TPCData, ah->iniBank6TPC.ia_rows);
940 ATH_ALLOC_BANK(ah->analogBank7Data, ah->iniBank7.ia_rows);
941 ATH_ALLOC_BANK(ah->addac5416_21,
942 ah->iniAddac.ia_rows * ah->iniAddac.ia_columns);
943 ATH_ALLOC_BANK(ah->bank6Temp, ah->iniBank6.ia_rows);
944
945 return 0;
946#undef ATH_ALLOC_BANK
947}
948
949
950/**
951 * ath9k_hw_rf_free_ext_banks - Free memory for analog bank scratch buffers
952 * @ah: atheros hardware struture
953 * For the external AR2133/AR5133 radios banks.
954 */
955void
956ath9k_hw_rf_free_ext_banks(struct ath_hw *ah)
957{
958#define ATH_FREE_BANK(bank) do { \
959 kfree(bank); \
960 bank = NULL; \
961 } while (0);
962
963 BUG_ON(AR_SREV_9280_10_OR_LATER(ah));
964
965 ATH_FREE_BANK(ah->analogBank0Data);
966 ATH_FREE_BANK(ah->analogBank1Data);
967 ATH_FREE_BANK(ah->analogBank2Data);
968 ATH_FREE_BANK(ah->analogBank3Data);
969 ATH_FREE_BANK(ah->analogBank6Data);
970 ATH_FREE_BANK(ah->analogBank6TPCData);
971 ATH_FREE_BANK(ah->analogBank7Data);
972 ATH_FREE_BANK(ah->addac5416_21);
973 ATH_FREE_BANK(ah->bank6Temp);
974
975#undef ATH_FREE_BANK
976}
977
978/* *
979 * ath9k_hw_set_rf_regs - programs rf registers based on EEPROM
980 * @ah: atheros hardware structure
981 * @chan:
982 * @modesIndex:
983 *
984 * Used for the external AR2133/AR5133 radios.
985 *
986 * Reads the EEPROM header info from the device structure and programs
987 * all rf registers. This routine requires access to the analog
988 * rf device. This is not required for single-chip devices.
989 */
990bool ath9k_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan,
991 u16 modesIndex)
203{ 992{
204 u32 eepMinorRev; 993 u32 eepMinorRev;
205 u32 ob5GHz = 0, db5GHz = 0; 994 u32 ob5GHz = 0, db5GHz = 0;
206 u32 ob2GHz = 0, db2GHz = 0; 995 u32 ob2GHz = 0, db2GHz = 0;
207 int regWrites = 0; 996 int regWrites = 0;
208 997
998 /*
999 * Software does not need to program bank data
1000 * for single chip devices, that is AR9280 or anything
1001 * after that.
1002 */
209 if (AR_SREV_9280_10_OR_LATER(ah)) 1003 if (AR_SREV_9280_10_OR_LATER(ah))
210 return true; 1004 return true;
211 1005
1006 /* Setup rf parameters */
212 eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV); 1007 eepMinorRev = ah->eep_ops->get_eeprom(ah, EEP_MINOR_REV);
213 1008
1009 /* Setup Bank 0 Write */
214 RF_BANK_SETUP(ah->analogBank0Data, &ah->iniBank0, 1); 1010 RF_BANK_SETUP(ah->analogBank0Data, &ah->iniBank0, 1);
215 1011
1012 /* Setup Bank 1 Write */
216 RF_BANK_SETUP(ah->analogBank1Data, &ah->iniBank1, 1); 1013 RF_BANK_SETUP(ah->analogBank1Data, &ah->iniBank1, 1);
217 1014
1015 /* Setup Bank 2 Write */
218 RF_BANK_SETUP(ah->analogBank2Data, &ah->iniBank2, 1); 1016 RF_BANK_SETUP(ah->analogBank2Data, &ah->iniBank2, 1);
219 1017
1018 /* Setup Bank 6 Write */
220 RF_BANK_SETUP(ah->analogBank3Data, &ah->iniBank3, 1019 RF_BANK_SETUP(ah->analogBank3Data, &ah->iniBank3,
221 modesIndex); 1020 modesIndex);
222 { 1021 {
@@ -227,6 +1026,7 @@ ath9k_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan,
227 } 1026 }
228 } 1027 }
229 1028
1029 /* Only the 5 or 2 GHz OB/DB need to be set for a mode */
230 if (eepMinorRev >= 2) { 1030 if (eepMinorRev >= 2) {
231 if (IS_CHAN_2GHZ(chan)) { 1031 if (IS_CHAN_2GHZ(chan)) {
232 ob2GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_2); 1032 ob2GHz = ah->eep_ops->get_eeprom(ah, EEP_OB_2);
@@ -245,8 +1045,10 @@ ath9k_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan,
245 } 1045 }
246 } 1046 }
247 1047
1048 /* Setup Bank 7 Setup */
248 RF_BANK_SETUP(ah->analogBank7Data, &ah->iniBank7, 1); 1049 RF_BANK_SETUP(ah->analogBank7Data, &ah->iniBank7, 1);
249 1050
1051 /* Write Analog registers */
250 REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data, 1052 REG_WRITE_RF_ARRAY(&ah->iniBank0, ah->analogBank0Data,
251 regWrites); 1053 regWrites);
252 REG_WRITE_RF_ARRAY(&ah->iniBank1, ah->analogBank1Data, 1054 REG_WRITE_RF_ARRAY(&ah->iniBank1, ah->analogBank1Data,
@@ -262,137 +1064,3 @@ ath9k_hw_set_rf_regs(struct ath_hw *ah, struct ath9k_channel *chan,
262 1064
263 return true; 1065 return true;
264} 1066}
265
266void
267ath9k_hw_rf_free(struct ath_hw *ah)
268{
269#define ATH_FREE_BANK(bank) do { \
270 kfree(bank); \
271 bank = NULL; \
272 } while (0);
273
274 ATH_FREE_BANK(ah->analogBank0Data);
275 ATH_FREE_BANK(ah->analogBank1Data);
276 ATH_FREE_BANK(ah->analogBank2Data);
277 ATH_FREE_BANK(ah->analogBank3Data);
278 ATH_FREE_BANK(ah->analogBank6Data);
279 ATH_FREE_BANK(ah->analogBank6TPCData);
280 ATH_FREE_BANK(ah->analogBank7Data);
281 ATH_FREE_BANK(ah->addac5416_21);
282 ATH_FREE_BANK(ah->bank6Temp);
283#undef ATH_FREE_BANK
284}
285
286bool ath9k_hw_init_rf(struct ath_hw *ah, int *status)
287{
288 if (!AR_SREV_9280_10_OR_LATER(ah)) {
289 ah->analogBank0Data =
290 kzalloc((sizeof(u32) *
291 ah->iniBank0.ia_rows), GFP_KERNEL);
292 ah->analogBank1Data =
293 kzalloc((sizeof(u32) *
294 ah->iniBank1.ia_rows), GFP_KERNEL);
295 ah->analogBank2Data =
296 kzalloc((sizeof(u32) *
297 ah->iniBank2.ia_rows), GFP_KERNEL);
298 ah->analogBank3Data =
299 kzalloc((sizeof(u32) *
300 ah->iniBank3.ia_rows), GFP_KERNEL);
301 ah->analogBank6Data =
302 kzalloc((sizeof(u32) *
303 ah->iniBank6.ia_rows), GFP_KERNEL);
304 ah->analogBank6TPCData =
305 kzalloc((sizeof(u32) *
306 ah->iniBank6TPC.ia_rows), GFP_KERNEL);
307 ah->analogBank7Data =
308 kzalloc((sizeof(u32) *
309 ah->iniBank7.ia_rows), GFP_KERNEL);
310
311 if (ah->analogBank0Data == NULL
312 || ah->analogBank1Data == NULL
313 || ah->analogBank2Data == NULL
314 || ah->analogBank3Data == NULL
315 || ah->analogBank6Data == NULL
316 || ah->analogBank6TPCData == NULL
317 || ah->analogBank7Data == NULL) {
318 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
319 "Cannot allocate RF banks\n");
320 *status = -ENOMEM;
321 return false;
322 }
323
324 ah->addac5416_21 =
325 kzalloc((sizeof(u32) *
326 ah->iniAddac.ia_rows *
327 ah->iniAddac.ia_columns), GFP_KERNEL);
328 if (ah->addac5416_21 == NULL) {
329 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
330 "Cannot allocate addac5416_21\n");
331 *status = -ENOMEM;
332 return false;
333 }
334
335 ah->bank6Temp =
336 kzalloc((sizeof(u32) *
337 ah->iniBank6.ia_rows), GFP_KERNEL);
338 if (ah->bank6Temp == NULL) {
339 DPRINTF(ah->ah_sc, ATH_DBG_FATAL,
340 "Cannot allocate bank6Temp\n");
341 *status = -ENOMEM;
342 return false;
343 }
344 }
345
346 return true;
347}
348
349void
350ath9k_hw_decrease_chain_power(struct ath_hw *ah, struct ath9k_channel *chan)
351{
352 int i, regWrites = 0;
353 u32 bank6SelMask;
354 u32 *bank6Temp = ah->bank6Temp;
355
356 switch (ah->config.diversity_control) {
357 case ATH9K_ANT_FIXED_A:
358 bank6SelMask =
359 (ah->config.antenna_switch_swap & ANTSWAP_AB) ?
360 REDUCE_CHAIN_0 : REDUCE_CHAIN_1;
361 break;
362 case ATH9K_ANT_FIXED_B:
363 bank6SelMask =
364 (ah->config.antenna_switch_swap & ANTSWAP_AB) ?
365 REDUCE_CHAIN_1 : REDUCE_CHAIN_0;
366 break;
367 case ATH9K_ANT_VARIABLE:
368 return;
369 break;
370 default:
371 return;
372 break;
373 }
374
375 for (i = 0; i < ah->iniBank6.ia_rows; i++)
376 bank6Temp[i] = ah->analogBank6Data[i];
377
378 REG_WRITE(ah, AR_PHY_BASE + 0xD8, bank6SelMask);
379
380 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 189, 0);
381 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 190, 0);
382 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 191, 0);
383 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 192, 0);
384 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 193, 0);
385 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 222, 0);
386 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 245, 0);
387 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 246, 0);
388 ath9k_phy_modify_rx_buffer(bank6Temp, 1, 1, 247, 0);
389
390 REG_WRITE_RF_ARRAY(&ah->iniBank6, bank6Temp, regWrites);
391
392 REG_WRITE(ah, AR_PHY_BASE + 0xD8, 0x00000053);
393#ifdef ALTER_SWITCH
394 REG_WRITE(ah, PHY_SWITCH_CHAIN_0,
395 (REG_READ(ah, PHY_SWITCH_CHAIN_0) & ~0x38)
396 | ((REG_READ(ah, PHY_SWITCH_CHAIN_0) >> 3) & 0x38));
397#endif
398}
diff --git a/drivers/net/wireless/ath/ath9k/phy.h b/drivers/net/wireless/ath/ath9k/phy.h
index dfda6f444648..dc145a135dc7 100644
--- a/drivers/net/wireless/ath/ath9k/phy.h
+++ b/drivers/net/wireless/ath/ath9k/phy.h
@@ -17,20 +17,26 @@
17#ifndef PHY_H 17#ifndef PHY_H
18#define PHY_H 18#define PHY_H
19 19
20void ath9k_hw_ar9280_set_channel(struct ath_hw *ah, 20/* Common between single chip and non single-chip solutions */
21 struct ath9k_channel 21void ath9k_hw_write_regs(struct ath_hw *ah, u32 freqIndex, int regWrites);
22 *chan); 22
23bool ath9k_hw_set_channel(struct ath_hw *ah, 23/* Single chip radio settings */
24 struct ath9k_channel *chan); 24int ath9k_hw_ar9280_set_channel(struct ath_hw *ah, struct ath9k_channel *chan);
25void ath9k_hw_write_regs(struct ath_hw *ah, u32 modesIndex, 25void ath9k_hw_9280_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan);
26 u32 freqIndex, int regWrites); 26
27/* Routines below are for non single-chip solutions */
28int ath9k_hw_set_channel(struct ath_hw *ah, struct ath9k_channel *chan);
29void ath9k_hw_spur_mitigate(struct ath_hw *ah, struct ath9k_channel *chan);
30
31int ath9k_hw_rf_alloc_ext_banks(struct ath_hw *ah);
32void ath9k_hw_rf_free_ext_banks(struct ath_hw *ah);
33
27bool ath9k_hw_set_rf_regs(struct ath_hw *ah, 34bool ath9k_hw_set_rf_regs(struct ath_hw *ah,
28 struct ath9k_channel *chan, 35 struct ath9k_channel *chan,
29 u16 modesIndex); 36 u16 modesIndex);
37
30void ath9k_hw_decrease_chain_power(struct ath_hw *ah, 38void ath9k_hw_decrease_chain_power(struct ath_hw *ah,
31 struct ath9k_channel *chan); 39 struct ath9k_channel *chan);
32bool ath9k_hw_init_rf(struct ath_hw *ah,
33 int *status);
34 40
35#define AR_PHY_BASE 0x9800 41#define AR_PHY_BASE 0x9800
36#define AR_PHY(_n) (AR_PHY_BASE + ((_n)<<2)) 42#define AR_PHY(_n) (AR_PHY_BASE + ((_n)<<2))
@@ -45,6 +51,7 @@ bool ath9k_hw_init_rf(struct ath_hw *ah,
45#define AR_PHY_FC_DYN2040_EN 0x00000004 51#define AR_PHY_FC_DYN2040_EN 0x00000004
46#define AR_PHY_FC_DYN2040_PRI_ONLY 0x00000008 52#define AR_PHY_FC_DYN2040_PRI_ONLY 0x00000008
47#define AR_PHY_FC_DYN2040_PRI_CH 0x00000010 53#define AR_PHY_FC_DYN2040_PRI_CH 0x00000010
54/* For 25 MHz channel spacing -- not used but supported by hw */
48#define AR_PHY_FC_DYN2040_EXT_CH 0x00000020 55#define AR_PHY_FC_DYN2040_EXT_CH 0x00000020
49#define AR_PHY_FC_HT_EN 0x00000040 56#define AR_PHY_FC_HT_EN 0x00000040
50#define AR_PHY_FC_SHORT_GI_40 0x00000080 57#define AR_PHY_FC_SHORT_GI_40 0x00000080
@@ -185,8 +192,20 @@ bool ath9k_hw_init_rf(struct ath_hw *ah,
185#define AR_PHY_PLL_CTL_44_2133 0xeb 192#define AR_PHY_PLL_CTL_44_2133 0xeb
186#define AR_PHY_PLL_CTL_40_2133 0xea 193#define AR_PHY_PLL_CTL_40_2133 0xea
187 194
188#define AR_PHY_SPECTRAL_SCAN 0x9912 195#define AR_PHY_SPECTRAL_SCAN 0x9910 /* AR9280 spectral scan configuration register */
189#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x1 196#define AR_PHY_SPECTRAL_SCAN_ENABLE 0x1
197#define AR_PHY_SPECTRAL_SCAN_ENA 0x00000001 /* Enable spectral scan, reg 68, bit 0 */
198#define AR_PHY_SPECTRAL_SCAN_ENA_S 0 /* Enable spectral scan, reg 68, bit 0 */
199#define AR_PHY_SPECTRAL_SCAN_ACTIVE 0x00000002 /* Activate spectral scan reg 68, bit 1*/
200#define AR_PHY_SPECTRAL_SCAN_ACTIVE_S 1 /* Activate spectral scan reg 68, bit 1*/
201#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD 0x000000F0 /* Interval for FFT reports, reg 68, bits 4-7*/
202#define AR_PHY_SPECTRAL_SCAN_FFT_PERIOD_S 4
203#define AR_PHY_SPECTRAL_SCAN_PERIOD 0x0000FF00 /* Interval for FFT reports, reg 68, bits 8-15*/
204#define AR_PHY_SPECTRAL_SCAN_PERIOD_S 8
205#define AR_PHY_SPECTRAL_SCAN_COUNT 0x00FF0000 /* Number of reports, reg 68, bits 16-23*/
206#define AR_PHY_SPECTRAL_SCAN_COUNT_S 16
207#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT 0x01000000 /* Short repeat, reg 68, bit 24*/
208#define AR_PHY_SPECTRAL_SCAN_SHORT_REPEAT_S 24 /* Short repeat, reg 68, bit 24*/
190 209
191#define AR_PHY_RX_DELAY 0x9914 210#define AR_PHY_RX_DELAY 0x9914
192#define AR_PHY_SEARCH_START_DELAY 0x9918 211#define AR_PHY_SEARCH_START_DELAY 0x9918
diff --git a/drivers/net/wireless/ath/ath9k/rc.c b/drivers/net/wireless/ath/ath9k/rc.c
index 1895d63aad0a..bb72b46567f9 100644
--- a/drivers/net/wireless/ath/ath9k/rc.c
+++ b/drivers/net/wireless/ath/ath9k/rc.c
@@ -425,7 +425,7 @@ static void ath_rc_init_valid_txmask(struct ath_rate_priv *ath_rc_priv)
425static inline void ath_rc_set_valid_txmask(struct ath_rate_priv *ath_rc_priv, 425static inline void ath_rc_set_valid_txmask(struct ath_rate_priv *ath_rc_priv,
426 u8 index, int valid_tx_rate) 426 u8 index, int valid_tx_rate)
427{ 427{
428 ASSERT(index <= ath_rc_priv->rate_table_size); 428 BUG_ON(index > ath_rc_priv->rate_table_size);
429 ath_rc_priv->valid_rate_index[index] = valid_tx_rate ? 1 : 0; 429 ath_rc_priv->valid_rate_index[index] = valid_tx_rate ? 1 : 0;
430} 430}
431 431
@@ -1160,6 +1160,7 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
1160 bool is_cw_40) 1160 bool is_cw_40)
1161{ 1161{
1162 int mode = 0; 1162 int mode = 0;
1163 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1163 1164
1164 switch(band) { 1165 switch(band) {
1165 case IEEE80211_BAND_2GHZ: 1166 case IEEE80211_BAND_2GHZ:
@@ -1177,13 +1178,14 @@ struct ath_rate_table *ath_choose_rate_table(struct ath_softc *sc,
1177 mode = ATH9K_MODE_11NA_HT40PLUS; 1178 mode = ATH9K_MODE_11NA_HT40PLUS;
1178 break; 1179 break;
1179 default: 1180 default:
1180 DPRINTF(sc, ATH_DBG_CONFIG, "Invalid band\n"); 1181 ath_print(common, ATH_DBG_CONFIG, "Invalid band\n");
1181 return NULL; 1182 return NULL;
1182 } 1183 }
1183 1184
1184 BUG_ON(mode >= ATH9K_MODE_MAX); 1185 BUG_ON(mode >= ATH9K_MODE_MAX);
1185 1186
1186 DPRINTF(sc, ATH_DBG_CONFIG, "Choosing rate table for mode: %d\n", mode); 1187 ath_print(common, ATH_DBG_CONFIG,
1188 "Choosing rate table for mode: %d\n", mode);
1187 return sc->hw_rate_table[mode]; 1189 return sc->hw_rate_table[mode];
1188} 1190}
1189 1191
@@ -1194,11 +1196,13 @@ static void ath_rc_init(struct ath_softc *sc,
1194 const struct ath_rate_table *rate_table) 1196 const struct ath_rate_table *rate_table)
1195{ 1197{
1196 struct ath_rateset *rateset = &ath_rc_priv->neg_rates; 1198 struct ath_rateset *rateset = &ath_rc_priv->neg_rates;
1199 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1197 u8 *ht_mcs = (u8 *)&ath_rc_priv->neg_ht_rates; 1200 u8 *ht_mcs = (u8 *)&ath_rc_priv->neg_ht_rates;
1198 u8 i, j, k, hi = 0, hthi = 0; 1201 u8 i, j, k, hi = 0, hthi = 0;
1199 1202
1200 if (!rate_table) { 1203 if (!rate_table) {
1201 DPRINTF(sc, ATH_DBG_FATAL, "Rate table not initialized\n"); 1204 ath_print(common, ATH_DBG_FATAL,
1205 "Rate table not initialized\n");
1202 return; 1206 return;
1203 } 1207 }
1204 1208
@@ -1239,7 +1243,7 @@ static void ath_rc_init(struct ath_softc *sc,
1239 1243
1240 ath_rc_priv->rate_table_size = hi + 1; 1244 ath_rc_priv->rate_table_size = hi + 1;
1241 ath_rc_priv->rate_max_phy = 0; 1245 ath_rc_priv->rate_max_phy = 0;
1242 ASSERT(ath_rc_priv->rate_table_size <= RATE_TABLE_SIZE); 1246 BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
1243 1247
1244 for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) { 1248 for (i = 0, k = 0; i < WLAN_RC_PHY_MAX; i++) {
1245 for (j = 0; j < ath_rc_priv->valid_phy_ratecnt[i]; j++) { 1249 for (j = 0; j < ath_rc_priv->valid_phy_ratecnt[i]; j++) {
@@ -1253,16 +1257,17 @@ static void ath_rc_init(struct ath_softc *sc,
1253 1257
1254 ath_rc_priv->rate_max_phy = ath_rc_priv->valid_phy_rateidx[i][j-1]; 1258 ath_rc_priv->rate_max_phy = ath_rc_priv->valid_phy_rateidx[i][j-1];
1255 } 1259 }
1256 ASSERT(ath_rc_priv->rate_table_size <= RATE_TABLE_SIZE); 1260 BUG_ON(ath_rc_priv->rate_table_size > RATE_TABLE_SIZE);
1257 ASSERT(k <= RATE_TABLE_SIZE); 1261 BUG_ON(k > RATE_TABLE_SIZE);
1258 1262
1259 ath_rc_priv->max_valid_rate = k; 1263 ath_rc_priv->max_valid_rate = k;
1260 ath_rc_sort_validrates(rate_table, ath_rc_priv); 1264 ath_rc_sort_validrates(rate_table, ath_rc_priv);
1261 ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4]; 1265 ath_rc_priv->rate_max_phy = ath_rc_priv->valid_rate_index[k-4];
1262 sc->cur_rate_table = rate_table; 1266 sc->cur_rate_table = rate_table;
1263 1267
1264 DPRINTF(sc, ATH_DBG_CONFIG, "RC Initialized with capabilities: 0x%x\n", 1268 ath_print(common, ATH_DBG_CONFIG,
1265 ath_rc_priv->ht_cap); 1269 "RC Initialized with capabilities: 0x%x\n",
1270 ath_rc_priv->ht_cap);
1266} 1271}
1267 1272
1268static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta, 1273static u8 ath_rc_build_ht_caps(struct ath_softc *sc, struct ieee80211_sta *sta,
@@ -1438,9 +1443,9 @@ static void ath_rate_update(void *priv, struct ieee80211_supported_band *sband,
1438 oper_cw40, oper_sgi40); 1443 oper_cw40, oper_sgi40);
1439 ath_rc_init(sc, priv_sta, sband, sta, rate_table); 1444 ath_rc_init(sc, priv_sta, sband, sta, rate_table);
1440 1445
1441 DPRINTF(sc, ATH_DBG_CONFIG, 1446 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_CONFIG,
1442 "Operating HT Bandwidth changed to: %d\n", 1447 "Operating HT Bandwidth changed to: %d\n",
1443 sc->hw->conf.channel_type); 1448 sc->hw->conf.channel_type);
1444 } 1449 }
1445 } 1450 }
1446} 1451}
@@ -1463,8 +1468,8 @@ static void *ath_rate_alloc_sta(void *priv, struct ieee80211_sta *sta, gfp_t gfp
1463 1468
1464 rate_priv = kzalloc(sizeof(struct ath_rate_priv), gfp); 1469 rate_priv = kzalloc(sizeof(struct ath_rate_priv), gfp);
1465 if (!rate_priv) { 1470 if (!rate_priv) {
1466 DPRINTF(sc, ATH_DBG_FATAL, 1471 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1467 "Unable to allocate private rc structure\n"); 1472 "Unable to allocate private rc structure\n");
1468 return NULL; 1473 return NULL;
1469 } 1474 }
1470 1475
diff --git a/drivers/net/wireless/ath/ath9k/recv.c b/drivers/net/wireless/ath/ath9k/recv.c
index ec0abf823995..355dd1834e1d 100644
--- a/drivers/net/wireless/ath/ath9k/recv.c
+++ b/drivers/net/wireless/ath/ath9k/recv.c
@@ -59,7 +59,7 @@ static void ath_rx_buf_link(struct ath_softc *sc, struct ath_buf *bf)
59 59
60 /* virtual addr of the beginning of the buffer. */ 60 /* virtual addr of the beginning of the buffer. */
61 skb = bf->bf_mpdu; 61 skb = bf->bf_mpdu;
62 ASSERT(skb != NULL); 62 BUG_ON(skb == NULL);
63 ds->ds_vdata = skb->data; 63 ds->ds_vdata = skb->data;
64 64
65 /* setup rx descriptors. The rx.bufsize here tells the harware 65 /* setup rx descriptors. The rx.bufsize here tells the harware
@@ -202,7 +202,8 @@ static int ath_rx_prepare(struct sk_buff *skb, struct ath_desc *ds,
202 } 202 }
203 203
204 rcu_read_lock(); 204 rcu_read_lock();
205 sta = ieee80211_find_sta(sc->hw, hdr->addr2); 205 /* XXX: use ieee80211_find_sta! */
206 sta = ieee80211_find_sta_by_hw(sc->hw, hdr->addr2);
206 if (sta) { 207 if (sta) {
207 an = (struct ath_node *) sta->drv_priv; 208 an = (struct ath_node *) sta->drv_priv;
208 if (ds->ds_rxstat.rs_rssi != ATH9K_RSSI_BAD && 209 if (ds->ds_rxstat.rs_rssi != ATH9K_RSSI_BAD &&
@@ -272,6 +273,8 @@ rx_next:
272static void ath_opmode_init(struct ath_softc *sc) 273static void ath_opmode_init(struct ath_softc *sc)
273{ 274{
274 struct ath_hw *ah = sc->sc_ah; 275 struct ath_hw *ah = sc->sc_ah;
276 struct ath_common *common = ath9k_hw_common(ah);
277
275 u32 rfilt, mfilt[2]; 278 u32 rfilt, mfilt[2];
276 279
277 /* configure rx filter */ 280 /* configure rx filter */
@@ -280,13 +283,13 @@ static void ath_opmode_init(struct ath_softc *sc)
280 283
281 /* configure bssid mask */ 284 /* configure bssid mask */
282 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK) 285 if (ah->caps.hw_caps & ATH9K_HW_CAP_BSSIDMASK)
283 ath9k_hw_setbssidmask(sc); 286 ath_hw_setbssidmask(common);
284 287
285 /* configure operational mode */ 288 /* configure operational mode */
286 ath9k_hw_setopmode(ah); 289 ath9k_hw_setopmode(ah);
287 290
288 /* Handle any link-level address change. */ 291 /* Handle any link-level address change. */
289 ath9k_hw_setmac(ah, sc->sc_ah->macaddr); 292 ath9k_hw_setmac(ah, common->macaddr);
290 293
291 /* calculate and install multicast filter */ 294 /* calculate and install multicast filter */
292 mfilt[0] = mfilt[1] = ~0; 295 mfilt[0] = mfilt[1] = ~0;
@@ -295,6 +298,7 @@ static void ath_opmode_init(struct ath_softc *sc)
295 298
296int ath_rx_init(struct ath_softc *sc, int nbufs) 299int ath_rx_init(struct ath_softc *sc, int nbufs)
297{ 300{
301 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
298 struct sk_buff *skb; 302 struct sk_buff *skb;
299 struct ath_buf *bf; 303 struct ath_buf *bf;
300 int error = 0; 304 int error = 0;
@@ -304,23 +308,23 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
304 spin_lock_init(&sc->rx.rxbuflock); 308 spin_lock_init(&sc->rx.rxbuflock);
305 309
306 sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN, 310 sc->rx.bufsize = roundup(IEEE80211_MAX_MPDU_LEN,
307 min(sc->common.cachelsz, (u16)64)); 311 min(common->cachelsz, (u16)64));
308 312
309 DPRINTF(sc, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n", 313 ath_print(common, ATH_DBG_CONFIG, "cachelsz %u rxbufsize %u\n",
310 sc->common.cachelsz, sc->rx.bufsize); 314 common->cachelsz, sc->rx.bufsize);
311 315
312 /* Initialize rx descriptors */ 316 /* Initialize rx descriptors */
313 317
314 error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf, 318 error = ath_descdma_setup(sc, &sc->rx.rxdma, &sc->rx.rxbuf,
315 "rx", nbufs, 1); 319 "rx", nbufs, 1);
316 if (error != 0) { 320 if (error != 0) {
317 DPRINTF(sc, ATH_DBG_FATAL, 321 ath_print(common, ATH_DBG_FATAL,
318 "failed to allocate rx descriptors: %d\n", error); 322 "failed to allocate rx descriptors: %d\n", error);
319 goto err; 323 goto err;
320 } 324 }
321 325
322 list_for_each_entry(bf, &sc->rx.rxbuf, list) { 326 list_for_each_entry(bf, &sc->rx.rxbuf, list) {
323 skb = ath_rxbuf_alloc(&sc->common, sc->rx.bufsize, GFP_KERNEL); 327 skb = ath_rxbuf_alloc(common, sc->rx.bufsize, GFP_KERNEL);
324 if (skb == NULL) { 328 if (skb == NULL) {
325 error = -ENOMEM; 329 error = -ENOMEM;
326 goto err; 330 goto err;
@@ -334,8 +338,8 @@ int ath_rx_init(struct ath_softc *sc, int nbufs)
334 bf->bf_buf_addr))) { 338 bf->bf_buf_addr))) {
335 dev_kfree_skb_any(skb); 339 dev_kfree_skb_any(skb);
336 bf->bf_mpdu = NULL; 340 bf->bf_mpdu = NULL;
337 DPRINTF(sc, ATH_DBG_FATAL, 341 ath_print(common, ATH_DBG_FATAL,
338 "dma_mapping_error() on RX init\n"); 342 "dma_mapping_error() on RX init\n");
339 error = -ENOMEM; 343 error = -ENOMEM;
340 goto err; 344 goto err;
341 } 345 }
@@ -420,7 +424,10 @@ u32 ath_calcrxfilter(struct ath_softc *sc)
420 else 424 else
421 rfilt |= ATH9K_RX_FILTER_BEACON; 425 rfilt |= ATH9K_RX_FILTER_BEACON;
422 426
423 if (sc->rx.rxfilter & FIF_PSPOLL) 427 if ((AR_SREV_9280_10_OR_LATER(sc->sc_ah) ||
428 AR_SREV_9285_10_OR_LATER(sc->sc_ah)) &&
429 (sc->sc_ah->opmode == NL80211_IFTYPE_AP) &&
430 (sc->rx.rxfilter & FIF_PSPOLL))
424 rfilt |= ATH9K_RX_FILTER_PSPOLL; 431 rfilt |= ATH9K_RX_FILTER_PSPOLL;
425 432
426 if (conf_is_ht(&sc->hw->conf)) 433 if (conf_is_ht(&sc->hw->conf))
@@ -527,20 +534,22 @@ static bool ath_beacon_dtim_pending_cab(struct sk_buff *skb)
527static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb) 534static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
528{ 535{
529 struct ieee80211_mgmt *mgmt; 536 struct ieee80211_mgmt *mgmt;
537 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
530 538
531 if (skb->len < 24 + 8 + 2 + 2) 539 if (skb->len < 24 + 8 + 2 + 2)
532 return; 540 return;
533 541
534 mgmt = (struct ieee80211_mgmt *)skb->data; 542 mgmt = (struct ieee80211_mgmt *)skb->data;
535 if (memcmp(sc->curbssid, mgmt->bssid, ETH_ALEN) != 0) 543 if (memcmp(common->curbssid, mgmt->bssid, ETH_ALEN) != 0)
536 return; /* not from our current AP */ 544 return; /* not from our current AP */
537 545
538 sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON; 546 sc->sc_flags &= ~SC_OP_WAIT_FOR_BEACON;
539 547
540 if (sc->sc_flags & SC_OP_BEACON_SYNC) { 548 if (sc->sc_flags & SC_OP_BEACON_SYNC) {
541 sc->sc_flags &= ~SC_OP_BEACON_SYNC; 549 sc->sc_flags &= ~SC_OP_BEACON_SYNC;
542 DPRINTF(sc, ATH_DBG_PS, "Reconfigure Beacon timers based on " 550 ath_print(common, ATH_DBG_PS,
543 "timestamp from the AP\n"); 551 "Reconfigure Beacon timers based on "
552 "timestamp from the AP\n");
544 ath_beacon_config(sc, NULL); 553 ath_beacon_config(sc, NULL);
545 } 554 }
546 555
@@ -552,8 +561,8 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
552 * a backup trigger for returning into NETWORK SLEEP state, 561 * a backup trigger for returning into NETWORK SLEEP state,
553 * so we are waiting for it as well. 562 * so we are waiting for it as well.
554 */ 563 */
555 DPRINTF(sc, ATH_DBG_PS, "Received DTIM beacon indicating " 564 ath_print(common, ATH_DBG_PS, "Received DTIM beacon indicating "
556 "buffered broadcast/multicast frame(s)\n"); 565 "buffered broadcast/multicast frame(s)\n");
557 sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON; 566 sc->sc_flags |= SC_OP_WAIT_FOR_CAB | SC_OP_WAIT_FOR_BEACON;
558 return; 567 return;
559 } 568 }
@@ -565,13 +574,15 @@ static void ath_rx_ps_beacon(struct ath_softc *sc, struct sk_buff *skb)
565 * been delivered. 574 * been delivered.
566 */ 575 */
567 sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB; 576 sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB;
568 DPRINTF(sc, ATH_DBG_PS, "PS wait for CAB frames timed out\n"); 577 ath_print(common, ATH_DBG_PS,
578 "PS wait for CAB frames timed out\n");
569 } 579 }
570} 580}
571 581
572static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb) 582static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
573{ 583{
574 struct ieee80211_hdr *hdr; 584 struct ieee80211_hdr *hdr;
585 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
575 586
576 hdr = (struct ieee80211_hdr *)skb->data; 587 hdr = (struct ieee80211_hdr *)skb->data;
577 588
@@ -589,14 +600,15 @@ static void ath_rx_ps(struct ath_softc *sc, struct sk_buff *skb)
589 * point. 600 * point.
590 */ 601 */
591 sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB; 602 sc->sc_flags &= ~SC_OP_WAIT_FOR_CAB;
592 DPRINTF(sc, ATH_DBG_PS, "All PS CAB frames received, back to " 603 ath_print(common, ATH_DBG_PS,
593 "sleep\n"); 604 "All PS CAB frames received, back to sleep\n");
594 } else if ((sc->sc_flags & SC_OP_WAIT_FOR_PSPOLL_DATA) && 605 } else if ((sc->sc_flags & SC_OP_WAIT_FOR_PSPOLL_DATA) &&
595 !is_multicast_ether_addr(hdr->addr1) && 606 !is_multicast_ether_addr(hdr->addr1) &&
596 !ieee80211_has_morefrags(hdr->frame_control)) { 607 !ieee80211_has_morefrags(hdr->frame_control)) {
597 sc->sc_flags &= ~SC_OP_WAIT_FOR_PSPOLL_DATA; 608 sc->sc_flags &= ~SC_OP_WAIT_FOR_PSPOLL_DATA;
598 DPRINTF(sc, ATH_DBG_PS, "Going back to sleep after having " 609 ath_print(common, ATH_DBG_PS,
599 "received PS-Poll data (0x%x)\n", 610 "Going back to sleep after having received "
611 "PS-Poll data (0x%x)\n",
600 sc->sc_flags & (SC_OP_WAIT_FOR_BEACON | 612 sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
601 SC_OP_WAIT_FOR_CAB | 613 SC_OP_WAIT_FOR_CAB |
602 SC_OP_WAIT_FOR_PSPOLL_DATA | 614 SC_OP_WAIT_FOR_PSPOLL_DATA |
@@ -651,6 +663,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
651 struct sk_buff *skb = NULL, *requeue_skb; 663 struct sk_buff *skb = NULL, *requeue_skb;
652 struct ieee80211_rx_status rx_status; 664 struct ieee80211_rx_status rx_status;
653 struct ath_hw *ah = sc->sc_ah; 665 struct ath_hw *ah = sc->sc_ah;
666 struct ath_common *common = ath9k_hw_common(ah);
654 struct ieee80211_hdr *hdr; 667 struct ieee80211_hdr *hdr;
655 int hdrlen, padsize, retval; 668 int hdrlen, padsize, retval;
656 bool decrypt_error = false; 669 bool decrypt_error = false;
@@ -749,7 +762,7 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
749 762
750 /* Ensure we always have an skb to requeue once we are done 763 /* Ensure we always have an skb to requeue once we are done
751 * processing the current buffer's skb */ 764 * processing the current buffer's skb */
752 requeue_skb = ath_rxbuf_alloc(&sc->common, sc->rx.bufsize, GFP_ATOMIC); 765 requeue_skb = ath_rxbuf_alloc(common, sc->rx.bufsize, GFP_ATOMIC);
753 766
754 /* If there is no memory we ignore the current RX'd frame, 767 /* If there is no memory we ignore the current RX'd frame,
755 * tell hardware it can give us a new frame using the old 768 * tell hardware it can give us a new frame using the old
@@ -811,8 +824,8 @@ int ath_rx_tasklet(struct ath_softc *sc, int flush)
811 bf->bf_buf_addr))) { 824 bf->bf_buf_addr))) {
812 dev_kfree_skb_any(requeue_skb); 825 dev_kfree_skb_any(requeue_skb);
813 bf->bf_mpdu = NULL; 826 bf->bf_mpdu = NULL;
814 DPRINTF(sc, ATH_DBG_FATAL, 827 ath_print(common, ATH_DBG_FATAL,
815 "dma_mapping_error() on RX\n"); 828 "dma_mapping_error() on RX\n");
816 ath_rx_send_to_mac80211(sc, skb, &rx_status); 829 ath_rx_send_to_mac80211(sc, skb, &rx_status);
817 break; 830 break;
818 } 831 }
diff --git a/drivers/net/wireless/ath/ath9k/reg.h b/drivers/net/wireless/ath/ath9k/reg.h
index d83b77f821e9..061e12ce0b24 100644
--- a/drivers/net/wireless/ath/ath9k/reg.h
+++ b/drivers/net/wireless/ath/ath9k/reg.h
@@ -17,6 +17,8 @@
17#ifndef REG_H 17#ifndef REG_H
18#define REG_H 18#define REG_H
19 19
20#include "../reg.h"
21
20#define AR_CR 0x0008 22#define AR_CR 0x0008
21#define AR_CR_RXE 0x00000004 23#define AR_CR_RXE 0x00000004
22#define AR_CR_RXD 0x00000020 24#define AR_CR_RXD 0x00000020
@@ -1421,9 +1423,6 @@ enum {
1421#define AR_SLEEP2_BEACON_TIMEOUT 0xFFE00000 1423#define AR_SLEEP2_BEACON_TIMEOUT 0xFFE00000
1422#define AR_SLEEP2_BEACON_TIMEOUT_S 21 1424#define AR_SLEEP2_BEACON_TIMEOUT_S 21
1423 1425
1424#define AR_BSSMSKL 0x80e0
1425#define AR_BSSMSKU 0x80e4
1426
1427#define AR_TPC 0x80e8 1426#define AR_TPC 0x80e8
1428#define AR_TPC_ACK 0x0000003f 1427#define AR_TPC_ACK 0x0000003f
1429#define AR_TPC_ACK_S 0x00 1428#define AR_TPC_ACK_S 0x00
@@ -1705,4 +1704,7 @@ enum {
1705#define AR_KEYTABLE_MAC0(_n) (AR_KEYTABLE(_n) + 24) 1704#define AR_KEYTABLE_MAC0(_n) (AR_KEYTABLE(_n) + 24)
1706#define AR_KEYTABLE_MAC1(_n) (AR_KEYTABLE(_n) + 28) 1705#define AR_KEYTABLE_MAC1(_n) (AR_KEYTABLE(_n) + 28)
1707 1706
1707#define AR9271_CORE_CLOCK 117 /* clock to 117Mhz */
1708#define AR9271_TARGET_BAUD_RATE 19200 /* 115200 */
1709
1708#endif 1710#endif
diff --git a/drivers/net/wireless/ath/ath9k/virtual.c b/drivers/net/wireless/ath/ath9k/virtual.c
index 19b88f8177fd..bc7d173b6fae 100644
--- a/drivers/net/wireless/ath/ath9k/virtual.c
+++ b/drivers/net/wireless/ath/ath9k/virtual.c
@@ -40,6 +40,7 @@ void ath9k_set_bssid_mask(struct ieee80211_hw *hw)
40{ 40{
41 struct ath_wiphy *aphy = hw->priv; 41 struct ath_wiphy *aphy = hw->priv;
42 struct ath_softc *sc = aphy->sc; 42 struct ath_softc *sc = aphy->sc;
43 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
43 struct ath9k_vif_iter_data iter_data; 44 struct ath9k_vif_iter_data iter_data;
44 int i, j; 45 int i, j;
45 u8 mask[ETH_ALEN]; 46 u8 mask[ETH_ALEN];
@@ -51,7 +52,7 @@ void ath9k_set_bssid_mask(struct ieee80211_hw *hw)
51 */ 52 */
52 iter_data.addr = kmalloc(ETH_ALEN, GFP_ATOMIC); 53 iter_data.addr = kmalloc(ETH_ALEN, GFP_ATOMIC);
53 if (iter_data.addr) { 54 if (iter_data.addr) {
54 memcpy(iter_data.addr, sc->sc_ah->macaddr, ETH_ALEN); 55 memcpy(iter_data.addr, common->macaddr, ETH_ALEN);
55 iter_data.count = 1; 56 iter_data.count = 1;
56 } else 57 } else
57 iter_data.count = 0; 58 iter_data.count = 0;
@@ -86,20 +87,21 @@ void ath9k_set_bssid_mask(struct ieee80211_hw *hw)
86 kfree(iter_data.addr); 87 kfree(iter_data.addr);
87 88
88 /* Invert the mask and configure hardware */ 89 /* Invert the mask and configure hardware */
89 sc->bssidmask[0] = ~mask[0]; 90 common->bssidmask[0] = ~mask[0];
90 sc->bssidmask[1] = ~mask[1]; 91 common->bssidmask[1] = ~mask[1];
91 sc->bssidmask[2] = ~mask[2]; 92 common->bssidmask[2] = ~mask[2];
92 sc->bssidmask[3] = ~mask[3]; 93 common->bssidmask[3] = ~mask[3];
93 sc->bssidmask[4] = ~mask[4]; 94 common->bssidmask[4] = ~mask[4];
94 sc->bssidmask[5] = ~mask[5]; 95 common->bssidmask[5] = ~mask[5];
95 96
96 ath9k_hw_setbssidmask(sc); 97 ath_hw_setbssidmask(common);
97} 98}
98 99
99int ath9k_wiphy_add(struct ath_softc *sc) 100int ath9k_wiphy_add(struct ath_softc *sc)
100{ 101{
101 int i, error; 102 int i, error;
102 struct ath_wiphy *aphy; 103 struct ath_wiphy *aphy;
104 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
103 struct ieee80211_hw *hw; 105 struct ieee80211_hw *hw;
104 u8 addr[ETH_ALEN]; 106 u8 addr[ETH_ALEN];
105 107
@@ -138,7 +140,7 @@ int ath9k_wiphy_add(struct ath_softc *sc)
138 sc->sec_wiphy[i] = aphy; 140 sc->sec_wiphy[i] = aphy;
139 spin_unlock_bh(&sc->wiphy_lock); 141 spin_unlock_bh(&sc->wiphy_lock);
140 142
141 memcpy(addr, sc->sc_ah->macaddr, ETH_ALEN); 143 memcpy(addr, common->macaddr, ETH_ALEN);
142 addr[0] |= 0x02; /* Locally managed address */ 144 addr[0] |= 0x02; /* Locally managed address */
143 /* 145 /*
144 * XOR virtual wiphy index into the least significant bits to generate 146 * XOR virtual wiphy index into the least significant bits to generate
diff --git a/drivers/net/wireless/ath/ath9k/xmit.c b/drivers/net/wireless/ath/ath9k/xmit.c
index 42551a48c8ac..8e052f406c35 100644
--- a/drivers/net/wireless/ath/ath9k/xmit.c
+++ b/drivers/net/wireless/ath/ath9k/xmit.c
@@ -107,7 +107,7 @@ static void ath_tx_resume_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
107{ 107{
108 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum]; 108 struct ath_txq *txq = &sc->tx.txq[tid->ac->qnum];
109 109
110 ASSERT(tid->paused > 0); 110 BUG_ON(tid->paused <= 0);
111 spin_lock_bh(&txq->axq_lock); 111 spin_lock_bh(&txq->axq_lock);
112 112
113 tid->paused--; 113 tid->paused--;
@@ -131,7 +131,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
131 struct list_head bf_head; 131 struct list_head bf_head;
132 INIT_LIST_HEAD(&bf_head); 132 INIT_LIST_HEAD(&bf_head);
133 133
134 ASSERT(tid->paused > 0); 134 BUG_ON(tid->paused <= 0);
135 spin_lock_bh(&txq->axq_lock); 135 spin_lock_bh(&txq->axq_lock);
136 136
137 tid->paused--; 137 tid->paused--;
@@ -143,7 +143,7 @@ static void ath_tx_flush_tid(struct ath_softc *sc, struct ath_atx_tid *tid)
143 143
144 while (!list_empty(&tid->buf_q)) { 144 while (!list_empty(&tid->buf_q)) {
145 bf = list_first_entry(&tid->buf_q, struct ath_buf, list); 145 bf = list_first_entry(&tid->buf_q, struct ath_buf, list);
146 ASSERT(!bf_isretried(bf)); 146 BUG_ON(bf_isretried(bf));
147 list_move_tail(&bf->list, &bf_head); 147 list_move_tail(&bf->list, &bf_head);
148 ath_tx_send_ht_normal(sc, txq, tid, &bf_head); 148 ath_tx_send_ht_normal(sc, txq, tid, &bf_head);
149 } 149 }
@@ -178,7 +178,7 @@ static void ath_tx_addto_baw(struct ath_softc *sc, struct ath_atx_tid *tid,
178 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno); 178 index = ATH_BA_INDEX(tid->seq_start, bf->bf_seqno);
179 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1); 179 cindex = (tid->baw_head + index) & (ATH_TID_MAX_BUFS - 1);
180 180
181 ASSERT(tid->tx_buf[cindex] == NULL); 181 BUG_ON(tid->tx_buf[cindex] != NULL);
182 tid->tx_buf[cindex] = bf; 182 tid->tx_buf[cindex] = bf;
183 183
184 if (index >= ((tid->baw_tail - tid->baw_head) & 184 if (index >= ((tid->baw_tail - tid->baw_head) &
@@ -282,7 +282,8 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
282 282
283 rcu_read_lock(); 283 rcu_read_lock();
284 284
285 sta = ieee80211_find_sta(sc->hw, hdr->addr1); 285 /* XXX: use ieee80211_find_sta! */
286 sta = ieee80211_find_sta_by_hw(sc->hw, hdr->addr1);
286 if (!sta) { 287 if (!sta) {
287 rcu_read_unlock(); 288 rcu_read_unlock();
288 return; 289 return;
@@ -358,7 +359,7 @@ static void ath_tx_complete_aggr(struct ath_softc *sc, struct ath_txq *txq,
358 else 359 else
359 INIT_LIST_HEAD(&bf_head); 360 INIT_LIST_HEAD(&bf_head);
360 } else { 361 } else {
361 ASSERT(!list_empty(bf_q)); 362 BUG_ON(list_empty(bf_q));
362 list_move_tail(&bf->list, &bf_head); 363 list_move_tail(&bf->list, &bf_head);
363 } 364 }
364 365
@@ -815,6 +816,7 @@ static void ath_txq_drain_pending_buffers(struct ath_softc *sc,
815struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype) 816struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
816{ 817{
817 struct ath_hw *ah = sc->sc_ah; 818 struct ath_hw *ah = sc->sc_ah;
819 struct ath_common *common = ath9k_hw_common(ah);
818 struct ath9k_tx_queue_info qi; 820 struct ath9k_tx_queue_info qi;
819 int qnum; 821 int qnum;
820 822
@@ -854,9 +856,9 @@ struct ath_txq *ath_txq_setup(struct ath_softc *sc, int qtype, int subtype)
854 return NULL; 856 return NULL;
855 } 857 }
856 if (qnum >= ARRAY_SIZE(sc->tx.txq)) { 858 if (qnum >= ARRAY_SIZE(sc->tx.txq)) {
857 DPRINTF(sc, ATH_DBG_FATAL, 859 ath_print(common, ATH_DBG_FATAL,
858 "qnum %u out of range, max %u!\n", 860 "qnum %u out of range, max %u!\n",
859 qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq)); 861 qnum, (unsigned int)ARRAY_SIZE(sc->tx.txq));
860 ath9k_hw_releasetxqueue(ah, qnum); 862 ath9k_hw_releasetxqueue(ah, qnum);
861 return NULL; 863 return NULL;
862 } 864 }
@@ -884,9 +886,9 @@ int ath_tx_get_qnum(struct ath_softc *sc, int qtype, int haltype)
884 switch (qtype) { 886 switch (qtype) {
885 case ATH9K_TX_QUEUE_DATA: 887 case ATH9K_TX_QUEUE_DATA:
886 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) { 888 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
887 DPRINTF(sc, ATH_DBG_FATAL, 889 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
888 "HAL AC %u out of range, max %zu!\n", 890 "HAL AC %u out of range, max %zu!\n",
889 haltype, ARRAY_SIZE(sc->tx.hwq_map)); 891 haltype, ARRAY_SIZE(sc->tx.hwq_map));
890 return -1; 892 return -1;
891 } 893 }
892 qnum = sc->tx.hwq_map[haltype]; 894 qnum = sc->tx.hwq_map[haltype];
@@ -914,9 +916,9 @@ struct ath_txq *ath_test_get_txq(struct ath_softc *sc, struct sk_buff *skb)
914 spin_lock_bh(&txq->axq_lock); 916 spin_lock_bh(&txq->axq_lock);
915 917
916 if (txq->axq_depth >= (ATH_TXBUF - 20)) { 918 if (txq->axq_depth >= (ATH_TXBUF - 20)) {
917 DPRINTF(sc, ATH_DBG_XMIT, 919 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_XMIT,
918 "TX queue: %d is full, depth: %d\n", 920 "TX queue: %d is full, depth: %d\n",
919 qnum, txq->axq_depth); 921 qnum, txq->axq_depth);
920 ieee80211_stop_queue(sc->hw, skb_get_queue_mapping(skb)); 922 ieee80211_stop_queue(sc->hw, skb_get_queue_mapping(skb));
921 txq->stopped = 1; 923 txq->stopped = 1;
922 spin_unlock_bh(&txq->axq_lock); 924 spin_unlock_bh(&txq->axq_lock);
@@ -945,7 +947,7 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
945 return 0; 947 return 0;
946 } 948 }
947 949
948 ASSERT(sc->tx.txq[qnum].axq_qnum == qnum); 950 BUG_ON(sc->tx.txq[qnum].axq_qnum != qnum);
949 951
950 ath9k_hw_get_txq_props(ah, qnum, &qi); 952 ath9k_hw_get_txq_props(ah, qnum, &qi);
951 qi.tqi_aifs = qinfo->tqi_aifs; 953 qi.tqi_aifs = qinfo->tqi_aifs;
@@ -955,8 +957,8 @@ int ath_txq_update(struct ath_softc *sc, int qnum,
955 qi.tqi_readyTime = qinfo->tqi_readyTime; 957 qi.tqi_readyTime = qinfo->tqi_readyTime;
956 958
957 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) { 959 if (!ath9k_hw_set_txq_props(ah, qnum, &qi)) {
958 DPRINTF(sc, ATH_DBG_FATAL, 960 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
959 "Unable to update hardware queue %u!\n", qnum); 961 "Unable to update hardware queue %u!\n", qnum);
960 error = -EIO; 962 error = -EIO;
961 } else { 963 } else {
962 ath9k_hw_resettxqueue(ah, qnum); 964 ath9k_hw_resettxqueue(ah, qnum);
@@ -1055,6 +1057,7 @@ void ath_draintxq(struct ath_softc *sc, struct ath_txq *txq, bool retry_tx)
1055void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx) 1057void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
1056{ 1058{
1057 struct ath_hw *ah = sc->sc_ah; 1059 struct ath_hw *ah = sc->sc_ah;
1060 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1058 struct ath_txq *txq; 1061 struct ath_txq *txq;
1059 int i, npend = 0; 1062 int i, npend = 0;
1060 1063
@@ -1076,14 +1079,15 @@ void ath_drain_all_txq(struct ath_softc *sc, bool retry_tx)
1076 if (npend) { 1079 if (npend) {
1077 int r; 1080 int r;
1078 1081
1079 DPRINTF(sc, ATH_DBG_XMIT, "Unable to stop TxDMA. Reset HAL!\n"); 1082 ath_print(common, ATH_DBG_XMIT,
1083 "Unable to stop TxDMA. Reset HAL!\n");
1080 1084
1081 spin_lock_bh(&sc->sc_resetlock); 1085 spin_lock_bh(&sc->sc_resetlock);
1082 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, true); 1086 r = ath9k_hw_reset(ah, sc->sc_ah->curchan, true);
1083 if (r) 1087 if (r)
1084 DPRINTF(sc, ATH_DBG_FATAL, 1088 ath_print(common, ATH_DBG_FATAL,
1085 "Unable to reset hardware; reset status %d\n", 1089 "Unable to reset hardware; reset status %d\n",
1086 r); 1090 r);
1087 spin_unlock_bh(&sc->sc_resetlock); 1091 spin_unlock_bh(&sc->sc_resetlock);
1088 } 1092 }
1089 1093
@@ -1147,8 +1151,8 @@ int ath_tx_setup(struct ath_softc *sc, int haltype)
1147 struct ath_txq *txq; 1151 struct ath_txq *txq;
1148 1152
1149 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) { 1153 if (haltype >= ARRAY_SIZE(sc->tx.hwq_map)) {
1150 DPRINTF(sc, ATH_DBG_FATAL, 1154 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1151 "HAL AC %u out of range, max %zu!\n", 1155 "HAL AC %u out of range, max %zu!\n",
1152 haltype, ARRAY_SIZE(sc->tx.hwq_map)); 1156 haltype, ARRAY_SIZE(sc->tx.hwq_map));
1153 return 0; 1157 return 0;
1154 } 1158 }
@@ -1172,6 +1176,7 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1172 struct list_head *head) 1176 struct list_head *head)
1173{ 1177{
1174 struct ath_hw *ah = sc->sc_ah; 1178 struct ath_hw *ah = sc->sc_ah;
1179 struct ath_common *common = ath9k_hw_common(ah);
1175 struct ath_buf *bf; 1180 struct ath_buf *bf;
1176 1181
1177 /* 1182 /*
@@ -1188,19 +1193,19 @@ static void ath_tx_txqaddbuf(struct ath_softc *sc, struct ath_txq *txq,
1188 txq->axq_depth++; 1193 txq->axq_depth++;
1189 txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list); 1194 txq->axq_linkbuf = list_entry(txq->axq_q.prev, struct ath_buf, list);
1190 1195
1191 DPRINTF(sc, ATH_DBG_QUEUE, 1196 ath_print(common, ATH_DBG_QUEUE,
1192 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth); 1197 "qnum: %d, txq depth: %d\n", txq->axq_qnum, txq->axq_depth);
1193 1198
1194 if (txq->axq_link == NULL) { 1199 if (txq->axq_link == NULL) {
1195 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr); 1200 ath9k_hw_puttxbuf(ah, txq->axq_qnum, bf->bf_daddr);
1196 DPRINTF(sc, ATH_DBG_XMIT, 1201 ath_print(common, ATH_DBG_XMIT,
1197 "TXDP[%u] = %llx (%p)\n", 1202 "TXDP[%u] = %llx (%p)\n",
1198 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc); 1203 txq->axq_qnum, ito64(bf->bf_daddr), bf->bf_desc);
1199 } else { 1204 } else {
1200 *txq->axq_link = bf->bf_daddr; 1205 *txq->axq_link = bf->bf_daddr;
1201 DPRINTF(sc, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n", 1206 ath_print(common, ATH_DBG_XMIT, "link[%u] (%p)=%llx (%p)\n",
1202 txq->axq_qnum, txq->axq_link, 1207 txq->axq_qnum, txq->axq_link,
1203 ito64(bf->bf_daddr), bf->bf_desc); 1208 ito64(bf->bf_daddr), bf->bf_desc);
1204 } 1209 }
1205 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link); 1210 txq->axq_link = &(bf->bf_lastbf->bf_desc->ds_link);
1206 ath9k_hw_txstart(ah, txq->axq_qnum); 1211 ath9k_hw_txstart(ah, txq->axq_qnum);
@@ -1452,6 +1457,7 @@ static u32 ath_pkt_duration(struct ath_softc *sc, u8 rix, struct ath_buf *bf,
1452 1457
1453static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf) 1458static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1454{ 1459{
1460 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1455 const struct ath_rate_table *rt = sc->cur_rate_table; 1461 const struct ath_rate_table *rt = sc->cur_rate_table;
1456 struct ath9k_11n_rate_series series[4]; 1462 struct ath9k_11n_rate_series series[4];
1457 struct sk_buff *skb; 1463 struct sk_buff *skb;
@@ -1507,7 +1513,7 @@ static void ath_buf_set_rate(struct ath_softc *sc, struct ath_buf *bf)
1507 1513
1508 rix = rates[i].idx; 1514 rix = rates[i].idx;
1509 series[i].Tries = rates[i].count; 1515 series[i].Tries = rates[i].count;
1510 series[i].ChSel = sc->tx_chainmask; 1516 series[i].ChSel = common->tx_chainmask;
1511 1517
1512 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE) 1518 if (rates[i].flags & IEEE80211_TX_RC_USE_SHORT_PREAMBLE)
1513 series[i].Rate = rt->info[rix].ratecode | 1519 series[i].Rate = rt->info[rix].ratecode |
@@ -1587,7 +1593,8 @@ static int ath_tx_setup_buffer(struct ieee80211_hw *hw, struct ath_buf *bf,
1587 bf->bf_mpdu = NULL; 1593 bf->bf_mpdu = NULL;
1588 kfree(tx_info_priv); 1594 kfree(tx_info_priv);
1589 tx_info->rate_driver_data[0] = NULL; 1595 tx_info->rate_driver_data[0] = NULL;
1590 DPRINTF(sc, ATH_DBG_FATAL, "dma_mapping_error() on TX\n"); 1596 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_FATAL,
1597 "dma_mapping_error() on TX\n");
1591 return -ENOMEM; 1598 return -ENOMEM;
1592 } 1599 }
1593 1600
@@ -1669,12 +1676,13 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1669{ 1676{
1670 struct ath_wiphy *aphy = hw->priv; 1677 struct ath_wiphy *aphy = hw->priv;
1671 struct ath_softc *sc = aphy->sc; 1678 struct ath_softc *sc = aphy->sc;
1679 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1672 struct ath_buf *bf; 1680 struct ath_buf *bf;
1673 int r; 1681 int r;
1674 1682
1675 bf = ath_tx_get_buffer(sc); 1683 bf = ath_tx_get_buffer(sc);
1676 if (!bf) { 1684 if (!bf) {
1677 DPRINTF(sc, ATH_DBG_XMIT, "TX buffers are full\n"); 1685 ath_print(common, ATH_DBG_XMIT, "TX buffers are full\n");
1678 return -1; 1686 return -1;
1679 } 1687 }
1680 1688
@@ -1682,7 +1690,7 @@ int ath_tx_start(struct ieee80211_hw *hw, struct sk_buff *skb,
1682 if (unlikely(r)) { 1690 if (unlikely(r)) {
1683 struct ath_txq *txq = txctl->txq; 1691 struct ath_txq *txq = txctl->txq;
1684 1692
1685 DPRINTF(sc, ATH_DBG_FATAL, "TX mem alloc failure\n"); 1693 ath_print(common, ATH_DBG_FATAL, "TX mem alloc failure\n");
1686 1694
1687 /* upon ath_tx_processq() this TX queue will be resumed, we 1695 /* upon ath_tx_processq() this TX queue will be resumed, we
1688 * guarantee this will happen by knowing beforehand that 1696 * guarantee this will happen by knowing beforehand that
@@ -1712,6 +1720,7 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
1712{ 1720{
1713 struct ath_wiphy *aphy = hw->priv; 1721 struct ath_wiphy *aphy = hw->priv;
1714 struct ath_softc *sc = aphy->sc; 1722 struct ath_softc *sc = aphy->sc;
1723 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1715 int hdrlen, padsize; 1724 int hdrlen, padsize;
1716 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1725 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1717 struct ath_tx_control txctl; 1726 struct ath_tx_control txctl;
@@ -1736,7 +1745,8 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
1736 if (hdrlen & 3) { 1745 if (hdrlen & 3) {
1737 padsize = hdrlen % 4; 1746 padsize = hdrlen % 4;
1738 if (skb_headroom(skb) < padsize) { 1747 if (skb_headroom(skb) < padsize) {
1739 DPRINTF(sc, ATH_DBG_XMIT, "TX CABQ padding failed\n"); 1748 ath_print(common, ATH_DBG_XMIT,
1749 "TX CABQ padding failed\n");
1740 dev_kfree_skb_any(skb); 1750 dev_kfree_skb_any(skb);
1741 return; 1751 return;
1742 } 1752 }
@@ -1746,10 +1756,11 @@ void ath_tx_cabq(struct ieee80211_hw *hw, struct sk_buff *skb)
1746 1756
1747 txctl.txq = sc->beacon.cabq; 1757 txctl.txq = sc->beacon.cabq;
1748 1758
1749 DPRINTF(sc, ATH_DBG_XMIT, "transmitting CABQ packet, skb: %p\n", skb); 1759 ath_print(common, ATH_DBG_XMIT,
1760 "transmitting CABQ packet, skb: %p\n", skb);
1750 1761
1751 if (ath_tx_start(hw, skb, &txctl) != 0) { 1762 if (ath_tx_start(hw, skb, &txctl) != 0) {
1752 DPRINTF(sc, ATH_DBG_XMIT, "CABQ TX failed\n"); 1763 ath_print(common, ATH_DBG_XMIT, "CABQ TX failed\n");
1753 goto exit; 1764 goto exit;
1754 } 1765 }
1755 1766
@@ -1768,10 +1779,11 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1768 struct ieee80211_hw *hw = sc->hw; 1779 struct ieee80211_hw *hw = sc->hw;
1769 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb); 1780 struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
1770 struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info); 1781 struct ath_tx_info_priv *tx_info_priv = ATH_TX_INFO_PRIV(tx_info);
1782 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
1771 int hdrlen, padsize; 1783 int hdrlen, padsize;
1772 int frame_type = ATH9K_NOT_INTERNAL; 1784 int frame_type = ATH9K_NOT_INTERNAL;
1773 1785
1774 DPRINTF(sc, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb); 1786 ath_print(common, ATH_DBG_XMIT, "TX complete: skb: %p\n", skb);
1775 1787
1776 if (tx_info_priv) { 1788 if (tx_info_priv) {
1777 hw = tx_info_priv->aphy->hw; 1789 hw = tx_info_priv->aphy->hw;
@@ -1805,8 +1817,9 @@ static void ath_tx_complete(struct ath_softc *sc, struct sk_buff *skb,
1805 1817
1806 if (sc->sc_flags & SC_OP_WAIT_FOR_TX_ACK) { 1818 if (sc->sc_flags & SC_OP_WAIT_FOR_TX_ACK) {
1807 sc->sc_flags &= ~SC_OP_WAIT_FOR_TX_ACK; 1819 sc->sc_flags &= ~SC_OP_WAIT_FOR_TX_ACK;
1808 DPRINTF(sc, ATH_DBG_PS, "Going back to sleep after having " 1820 ath_print(common, ATH_DBG_PS,
1809 "received TX status (0x%x)\n", 1821 "Going back to sleep after having "
1822 "received TX status (0x%x)\n",
1810 sc->sc_flags & (SC_OP_WAIT_FOR_BEACON | 1823 sc->sc_flags & (SC_OP_WAIT_FOR_BEACON |
1811 SC_OP_WAIT_FOR_CAB | 1824 SC_OP_WAIT_FOR_CAB |
1812 SC_OP_WAIT_FOR_PSPOLL_DATA | 1825 SC_OP_WAIT_FOR_PSPOLL_DATA |
@@ -1936,15 +1949,16 @@ static void ath_wake_mac80211_queue(struct ath_softc *sc, struct ath_txq *txq)
1936static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq) 1949static void ath_tx_processq(struct ath_softc *sc, struct ath_txq *txq)
1937{ 1950{
1938 struct ath_hw *ah = sc->sc_ah; 1951 struct ath_hw *ah = sc->sc_ah;
1952 struct ath_common *common = ath9k_hw_common(ah);
1939 struct ath_buf *bf, *lastbf, *bf_held = NULL; 1953 struct ath_buf *bf, *lastbf, *bf_held = NULL;
1940 struct list_head bf_head; 1954 struct list_head bf_head;
1941 struct ath_desc *ds; 1955 struct ath_desc *ds;
1942 int txok; 1956 int txok;
1943 int status; 1957 int status;
1944 1958
1945 DPRINTF(sc, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n", 1959 ath_print(common, ATH_DBG_QUEUE, "tx queue %d (%x), link %p\n",
1946 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum), 1960 txq->axq_qnum, ath9k_hw_gettxbuf(sc->sc_ah, txq->axq_qnum),
1947 txq->axq_link); 1961 txq->axq_link);
1948 1962
1949 for (;;) { 1963 for (;;) {
1950 spin_lock_bh(&txq->axq_lock); 1964 spin_lock_bh(&txq->axq_lock);
@@ -2064,8 +2078,11 @@ static void ath_tx_complete_poll_work(struct work_struct *work)
2064 } 2078 }
2065 2079
2066 if (needreset) { 2080 if (needreset) {
2067 DPRINTF(sc, ATH_DBG_RESET, "tx hung, resetting the chip\n"); 2081 ath_print(ath9k_hw_common(sc->sc_ah), ATH_DBG_RESET,
2082 "tx hung, resetting the chip\n");
2083 ath9k_ps_wakeup(sc);
2068 ath_reset(sc, false); 2084 ath_reset(sc, false);
2085 ath9k_ps_restore(sc);
2069 } 2086 }
2070 2087
2071 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work, 2088 ieee80211_queue_delayed_work(sc->hw, &sc->tx_complete_work,
@@ -2093,6 +2110,7 @@ void ath_tx_tasklet(struct ath_softc *sc)
2093 2110
2094int ath_tx_init(struct ath_softc *sc, int nbufs) 2111int ath_tx_init(struct ath_softc *sc, int nbufs)
2095{ 2112{
2113 struct ath_common *common = ath9k_hw_common(sc->sc_ah);
2096 int error = 0; 2114 int error = 0;
2097 2115
2098 spin_lock_init(&sc->tx.txbuflock); 2116 spin_lock_init(&sc->tx.txbuflock);
@@ -2100,16 +2118,16 @@ int ath_tx_init(struct ath_softc *sc, int nbufs)
2100 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf, 2118 error = ath_descdma_setup(sc, &sc->tx.txdma, &sc->tx.txbuf,
2101 "tx", nbufs, 1); 2119 "tx", nbufs, 1);
2102 if (error != 0) { 2120 if (error != 0) {
2103 DPRINTF(sc, ATH_DBG_FATAL, 2121 ath_print(common, ATH_DBG_FATAL,
2104 "Failed to allocate tx descriptors: %d\n", error); 2122 "Failed to allocate tx descriptors: %d\n", error);
2105 goto err; 2123 goto err;
2106 } 2124 }
2107 2125
2108 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf, 2126 error = ath_descdma_setup(sc, &sc->beacon.bdma, &sc->beacon.bbuf,
2109 "beacon", ATH_BCBUF, 1); 2127 "beacon", ATH_BCBUF, 1);
2110 if (error != 0) { 2128 if (error != 0) {
2111 DPRINTF(sc, ATH_DBG_FATAL, 2129 ath_print(common, ATH_DBG_FATAL,
2112 "Failed to allocate beacon descriptors: %d\n", error); 2130 "Failed to allocate beacon descriptors: %d\n", error);
2113 goto err; 2131 goto err;
2114 } 2132 }
2115 2133
diff --git a/drivers/net/wireless/ath/debug.c b/drivers/net/wireless/ath/debug.c
new file mode 100644
index 000000000000..53e77bd131b9
--- /dev/null
+++ b/drivers/net/wireless/ath/debug.c
@@ -0,0 +1,32 @@
1/*
2 * Copyright (c) 2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include "ath.h"
18#include "debug.h"
19
20void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...)
21{
22 va_list args;
23
24 if (likely(!(common->debug_mask & dbg_mask)))
25 return;
26
27 va_start(args, fmt);
28 printk(KERN_DEBUG "ath: ");
29 vprintk(fmt, args);
30 va_end(args);
31}
32EXPORT_SYMBOL(ath_print);
diff --git a/drivers/net/wireless/ath/debug.h b/drivers/net/wireless/ath/debug.h
new file mode 100644
index 000000000000..d6b685a06c5e
--- /dev/null
+++ b/drivers/net/wireless/ath/debug.h
@@ -0,0 +1,77 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef ATH_DEBUG_H
18#define ATH_DEBUG_H
19
20#include "ath.h"
21
22/**
23 * enum ath_debug_level - atheros wireless debug level
24 *
25 * @ATH_DBG_RESET: reset processing
26 * @ATH_DBG_QUEUE: hardware queue management
27 * @ATH_DBG_EEPROM: eeprom processing
28 * @ATH_DBG_CALIBRATE: periodic calibration
29 * @ATH_DBG_INTERRUPT: interrupt processing
30 * @ATH_DBG_REGULATORY: regulatory processing
 31 * @ATH_DBG_ANI: adaptive noise immunity processing
32 * @ATH_DBG_XMIT: basic xmit operation
33 * @ATH_DBG_BEACON: beacon handling
34 * @ATH_DBG_CONFIG: configuration of the hardware
35 * @ATH_DBG_FATAL: fatal errors, this is the default, DBG_DEFAULT
36 * @ATH_DBG_PS: power save processing
37 * @ATH_DBG_HWTIMER: hardware timer handling
 38 * @ATH_DBG_BTCOEX: bluetooth coexistence
39 * @ATH_DBG_ANY: enable all debugging
40 *
41 * The debug level is used to control the amount and type of debugging output
42 * we want to see. Each driver has its own method for enabling debugging and
43 * modifying debug level states -- but this is typically done through a
44 * module parameter 'debug' along with a respective 'debug' debugfs file
45 * entry.
46 */
47enum ATH_DEBUG {
48 ATH_DBG_RESET = 0x00000001,
49 ATH_DBG_QUEUE = 0x00000002,
50 ATH_DBG_EEPROM = 0x00000004,
51 ATH_DBG_CALIBRATE = 0x00000008,
52 ATH_DBG_INTERRUPT = 0x00000010,
53 ATH_DBG_REGULATORY = 0x00000020,
54 ATH_DBG_ANI = 0x00000040,
55 ATH_DBG_XMIT = 0x00000080,
56 ATH_DBG_BEACON = 0x00000100,
57 ATH_DBG_CONFIG = 0x00000200,
58 ATH_DBG_FATAL = 0x00000400,
59 ATH_DBG_PS = 0x00000800,
60 ATH_DBG_HWTIMER = 0x00001000,
61 ATH_DBG_BTCOEX = 0x00002000,
62 ATH_DBG_ANY = 0xffffffff
63};
64
65#define ATH_DBG_DEFAULT (ATH_DBG_FATAL)
66
67#ifdef CONFIG_ATH_DEBUG
68void ath_print(struct ath_common *common, int dbg_mask, const char *fmt, ...);
69#else
70static inline void ath_print(struct ath_common *common,
71 int dbg_mask,
72 const char *fmt, ...)
73{
74}
75#endif /* CONFIG_ATH_DEBUG */
76
77#endif /* ATH_DEBUG_H */
diff --git a/drivers/net/wireless/ath/hw.c b/drivers/net/wireless/ath/hw.c
new file mode 100644
index 000000000000..ecc9eb01f4fa
--- /dev/null
+++ b/drivers/net/wireless/ath/hw.c
@@ -0,0 +1,126 @@
1/*
2 * Copyright (c) 2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#include <asm/unaligned.h>
18
19#include "ath.h"
20#include "reg.h"
21
22#define REG_READ common->ops->read
23#define REG_WRITE common->ops->write
24
25/**
 26 * ath_hw_setbssidmask - filter out bssids we listen to
27 *
28 * @common: the ath_common struct for the device.
29 *
30 * BSSID masking is a method used by AR5212 and newer hardware to inform PCU
31 * which bits of the interface's MAC address should be looked at when trying
32 * to decide which packets to ACK. In station mode and AP mode with a single
33 * BSS every bit matters since we lock to only one BSS. In AP mode with
34 * multiple BSSes (virtual interfaces) not every bit matters because hw must
35 * accept frames for all BSSes and so we tweak some bits of our mac address
36 * in order to have multiple BSSes.
37 *
38 * NOTE: This is a simple filter and does *not* filter out all
39 * relevant frames. Some frames that are not for us might get ACKed from us
40 * by PCU because they just match the mask.
41 *
42 * When handling multiple BSSes you can get the BSSID mask by computing the
43 * set of ~ ( MAC XOR BSSID ) for all bssids we handle.
44 *
45 * When you do this you are essentially computing the common bits of all your
 46 * BSSes. Later it is assumed the hardware will "and" (&) the BSSID mask with
47 * the MAC address to obtain the relevant bits and compare the result with
48 * (frame's BSSID & mask) to see if they match.
49 *
 50 * Simple example: on your card you have two BSSes you have created with
51 * BSSID-01 and BSSID-02. Lets assume BSSID-01 will not use the MAC address.
52 * There is another BSSID-03 but you are not part of it. For simplicity's sake,
53 * assuming only 4 bits for a mac address and for BSSIDs you can then have:
54 *
55 * \
56 * MAC: 0001 |
57 * BSSID-01: 0100 | --> Belongs to us
58 * BSSID-02: 1001 |
59 * /
60 * -------------------
61 * BSSID-03: 0110 | --> External
62 * -------------------
63 *
64 * Our bssid_mask would then be:
65 *
66 * On loop iteration for BSSID-01:
67 * ~(0001 ^ 0100) -> ~(0101)
68 * -> 1010
69 * bssid_mask = 1010
70 *
71 * On loop iteration for BSSID-02:
72 * bssid_mask &= ~(0001 ^ 1001)
73 * bssid_mask = (1010) & ~(0001 ^ 1001)
74 * bssid_mask = (1010) & ~(1001)
75 * bssid_mask = (1010) & (0110)
76 * bssid_mask = 0010
77 *
78 * A bssid_mask of 0010 means "only pay attention to the second least
 79 * significant bit". This is because it's the only bit common
 80 * amongst the MAC and all BSSIDs we support. To find out what the real
81 * common bit is we can simply "&" the bssid_mask now with any BSSID we have
82 * or our MAC address (we assume the hardware uses the MAC address).
83 *
84 * Now, suppose there's an incoming frame for BSSID-03:
85 *
86 * IFRAME-01: 0110
87 *
 88 * An easy eye-inspection of this should already tell you that this frame
 89 * will not pass our check. This is because the bssid_mask tells the
90 * hardware to only look at the second least significant bit and the
91 * common bit amongst the MAC and BSSIDs is 0, this frame has the 2nd LSB
92 * as 1, which does not match 0.
93 *
94 * So with IFRAME-01 we *assume* the hardware will do:
95 *
96 * allow = (IFRAME-01 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
97 * --> allow = (0110 & 0010) == (0010 & 0001) ? 1 : 0;
98 * --> allow = (0010) == 0000 ? 1 : 0;
99 * --> allow = 0
100 *
101 * Lets now test a frame that should work:
102 *
103 * IFRAME-02: 0001 (we should allow)
104 *
 105 * allow = (0001 & 0010) == (0010 & 0001)
106 *
107 * allow = (IFRAME-02 & bssid_mask) == (bssid_mask & MAC) ? 1 : 0;
 108 * --> allow = (0001 & 0010) == (0010 & 0001) ? 1 : 0;
 109 * --> allow = (0000) == (0000)
110 * --> allow = 1
111 *
112 * Other examples:
113 *
114 * IFRAME-03: 0100 --> allowed
115 * IFRAME-04: 1001 --> allowed
116 * IFRAME-05: 1101 --> allowed but its not for us!!!
117 *
118 */
119void ath_hw_setbssidmask(struct ath_common *common)
120{
121 void *ah = common->ah;
122
123 REG_WRITE(ah, get_unaligned_le32(common->bssidmask), AR_BSSMSKL);
124 REG_WRITE(ah, get_unaligned_le16(common->bssidmask + 4), AR_BSSMSKU);
125}
126EXPORT_SYMBOL(ath_hw_setbssidmask);
diff --git a/drivers/net/wireless/ath/reg.h b/drivers/net/wireless/ath/reg.h
new file mode 100644
index 000000000000..dfe1fbec24f5
--- /dev/null
+++ b/drivers/net/wireless/ath/reg.h
@@ -0,0 +1,27 @@
1/*
2 * Copyright (c) 2008-2009 Atheros Communications Inc.
3 *
4 * Permission to use, copy, modify, and/or distribute this software for any
5 * purpose with or without fee is hereby granted, provided that the above
6 * copyright notice and this permission notice appear in all copies.
7 *
8 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
9 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
10 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
11 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
12 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
13 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
14 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
15 */
16
17#ifndef ATH_REGISTERS_H
18#define ATH_REGISTERS_H
19
20/*
 21 * BSSID mask registers. See ath_hw_setbssidmask()
22 * for detailed documentation about these registers.
23 */
24#define AR_BSSMSKL 0x80e0
25#define AR_BSSMSKU 0x80e4
26
27#endif /* ATH_REGISTERS_H */
diff --git a/drivers/net/wireless/ath/regd.h b/drivers/net/wireless/ath/regd.h
index c1dd857697a7..a1c39526161a 100644
--- a/drivers/net/wireless/ath/regd.h
+++ b/drivers/net/wireless/ath/regd.h
@@ -65,10 +65,13 @@ enum CountryCode {
65 CTRY_ALGERIA = 12, 65 CTRY_ALGERIA = 12,
66 CTRY_ARGENTINA = 32, 66 CTRY_ARGENTINA = 32,
67 CTRY_ARMENIA = 51, 67 CTRY_ARMENIA = 51,
68 CTRY_ARUBA = 533,
68 CTRY_AUSTRALIA = 36, 69 CTRY_AUSTRALIA = 36,
69 CTRY_AUSTRIA = 40, 70 CTRY_AUSTRIA = 40,
70 CTRY_AZERBAIJAN = 31, 71 CTRY_AZERBAIJAN = 31,
71 CTRY_BAHRAIN = 48, 72 CTRY_BAHRAIN = 48,
73 CTRY_BANGLADESH = 50,
74 CTRY_BARBADOS = 52,
72 CTRY_BELARUS = 112, 75 CTRY_BELARUS = 112,
73 CTRY_BELGIUM = 56, 76 CTRY_BELGIUM = 56,
74 CTRY_BELIZE = 84, 77 CTRY_BELIZE = 84,
@@ -77,6 +80,7 @@ enum CountryCode {
77 CTRY_BRAZIL = 76, 80 CTRY_BRAZIL = 76,
78 CTRY_BRUNEI_DARUSSALAM = 96, 81 CTRY_BRUNEI_DARUSSALAM = 96,
79 CTRY_BULGARIA = 100, 82 CTRY_BULGARIA = 100,
83 CTRY_CAMBODIA = 116,
80 CTRY_CANADA = 124, 84 CTRY_CANADA = 124,
81 CTRY_CHILE = 152, 85 CTRY_CHILE = 152,
82 CTRY_CHINA = 156, 86 CTRY_CHINA = 156,
@@ -97,7 +101,11 @@ enum CountryCode {
97 CTRY_GEORGIA = 268, 101 CTRY_GEORGIA = 268,
98 CTRY_GERMANY = 276, 102 CTRY_GERMANY = 276,
99 CTRY_GREECE = 300, 103 CTRY_GREECE = 300,
104 CTRY_GREENLAND = 304,
105 CTRY_GRENEDA = 308,
106 CTRY_GUAM = 316,
100 CTRY_GUATEMALA = 320, 107 CTRY_GUATEMALA = 320,
108 CTRY_HAITI = 332,
101 CTRY_HONDURAS = 340, 109 CTRY_HONDURAS = 340,
102 CTRY_HONG_KONG = 344, 110 CTRY_HONG_KONG = 344,
103 CTRY_HUNGARY = 348, 111 CTRY_HUNGARY = 348,
diff --git a/drivers/net/wireless/ath/regd_common.h b/drivers/net/wireless/ath/regd_common.h
index 9847af72208c..248c670fdfbe 100644
--- a/drivers/net/wireless/ath/regd_common.h
+++ b/drivers/net/wireless/ath/regd_common.h
@@ -288,13 +288,16 @@ static struct country_code_to_enum_rd allCountries[] = {
288 {CTRY_DEFAULT, FCC1_FCCA, "CO"}, 288 {CTRY_DEFAULT, FCC1_FCCA, "CO"},
289 {CTRY_ALBANIA, NULL1_WORLD, "AL"}, 289 {CTRY_ALBANIA, NULL1_WORLD, "AL"},
290 {CTRY_ALGERIA, NULL1_WORLD, "DZ"}, 290 {CTRY_ALGERIA, NULL1_WORLD, "DZ"},
291 {CTRY_ARGENTINA, APL3_WORLD, "AR"}, 291 {CTRY_ARGENTINA, FCC3_WORLD, "AR"},
292 {CTRY_ARMENIA, ETSI4_WORLD, "AM"}, 292 {CTRY_ARMENIA, ETSI4_WORLD, "AM"},
293 {CTRY_ARUBA, ETSI1_WORLD, "AW"},
293 {CTRY_AUSTRALIA, FCC2_WORLD, "AU"}, 294 {CTRY_AUSTRALIA, FCC2_WORLD, "AU"},
294 {CTRY_AUSTRALIA2, FCC6_WORLD, "AU"}, 295 {CTRY_AUSTRALIA2, FCC6_WORLD, "AU"},
295 {CTRY_AUSTRIA, ETSI1_WORLD, "AT"}, 296 {CTRY_AUSTRIA, ETSI1_WORLD, "AT"},
296 {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ"}, 297 {CTRY_AZERBAIJAN, ETSI4_WORLD, "AZ"},
297 {CTRY_BAHRAIN, APL6_WORLD, "BH"}, 298 {CTRY_BAHRAIN, APL6_WORLD, "BH"},
299 {CTRY_BANGLADESH, NULL1_WORLD, "BD"},
300 {CTRY_BARBADOS, FCC2_WORLD, "BB"},
298 {CTRY_BELARUS, ETSI1_WORLD, "BY"}, 301 {CTRY_BELARUS, ETSI1_WORLD, "BY"},
299 {CTRY_BELGIUM, ETSI1_WORLD, "BE"}, 302 {CTRY_BELGIUM, ETSI1_WORLD, "BE"},
300 {CTRY_BELGIUM2, ETSI4_WORLD, "BL"}, 303 {CTRY_BELGIUM2, ETSI4_WORLD, "BL"},
@@ -304,13 +307,14 @@ static struct country_code_to_enum_rd allCountries[] = {
304 {CTRY_BRAZIL, FCC3_WORLD, "BR"}, 307 {CTRY_BRAZIL, FCC3_WORLD, "BR"},
305 {CTRY_BRUNEI_DARUSSALAM, APL1_WORLD, "BN"}, 308 {CTRY_BRUNEI_DARUSSALAM, APL1_WORLD, "BN"},
306 {CTRY_BULGARIA, ETSI6_WORLD, "BG"}, 309 {CTRY_BULGARIA, ETSI6_WORLD, "BG"},
307 {CTRY_CANADA, FCC2_FCCA, "CA"}, 310 {CTRY_CAMBODIA, ETSI1_WORLD, "KH"},
311 {CTRY_CANADA, FCC3_FCCA, "CA"},
308 {CTRY_CANADA2, FCC6_FCCA, "CA"}, 312 {CTRY_CANADA2, FCC6_FCCA, "CA"},
309 {CTRY_CHILE, APL6_WORLD, "CL"}, 313 {CTRY_CHILE, APL6_WORLD, "CL"},
310 {CTRY_CHINA, APL1_WORLD, "CN"}, 314 {CTRY_CHINA, APL1_WORLD, "CN"},
311 {CTRY_COLOMBIA, FCC1_FCCA, "CO"}, 315 {CTRY_COLOMBIA, FCC1_FCCA, "CO"},
312 {CTRY_COSTA_RICA, FCC1_WORLD, "CR"}, 316 {CTRY_COSTA_RICA, FCC1_WORLD, "CR"},
313 {CTRY_CROATIA, ETSI3_WORLD, "HR"}, 317 {CTRY_CROATIA, ETSI1_WORLD, "HR"},
314 {CTRY_CYPRUS, ETSI1_WORLD, "CY"}, 318 {CTRY_CYPRUS, ETSI1_WORLD, "CY"},
315 {CTRY_CZECH, ETSI3_WORLD, "CZ"}, 319 {CTRY_CZECH, ETSI3_WORLD, "CZ"},
316 {CTRY_DENMARK, ETSI1_WORLD, "DK"}, 320 {CTRY_DENMARK, ETSI1_WORLD, "DK"},
@@ -324,18 +328,22 @@ static struct country_code_to_enum_rd allCountries[] = {
324 {CTRY_GEORGIA, ETSI4_WORLD, "GE"}, 328 {CTRY_GEORGIA, ETSI4_WORLD, "GE"},
325 {CTRY_GERMANY, ETSI1_WORLD, "DE"}, 329 {CTRY_GERMANY, ETSI1_WORLD, "DE"},
326 {CTRY_GREECE, ETSI1_WORLD, "GR"}, 330 {CTRY_GREECE, ETSI1_WORLD, "GR"},
331 {CTRY_GREENLAND, ETSI1_WORLD, "GL"},
332 {CTRY_GRENEDA, FCC3_FCCA, "GD"},
333 {CTRY_GUAM, FCC1_FCCA, "GU"},
327 {CTRY_GUATEMALA, FCC1_FCCA, "GT"}, 334 {CTRY_GUATEMALA, FCC1_FCCA, "GT"},
335 {CTRY_HAITI, ETSI1_WORLD, "HT"},
328 {CTRY_HONDURAS, NULL1_WORLD, "HN"}, 336 {CTRY_HONDURAS, NULL1_WORLD, "HN"},
329 {CTRY_HONG_KONG, FCC2_WORLD, "HK"}, 337 {CTRY_HONG_KONG, FCC3_WORLD, "HK"},
330 {CTRY_HUNGARY, ETSI1_WORLD, "HU"}, 338 {CTRY_HUNGARY, ETSI1_WORLD, "HU"},
331 {CTRY_ICELAND, ETSI1_WORLD, "IS"}, 339 {CTRY_ICELAND, ETSI1_WORLD, "IS"},
332 {CTRY_INDIA, APL6_WORLD, "IN"}, 340 {CTRY_INDIA, APL6_WORLD, "IN"},
333 {CTRY_INDONESIA, APL1_WORLD, "ID"}, 341 {CTRY_INDONESIA, NULL1_WORLD, "ID"},
334 {CTRY_IRAN, APL1_WORLD, "IR"}, 342 {CTRY_IRAN, APL1_WORLD, "IR"},
335 {CTRY_IRELAND, ETSI1_WORLD, "IE"}, 343 {CTRY_IRELAND, ETSI1_WORLD, "IE"},
336 {CTRY_ISRAEL, NULL1_WORLD, "IL"}, 344 {CTRY_ISRAEL, NULL1_WORLD, "IL"},
337 {CTRY_ITALY, ETSI1_WORLD, "IT"}, 345 {CTRY_ITALY, ETSI1_WORLD, "IT"},
338 {CTRY_JAMAICA, ETSI1_WORLD, "JM"}, 346 {CTRY_JAMAICA, FCC3_WORLD, "JM"},
339 347
340 {CTRY_JAPAN, MKK1_MKKA, "JP"}, 348 {CTRY_JAPAN, MKK1_MKKA, "JP"},
341 {CTRY_JAPAN1, MKK1_MKKB, "JP"}, 349 {CTRY_JAPAN1, MKK1_MKKB, "JP"},
@@ -402,7 +410,7 @@ static struct country_code_to_enum_rd allCountries[] = {
402 {CTRY_KOREA_ROC, APL9_WORLD, "KR"}, 410 {CTRY_KOREA_ROC, APL9_WORLD, "KR"},
403 {CTRY_KOREA_ROC2, APL2_WORLD, "K2"}, 411 {CTRY_KOREA_ROC2, APL2_WORLD, "K2"},
404 {CTRY_KOREA_ROC3, APL9_WORLD, "K3"}, 412 {CTRY_KOREA_ROC3, APL9_WORLD, "K3"},
405 {CTRY_KUWAIT, NULL1_WORLD, "KW"}, 413 {CTRY_KUWAIT, ETSI3_WORLD, "KW"},
406 {CTRY_LATVIA, ETSI1_WORLD, "LV"}, 414 {CTRY_LATVIA, ETSI1_WORLD, "LV"},
407 {CTRY_LEBANON, NULL1_WORLD, "LB"}, 415 {CTRY_LEBANON, NULL1_WORLD, "LB"},
408 {CTRY_LIECHTENSTEIN, ETSI1_WORLD, "LI"}, 416 {CTRY_LIECHTENSTEIN, ETSI1_WORLD, "LI"},
@@ -414,13 +422,13 @@ static struct country_code_to_enum_rd allCountries[] = {
414 {CTRY_MALTA, ETSI1_WORLD, "MT"}, 422 {CTRY_MALTA, ETSI1_WORLD, "MT"},
415 {CTRY_MEXICO, FCC1_FCCA, "MX"}, 423 {CTRY_MEXICO, FCC1_FCCA, "MX"},
416 {CTRY_MONACO, ETSI4_WORLD, "MC"}, 424 {CTRY_MONACO, ETSI4_WORLD, "MC"},
417 {CTRY_MOROCCO, NULL1_WORLD, "MA"}, 425 {CTRY_MOROCCO, APL4_WORLD, "MA"},
418 {CTRY_NEPAL, APL1_WORLD, "NP"}, 426 {CTRY_NEPAL, APL1_WORLD, "NP"},
419 {CTRY_NETHERLANDS, ETSI1_WORLD, "NL"}, 427 {CTRY_NETHERLANDS, ETSI1_WORLD, "NL"},
420 {CTRY_NETHERLANDS_ANTILLES, ETSI1_WORLD, "AN"}, 428 {CTRY_NETHERLANDS_ANTILLES, ETSI1_WORLD, "AN"},
421 {CTRY_NEW_ZEALAND, FCC2_ETSIC, "NZ"}, 429 {CTRY_NEW_ZEALAND, FCC2_ETSIC, "NZ"},
422 {CTRY_NORWAY, ETSI1_WORLD, "NO"}, 430 {CTRY_NORWAY, ETSI1_WORLD, "NO"},
423 {CTRY_OMAN, APL6_WORLD, "OM"}, 431 {CTRY_OMAN, FCC3_WORLD, "OM"},
424 {CTRY_PAKISTAN, NULL1_WORLD, "PK"}, 432 {CTRY_PAKISTAN, NULL1_WORLD, "PK"},
425 {CTRY_PANAMA, FCC1_FCCA, "PA"}, 433 {CTRY_PANAMA, FCC1_FCCA, "PA"},
426 {CTRY_PAPUA_NEW_GUINEA, FCC1_WORLD, "PG"}, 434 {CTRY_PAPUA_NEW_GUINEA, FCC1_WORLD, "PG"},
@@ -429,7 +437,7 @@ static struct country_code_to_enum_rd allCountries[] = {
429 {CTRY_POLAND, ETSI1_WORLD, "PL"}, 437 {CTRY_POLAND, ETSI1_WORLD, "PL"},
430 {CTRY_PORTUGAL, ETSI1_WORLD, "PT"}, 438 {CTRY_PORTUGAL, ETSI1_WORLD, "PT"},
431 {CTRY_PUERTO_RICO, FCC1_FCCA, "PR"}, 439 {CTRY_PUERTO_RICO, FCC1_FCCA, "PR"},
432 {CTRY_QATAR, NULL1_WORLD, "QA"}, 440 {CTRY_QATAR, APL1_WORLD, "QA"},
433 {CTRY_ROMANIA, NULL1_WORLD, "RO"}, 441 {CTRY_ROMANIA, NULL1_WORLD, "RO"},
434 {CTRY_RUSSIA, NULL1_WORLD, "RU"}, 442 {CTRY_RUSSIA, NULL1_WORLD, "RU"},
435 {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"}, 443 {CTRY_SAUDI_ARABIA, NULL1_WORLD, "SA"},
@@ -445,7 +453,7 @@ static struct country_code_to_enum_rd allCountries[] = {
445 {CTRY_SYRIA, NULL1_WORLD, "SY"}, 453 {CTRY_SYRIA, NULL1_WORLD, "SY"},
446 {CTRY_TAIWAN, APL3_FCCA, "TW"}, 454 {CTRY_TAIWAN, APL3_FCCA, "TW"},
447 {CTRY_THAILAND, FCC3_WORLD, "TH"}, 455 {CTRY_THAILAND, FCC3_WORLD, "TH"},
448 {CTRY_TRINIDAD_Y_TOBAGO, ETSI4_WORLD, "TT"}, 456 {CTRY_TRINIDAD_Y_TOBAGO, FCC3_WORLD, "TT"},
449 {CTRY_TUNISIA, ETSI3_WORLD, "TN"}, 457 {CTRY_TUNISIA, ETSI3_WORLD, "TN"},
450 {CTRY_TURKEY, ETSI3_WORLD, "TR"}, 458 {CTRY_TURKEY, ETSI3_WORLD, "TR"},
451 {CTRY_UKRAINE, NULL1_WORLD, "UA"}, 459 {CTRY_UKRAINE, NULL1_WORLD, "UA"},
@@ -456,7 +464,7 @@ static struct country_code_to_enum_rd allCountries[] = {
456 * would need to assign new special alpha2 to CRDA db as with the world 464 * would need to assign new special alpha2 to CRDA db as with the world
457 * regdomain and use another alpha2 */ 465 * regdomain and use another alpha2 */
458 {CTRY_UNITED_STATES_FCC49, FCC4_FCCA, "PS"}, 466 {CTRY_UNITED_STATES_FCC49, FCC4_FCCA, "PS"},
459 {CTRY_URUGUAY, APL2_WORLD, "UY"}, 467 {CTRY_URUGUAY, FCC3_WORLD, "UY"},
460 {CTRY_UZBEKISTAN, FCC3_FCCA, "UZ"}, 468 {CTRY_UZBEKISTAN, FCC3_FCCA, "UZ"},
461 {CTRY_VENEZUELA, APL2_ETSIC, "VE"}, 469 {CTRY_VENEZUELA, APL2_ETSIC, "VE"},
462 {CTRY_VIET_NAM, NULL1_WORLD, "VN"}, 470 {CTRY_VIET_NAM, NULL1_WORLD, "VN"},
diff --git a/drivers/net/wireless/b43/Kconfig b/drivers/net/wireless/b43/Kconfig
index 54ea61c15d8b..64c12e1bced3 100644
--- a/drivers/net/wireless/b43/Kconfig
+++ b/drivers/net/wireless/b43/Kconfig
@@ -1,6 +1,6 @@
1config B43 1config B43
2 tristate "Broadcom 43xx wireless support (mac80211 stack)" 2 tristate "Broadcom 43xx wireless support (mac80211 stack)"
3 depends on SSB_POSSIBLE && MAC80211 && WLAN_80211 && HAS_DMA 3 depends on SSB_POSSIBLE && MAC80211 && HAS_DMA
4 select SSB 4 select SSB
5 select FW_LOADER 5 select FW_LOADER
6 ---help--- 6 ---help---
diff --git a/drivers/net/wireless/b43/b43.h b/drivers/net/wireless/b43/b43.h
index 660716214d49..fe3bf9491997 100644
--- a/drivers/net/wireless/b43/b43.h
+++ b/drivers/net/wireless/b43/b43.h
@@ -26,8 +26,6 @@
26# define B43_DEBUG 0 26# define B43_DEBUG 0
27#endif 27#endif
28 28
29#define B43_RX_MAX_SSI 60
30
31/* MMIO offsets */ 29/* MMIO offsets */
32#define B43_MMIO_DMA0_REASON 0x20 30#define B43_MMIO_DMA0_REASON 0x20
33#define B43_MMIO_DMA0_IRQ_MASK 0x24 31#define B43_MMIO_DMA0_IRQ_MASK 0x24
@@ -749,12 +747,6 @@ struct b43_wldev {
749#endif 747#endif
750}; 748};
751 749
752/*
753 * Include goes here to avoid a dependency problem.
754 * A better fix would be to integrate xmit.h into b43.h.
755 */
756#include "xmit.h"
757
758/* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */ 750/* Data structure for the WLAN parts (802.11 cores) of the b43 chip. */
759struct b43_wl { 751struct b43_wl {
760 /* Pointer to the active wireless device on this chip */ 752 /* Pointer to the active wireless device on this chip */
@@ -830,13 +822,9 @@ struct b43_wl {
830 struct b43_leds leds; 822 struct b43_leds leds;
831 823
832#ifdef CONFIG_B43_PIO 824#ifdef CONFIG_B43_PIO
833 /* 825 /* Kmalloc'ed scratch space for PIO TX/RX. Protected by wl->mutex. */
834 * RX/TX header/tail buffers used by the frame transmit functions. 826 u8 pio_scratchspace[110] __attribute__((__aligned__(8)));
835 */ 827 u8 pio_tailspace[4] __attribute__((__aligned__(8)));
836 struct b43_rxhdr_fw4 rxhdr;
837 struct b43_txhdr txhdr;
838 u8 rx_tail[4];
839 u8 tx_tail[4];
840#endif /* CONFIG_B43_PIO */ 828#endif /* CONFIG_B43_PIO */
841}; 829};
842 830
diff --git a/drivers/net/wireless/b43/main.c b/drivers/net/wireless/b43/main.c
index 098dda1a67c1..077480c4916a 100644
--- a/drivers/net/wireless/b43/main.c
+++ b/drivers/net/wireless/b43/main.c
@@ -3573,7 +3573,7 @@ static int b43_op_config(struct ieee80211_hw *hw, u32 changed)
3573 if (conf->channel->hw_value != phy->channel) 3573 if (conf->channel->hw_value != phy->channel)
3574 b43_switch_channel(dev, conf->channel->hw_value); 3574 b43_switch_channel(dev, conf->channel->hw_value);
3575 3575
3576 dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_RADIOTAP); 3576 dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_MONITOR);
3577 3577
3578 /* Adjust the desired TX power level. */ 3578 /* Adjust the desired TX power level. */
3579 if (conf->power_level != 0) { 3579 if (conf->power_level != 0) {
@@ -4669,7 +4669,7 @@ static int b43_wireless_core_attach(struct b43_wldev *dev)
4669{ 4669{
4670 struct b43_wl *wl = dev->wl; 4670 struct b43_wl *wl = dev->wl;
4671 struct ssb_bus *bus = dev->dev->bus; 4671 struct ssb_bus *bus = dev->dev->bus;
4672 struct pci_dev *pdev = bus->host_pci; 4672 struct pci_dev *pdev = (bus->bustype == SSB_BUSTYPE_PCI) ? bus->host_pci : NULL;
4673 int err; 4673 int err;
4674 bool have_2ghz_phy = 0, have_5ghz_phy = 0; 4674 bool have_2ghz_phy = 0, have_5ghz_phy = 0;
4675 u32 tmp; 4675 u32 tmp;
@@ -4802,7 +4802,7 @@ static int b43_one_core_attach(struct ssb_device *dev, struct b43_wl *wl)
4802 4802
4803 if (!list_empty(&wl->devlist)) { 4803 if (!list_empty(&wl->devlist)) {
4804 /* We are not the first core on this chip. */ 4804 /* We are not the first core on this chip. */
4805 pdev = dev->bus->host_pci; 4805 pdev = (dev->bus->bustype == SSB_BUSTYPE_PCI) ? dev->bus->host_pci : NULL;
4806 /* Only special chips support more than one wireless 4806 /* Only special chips support more than one wireless
4807 * core, although some of the other chips have more than 4807 * core, although some of the other chips have more than
4808 * one wireless core as well. Check for this and 4808 * one wireless core as well. Check for this and
diff --git a/drivers/net/wireless/b43/phy_lp.c b/drivers/net/wireless/b43/phy_lp.c
index 1e318d815a5b..3e046ec1ff86 100644
--- a/drivers/net/wireless/b43/phy_lp.c
+++ b/drivers/net/wireless/b43/phy_lp.c
@@ -67,6 +67,7 @@ static void b43_lpphy_op_prepare_structs(struct b43_wldev *dev)
67 struct b43_phy_lp *lpphy = phy->lp; 67 struct b43_phy_lp *lpphy = phy->lp;
68 68
69 memset(lpphy, 0, sizeof(*lpphy)); 69 memset(lpphy, 0, sizeof(*lpphy));
70 lpphy->antenna = B43_ANTENNA_DEFAULT;
70 71
71 //TODO 72 //TODO
72} 73}
@@ -751,11 +752,17 @@ static void lpphy_clear_deaf(struct b43_wldev *dev, bool user)
751 } 752 }
752} 753}
753 754
755static void lpphy_set_trsw_over(struct b43_wldev *dev, bool tx, bool rx)
756{
757 u16 trsw = (tx << 1) | rx;
758 b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFC, trsw);
759 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x3);
760}
761
754static void lpphy_disable_crs(struct b43_wldev *dev, bool user) 762static void lpphy_disable_crs(struct b43_wldev *dev, bool user)
755{ 763{
756 lpphy_set_deaf(dev, user); 764 lpphy_set_deaf(dev, user);
757 b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFC, 0x1); 765 lpphy_set_trsw_over(dev, false, true);
758 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x3);
759 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFB); 766 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFB);
760 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x4); 767 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x4);
761 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFF7); 768 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFF7);
@@ -790,6 +797,60 @@ static void lpphy_restore_crs(struct b43_wldev *dev, bool user)
790 797
791struct lpphy_tx_gains { u16 gm, pga, pad, dac; }; 798struct lpphy_tx_gains { u16 gm, pga, pad, dac; };
792 799
800static void lpphy_disable_rx_gain_override(struct b43_wldev *dev)
801{
802 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFFE);
803 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFEF);
804 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFBF);
805 if (dev->phy.rev >= 2) {
806 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF);
807 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
808 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFBFF);
809 b43_phy_mask(dev, B43_PHY_OFDM(0xE5), 0xFFF7);
810 }
811 } else {
812 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFDFF);
813 }
814}
815
816static void lpphy_enable_rx_gain_override(struct b43_wldev *dev)
817{
818 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x1);
819 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x10);
820 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x40);
821 if (dev->phy.rev >= 2) {
822 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x100);
823 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
824 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x400);
825 b43_phy_set(dev, B43_PHY_OFDM(0xE5), 0x8);
826 }
827 } else {
828 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x200);
829 }
830}
831
832static void lpphy_disable_tx_gain_override(struct b43_wldev *dev)
833{
834 if (dev->phy.rev < 2)
835 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF);
836 else {
837 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFF7F);
838 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xBFFF);
839 }
840 b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xFFBF);
841}
842
843static void lpphy_enable_tx_gain_override(struct b43_wldev *dev)
844{
845 if (dev->phy.rev < 2)
846 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x100);
847 else {
848 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x80);
849 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x4000);
850 }
851 b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVR, 0x40);
852}
853
793static struct lpphy_tx_gains lpphy_get_tx_gains(struct b43_wldev *dev) 854static struct lpphy_tx_gains lpphy_get_tx_gains(struct b43_wldev *dev)
794{ 855{
795 struct lpphy_tx_gains gains; 856 struct lpphy_tx_gains gains;
@@ -819,6 +880,17 @@ static void lpphy_set_dac_gain(struct b43_wldev *dev, u16 dac)
819 b43_phy_maskset(dev, B43_LPPHY_AFE_DAC_CTL, 0xF000, ctl); 880 b43_phy_maskset(dev, B43_LPPHY_AFE_DAC_CTL, 0xF000, ctl);
820} 881}
821 882
883static u16 lpphy_get_pa_gain(struct b43_wldev *dev)
884{
885 return b43_phy_read(dev, B43_PHY_OFDM(0xFB)) & 0x7F;
886}
887
888static void lpphy_set_pa_gain(struct b43_wldev *dev, u16 gain)
889{
890 b43_phy_maskset(dev, B43_PHY_OFDM(0xFB), 0xE03F, gain << 6);
891 b43_phy_maskset(dev, B43_PHY_OFDM(0xFD), 0x80FF, gain << 8);
892}
893
822static void lpphy_set_tx_gains(struct b43_wldev *dev, 894static void lpphy_set_tx_gains(struct b43_wldev *dev,
823 struct lpphy_tx_gains gains) 895 struct lpphy_tx_gains gains)
824{ 896{
@@ -829,25 +901,22 @@ static void lpphy_set_tx_gains(struct b43_wldev *dev,
829 b43_phy_maskset(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL, 901 b43_phy_maskset(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL,
830 0xF800, rf_gain); 902 0xF800, rf_gain);
831 } else { 903 } else {
832 pa_gain = b43_phy_read(dev, B43_PHY_OFDM(0xFB)) & 0x1FC0; 904 pa_gain = lpphy_get_pa_gain(dev);
833 pa_gain <<= 2;
834 b43_phy_write(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL, 905 b43_phy_write(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL,
835 (gains.pga << 8) | gains.gm); 906 (gains.pga << 8) | gains.gm);
907 /*
908 * SPEC FIXME The spec calls for (pa_gain << 8) here, but that
909 * conflicts with the spec for set_pa_gain! Vendor driver bug?
910 */
836 b43_phy_maskset(dev, B43_PHY_OFDM(0xFB), 911 b43_phy_maskset(dev, B43_PHY_OFDM(0xFB),
837 0x8000, gains.pad | pa_gain); 912 0x8000, gains.pad | (pa_gain << 6));
838 b43_phy_write(dev, B43_PHY_OFDM(0xFC), 913 b43_phy_write(dev, B43_PHY_OFDM(0xFC),
839 (gains.pga << 8) | gains.gm); 914 (gains.pga << 8) | gains.gm);
840 b43_phy_maskset(dev, B43_PHY_OFDM(0xFD), 915 b43_phy_maskset(dev, B43_PHY_OFDM(0xFD),
841 0x8000, gains.pad | pa_gain); 916 0x8000, gains.pad | (pa_gain << 8));
842 } 917 }
843 lpphy_set_dac_gain(dev, gains.dac); 918 lpphy_set_dac_gain(dev, gains.dac);
844 if (dev->phy.rev < 2) { 919 lpphy_enable_tx_gain_override(dev);
845 b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF, 1 << 8);
846 } else {
847 b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFF7F, 1 << 7);
848 b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_2, 0xBFFF, 1 << 14);
849 }
850 b43_phy_maskset(dev, B43_LPPHY_AFE_CTL_OVR, 0xFFBF, 1 << 6);
851} 920}
852 921
853static void lpphy_rev0_1_set_rx_gain(struct b43_wldev *dev, u32 gain) 922static void lpphy_rev0_1_set_rx_gain(struct b43_wldev *dev, u32 gain)
@@ -887,38 +956,6 @@ static void lpphy_rev2plus_set_rx_gain(struct b43_wldev *dev, u32 gain)
887 } 956 }
888} 957}
889 958
890static void lpphy_disable_rx_gain_override(struct b43_wldev *dev)
891{
892 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFFE);
893 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFEF);
894 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFBF);
895 if (dev->phy.rev >= 2) {
896 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFEFF);
897 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
898 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFBFF);
899 b43_phy_mask(dev, B43_PHY_OFDM(0xE5), 0xFFF7);
900 }
901 } else {
902 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFDFF);
903 }
904}
905
906static void lpphy_enable_rx_gain_override(struct b43_wldev *dev)
907{
908 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x1);
909 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x10);
910 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x40);
911 if (dev->phy.rev >= 2) {
912 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x100);
913 if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
914 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x400);
915 b43_phy_set(dev, B43_PHY_OFDM(0xE5), 0x8);
916 }
917 } else {
918 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x200);
919 }
920}
921
922static void lpphy_set_rx_gain(struct b43_wldev *dev, u32 gain) 959static void lpphy_set_rx_gain(struct b43_wldev *dev, u32 gain)
923{ 960{
924 if (dev->phy.rev < 2) 961 if (dev->phy.rev < 2)
@@ -1003,8 +1040,7 @@ static int lpphy_loopback(struct b43_wldev *dev)
1003 1040
1004 memset(&iq_est, 0, sizeof(iq_est)); 1041 memset(&iq_est, 0, sizeof(iq_est));
1005 1042
1006 b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xFFFC, 0x3); 1043 lpphy_set_trsw_over(dev, true, true);
1007 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x3);
1008 b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVR, 1); 1044 b43_phy_set(dev, B43_LPPHY_AFE_CTL_OVR, 1);
1009 b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xFFFE); 1045 b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xFFFE);
1010 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x800); 1046 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x800);
@@ -1126,7 +1162,7 @@ static void lpphy_set_tx_power_control(struct b43_wldev *dev,
1126 b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_NNUM, 1162 b43_phy_maskset(dev, B43_LPPHY_TX_PWR_CTL_NNUM,
1127 0x8FFF, ((u16)lpphy->tssi_npt << 16)); 1163 0x8FFF, ((u16)lpphy->tssi_npt << 16));
1128 //TODO Set "TSSI Transmit Count" variable to total transmitted frame count 1164 //TODO Set "TSSI Transmit Count" variable to total transmitted frame count
1129 //TODO Disable TX gain override 1165 lpphy_disable_tx_gain_override(dev);
1130 lpphy->tx_pwr_idx_over = -1; 1166 lpphy->tx_pwr_idx_over = -1;
1131 } 1167 }
1132 } 1168 }
@@ -1312,15 +1348,73 @@ static void lpphy_calibrate_rc(struct b43_wldev *dev)
1312 } 1348 }
1313} 1349}
1314 1350
1351static void b43_lpphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna)
1352{
1353 if (dev->phy.rev >= 2)
1354 return; // rev2+ doesn't support antenna diversity
1355
1356 if (B43_WARN_ON(antenna > B43_ANTENNA_AUTO1))
1357 return;
1358
1359 b43_hf_write(dev, b43_hf_read(dev) & ~B43_HF_ANTDIVHELP);
1360
1361 b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFFFD, antenna & 0x2);
1362 b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFFFE, antenna & 0x1);
1363
1364 b43_hf_write(dev, b43_hf_read(dev) | B43_HF_ANTDIVHELP);
1365
1366 dev->phy.lp->antenna = antenna;
1367}
1368
1369static void lpphy_set_tx_iqcc(struct b43_wldev *dev, u16 a, u16 b)
1370{
1371 u16 tmp[2];
1372
1373 tmp[0] = a;
1374 tmp[1] = b;
1375 b43_lptab_write_bulk(dev, B43_LPTAB16(0, 80), 2, tmp);
1376}
1377
1315static void lpphy_set_tx_power_by_index(struct b43_wldev *dev, u8 index) 1378static void lpphy_set_tx_power_by_index(struct b43_wldev *dev, u8 index)
1316{ 1379{
1317 struct b43_phy_lp *lpphy = dev->phy.lp; 1380 struct b43_phy_lp *lpphy = dev->phy.lp;
1381 struct lpphy_tx_gains gains;
1382 u32 iq_comp, tx_gain, coeff, rf_power;
1318 1383
1319 lpphy->tx_pwr_idx_over = index; 1384 lpphy->tx_pwr_idx_over = index;
1385 lpphy_read_tx_pctl_mode_from_hardware(dev);
1320 if (lpphy->txpctl_mode != B43_LPPHY_TXPCTL_OFF) 1386 if (lpphy->txpctl_mode != B43_LPPHY_TXPCTL_OFF)
1321 lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_SW); 1387 lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_SW);
1322 1388 if (dev->phy.rev >= 2) {
1323 //TODO 1389 iq_comp = b43_lptab_read(dev, B43_LPTAB32(7, index + 320));
1390 tx_gain = b43_lptab_read(dev, B43_LPTAB32(7, index + 192));
1391 gains.pad = (tx_gain >> 16) & 0xFF;
1392 gains.gm = tx_gain & 0xFF;
1393 gains.pga = (tx_gain >> 8) & 0xFF;
1394 gains.dac = (iq_comp >> 28) & 0xFF;
1395 lpphy_set_tx_gains(dev, gains);
1396 } else {
1397 iq_comp = b43_lptab_read(dev, B43_LPTAB32(10, index + 320));
1398 tx_gain = b43_lptab_read(dev, B43_LPTAB32(10, index + 192));
1399 b43_phy_maskset(dev, B43_LPPHY_TX_GAIN_CTL_OVERRIDE_VAL,
1400 0xF800, (tx_gain >> 4) & 0x7FFF);
1401 lpphy_set_dac_gain(dev, tx_gain & 0x7);
1402 lpphy_set_pa_gain(dev, (tx_gain >> 24) & 0x7F);
1403 }
1404 lpphy_set_bb_mult(dev, (iq_comp >> 20) & 0xFF);
1405 lpphy_set_tx_iqcc(dev, (iq_comp >> 10) & 0x3FF, iq_comp & 0x3FF);
1406 if (dev->phy.rev >= 2) {
1407 coeff = b43_lptab_read(dev, B43_LPTAB32(7, index + 448));
1408 } else {
1409 coeff = b43_lptab_read(dev, B43_LPTAB32(10, index + 448));
1410 }
1411 b43_lptab_write(dev, B43_LPTAB16(0, 85), coeff & 0xFFFF);
1412 if (dev->phy.rev >= 2) {
1413 rf_power = b43_lptab_read(dev, B43_LPTAB32(7, index + 576));
1414 b43_phy_maskset(dev, B43_LPPHY_RF_PWR_OVERRIDE, 0xFF00,
1415 rf_power & 0xFFFF);//SPEC FIXME mask & set != 0
1416 }
1417 lpphy_enable_tx_gain_override(dev);
1324} 1418}
1325 1419
1326static void lpphy_btcoex_override(struct b43_wldev *dev) 1420static void lpphy_btcoex_override(struct b43_wldev *dev)
@@ -1329,58 +1423,45 @@ static void lpphy_btcoex_override(struct b43_wldev *dev)
1329 b43_write16(dev, B43_MMIO_BTCOEX_TXCTL, 0xFF); 1423 b43_write16(dev, B43_MMIO_BTCOEX_TXCTL, 0xFF);
1330} 1424}
1331 1425
1332static void lpphy_pr41573_workaround(struct b43_wldev *dev) 1426static void b43_lpphy_op_software_rfkill(struct b43_wldev *dev,
1427 bool blocked)
1333{ 1428{
1334 struct b43_phy_lp *lpphy = dev->phy.lp; 1429 //TODO check MAC control register
1335 u32 *saved_tab; 1430 if (blocked) {
1336 const unsigned int saved_tab_size = 256; 1431 if (dev->phy.rev >= 2) {
1337 enum b43_lpphy_txpctl_mode txpctl_mode; 1432 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x83FF);
1338 s8 tx_pwr_idx_over; 1433 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x1F00);
1339 u16 tssi_npt, tssi_idx; 1434 b43_phy_mask(dev, B43_LPPHY_AFE_DDFS, 0x80FF);
1340 1435 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xDFFF);
1341 saved_tab = kcalloc(saved_tab_size, sizeof(saved_tab[0]), GFP_KERNEL); 1436 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x0808);
1342 if (!saved_tab) { 1437 } else {
1343 b43err(dev->wl, "PR41573 failed. Out of memory!\n"); 1438 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0xE0FF);
1344 return; 1439 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x1F00);
1345 } 1440 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2_VAL, 0xFCFF);
1346 1441 b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_2, 0x0018);
1347 lpphy_read_tx_pctl_mode_from_hardware(dev); 1442 }
1348 txpctl_mode = lpphy->txpctl_mode;
1349 tx_pwr_idx_over = lpphy->tx_pwr_idx_over;
1350 tssi_npt = lpphy->tssi_npt;
1351 tssi_idx = lpphy->tssi_idx;
1352
1353 if (dev->phy.rev < 2) {
1354 b43_lptab_read_bulk(dev, B43_LPTAB32(10, 0x140),
1355 saved_tab_size, saved_tab);
1356 } else { 1443 } else {
1357 b43_lptab_read_bulk(dev, B43_LPTAB32(7, 0x140), 1444 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xE0FF);
1358 saved_tab_size, saved_tab); 1445 if (dev->phy.rev >= 2)
1446 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xF7F7);
1447 else
1448 b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_2, 0xFFE7);
1359 } 1449 }
1360 //TODO
1361
1362 kfree(saved_tab);
1363} 1450}
1364 1451
1365static void lpphy_calibration(struct b43_wldev *dev) 1452/* This was previously called lpphy_japan_filter */
1453static void lpphy_set_analog_filter(struct b43_wldev *dev, int channel)
1366{ 1454{
1367 struct b43_phy_lp *lpphy = dev->phy.lp; 1455 struct b43_phy_lp *lpphy = dev->phy.lp;
1368 enum b43_lpphy_txpctl_mode saved_pctl_mode; 1456 u16 tmp = (channel == 14); //SPEC FIXME check japanwidefilter!
1369
1370 b43_mac_suspend(dev);
1371
1372 lpphy_btcoex_override(dev);
1373 lpphy_read_tx_pctl_mode_from_hardware(dev);
1374 saved_pctl_mode = lpphy->txpctl_mode;
1375 lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
1376 //TODO Perform transmit power table I/Q LO calibration
1377 if ((dev->phy.rev == 0) && (saved_pctl_mode != B43_LPPHY_TXPCTL_OFF))
1378 lpphy_pr41573_workaround(dev);
1379 //TODO If a full calibration has not been performed on this channel yet, perform PAPD TX-power calibration
1380 lpphy_set_tx_power_control(dev, saved_pctl_mode);
1381 //TODO Perform I/Q calibration with a single control value set
1382 1457
1383 b43_mac_enable(dev); 1458 if (dev->phy.rev < 2) { //SPEC FIXME Isn't this rev0/1-specific?
1459 b43_phy_maskset(dev, B43_LPPHY_LP_PHY_CTL, 0xFCFF, tmp << 9);
1460 if ((dev->phy.rev == 1) && (lpphy->rc_cap))
1461 lpphy_set_rc_cap(dev);
1462 } else {
1463 b43_radio_write(dev, B2063_TX_BB_SP3, 0x3F);
1464 }
1384} 1465}
1385 1466
1386static void lpphy_set_tssi_mux(struct b43_wldev *dev, enum tssi_mux_mode mode) 1467static void lpphy_set_tssi_mux(struct b43_wldev *dev, enum tssi_mux_mode mode)
@@ -1489,6 +1570,473 @@ static void lpphy_tx_pctl_init(struct b43_wldev *dev)
1489 } 1570 }
1490} 1571}
1491 1572
1573static void lpphy_pr41573_workaround(struct b43_wldev *dev)
1574{
1575 struct b43_phy_lp *lpphy = dev->phy.lp;
1576 u32 *saved_tab;
1577 const unsigned int saved_tab_size = 256;
1578 enum b43_lpphy_txpctl_mode txpctl_mode;
1579 s8 tx_pwr_idx_over;
1580 u16 tssi_npt, tssi_idx;
1581
1582 saved_tab = kcalloc(saved_tab_size, sizeof(saved_tab[0]), GFP_KERNEL);
1583 if (!saved_tab) {
1584 b43err(dev->wl, "PR41573 failed. Out of memory!\n");
1585 return;
1586 }
1587
1588 lpphy_read_tx_pctl_mode_from_hardware(dev);
1589 txpctl_mode = lpphy->txpctl_mode;
1590 tx_pwr_idx_over = lpphy->tx_pwr_idx_over;
1591 tssi_npt = lpphy->tssi_npt;
1592 tssi_idx = lpphy->tssi_idx;
1593
1594 if (dev->phy.rev < 2) {
1595 b43_lptab_read_bulk(dev, B43_LPTAB32(10, 0x140),
1596 saved_tab_size, saved_tab);
1597 } else {
1598 b43_lptab_read_bulk(dev, B43_LPTAB32(7, 0x140),
1599 saved_tab_size, saved_tab);
1600 }
1601 //FIXME PHY reset
1602 lpphy_table_init(dev); //FIXME is table init needed?
1603 lpphy_baseband_init(dev);
1604 lpphy_tx_pctl_init(dev);
1605 b43_lpphy_op_software_rfkill(dev, false);
1606 lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
1607 if (dev->phy.rev < 2) {
1608 b43_lptab_write_bulk(dev, B43_LPTAB32(10, 0x140),
1609 saved_tab_size, saved_tab);
1610 } else {
1611 b43_lptab_write_bulk(dev, B43_LPTAB32(7, 0x140),
1612 saved_tab_size, saved_tab);
1613 }
1614 b43_write16(dev, B43_MMIO_CHANNEL, lpphy->channel);
1615 lpphy->tssi_npt = tssi_npt;
1616 lpphy->tssi_idx = tssi_idx;
1617 lpphy_set_analog_filter(dev, lpphy->channel);
1618 if (tx_pwr_idx_over != -1)
1619 lpphy_set_tx_power_by_index(dev, tx_pwr_idx_over);
1620 if (lpphy->rc_cap)
1621 lpphy_set_rc_cap(dev);
1622 b43_lpphy_op_set_rx_antenna(dev, lpphy->antenna);
1623 lpphy_set_tx_power_control(dev, txpctl_mode);
1624 kfree(saved_tab);
1625}
1626
1627struct lpphy_rx_iq_comp { u8 chan; s8 c1, c0; };
1628
1629static const struct lpphy_rx_iq_comp lpphy_5354_iq_table[] = {
1630 { .chan = 1, .c1 = -66, .c0 = 15, },
1631 { .chan = 2, .c1 = -66, .c0 = 15, },
1632 { .chan = 3, .c1 = -66, .c0 = 15, },
1633 { .chan = 4, .c1 = -66, .c0 = 15, },
1634 { .chan = 5, .c1 = -66, .c0 = 15, },
1635 { .chan = 6, .c1 = -66, .c0 = 15, },
1636 { .chan = 7, .c1 = -66, .c0 = 14, },
1637 { .chan = 8, .c1 = -66, .c0 = 14, },
1638 { .chan = 9, .c1 = -66, .c0 = 14, },
1639 { .chan = 10, .c1 = -66, .c0 = 14, },
1640 { .chan = 11, .c1 = -66, .c0 = 14, },
1641 { .chan = 12, .c1 = -66, .c0 = 13, },
1642 { .chan = 13, .c1 = -66, .c0 = 13, },
1643 { .chan = 14, .c1 = -66, .c0 = 13, },
1644};
1645
1646static const struct lpphy_rx_iq_comp lpphy_rev0_1_iq_table[] = {
1647 { .chan = 1, .c1 = -64, .c0 = 13, },
1648 { .chan = 2, .c1 = -64, .c0 = 13, },
1649 { .chan = 3, .c1 = -64, .c0 = 13, },
1650 { .chan = 4, .c1 = -64, .c0 = 13, },
1651 { .chan = 5, .c1 = -64, .c0 = 12, },
1652 { .chan = 6, .c1 = -64, .c0 = 12, },
1653 { .chan = 7, .c1 = -64, .c0 = 12, },
1654 { .chan = 8, .c1 = -64, .c0 = 12, },
1655 { .chan = 9, .c1 = -64, .c0 = 12, },
1656 { .chan = 10, .c1 = -64, .c0 = 11, },
1657 { .chan = 11, .c1 = -64, .c0 = 11, },
1658 { .chan = 12, .c1 = -64, .c0 = 11, },
1659 { .chan = 13, .c1 = -64, .c0 = 11, },
1660 { .chan = 14, .c1 = -64, .c0 = 10, },
1661 { .chan = 34, .c1 = -62, .c0 = 24, },
1662 { .chan = 38, .c1 = -62, .c0 = 24, },
1663 { .chan = 42, .c1 = -62, .c0 = 24, },
1664 { .chan = 46, .c1 = -62, .c0 = 23, },
1665 { .chan = 36, .c1 = -62, .c0 = 24, },
1666 { .chan = 40, .c1 = -62, .c0 = 24, },
1667 { .chan = 44, .c1 = -62, .c0 = 23, },
1668 { .chan = 48, .c1 = -62, .c0 = 23, },
1669 { .chan = 52, .c1 = -62, .c0 = 23, },
1670 { .chan = 56, .c1 = -62, .c0 = 22, },
1671 { .chan = 60, .c1 = -62, .c0 = 22, },
1672 { .chan = 64, .c1 = -62, .c0 = 22, },
1673 { .chan = 100, .c1 = -62, .c0 = 16, },
1674 { .chan = 104, .c1 = -62, .c0 = 16, },
1675 { .chan = 108, .c1 = -62, .c0 = 15, },
1676 { .chan = 112, .c1 = -62, .c0 = 14, },
1677 { .chan = 116, .c1 = -62, .c0 = 14, },
1678 { .chan = 120, .c1 = -62, .c0 = 13, },
1679 { .chan = 124, .c1 = -62, .c0 = 12, },
1680 { .chan = 128, .c1 = -62, .c0 = 12, },
1681 { .chan = 132, .c1 = -62, .c0 = 12, },
1682 { .chan = 136, .c1 = -62, .c0 = 11, },
1683 { .chan = 140, .c1 = -62, .c0 = 10, },
1684 { .chan = 149, .c1 = -61, .c0 = 9, },
1685 { .chan = 153, .c1 = -61, .c0 = 9, },
1686 { .chan = 157, .c1 = -61, .c0 = 9, },
1687 { .chan = 161, .c1 = -61, .c0 = 8, },
1688 { .chan = 165, .c1 = -61, .c0 = 8, },
1689 { .chan = 184, .c1 = -62, .c0 = 25, },
1690 { .chan = 188, .c1 = -62, .c0 = 25, },
1691 { .chan = 192, .c1 = -62, .c0 = 25, },
1692 { .chan = 196, .c1 = -62, .c0 = 25, },
1693 { .chan = 200, .c1 = -62, .c0 = 25, },
1694 { .chan = 204, .c1 = -62, .c0 = 25, },
1695 { .chan = 208, .c1 = -62, .c0 = 25, },
1696 { .chan = 212, .c1 = -62, .c0 = 25, },
1697 { .chan = 216, .c1 = -62, .c0 = 26, },
1698};
1699
1700static const struct lpphy_rx_iq_comp lpphy_rev2plus_iq_comp = {
1701 .chan = 0,
1702 .c1 = -64,
1703 .c0 = 0,
1704};
1705
1706static u8 lpphy_nbits(s32 val)
1707{
1708 u32 tmp = abs(val);
1709 u8 nbits = 0;
1710
1711 while (tmp != 0) {
1712 nbits++;
1713 tmp >>= 1;
1714 }
1715
1716 return nbits;
1717}
1718
/*
 * Measure and program the RX I/Q compensation coefficients.
 * Runs an I/Q estimate over @samples samples with neutral coefficients
 * loaded, derives new c0/c1 correction terms from the measured powers,
 * and writes them to B43_LPPHY_RX_COMP_COEFF_S. On estimation failure
 * the previously programmed coefficients are written back.
 * Returns the result of lpphy_rx_iq_est() (nonzero on success).
 */
static int lpphy_calc_rx_iq_comp(struct b43_wldev *dev, u16 samples)
{
	struct lpphy_iq_est iq_est;
	u16 c0, c1;
	int prod, ipwr, qpwr, prod_msb, q_msb, tmp1, tmp2, tmp3, tmp4, ret;

	/* Save the current coefficients (c0 = high byte, c1 = low byte)
	 * so they can be restored if the estimate fails. */
	c1 = b43_phy_read(dev, B43_LPPHY_RX_COMP_COEFF_S);
	c0 = c1 >> 8;
	/* NOTE(review): this looks like it was meant to isolate the low
	 * byte (c1 &= 0xFF); as written it forces the low byte to 0xFF.
	 * Kept as-is to match the vendor-derived code — verify against
	 * the specs before changing. */
	c1 |= 0xFF;

	/* Load neutral coefficients for the measurement run. */
	b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0xFF00, 0x00C0);
	b43_phy_mask(dev, B43_LPPHY_RX_COMP_COEFF_S, 0x00FF);

	ret = lpphy_rx_iq_est(dev, samples, 32, &iq_est);
	if (!ret)
		goto out;

	prod = iq_est.iq_prod;
	ipwr = iq_est.i_pwr;
	qpwr = iq_est.q_pwr;

	/* Not enough signal power to derive meaningful coefficients. */
	if (ipwr + qpwr < 2) {
		ret = 0;
		goto out;
	}

	/* Fixed-point normalization: shift the operands so the divisions
	 * below keep maximum precision without overflowing 32 bits. */
	prod_msb = lpphy_nbits(prod);
	q_msb = lpphy_nbits(qpwr);
	tmp1 = prod_msb - 20;

	if (tmp1 >= 0) {
		tmp3 = ((prod << (30 - prod_msb)) + (ipwr >> (1 + tmp1))) /
			(ipwr >> tmp1);
	} else {
		tmp3 = ((prod << (30 - prod_msb)) + (ipwr << (-1 - tmp1))) /
			(ipwr << -tmp1);
	}

	tmp2 = q_msb - 11;

	if (tmp2 >= 0)
		tmp4 = (qpwr << (31 - q_msb)) / (ipwr >> tmp2);
	else
		tmp4 = (qpwr << (31 - q_msb)) / (ipwr << -tmp2);

	tmp4 -= tmp3 * tmp3;
	tmp4 = -int_sqrt(tmp4);

	c0 = tmp3 >> 3;
	c1 = tmp4 >> 4;

out:
	/* Program (or restore) the coefficients: low byte = c1,
	 * high byte = c0. */
	b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0xFF00, c1);
	b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0x00FF, c0 << 8);
	return ret;
}
1775
/* Fixed-point complex (I/Q) sample, using two 32-bit signed integers */
typedef struct {s32 i, q;} lpphy_c32;
1778
1779static lpphy_c32 lpphy_cordic(int theta)
1780{
1781 u32 arctg[] = { 2949120, 1740967, 919879, 466945, 234379, 117304,
1782 58666, 29335, 14668, 7334, 3667, 1833, 917, 458,
1783 229, 115, 57, 29, };
1784 int i, tmp, signx = 1, angle = 0;
1785 lpphy_c32 ret = { .i = 39797, .q = 0, };
1786
1787 theta = clamp_t(int, theta, -180, 180);
1788
1789 if (theta > 90) {
1790 theta -= 180;
1791 signx = -1;
1792 } else if (theta < -90) {
1793 theta += 180;
1794 signx = -1;
1795 }
1796
1797 for (i = 0; i <= 17; i++) {
1798 if (theta > angle) {
1799 tmp = ret.i - (ret.q >> i);
1800 ret.q += ret.i >> i;
1801 ret.i = tmp;
1802 angle += arctg[i];
1803 } else {
1804 tmp = ret.i + (ret.q >> i);
1805 ret.q -= ret.i >> i;
1806 ret.i = tmp;
1807 angle -= arctg[i];
1808 }
1809 }
1810
1811 ret.i *= signx;
1812 ret.q *= signx;
1813
1814 return ret;
1815}
1816
1817static void lpphy_run_samples(struct b43_wldev *dev, u16 samples, u16 loops,
1818 u16 wait)
1819{
1820 b43_phy_maskset(dev, B43_LPPHY_SMPL_PLAY_BUFFER_CTL,
1821 0xFFC0, samples - 1);
1822 if (loops != 0xFFFF)
1823 loops--;
1824 b43_phy_maskset(dev, B43_LPPHY_SMPL_PLAY_COUNT, 0xF000, loops);
1825 b43_phy_maskset(dev, B43_LPPHY_SMPL_PLAY_BUFFER_CTL, 0x3F, wait << 6);
1826 b43_phy_set(dev, B43_LPPHY_A_PHY_CTL_ADDR, 0x1);
1827}
1828
//SPEC FIXME what does a negative freq mean?
/*
 * Start transmitting a continuous complex tone at frequency @freq with
 * amplitude scale @max: fill the sample-play table with an integral
 * number of periods of a CORDIC-generated sinusoid and loop it forever
 * via lpphy_run_samples().
 * NOTE(review): the period search below implies @freq is in units such
 * that one period spans 20000/|freq| samples (presumably kHz against a
 * 20 MHz sample clock) — confirm against the specs.
 */
static void lpphy_start_tx_tone(struct b43_wldev *dev, s32 freq, u16 max)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	u16 buf[64];
	int i, samples = 0, angle = 0, rotation = (9 * freq) / 500;
	lpphy_c32 sample;

	lpphy->tx_tone_freq = freq;

	if (freq) {
		/* Find i for which abs(freq) integrally divides 20000 * i */
		for (i = 1; samples * abs(freq) != 20000 * i; i++) {
			samples = (20000 * i) / abs(freq);
			/* Table only holds 63 samples; bail out rather
			 * than overrun buf[]. */
			if(B43_WARN_ON(samples > 63))
				return;
		}
	} else {
		/* DC tone: two identical samples suffice. */
		samples = 2;
	}

	/* Pack each sample as one 16-bit word: I in the high byte,
	 * Q in the low byte. */
	for (i = 0; i < samples; i++) {
		sample = lpphy_cordic(angle);
		angle += rotation;
		buf[i] = ((sample.i * max) & 0xFF) << 8;
		buf[i] |= (sample.q * max) & 0xFF;
	}

	b43_lptab_write_bulk(dev, B43_LPTAB16(5, 0), samples, buf);

	/* Loop the buffer endlessly with no inter-run wait. */
	lpphy_run_samples(dev, samples, 0xFFFF, 0);
}
1861
1862static void lpphy_stop_tx_tone(struct b43_wldev *dev)
1863{
1864 struct b43_phy_lp *lpphy = dev->phy.lp;
1865 int i;
1866
1867 lpphy->tx_tone_freq = 0;
1868
1869 b43_phy_mask(dev, B43_LPPHY_SMPL_PLAY_COUNT, 0xF000);
1870 for (i = 0; i < 31; i++) {
1871 if (!(b43_phy_read(dev, B43_LPPHY_A_PHY_CTL_ADDR) & 0x1))
1872 break;
1873 udelay(100);
1874 }
1875}
1876
1877
/*
 * Power-amplifier pre-distortion (PAPD) calibration — not implemented
 * yet; all parameters are currently unused.
 */
static void lpphy_papd_cal(struct b43_wldev *dev, struct lpphy_tx_gains gains,
			   int mode, bool useindex, u8 index)
{
	//TODO
}
1883
1884static void lpphy_papd_cal_txpwr(struct b43_wldev *dev)
1885{
1886 struct b43_phy_lp *lpphy = dev->phy.lp;
1887 struct ssb_bus *bus = dev->dev->bus;
1888 struct lpphy_tx_gains gains, oldgains;
1889 int old_txpctl, old_afe_ovr, old_rf, old_bbmult;
1890
1891 lpphy_read_tx_pctl_mode_from_hardware(dev);
1892 old_txpctl = lpphy->txpctl_mode;
1893 old_afe_ovr = b43_phy_read(dev, B43_LPPHY_AFE_CTL_OVR) & 0x40;
1894 if (old_afe_ovr)
1895 oldgains = lpphy_get_tx_gains(dev);
1896 old_rf = b43_phy_read(dev, B43_LPPHY_RF_PWR_OVERRIDE) & 0xFF;
1897 old_bbmult = lpphy_get_bb_mult(dev);
1898
1899 lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
1900
1901 if (bus->chip_id == 0x4325 && bus->chip_rev == 0)
1902 lpphy_papd_cal(dev, gains, 0, 1, 30);
1903 else
1904 lpphy_papd_cal(dev, gains, 0, 1, 65);
1905
1906 if (old_afe_ovr)
1907 lpphy_set_tx_gains(dev, oldgains);
1908 lpphy_set_bb_mult(dev, old_bbmult);
1909 lpphy_set_tx_power_control(dev, old_txpctl);
1910 b43_phy_maskset(dev, B43_LPPHY_RF_PWR_OVERRIDE, 0xFF00, old_rf);
1911}
1912
/*
 * RX I/Q calibration.
 * @noise: calibrate against noise (forces tx on, rx/pa off below);
 * @tx/@rx: TR-switch override settings; @pa: enable the PA override bit;
 * @gains: TX gains to use for the tone-based calibration, or NULL for
 * all-zero gains.
 * Looks up the channel's compensation coefficients, programs the RF
 * overrides, runs lpphy_calc_rx_iq_comp() (against noise or against a
 * generated 4 MHz(?) TX tone), then restores the overridden state.
 * Returns the result of lpphy_calc_rx_iq_comp() (nonzero on success).
 */
static int lpphy_rx_iq_cal(struct b43_wldev *dev, bool noise, bool tx,
			   bool rx, bool pa, struct lpphy_tx_gains *gains)
{
	struct b43_phy_lp *lpphy = dev->phy.lp;
	struct ssb_bus *bus = dev->dev->bus;
	const struct lpphy_rx_iq_comp *iqcomp = NULL;
	struct lpphy_tx_gains nogains, oldgains;
	u16 tmp;
	int i, ret;

	memset(&nogains, 0, sizeof(nogains));
	memset(&oldgains, 0, sizeof(oldgains));

	/* Select the I/Q compensation entry for this chip/revision and
	 * channel. The table scans don't break early; the last matching
	 * entry wins (channels are unique, so this is equivalent). */
	if (bus->chip_id == 0x5354) {
		for (i = 0; i < ARRAY_SIZE(lpphy_5354_iq_table); i++) {
			if (lpphy_5354_iq_table[i].chan == lpphy->channel) {
				iqcomp = &lpphy_5354_iq_table[i];
			}
		}
	} else if (dev->phy.rev >= 2) {
		iqcomp = &lpphy_rev2plus_iq_comp;
	} else {
		for (i = 0; i < ARRAY_SIZE(lpphy_rev0_1_iq_table); i++) {
			if (lpphy_rev0_1_iq_table[i].chan == lpphy->channel) {
				iqcomp = &lpphy_rev0_1_iq_table[i];
			}
		}
	}

	if (B43_WARN_ON(!iqcomp))
		return 0;

	/* Program the starting coefficients: low byte = c1, high = c0. */
	b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S, 0xFF00, iqcomp->c1);
	b43_phy_maskset(dev, B43_LPPHY_RX_COMP_COEFF_S,
			0x00FF, iqcomp->c0 << 8);

	/* Noise calibration forces a fixed TR-switch configuration. */
	if (noise) {
		tx = true;
		rx = false;
		pa = false;
	}

	lpphy_set_trsw_over(dev, tx, rx);

	/* PA override lives at a different bit per band. */
	if (b43_current_band(dev->wl) == IEEE80211_BAND_2GHZ) {
		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x8);
		b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0,
				0xFFF7, pa << 3);
	} else {
		b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x20);
		b43_phy_maskset(dev, B43_LPPHY_RF_OVERRIDE_VAL_0,
				0xFFDF, pa << 5);
	}

	/* Remember whether a TX gain override was active, so it can be
	 * restored (or disabled) on the way out. */
	tmp = b43_phy_read(dev, B43_LPPHY_AFE_CTL_OVR) & 0x40;

	if (noise)
		lpphy_set_rx_gain(dev, 0x2D5D);
	else {
		if (tmp)
			oldgains = lpphy_get_tx_gains(dev);
		if (!gains)
			gains = &nogains;
		lpphy_set_tx_gains(dev, *gains);
	}

	b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xFFFE);
	b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xFFFE);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_0, 0x800);
	b43_phy_set(dev, B43_LPPHY_RF_OVERRIDE_VAL_0, 0x800);
	lpphy_set_deaf(dev, false);
	/* Measure: against ambient noise, or against our own TX tone. */
	if (noise)
		ret = lpphy_calc_rx_iq_comp(dev, 0xFFF0);
	else {
		lpphy_start_tx_tone(dev, 4000, 100);
		ret = lpphy_calc_rx_iq_comp(dev, 0x4000);
		lpphy_stop_tx_tone(dev);
	}
	lpphy_clear_deaf(dev, false);
	/* Tear down the overrides set above. */
	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFFC);
	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFF7);
	b43_phy_mask(dev, B43_LPPHY_RF_OVERRIDE_0, 0xFFDF);
	if (!noise) {
		if (tmp)
			lpphy_set_tx_gains(dev, oldgains);
		else
			lpphy_disable_tx_gain_override(dev);
	}
	lpphy_disable_rx_gain_override(dev);
	b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVR, 0xFFFE);
	b43_phy_mask(dev, B43_LPPHY_AFE_CTL_OVRVAL, 0xF7FF);
	return ret;
}
2006
2007static void lpphy_calibration(struct b43_wldev *dev)
2008{
2009 struct b43_phy_lp *lpphy = dev->phy.lp;
2010 enum b43_lpphy_txpctl_mode saved_pctl_mode;
2011 bool full_cal = false;
2012
2013 if (lpphy->full_calib_chan != lpphy->channel) {
2014 full_cal = true;
2015 lpphy->full_calib_chan = lpphy->channel;
2016 }
2017
2018 b43_mac_suspend(dev);
2019
2020 lpphy_btcoex_override(dev);
2021 if (dev->phy.rev >= 2)
2022 lpphy_save_dig_flt_state(dev);
2023 lpphy_read_tx_pctl_mode_from_hardware(dev);
2024 saved_pctl_mode = lpphy->txpctl_mode;
2025 lpphy_set_tx_power_control(dev, B43_LPPHY_TXPCTL_OFF);
2026 //TODO Perform transmit power table I/Q LO calibration
2027 if ((dev->phy.rev == 0) && (saved_pctl_mode != B43_LPPHY_TXPCTL_OFF))
2028 lpphy_pr41573_workaround(dev);
2029 if ((dev->phy.rev >= 2) && full_cal) {
2030 lpphy_papd_cal_txpwr(dev);
2031 }
2032 lpphy_set_tx_power_control(dev, saved_pctl_mode);
2033 if (dev->phy.rev >= 2)
2034 lpphy_restore_dig_flt_state(dev);
2035 lpphy_rx_iq_cal(dev, true, true, false, false, NULL);
2036
2037 b43_mac_enable(dev);
2038}
2039
1492static u16 b43_lpphy_op_read(struct b43_wldev *dev, u16 reg) 2040static u16 b43_lpphy_op_read(struct b43_wldev *dev, u16 reg)
1493{ 2041{
1494 b43_write16(dev, B43_MMIO_PHY_CONTROL, reg); 2042 b43_write16(dev, B43_MMIO_PHY_CONTROL, reg);
@@ -1533,12 +2081,6 @@ static void b43_lpphy_op_radio_write(struct b43_wldev *dev, u16 reg, u16 value)
1533 b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value); 2081 b43_write16(dev, B43_MMIO_RADIO_DATA_LOW, value);
1534} 2082}
1535 2083
1536static void b43_lpphy_op_software_rfkill(struct b43_wldev *dev,
1537 bool blocked)
1538{
1539 //TODO
1540}
1541
1542struct b206x_channel { 2084struct b206x_channel {
1543 u8 channel; 2085 u8 channel;
1544 u16 freq; 2086 u16 freq;
@@ -2004,22 +2546,6 @@ static int lpphy_b2062_tune(struct b43_wldev *dev,
2004 return err; 2546 return err;
2005} 2547}
2006 2548
2007
2008/* This was previously called lpphy_japan_filter */
2009static void lpphy_set_analog_filter(struct b43_wldev *dev, int channel)
2010{
2011 struct b43_phy_lp *lpphy = dev->phy.lp;
2012 u16 tmp = (channel == 14); //SPEC FIXME check japanwidefilter!
2013
2014 if (dev->phy.rev < 2) { //SPEC FIXME Isn't this rev0/1-specific?
2015 b43_phy_maskset(dev, B43_LPPHY_LP_PHY_CTL, 0xFCFF, tmp << 9);
2016 if ((dev->phy.rev == 1) && (lpphy->rc_cap))
2017 lpphy_set_rc_cap(dev);
2018 } else {
2019 b43_radio_write(dev, B2063_TX_BB_SP3, 0x3F);
2020 }
2021}
2022
2023static void lpphy_b2063_vco_calib(struct b43_wldev *dev) 2549static void lpphy_b2063_vco_calib(struct b43_wldev *dev)
2024{ 2550{
2025 u16 tmp; 2551 u16 tmp;
@@ -2204,18 +2730,6 @@ static int b43_lpphy_op_init(struct b43_wldev *dev)
2204 return 0; 2730 return 0;
2205} 2731}
2206 2732
2207static void b43_lpphy_op_set_rx_antenna(struct b43_wldev *dev, int antenna)
2208{
2209 if (dev->phy.rev >= 2)
2210 return; // rev2+ doesn't support antenna diversity
2211
2212 if (B43_WARN_ON(antenna > B43_ANTENNA_AUTO1))
2213 return;
2214
2215 b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFFFD, antenna & 0x2);
2216 b43_phy_maskset(dev, B43_LPPHY_CRSGAIN_CTL, 0xFFFE, antenna & 0x1);
2217}
2218
2219static void b43_lpphy_op_adjust_txpower(struct b43_wldev *dev) 2733static void b43_lpphy_op_adjust_txpower(struct b43_wldev *dev)
2220{ 2734{
2221 //TODO 2735 //TODO
@@ -2238,6 +2752,11 @@ void b43_lpphy_op_switch_analog(struct b43_wldev *dev, bool on)
2238 } 2752 }
2239} 2753}
2240 2754
2755static void b43_lpphy_op_pwork_15sec(struct b43_wldev *dev)
2756{
2757 //TODO
2758}
2759
2241const struct b43_phy_operations b43_phyops_lp = { 2760const struct b43_phy_operations b43_phyops_lp = {
2242 .allocate = b43_lpphy_op_allocate, 2761 .allocate = b43_lpphy_op_allocate,
2243 .free = b43_lpphy_op_free, 2762 .free = b43_lpphy_op_free,
@@ -2255,4 +2774,6 @@ const struct b43_phy_operations b43_phyops_lp = {
2255 .set_rx_antenna = b43_lpphy_op_set_rx_antenna, 2774 .set_rx_antenna = b43_lpphy_op_set_rx_antenna,
2256 .recalc_txpower = b43_lpphy_op_recalc_txpower, 2775 .recalc_txpower = b43_lpphy_op_recalc_txpower,
2257 .adjust_txpower = b43_lpphy_op_adjust_txpower, 2776 .adjust_txpower = b43_lpphy_op_adjust_txpower,
2777 .pwork_15sec = b43_lpphy_op_pwork_15sec,
2778 .pwork_60sec = lpphy_calibration,
2258}; 2779};
diff --git a/drivers/net/wireless/b43/phy_lp.h b/drivers/net/wireless/b43/phy_lp.h
index c3232c17b60a..62737f700cbc 100644
--- a/drivers/net/wireless/b43/phy_lp.h
+++ b/drivers/net/wireless/b43/phy_lp.h
@@ -286,6 +286,7 @@
286#define B43_LPPHY_TR_LOOKUP_6 B43_PHY_OFDM(0xC8) /* TR Lookup 6 */ 286#define B43_LPPHY_TR_LOOKUP_6 B43_PHY_OFDM(0xC8) /* TR Lookup 6 */
287#define B43_LPPHY_TR_LOOKUP_7 B43_PHY_OFDM(0xC9) /* TR Lookup 7 */ 287#define B43_LPPHY_TR_LOOKUP_7 B43_PHY_OFDM(0xC9) /* TR Lookup 7 */
288#define B43_LPPHY_TR_LOOKUP_8 B43_PHY_OFDM(0xCA) /* TR Lookup 8 */ 288#define B43_LPPHY_TR_LOOKUP_8 B43_PHY_OFDM(0xCA) /* TR Lookup 8 */
289#define B43_LPPHY_RF_PWR_OVERRIDE B43_PHY_OFDM(0xD3) /* RF power override */
289 290
290 291
291 292
@@ -871,12 +872,12 @@ struct b43_phy_lp {
871 u8 rssi_gs; 872 u8 rssi_gs;
872 873
873 /* RC cap */ 874 /* RC cap */
874 u8 rc_cap; /* FIXME initial value? */ 875 u8 rc_cap;
875 /* BX arch */ 876 /* BX arch */
876 u8 bx_arch; 877 u8 bx_arch;
877 878
878 /* Full calibration channel */ 879 /* Full calibration channel */
879 u8 full_calib_chan; /* FIXME initial value? */ 880 u8 full_calib_chan;
880 881
881 /* Transmit iqlocal best coeffs */ 882 /* Transmit iqlocal best coeffs */
882 bool tx_iqloc_best_coeffs_valid; 883 bool tx_iqloc_best_coeffs_valid;
@@ -891,6 +892,12 @@ struct b43_phy_lp {
891 892
892 /* The channel we are tuned to */ 893 /* The channel we are tuned to */
893 u8 channel; 894 u8 channel;
895
896 /* The active antenna diversity mode */
897 int antenna;
898
899 /* Frequency of the active TX tone */
900 int tx_tone_freq;
894}; 901};
895 902
896enum tssi_mux_mode { 903enum tssi_mux_mode {
diff --git a/drivers/net/wireless/b43/pio.c b/drivers/net/wireless/b43/pio.c
index dbbf0d11e18e..3105f235303a 100644
--- a/drivers/net/wireless/b43/pio.c
+++ b/drivers/net/wireless/b43/pio.c
@@ -341,12 +341,15 @@ static u16 tx_write_2byte_queue(struct b43_pio_txqueue *q,
341 q->mmio_base + B43_PIO_TXDATA, 341 q->mmio_base + B43_PIO_TXDATA,
342 sizeof(u16)); 342 sizeof(u16));
343 if (data_len & 1) { 343 if (data_len & 1) {
344 u8 *tail = wl->pio_tailspace;
345 BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);
346
344 /* Write the last byte. */ 347 /* Write the last byte. */
345 ctl &= ~B43_PIO_TXCTL_WRITEHI; 348 ctl &= ~B43_PIO_TXCTL_WRITEHI;
346 b43_piotx_write16(q, B43_PIO_TXCTL, ctl); 349 b43_piotx_write16(q, B43_PIO_TXCTL, ctl);
347 wl->tx_tail[0] = data[data_len - 1]; 350 tail[0] = data[data_len - 1];
348 wl->tx_tail[1] = 0; 351 tail[1] = 0;
349 ssb_block_write(dev->dev, wl->tx_tail, 2, 352 ssb_block_write(dev->dev, tail, 2,
350 q->mmio_base + B43_PIO_TXDATA, 353 q->mmio_base + B43_PIO_TXDATA,
351 sizeof(u16)); 354 sizeof(u16));
352 } 355 }
@@ -392,31 +395,31 @@ static u32 tx_write_4byte_queue(struct b43_pio_txqueue *q,
392 q->mmio_base + B43_PIO8_TXDATA, 395 q->mmio_base + B43_PIO8_TXDATA,
393 sizeof(u32)); 396 sizeof(u32));
394 if (data_len & 3) { 397 if (data_len & 3) {
395 wl->tx_tail[3] = 0; 398 u8 *tail = wl->pio_tailspace;
399 BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);
400
401 memset(tail, 0, 4);
396 /* Write the last few bytes. */ 402 /* Write the last few bytes. */
397 ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 | 403 ctl &= ~(B43_PIO8_TXCTL_8_15 | B43_PIO8_TXCTL_16_23 |
398 B43_PIO8_TXCTL_24_31); 404 B43_PIO8_TXCTL_24_31);
399 switch (data_len & 3) { 405 switch (data_len & 3) {
400 case 3: 406 case 3:
401 ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15; 407 ctl |= B43_PIO8_TXCTL_16_23 | B43_PIO8_TXCTL_8_15;
402 wl->tx_tail[0] = data[data_len - 3]; 408 tail[0] = data[data_len - 3];
403 wl->tx_tail[1] = data[data_len - 2]; 409 tail[1] = data[data_len - 2];
404 wl->tx_tail[2] = data[data_len - 1]; 410 tail[2] = data[data_len - 1];
405 break; 411 break;
406 case 2: 412 case 2:
407 ctl |= B43_PIO8_TXCTL_8_15; 413 ctl |= B43_PIO8_TXCTL_8_15;
408 wl->tx_tail[0] = data[data_len - 2]; 414 tail[0] = data[data_len - 2];
409 wl->tx_tail[1] = data[data_len - 1]; 415 tail[1] = data[data_len - 1];
410 wl->tx_tail[2] = 0;
411 break; 416 break;
412 case 1: 417 case 1:
413 wl->tx_tail[0] = data[data_len - 1]; 418 tail[0] = data[data_len - 1];
414 wl->tx_tail[1] = 0;
415 wl->tx_tail[2] = 0;
416 break; 419 break;
417 } 420 }
418 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl); 421 b43_piotx_write32(q, B43_PIO8_TXCTL, ctl);
419 ssb_block_write(dev->dev, wl->tx_tail, 4, 422 ssb_block_write(dev->dev, tail, 4,
420 q->mmio_base + B43_PIO8_TXDATA, 423 q->mmio_base + B43_PIO8_TXDATA,
421 sizeof(u32)); 424 sizeof(u32));
422 } 425 }
@@ -455,6 +458,7 @@ static int pio_tx_frame(struct b43_pio_txqueue *q,
455 int err; 458 int err;
456 unsigned int hdrlen; 459 unsigned int hdrlen;
457 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 460 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
461 struct b43_txhdr *txhdr = (struct b43_txhdr *)wl->pio_scratchspace;
458 462
459 B43_WARN_ON(list_empty(&q->packets_list)); 463 B43_WARN_ON(list_empty(&q->packets_list));
460 pack = list_entry(q->packets_list.next, 464 pack = list_entry(q->packets_list.next,
@@ -462,7 +466,9 @@ static int pio_tx_frame(struct b43_pio_txqueue *q,
462 466
463 cookie = generate_cookie(q, pack); 467 cookie = generate_cookie(q, pack);
464 hdrlen = b43_txhdr_size(dev); 468 hdrlen = b43_txhdr_size(dev);
465 err = b43_generate_txhdr(dev, (u8 *)&wl->txhdr, skb, 469 BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(struct b43_txhdr));
470 B43_WARN_ON(sizeof(wl->pio_scratchspace) < hdrlen);
471 err = b43_generate_txhdr(dev, (u8 *)txhdr, skb,
466 info, cookie); 472 info, cookie);
467 if (err) 473 if (err)
468 return err; 474 return err;
@@ -476,9 +482,9 @@ static int pio_tx_frame(struct b43_pio_txqueue *q,
476 482
477 pack->skb = skb; 483 pack->skb = skb;
478 if (q->rev >= 8) 484 if (q->rev >= 8)
479 pio_tx_frame_4byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen); 485 pio_tx_frame_4byte_queue(pack, (const u8 *)txhdr, hdrlen);
480 else 486 else
481 pio_tx_frame_2byte_queue(pack, (const u8 *)&wl->txhdr, hdrlen); 487 pio_tx_frame_2byte_queue(pack, (const u8 *)txhdr, hdrlen);
482 488
483 /* Remove it from the list of available packet slots. 489 /* Remove it from the list of available packet slots.
484 * It will be put back when we receive the status report. */ 490 * It will be put back when we receive the status report. */
@@ -624,8 +630,11 @@ static bool pio_rx_frame(struct b43_pio_rxqueue *q)
624 unsigned int i, padding; 630 unsigned int i, padding;
625 struct sk_buff *skb; 631 struct sk_buff *skb;
626 const char *err_msg = NULL; 632 const char *err_msg = NULL;
633 struct b43_rxhdr_fw4 *rxhdr =
634 (struct b43_rxhdr_fw4 *)wl->pio_scratchspace;
627 635
628 memset(&wl->rxhdr, 0, sizeof(wl->rxhdr)); 636 BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
637 memset(rxhdr, 0, sizeof(*rxhdr));
629 638
630 /* Check if we have data and wait for it to get ready. */ 639 /* Check if we have data and wait for it to get ready. */
631 if (q->rev >= 8) { 640 if (q->rev >= 8) {
@@ -663,16 +672,16 @@ data_ready:
663 672
664 /* Get the preamble (RX header) */ 673 /* Get the preamble (RX header) */
665 if (q->rev >= 8) { 674 if (q->rev >= 8) {
666 ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr), 675 ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr),
667 q->mmio_base + B43_PIO8_RXDATA, 676 q->mmio_base + B43_PIO8_RXDATA,
668 sizeof(u32)); 677 sizeof(u32));
669 } else { 678 } else {
670 ssb_block_read(dev->dev, &wl->rxhdr, sizeof(wl->rxhdr), 679 ssb_block_read(dev->dev, rxhdr, sizeof(*rxhdr),
671 q->mmio_base + B43_PIO_RXDATA, 680 q->mmio_base + B43_PIO_RXDATA,
672 sizeof(u16)); 681 sizeof(u16));
673 } 682 }
674 /* Sanity checks. */ 683 /* Sanity checks. */
675 len = le16_to_cpu(wl->rxhdr.frame_len); 684 len = le16_to_cpu(rxhdr->frame_len);
676 if (unlikely(len > 0x700)) { 685 if (unlikely(len > 0x700)) {
677 err_msg = "len > 0x700"; 686 err_msg = "len > 0x700";
678 goto rx_error; 687 goto rx_error;
@@ -682,7 +691,7 @@ data_ready:
682 goto rx_error; 691 goto rx_error;
683 } 692 }
684 693
685 macstat = le32_to_cpu(wl->rxhdr.mac_status); 694 macstat = le32_to_cpu(rxhdr->mac_status);
686 if (macstat & B43_RX_MAC_FCSERR) { 695 if (macstat & B43_RX_MAC_FCSERR) {
687 if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) { 696 if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
688 /* Drop frames with failed FCS. */ 697 /* Drop frames with failed FCS. */
@@ -707,22 +716,25 @@ data_ready:
707 q->mmio_base + B43_PIO8_RXDATA, 716 q->mmio_base + B43_PIO8_RXDATA,
708 sizeof(u32)); 717 sizeof(u32));
709 if (len & 3) { 718 if (len & 3) {
719 u8 *tail = wl->pio_tailspace;
720 BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);
721
710 /* Read the last few bytes. */ 722 /* Read the last few bytes. */
711 ssb_block_read(dev->dev, wl->rx_tail, 4, 723 ssb_block_read(dev->dev, tail, 4,
712 q->mmio_base + B43_PIO8_RXDATA, 724 q->mmio_base + B43_PIO8_RXDATA,
713 sizeof(u32)); 725 sizeof(u32));
714 switch (len & 3) { 726 switch (len & 3) {
715 case 3: 727 case 3:
716 skb->data[len + padding - 3] = wl->rx_tail[0]; 728 skb->data[len + padding - 3] = tail[0];
717 skb->data[len + padding - 2] = wl->rx_tail[1]; 729 skb->data[len + padding - 2] = tail[1];
718 skb->data[len + padding - 1] = wl->rx_tail[2]; 730 skb->data[len + padding - 1] = tail[2];
719 break; 731 break;
720 case 2: 732 case 2:
721 skb->data[len + padding - 2] = wl->rx_tail[0]; 733 skb->data[len + padding - 2] = tail[0];
722 skb->data[len + padding - 1] = wl->rx_tail[1]; 734 skb->data[len + padding - 1] = tail[1];
723 break; 735 break;
724 case 1: 736 case 1:
725 skb->data[len + padding - 1] = wl->rx_tail[0]; 737 skb->data[len + padding - 1] = tail[0];
726 break; 738 break;
727 } 739 }
728 } 740 }
@@ -731,15 +743,18 @@ data_ready:
731 q->mmio_base + B43_PIO_RXDATA, 743 q->mmio_base + B43_PIO_RXDATA,
732 sizeof(u16)); 744 sizeof(u16));
733 if (len & 1) { 745 if (len & 1) {
746 u8 *tail = wl->pio_tailspace;
747 BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);
748
734 /* Read the last byte. */ 749 /* Read the last byte. */
735 ssb_block_read(dev->dev, wl->rx_tail, 2, 750 ssb_block_read(dev->dev, tail, 2,
736 q->mmio_base + B43_PIO_RXDATA, 751 q->mmio_base + B43_PIO_RXDATA,
737 sizeof(u16)); 752 sizeof(u16));
738 skb->data[len + padding - 1] = wl->rx_tail[0]; 753 skb->data[len + padding - 1] = tail[0];
739 } 754 }
740 } 755 }
741 756
742 b43_rx(q->dev, skb, &wl->rxhdr); 757 b43_rx(q->dev, skb, rxhdr);
743 758
744 return 1; 759 return 1;
745 760
diff --git a/drivers/net/wireless/b43/xmit.c b/drivers/net/wireless/b43/xmit.c
index f4e9695ec186..eda06529ef5f 100644
--- a/drivers/net/wireless/b43/xmit.c
+++ b/drivers/net/wireless/b43/xmit.c
@@ -27,7 +27,7 @@
27 27
28*/ 28*/
29 29
30#include "b43.h" 30#include "xmit.h"
31#include "phy_common.h" 31#include "phy_common.h"
32#include "dma.h" 32#include "dma.h"
33#include "pio.h" 33#include "pio.h"
@@ -621,7 +621,6 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
621 (phystat0 & B43_RX_PHYST0_OFDM), 621 (phystat0 & B43_RX_PHYST0_OFDM),
622 (phystat0 & B43_RX_PHYST0_GAINCTL), 622 (phystat0 & B43_RX_PHYST0_GAINCTL),
623 (phystat3 & B43_RX_PHYST3_TRSTATE)); 623 (phystat3 & B43_RX_PHYST3_TRSTATE));
624 status.qual = (rxhdr->jssi * 100) / B43_RX_MAX_SSI;
625 } 624 }
626 625
627 if (phystat0 & B43_RX_PHYST0_OFDM) 626 if (phystat0 & B43_RX_PHYST0_OFDM)
@@ -690,10 +689,7 @@ void b43_rx(struct b43_wldev *dev, struct sk_buff *skb, const void *_rxhdr)
690 } 689 }
691 690
692 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); 691 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
693 692 ieee80211_rx_ni(dev->wl->hw, skb);
694 local_bh_disable();
695 ieee80211_rx(dev->wl->hw, skb);
696 local_bh_enable();
697 693
698#if B43_DEBUG 694#if B43_DEBUG
699 dev->rx_count++; 695 dev->rx_count++;
diff --git a/drivers/net/wireless/b43legacy/Kconfig b/drivers/net/wireless/b43legacy/Kconfig
index 94a463478053..1ffa28835c58 100644
--- a/drivers/net/wireless/b43legacy/Kconfig
+++ b/drivers/net/wireless/b43legacy/Kconfig
@@ -1,6 +1,6 @@
1config B43LEGACY 1config B43LEGACY
2 tristate "Broadcom 43xx-legacy wireless support (mac80211 stack)" 2 tristate "Broadcom 43xx-legacy wireless support (mac80211 stack)"
3 depends on SSB_POSSIBLE && MAC80211 && WLAN_80211 && HAS_DMA 3 depends on SSB_POSSIBLE && MAC80211 && HAS_DMA
4 select SSB 4 select SSB
5 select FW_LOADER 5 select FW_LOADER
6 ---help--- 6 ---help---
diff --git a/drivers/net/wireless/b43legacy/b43legacy.h b/drivers/net/wireless/b43legacy/b43legacy.h
index 038baa8869e2..89fe2f972c72 100644
--- a/drivers/net/wireless/b43legacy/b43legacy.h
+++ b/drivers/net/wireless/b43legacy/b43legacy.h
@@ -29,8 +29,6 @@
29 29
30#define B43legacy_IRQWAIT_MAX_RETRIES 20 30#define B43legacy_IRQWAIT_MAX_RETRIES 20
31 31
32#define B43legacy_RX_MAX_SSI 60 /* best guess at max ssi */
33
34/* MMIO offsets */ 32/* MMIO offsets */
35#define B43legacy_MMIO_DMA0_REASON 0x20 33#define B43legacy_MMIO_DMA0_REASON 0x20
36#define B43legacy_MMIO_DMA0_IRQ_MASK 0x24 34#define B43legacy_MMIO_DMA0_IRQ_MASK 0x24
diff --git a/drivers/net/wireless/b43legacy/dma.c b/drivers/net/wireless/b43legacy/dma.c
index 866403415811..0a86bdf53154 100644
--- a/drivers/net/wireless/b43legacy/dma.c
+++ b/drivers/net/wireless/b43legacy/dma.c
@@ -1240,8 +1240,9 @@ struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
1240} 1240}
1241 1241
1242static int dma_tx_fragment(struct b43legacy_dmaring *ring, 1242static int dma_tx_fragment(struct b43legacy_dmaring *ring,
1243 struct sk_buff *skb) 1243 struct sk_buff **in_skb)
1244{ 1244{
1245 struct sk_buff *skb = *in_skb;
1245 const struct b43legacy_dma_ops *ops = ring->ops; 1246 const struct b43legacy_dma_ops *ops = ring->ops;
1246 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 1247 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1247 u8 *header; 1248 u8 *header;
@@ -1305,8 +1306,14 @@ static int dma_tx_fragment(struct b43legacy_dmaring *ring,
1305 } 1306 }
1306 1307
1307 memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len); 1308 memcpy(skb_put(bounce_skb, skb->len), skb->data, skb->len);
1309 memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
1310 bounce_skb->dev = skb->dev;
1311 skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
1312 info = IEEE80211_SKB_CB(bounce_skb);
1313
1308 dev_kfree_skb_any(skb); 1314 dev_kfree_skb_any(skb);
1309 skb = bounce_skb; 1315 skb = bounce_skb;
1316 *in_skb = bounce_skb;
1310 meta->skb = skb; 1317 meta->skb = skb;
1311 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1); 1318 meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
1312 if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) { 1319 if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
@@ -1360,8 +1367,10 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
1360 struct sk_buff *skb) 1367 struct sk_buff *skb)
1361{ 1368{
1362 struct b43legacy_dmaring *ring; 1369 struct b43legacy_dmaring *ring;
1370 struct ieee80211_hdr *hdr;
1363 int err = 0; 1371 int err = 0;
1364 unsigned long flags; 1372 unsigned long flags;
1373 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1365 1374
1366 ring = priority_to_txring(dev, skb_get_queue_mapping(skb)); 1375 ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
1367 spin_lock_irqsave(&ring->lock, flags); 1376 spin_lock_irqsave(&ring->lock, flags);
@@ -1386,7 +1395,11 @@ int b43legacy_dma_tx(struct b43legacy_wldev *dev,
1386 goto out_unlock; 1395 goto out_unlock;
1387 } 1396 }
1388 1397
1389 err = dma_tx_fragment(ring, skb); 1398 /* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
1399 * into the skb data or cb now. */
1400 hdr = NULL;
1401 info = NULL;
1402 err = dma_tx_fragment(ring, &skb);
1390 if (unlikely(err == -ENOKEY)) { 1403 if (unlikely(err == -ENOKEY)) {
1391 /* Drop this packet, as we don't have the encryption key 1404 /* Drop this packet, as we don't have the encryption key
1392 * anymore and must not transmit it unencrypted. */ 1405 * anymore and must not transmit it unencrypted. */
diff --git a/drivers/net/wireless/b43legacy/main.c b/drivers/net/wireless/b43legacy/main.c
index 1d9223b3d4c4..d579bb9035c4 100644
--- a/drivers/net/wireless/b43legacy/main.c
+++ b/drivers/net/wireless/b43legacy/main.c
@@ -2676,7 +2676,7 @@ static int b43legacy_op_dev_config(struct ieee80211_hw *hw,
2676 if (conf->channel->hw_value != phy->channel) 2676 if (conf->channel->hw_value != phy->channel)
2677 b43legacy_radio_selectchannel(dev, conf->channel->hw_value, 0); 2677 b43legacy_radio_selectchannel(dev, conf->channel->hw_value, 0);
2678 2678
2679 dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_RADIOTAP); 2679 dev->wl->radiotap_enabled = !!(conf->flags & IEEE80211_CONF_MONITOR);
2680 2680
2681 /* Adjust the desired TX power level. */ 2681 /* Adjust the desired TX power level. */
2682 if (conf->power_level != 0) { 2682 if (conf->power_level != 0) {
@@ -3592,7 +3592,7 @@ static int b43legacy_wireless_core_attach(struct b43legacy_wldev *dev)
3592{ 3592{
3593 struct b43legacy_wl *wl = dev->wl; 3593 struct b43legacy_wl *wl = dev->wl;
3594 struct ssb_bus *bus = dev->dev->bus; 3594 struct ssb_bus *bus = dev->dev->bus;
3595 struct pci_dev *pdev = bus->host_pci; 3595 struct pci_dev *pdev = (bus->bustype == SSB_BUSTYPE_PCI) ? bus->host_pci : NULL;
3596 int err; 3596 int err;
3597 int have_bphy = 0; 3597 int have_bphy = 0;
3598 int have_gphy = 0; 3598 int have_gphy = 0;
@@ -3706,7 +3706,7 @@ static int b43legacy_one_core_attach(struct ssb_device *dev,
3706 3706
3707 if (!list_empty(&wl->devlist)) { 3707 if (!list_empty(&wl->devlist)) {
3708 /* We are not the first core on this chip. */ 3708 /* We are not the first core on this chip. */
3709 pdev = dev->bus->host_pci; 3709 pdev = (dev->bus->bustype == SSB_BUSTYPE_PCI) ? dev->bus->host_pci : NULL;
3710 /* Only special chips support more than one wireless 3710 /* Only special chips support more than one wireless
3711 * core, although some of the other chips have more than 3711 * core, although some of the other chips have more than
3712 * one wireless core as well. Check for this and 3712 * one wireless core as well. Check for this and
diff --git a/drivers/net/wireless/b43legacy/xmit.c b/drivers/net/wireless/b43legacy/xmit.c
index 103f3c9e7f58..9c8882d9275e 100644
--- a/drivers/net/wireless/b43legacy/xmit.c
+++ b/drivers/net/wireless/b43legacy/xmit.c
@@ -549,7 +549,6 @@ void b43legacy_rx(struct b43legacy_wldev *dev,
549 (phystat0 & B43legacy_RX_PHYST0_GAINCTL), 549 (phystat0 & B43legacy_RX_PHYST0_GAINCTL),
550 (phystat3 & B43legacy_RX_PHYST3_TRSTATE)); 550 (phystat3 & B43legacy_RX_PHYST3_TRSTATE));
551 status.noise = dev->stats.link_noise; 551 status.noise = dev->stats.link_noise;
552 status.qual = (jssi * 100) / B43legacy_RX_MAX_SSI;
553 /* change to support A PHY */ 552 /* change to support A PHY */
554 if (phystat0 & B43legacy_RX_PHYST0_OFDM) 553 if (phystat0 & B43legacy_RX_PHYST0_OFDM)
555 status.rate_idx = b43legacy_plcp_get_bitrate_idx_ofdm(plcp, false); 554 status.rate_idx = b43legacy_plcp_get_bitrate_idx_ofdm(plcp, false);
diff --git a/drivers/net/wireless/hostap/Kconfig b/drivers/net/wireless/hostap/Kconfig
index c15db2293515..287d82728bc3 100644
--- a/drivers/net/wireless/hostap/Kconfig
+++ b/drivers/net/wireless/hostap/Kconfig
@@ -1,7 +1,8 @@
1config HOSTAP 1config HOSTAP
2 tristate "IEEE 802.11 for Host AP (Prism2/2.5/3 and WEP/TKIP/CCMP)" 2 tristate "IEEE 802.11 for Host AP (Prism2/2.5/3 and WEP/TKIP/CCMP)"
3 depends on WLAN_80211
4 select WIRELESS_EXT 3 select WIRELESS_EXT
4 select WEXT_SPY
5 select WEXT_PRIV
5 select CRYPTO 6 select CRYPTO
6 select CRYPTO_ARC4 7 select CRYPTO_ARC4
7 select CRYPTO_ECB 8 select CRYPTO_ECB
diff --git a/drivers/net/wireless/ipw2x00/Kconfig b/drivers/net/wireless/ipw2x00/Kconfig
index a8131384c6b9..2715b101aded 100644
--- a/drivers/net/wireless/ipw2x00/Kconfig
+++ b/drivers/net/wireless/ipw2x00/Kconfig
@@ -4,8 +4,10 @@
4 4
5config IPW2100 5config IPW2100
6 tristate "Intel PRO/Wireless 2100 Network Connection" 6 tristate "Intel PRO/Wireless 2100 Network Connection"
7 depends on PCI && WLAN_80211 && CFG80211 7 depends on PCI && CFG80211
8 select WIRELESS_EXT 8 select WIRELESS_EXT
9 select WEXT_SPY
10 select WEXT_PRIV
9 select FW_LOADER 11 select FW_LOADER
10 select LIB80211 12 select LIB80211
11 select LIBIPW 13 select LIBIPW
@@ -63,8 +65,10 @@ config IPW2100_DEBUG
63 65
64config IPW2200 66config IPW2200
65 tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection" 67 tristate "Intel PRO/Wireless 2200BG and 2915ABG Network Connection"
66 depends on PCI && WLAN_80211 && CFG80211 68 depends on PCI && CFG80211 && CFG80211_WEXT
67 select WIRELESS_EXT 69 select WIRELESS_EXT
70 select WEXT_SPY
71 select WEXT_PRIV
68 select FW_LOADER 72 select FW_LOADER
69 select LIB80211 73 select LIB80211
70 select LIBIPW 74 select LIBIPW
@@ -150,8 +154,9 @@ config IPW2200_DEBUG
150 154
151config LIBIPW 155config LIBIPW
152 tristate 156 tristate
153 depends on PCI && WLAN_80211 && CFG80211 157 depends on PCI && CFG80211
154 select WIRELESS_EXT 158 select WIRELESS_EXT
159 select WEXT_SPY
155 select CRYPTO 160 select CRYPTO
156 select CRYPTO_ARC4 161 select CRYPTO_ARC4
157 select CRYPTO_ECB 162 select CRYPTO_ECB
diff --git a/drivers/net/wireless/ipw2x00/ipw2200.c b/drivers/net/wireless/ipw2x00/ipw2200.c
index 0d30a9e2d3df..5c6ff58732d5 100644
--- a/drivers/net/wireless/ipw2x00/ipw2200.c
+++ b/drivers/net/wireless/ipw2x00/ipw2200.c
@@ -11274,6 +11274,7 @@ static int ipw_up(struct ipw_priv *priv)
11274 if (!(priv->config & CFG_CUSTOM_MAC)) 11274 if (!(priv->config & CFG_CUSTOM_MAC))
11275 eeprom_parse_mac(priv, priv->mac_addr); 11275 eeprom_parse_mac(priv, priv->mac_addr);
11276 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN); 11276 memcpy(priv->net_dev->dev_addr, priv->mac_addr, ETH_ALEN);
11277 memcpy(priv->net_dev->perm_addr, priv->mac_addr, ETH_ALEN);
11277 11278
11278 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) { 11279 for (j = 0; j < ARRAY_SIZE(ipw_geos); j++) {
11279 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE], 11280 if (!memcmp(&priv->eeprom[EEPROM_COUNTRY_CODE],
diff --git a/drivers/net/wireless/iwlwifi/Kconfig b/drivers/net/wireless/iwlwifi/Kconfig
index 99310c033253..b16b06c2031f 100644
--- a/drivers/net/wireless/iwlwifi/Kconfig
+++ b/drivers/net/wireless/iwlwifi/Kconfig
@@ -1,17 +1,7 @@
1config IWLWIFI 1config IWLWIFI
2 tristate "Intel Wireless Wifi" 2 tristate "Intel Wireless Wifi"
3 depends on PCI && MAC80211 && WLAN_80211 && EXPERIMENTAL 3 depends on PCI && MAC80211 && EXPERIMENTAL
4 select LIB80211
5 select FW_LOADER 4 select FW_LOADER
6 select MAC80211_LEDS if IWLWIFI_LEDS
7 select LEDS_CLASS if IWLWIFI_LEDS
8
9config IWLWIFI_LEDS
10 bool "Enable LED support in iwlagn and iwl3945 drivers"
11 depends on IWLWIFI
12 default y
13 ---help---
14 Select this if you want LED support.
15 5
16config IWLWIFI_SPECTRUM_MEASUREMENT 6config IWLWIFI_SPECTRUM_MEASUREMENT
17 bool "Enable Spectrum Measurement in iwlagn driver" 7 bool "Enable Spectrum Measurement in iwlagn driver"
@@ -50,6 +40,24 @@ config IWLWIFI_DEBUGFS
50 ---help--- 40 ---help---
51 Enable creation of debugfs files for the iwlwifi drivers. 41 Enable creation of debugfs files for the iwlwifi drivers.
52 42
43config IWLWIFI_DEVICE_TRACING
44 bool "iwlwifi device access tracing"
45 depends on IWLWIFI
46 depends on EVENT_TRACING
47 help
48 Say Y here to trace all commands, including TX frames and IO
49 accesses, sent to the device. If you say yes, iwlwifi will
50 register with the ftrace framework for event tracing and dump
51 all this information to the ringbuffer, you may need to
52 increase the ringbuffer size. See the ftrace documentation
53 for more information.
54
55 When tracing is not enabled, this option still has some
56 (though rather small) overhead.
57
58 If unsure, say Y so we can help you better when problems
59 occur.
60
53config IWLAGN 61config IWLAGN
54 tristate "Intel Wireless WiFi Next Gen AGN (iwlagn)" 62 tristate "Intel Wireless WiFi Next Gen AGN (iwlagn)"
55 depends on IWLWIFI 63 depends on IWLWIFI
diff --git a/drivers/net/wireless/iwlwifi/Makefile b/drivers/net/wireless/iwlwifi/Makefile
index 1d4e0a226fd4..7f82044af242 100644
--- a/drivers/net/wireless/iwlwifi/Makefile
+++ b/drivers/net/wireless/iwlwifi/Makefile
@@ -1,20 +1,22 @@
1obj-$(CONFIG_IWLWIFI) += iwlcore.o 1obj-$(CONFIG_IWLWIFI) += iwlcore.o
2iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o 2iwlcore-objs := iwl-core.o iwl-eeprom.o iwl-hcmd.o iwl-power.o
3iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o iwl-calib.o 3iwlcore-objs += iwl-rx.o iwl-tx.o iwl-sta.o iwl-calib.o
4iwlcore-objs += iwl-scan.o 4iwlcore-objs += iwl-scan.o iwl-led.o
5iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o 5iwlcore-$(CONFIG_IWLWIFI_DEBUGFS) += iwl-debugfs.o
6iwlcore-$(CONFIG_IWLWIFI_LEDS) += iwl-led.o
7iwlcore-$(CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT) += iwl-spectrum.o 6iwlcore-$(CONFIG_IWLWIFI_SPECTRUM_MEASUREMENT) += iwl-spectrum.o
7iwlcore-$(CONFIG_IWLWIFI_DEVICE_TRACING) += iwl-devtrace.o
8 8
9CFLAGS_iwl-devtrace.o := -I$(src)
10
11# AGN
9obj-$(CONFIG_IWLAGN) += iwlagn.o 12obj-$(CONFIG_IWLAGN) += iwlagn.o
10iwlagn-objs := iwl-agn.o iwl-agn-rs.o 13iwlagn-objs := iwl-agn.o iwl-agn-rs.o iwl-agn-led.o
11 14
12iwlagn-$(CONFIG_IWL4965) += iwl-4965.o 15iwlagn-$(CONFIG_IWL4965) += iwl-4965.o
13iwlagn-$(CONFIG_IWL5000) += iwl-5000.o 16iwlagn-$(CONFIG_IWL5000) += iwl-5000.o
14iwlagn-$(CONFIG_IWL5000) += iwl-6000.o 17iwlagn-$(CONFIG_IWL5000) += iwl-6000.o
15iwlagn-$(CONFIG_IWL5000) += iwl-1000.o 18iwlagn-$(CONFIG_IWL5000) += iwl-1000.o
16 19
20# 3945
17obj-$(CONFIG_IWL3945) += iwl3945.o 21obj-$(CONFIG_IWL3945) += iwl3945.o
18iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o 22iwl3945-objs := iwl3945-base.o iwl-3945.o iwl-3945-rs.o iwl-3945-led.o
19
20
diff --git a/drivers/net/wireless/iwlwifi/iwl-1000.c b/drivers/net/wireless/iwlwifi/iwl-1000.c
index 950267ab556a..8f82537045bf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-1000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-1000.c
@@ -44,6 +44,7 @@
44#include "iwl-sta.h" 44#include "iwl-sta.h"
45#include "iwl-helpers.h" 45#include "iwl-helpers.h"
46#include "iwl-5000-hw.h" 46#include "iwl-5000-hw.h"
47#include "iwl-agn-led.h"
47 48
48/* Highest firmware API version supported */ 49/* Highest firmware API version supported */
49#define IWL1000_UCODE_API_MAX 3 50#define IWL1000_UCODE_API_MAX 3
@@ -76,7 +77,10 @@ static void iwl1000_set_ct_threshold(struct iwl_priv *priv)
76/* NIC configuration for 1000 series */ 77/* NIC configuration for 1000 series */
77static void iwl1000_nic_config(struct iwl_priv *priv) 78static void iwl1000_nic_config(struct iwl_priv *priv)
78{ 79{
79 iwl5000_nic_config(priv); 80 /* set CSR_HW_CONFIG_REG for uCode use */
81 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
82 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
83 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
80 84
81 /* Setting digital SVR for 1000 card to 1.32V */ 85 /* Setting digital SVR for 1000 card to 1.32V */
82 /* locking is acquired in iwl_set_bits_mask_prph() function */ 86 /* locking is acquired in iwl_set_bits_mask_prph() function */
@@ -106,9 +110,8 @@ static struct iwl_lib_ops iwl1000_lib = {
106 .send_tx_power = iwl5000_send_tx_power, 110 .send_tx_power = iwl5000_send_tx_power,
107 .update_chain_flags = iwl_update_chain_flags, 111 .update_chain_flags = iwl_update_chain_flags,
108 .apm_ops = { 112 .apm_ops = {
109 .init = iwl5000_apm_init, 113 .init = iwl_apm_init,
110 .reset = iwl5000_apm_reset, 114 .stop = iwl_apm_stop,
111 .stop = iwl5000_apm_stop,
112 .config = iwl1000_nic_config, 115 .config = iwl1000_nic_config,
113 .set_pwr_src = iwl_set_pwr_src, 116 .set_pwr_src = iwl_set_pwr_src,
114 }, 117 },
@@ -142,6 +145,7 @@ static struct iwl_ops iwl1000_ops = {
142 .lib = &iwl1000_lib, 145 .lib = &iwl1000_lib,
143 .hcmd = &iwl5000_hcmd, 146 .hcmd = &iwl5000_hcmd,
144 .utils = &iwl5000_hcmd_utils, 147 .utils = &iwl5000_hcmd_utils,
148 .led = &iwlagn_led_ops,
145}; 149};
146 150
147struct iwl_cfg iwl1000_bgn_cfg = { 151struct iwl_cfg iwl1000_bgn_cfg = {
@@ -152,15 +156,49 @@ struct iwl_cfg iwl1000_bgn_cfg = {
152 .sku = IWL_SKU_G|IWL_SKU_N, 156 .sku = IWL_SKU_G|IWL_SKU_N,
153 .ops = &iwl1000_ops, 157 .ops = &iwl1000_ops,
154 .eeprom_size = OTP_LOW_IMAGE_SIZE, 158 .eeprom_size = OTP_LOW_IMAGE_SIZE,
155 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 159 .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
156 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 160 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
161 .num_of_queues = IWL50_NUM_QUEUES,
162 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
157 .mod_params = &iwl50_mod_params, 163 .mod_params = &iwl50_mod_params,
158 .valid_tx_ant = ANT_A, 164 .valid_tx_ant = ANT_A,
159 .valid_rx_ant = ANT_AB, 165 .valid_rx_ant = ANT_AB,
160 .need_pll_cfg = true, 166 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
167 .set_l0s = true,
168 .use_bsm = false,
161 .max_ll_items = OTP_MAX_LL_ITEMS_1000, 169 .max_ll_items = OTP_MAX_LL_ITEMS_1000,
162 .shadow_ram_support = false, 170 .shadow_ram_support = false,
163 .ht_greenfield_support = true, 171 .ht_greenfield_support = true,
172 .led_compensation = 51,
164 .use_rts_for_ht = true, /* use rts/cts protection */ 173 .use_rts_for_ht = true, /* use rts/cts protection */
174 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
175 .support_ct_kill_exit = true,
165}; 176};
166 177
178struct iwl_cfg iwl1000_bg_cfg = {
179 .name = "1000 Series BG",
180 .fw_name_pre = IWL1000_FW_PRE,
181 .ucode_api_max = IWL1000_UCODE_API_MAX,
182 .ucode_api_min = IWL1000_UCODE_API_MIN,
183 .sku = IWL_SKU_G,
184 .ops = &iwl1000_ops,
185 .eeprom_size = OTP_LOW_IMAGE_SIZE,
186 .eeprom_ver = EEPROM_1000_EEPROM_VERSION,
187 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
188 .num_of_queues = IWL50_NUM_QUEUES,
189 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
190 .mod_params = &iwl50_mod_params,
191 .valid_tx_ant = ANT_A,
192 .valid_rx_ant = ANT_AB,
193 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
194 .set_l0s = true,
195 .use_bsm = false,
196 .max_ll_items = OTP_MAX_LL_ITEMS_1000,
197 .shadow_ram_support = false,
198 .ht_greenfield_support = true,
199 .led_compensation = 51,
200 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
201 .support_ct_kill_exit = true,
202};
203
204MODULE_FIRMWARE(IWL1000_MODULE_FIRMWARE(IWL1000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
index 16772780c5b0..6fd10d443ba3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-hw.h
@@ -71,12 +71,6 @@
71 71
72#include "iwl-eeprom.h" 72#include "iwl-eeprom.h"
73 73
74/*
75 * uCode queue management definitions ...
76 * Queue #4 is the command queue for 3945 and 4965.
77 */
78#define IWL_CMD_QUEUE_NUM 4
79
80/* Time constants */ 74/* Time constants */
81#define SHORT_SLOT_TIME 9 75#define SHORT_SLOT_TIME 9
82#define LONG_SLOT_TIME 20 76#define LONG_SLOT_TIME 20
@@ -254,12 +248,6 @@ struct iwl3945_eeprom {
254#define TFD_CTL_PAD_SET(n) (n << 28) 248#define TFD_CTL_PAD_SET(n) (n << 28)
255#define TFD_CTL_PAD_GET(ctl) (ctl >> 28) 249#define TFD_CTL_PAD_GET(ctl) (ctl >> 28)
256 250
257/*
258 * RX related structures and functions
259 */
260#define RX_FREE_BUFFERS 64
261#define RX_LOW_WATERMARK 8
262
263/* Sizes and addresses for instruction and data memory (SRAM) in 251/* Sizes and addresses for instruction and data memory (SRAM) in
264 * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */ 252 * 3945's embedded processor. Driver access is via HBUS_TARG_MEM_* regs. */
265#define IWL39_RTC_INST_LOWER_BOUND (0x000000) 253#define IWL39_RTC_INST_LOWER_BOUND (0x000000)
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.c b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
index 8c29ded7d02c..a871d09d598f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.c
@@ -24,8 +24,6 @@
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
26 26
27#ifdef CONFIG_IWLWIFI_LEDS
28
29#include <linux/kernel.h> 27#include <linux/kernel.h>
30#include <linux/module.h> 28#include <linux/module.h>
31#include <linux/init.h> 29#include <linux/init.h>
@@ -43,388 +41,51 @@
43#include "iwl-3945.h" 41#include "iwl-3945.h"
44#include "iwl-core.h" 42#include "iwl-core.h"
45#include "iwl-dev.h" 43#include "iwl-dev.h"
44#include "iwl-3945-led.h"
46 45
47#ifdef CONFIG_IWLWIFI_DEBUG
48static const char *led_type_str[] = {
49 __stringify(IWL_LED_TRG_TX),
50 __stringify(IWL_LED_TRG_RX),
51 __stringify(IWL_LED_TRG_ASSOC),
52 __stringify(IWL_LED_TRG_RADIO),
53 NULL
54};
55#endif /* CONFIG_IWLWIFI_DEBUG */
56
57static const struct {
58 u16 brightness;
59 u8 on_time;
60 u8 off_time;
61} blink_tbl[] =
62{
63 {300, 25, 25},
64 {200, 40, 40},
65 {100, 55, 55},
66 {70, 65, 65},
67 {50, 75, 75},
68 {20, 85, 85},
69 {15, 95, 95 },
70 {10, 110, 110},
71 {5, 130, 130},
72 {0, 167, 167},
73 /* SOLID_ON */
74 {-1, IWL_LED_SOLID, 0}
75};
76
77#define IWL_1MB_RATE (128 * 1024)
78#define IWL_LED_THRESHOLD (16)
79#define IWL_MAX_BLINK_TBL (ARRAY_SIZE(blink_tbl) - 1) /*Exclude Solid on*/
80#define IWL_SOLID_BLINK_IDX (ARRAY_SIZE(blink_tbl) - 1)
81
82static void iwl3945_led_cmd_callback(struct iwl_priv *priv,
83 struct iwl_device_cmd *cmd,
84 struct sk_buff *skb)
85{
86}
87
88static inline int iwl3945_brightness_to_idx(enum led_brightness brightness)
89{
90 return fls(0x000000FF & (u32)brightness);
91}
92 46
93/* Send led command */ 47/* Send led command */
94static int iwl_send_led_cmd(struct iwl_priv *priv, 48static int iwl3945_send_led_cmd(struct iwl_priv *priv,
95 struct iwl_led_cmd *led_cmd) 49 struct iwl_led_cmd *led_cmd)
96{ 50{
97 struct iwl_host_cmd cmd = { 51 struct iwl_host_cmd cmd = {
98 .id = REPLY_LEDS_CMD, 52 .id = REPLY_LEDS_CMD,
99 .len = sizeof(struct iwl_led_cmd), 53 .len = sizeof(struct iwl_led_cmd),
100 .data = led_cmd, 54 .data = led_cmd,
101 .flags = CMD_ASYNC, 55 .flags = CMD_ASYNC,
102 .callback = iwl3945_led_cmd_callback, 56 .callback = NULL,
103 }; 57 };
104 58
105 return iwl_send_cmd(priv, &cmd); 59 return iwl_send_cmd(priv, &cmd);
106} 60}
107 61
108
109
110/* Set led on command */
111static int iwl3945_led_pattern(struct iwl_priv *priv, int led_id,
112 unsigned int idx)
113{
114 struct iwl_led_cmd led_cmd = {
115 .id = led_id,
116 .interval = IWL_DEF_LED_INTRVL
117 };
118
119 BUG_ON(idx > IWL_MAX_BLINK_TBL);
120
121 led_cmd.on = blink_tbl[idx].on_time;
122 led_cmd.off = blink_tbl[idx].off_time;
123
124 return iwl_send_led_cmd(priv, &led_cmd);
125}
126
127
128/* Set led on command */ 62/* Set led on command */
129static int iwl3945_led_on(struct iwl_priv *priv, int led_id) 63static int iwl3945_led_on(struct iwl_priv *priv)
130{ 64{
131 struct iwl_led_cmd led_cmd = { 65 struct iwl_led_cmd led_cmd = {
132 .id = led_id, 66 .id = IWL_LED_LINK,
133 .on = IWL_LED_SOLID, 67 .on = IWL_LED_SOLID,
134 .off = 0, 68 .off = 0,
135 .interval = IWL_DEF_LED_INTRVL 69 .interval = IWL_DEF_LED_INTRVL
136 }; 70 };
137 return iwl_send_led_cmd(priv, &led_cmd); 71 return iwl3945_send_led_cmd(priv, &led_cmd);
138} 72}
139 73
140/* Set led off command */ 74/* Set led off command */
141static int iwl3945_led_off(struct iwl_priv *priv, int led_id) 75static int iwl3945_led_off(struct iwl_priv *priv)
142{ 76{
143 struct iwl_led_cmd led_cmd = { 77 struct iwl_led_cmd led_cmd = {
144 .id = led_id, 78 .id = IWL_LED_LINK,
145 .on = 0, 79 .on = 0,
146 .off = 0, 80 .off = 0,
147 .interval = IWL_DEF_LED_INTRVL 81 .interval = IWL_DEF_LED_INTRVL
148 }; 82 };
149 IWL_DEBUG_LED(priv, "led off %d\n", led_id); 83 IWL_DEBUG_LED(priv, "led off\n");
150 return iwl_send_led_cmd(priv, &led_cmd); 84 return iwl3945_send_led_cmd(priv, &led_cmd);
151} 85}
152 86
153/* 87const struct iwl_led_ops iwl3945_led_ops = {
154 * Set led on in case of association 88 .cmd = iwl3945_send_led_cmd,
155 * */ 89 .on = iwl3945_led_on,
156static int iwl3945_led_associate(struct iwl_priv *priv, int led_id) 90 .off = iwl3945_led_off,
157{ 91};
158 IWL_DEBUG_LED(priv, "Associated\n");
159
160 priv->allow_blinking = 1;
161 return iwl3945_led_on(priv, led_id);
162}
163/* Set Led off in case of disassociation */
164static int iwl3945_led_disassociate(struct iwl_priv *priv, int led_id)
165{
166 IWL_DEBUG_LED(priv, "Disassociated\n");
167
168 priv->allow_blinking = 0;
169
170 return 0;
171}
172
173/*
174 * brightness call back function for Tx/Rx LED
175 */
176static int iwl3945_led_associated(struct iwl_priv *priv, int led_id)
177{
178 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
179 !test_bit(STATUS_READY, &priv->status))
180 return 0;
181
182
183 /* start counting Tx/Rx bytes */
184 if (!priv->last_blink_time && priv->allow_blinking)
185 priv->last_blink_time = jiffies;
186 return 0;
187}
188
189/*
190 * brightness call back for association and radio
191 */
192static void iwl3945_led_brightness_set(struct led_classdev *led_cdev,
193 enum led_brightness brightness)
194{
195 struct iwl_led *led = container_of(led_cdev,
196 struct iwl_led, led_dev);
197 struct iwl_priv *priv = led->priv;
198
199 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
200 return;
201
202 IWL_DEBUG_LED(priv, "Led type = %s brightness = %d\n",
203 led_type_str[led->type], brightness);
204
205 switch (brightness) {
206 case LED_FULL:
207 if (led->led_on)
208 led->led_on(priv, IWL_LED_LINK);
209 break;
210 case LED_OFF:
211 if (led->led_off)
212 led->led_off(priv, IWL_LED_LINK);
213 break;
214 default:
215 if (led->led_pattern) {
216 int idx = iwl3945_brightness_to_idx(brightness);
217 led->led_pattern(priv, IWL_LED_LINK, idx);
218 }
219 break;
220 }
221}
222
223/*
224 * Register led class with the system
225 */
226static int iwl3945_led_register_led(struct iwl_priv *priv,
227 struct iwl_led *led,
228 enum led_type type, u8 set_led,
229 char *trigger)
230{
231 struct device *device = wiphy_dev(priv->hw->wiphy);
232 int ret;
233
234 led->led_dev.name = led->name;
235 led->led_dev.brightness_set = iwl3945_led_brightness_set;
236 led->led_dev.default_trigger = trigger;
237
238 led->priv = priv;
239 led->type = type;
240
241 ret = led_classdev_register(device, &led->led_dev);
242 if (ret) {
243 IWL_ERR(priv, "Error: failed to register led handler.\n");
244 return ret;
245 }
246
247 led->registered = 1;
248
249 if (set_led && led->led_on)
250 led->led_on(priv, IWL_LED_LINK);
251 return 0;
252}
253
254
255/*
256 * calculate blink rate according to last 2 sec Tx/Rx activities
257 */
258static inline u8 get_blink_rate(struct iwl_priv *priv)
259{
260 int index;
261 s64 tpt = priv->rxtxpackets;
262
263 if (tpt < 0)
264 tpt = -tpt;
265
266 IWL_DEBUG_LED(priv, "tpt %lld \n", (long long)tpt);
267
268 if (!priv->allow_blinking)
269 index = IWL_MAX_BLINK_TBL;
270 else
271 for (index = 0; index < IWL_MAX_BLINK_TBL; index++)
272 if (tpt > (blink_tbl[index].brightness * IWL_1MB_RATE))
273 break;
274
275 IWL_DEBUG_LED(priv, "LED BLINK IDX=%d\n", index);
276 return index;
277}
278
279/*
280 * this function called from handler. Since setting Led command can
281 * happen very frequent we postpone led command to be called from
282 * REPLY handler so we know ucode is up
283 */
284void iwl3945_led_background(struct iwl_priv *priv)
285{
286 u8 blink_idx;
287
288 if (test_bit(STATUS_EXIT_PENDING, &priv->status)) {
289 priv->last_blink_time = 0;
290 return;
291 }
292 if (iwl_is_rfkill(priv)) {
293 priv->last_blink_time = 0;
294 return;
295 }
296
297 if (!priv->allow_blinking) {
298 priv->last_blink_time = 0;
299 if (priv->last_blink_rate != IWL_SOLID_BLINK_IDX) {
300 priv->last_blink_rate = IWL_SOLID_BLINK_IDX;
301 iwl3945_led_pattern(priv, IWL_LED_LINK,
302 IWL_SOLID_BLINK_IDX);
303 }
304 return;
305 }
306 if (!priv->last_blink_time ||
307 !time_after(jiffies, priv->last_blink_time +
308 msecs_to_jiffies(1000)))
309 return;
310
311 blink_idx = get_blink_rate(priv);
312
313 /* call only if blink rate change */
314 if (blink_idx != priv->last_blink_rate)
315 iwl3945_led_pattern(priv, IWL_LED_LINK, blink_idx);
316
317 priv->last_blink_time = jiffies;
318 priv->last_blink_rate = blink_idx;
319 priv->rxtxpackets = 0;
320}
321
322
323/* Register all led handler */
324int iwl3945_led_register(struct iwl_priv *priv)
325{
326 char *trigger;
327 int ret;
328
329 priv->last_blink_rate = 0;
330 priv->rxtxpackets = 0;
331 priv->led_tpt = 0;
332 priv->last_blink_time = 0;
333 priv->allow_blinking = 0;
334
335 trigger = ieee80211_get_radio_led_name(priv->hw);
336 snprintf(priv->led[IWL_LED_TRG_RADIO].name,
337 sizeof(priv->led[IWL_LED_TRG_RADIO].name), "iwl-%s::radio",
338 wiphy_name(priv->hw->wiphy));
339
340 priv->led[IWL_LED_TRG_RADIO].led_on = iwl3945_led_on;
341 priv->led[IWL_LED_TRG_RADIO].led_off = iwl3945_led_off;
342 priv->led[IWL_LED_TRG_RADIO].led_pattern = NULL;
343
344 ret = iwl3945_led_register_led(priv,
345 &priv->led[IWL_LED_TRG_RADIO],
346 IWL_LED_TRG_RADIO, 1, trigger);
347
348 if (ret)
349 goto exit_fail;
350
351 trigger = ieee80211_get_assoc_led_name(priv->hw);
352 snprintf(priv->led[IWL_LED_TRG_ASSOC].name,
353 sizeof(priv->led[IWL_LED_TRG_ASSOC].name), "iwl-%s::assoc",
354 wiphy_name(priv->hw->wiphy));
355
356 ret = iwl3945_led_register_led(priv,
357 &priv->led[IWL_LED_TRG_ASSOC],
358 IWL_LED_TRG_ASSOC, 0, trigger);
359
360 /* for assoc always turn led on */
361 priv->led[IWL_LED_TRG_ASSOC].led_on = iwl3945_led_associate;
362 priv->led[IWL_LED_TRG_ASSOC].led_off = iwl3945_led_disassociate;
363 priv->led[IWL_LED_TRG_ASSOC].led_pattern = NULL;
364
365 if (ret)
366 goto exit_fail;
367
368 trigger = ieee80211_get_rx_led_name(priv->hw);
369 snprintf(priv->led[IWL_LED_TRG_RX].name,
370 sizeof(priv->led[IWL_LED_TRG_RX].name), "iwl-%s::RX",
371 wiphy_name(priv->hw->wiphy));
372
373 ret = iwl3945_led_register_led(priv,
374 &priv->led[IWL_LED_TRG_RX],
375 IWL_LED_TRG_RX, 0, trigger);
376
377 priv->led[IWL_LED_TRG_RX].led_on = iwl3945_led_associated;
378 priv->led[IWL_LED_TRG_RX].led_off = iwl3945_led_associated;
379 priv->led[IWL_LED_TRG_RX].led_pattern = iwl3945_led_pattern;
380
381 if (ret)
382 goto exit_fail;
383
384 trigger = ieee80211_get_tx_led_name(priv->hw);
385 snprintf(priv->led[IWL_LED_TRG_TX].name,
386 sizeof(priv->led[IWL_LED_TRG_TX].name), "iwl-%s::TX",
387 wiphy_name(priv->hw->wiphy));
388
389 ret = iwl3945_led_register_led(priv,
390 &priv->led[IWL_LED_TRG_TX],
391 IWL_LED_TRG_TX, 0, trigger);
392
393 priv->led[IWL_LED_TRG_TX].led_on = iwl3945_led_associated;
394 priv->led[IWL_LED_TRG_TX].led_off = iwl3945_led_associated;
395 priv->led[IWL_LED_TRG_TX].led_pattern = iwl3945_led_pattern;
396
397 if (ret)
398 goto exit_fail;
399
400 return 0;
401
402exit_fail:
403 iwl3945_led_unregister(priv);
404 return ret;
405}
406
407
408/* unregister led class */
409static void iwl3945_led_unregister_led(struct iwl_led *led, u8 set_led)
410{
411 if (!led->registered)
412 return;
413
414 led_classdev_unregister(&led->led_dev);
415
416 if (set_led)
417 led->led_dev.brightness_set(&led->led_dev, LED_OFF);
418 led->registered = 0;
419}
420
421/* Unregister all led handlers */
422void iwl3945_led_unregister(struct iwl_priv *priv)
423{
424 iwl3945_led_unregister_led(&priv->led[IWL_LED_TRG_ASSOC], 0);
425 iwl3945_led_unregister_led(&priv->led[IWL_LED_TRG_RX], 0);
426 iwl3945_led_unregister_led(&priv->led[IWL_LED_TRG_TX], 0);
427 iwl3945_led_unregister_led(&priv->led[IWL_LED_TRG_RADIO], 1);
428}
429
430#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-led.h b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
index 3b65642258ca..5a1033ca7aaa 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-led.h
@@ -24,23 +24,9 @@
24 * 24 *
25 *****************************************************************************/ 25 *****************************************************************************/
26 26
27#ifndef IWL3945_LEDS_H 27#ifndef __iwl_3945_led_h__
28#define IWL3945_LEDS_H 28#define __iwl_3945_led_h__
29 29
30struct iwl_priv; 30extern const struct iwl_led_ops iwl3945_led_ops;
31 31
32#ifdef CONFIG_IWLWIFI_LEDS 32#endif /* __iwl_3945_led_h__ */
33
34#include "iwl-led.h"
35
36extern int iwl3945_led_register(struct iwl_priv *priv);
37extern void iwl3945_led_unregister(struct iwl_priv *priv);
38extern void iwl3945_led_background(struct iwl_priv *priv);
39
40#else
41static inline int iwl3945_led_register(struct iwl_priv *priv) { return 0; }
42static inline void iwl3945_led_unregister(struct iwl_priv *priv) {}
43static inline void iwl3945_led_background(struct iwl_priv *priv) {}
44
45#endif /* IWLWIFI_LEDS*/
46#endif /* IWL3945_LEDS_H */
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
index cbb0585083a9..dc81e19674f7 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945-rs.c
@@ -42,38 +42,6 @@
42 42
43#define RS_NAME "iwl-3945-rs" 43#define RS_NAME "iwl-3945-rs"
44 44
45struct iwl3945_rate_scale_data {
46 u64 data;
47 s32 success_counter;
48 s32 success_ratio;
49 s32 counter;
50 s32 average_tpt;
51 unsigned long stamp;
52};
53
54struct iwl3945_rs_sta {
55 spinlock_t lock;
56 struct iwl_priv *priv;
57 s32 *expected_tpt;
58 unsigned long last_partial_flush;
59 unsigned long last_flush;
60 u32 flush_time;
61 u32 last_tx_packets;
62 u32 tx_packets;
63 u8 tgg;
64 u8 flush_pending;
65 u8 start_rate;
66 u8 ibss_sta_added;
67 struct timer_list rate_scale_flush;
68 struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
69#ifdef CONFIG_MAC80211_DEBUGFS
70 struct dentry *rs_sta_dbgfs_stats_table_file;
71#endif
72
73 /* used to be in sta_info */
74 int last_txrate_idx;
75};
76
77static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = { 45static s32 iwl3945_expected_tpt_g[IWL_RATE_COUNT_3945] = {
78 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202 46 7, 13, 35, 58, 0, 0, 76, 104, 130, 168, 191, 202
79}; 47};
@@ -370,6 +338,28 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
370 338
371 IWL_DEBUG_RATE(priv, "enter\n"); 339 IWL_DEBUG_RATE(priv, "enter\n");
372 340
341 spin_lock_init(&rs_sta->lock);
342
343 rs_sta->priv = priv;
344
345 rs_sta->start_rate = IWL_RATE_INVALID;
346
347 /* default to just 802.11b */
348 rs_sta->expected_tpt = iwl3945_expected_tpt_b;
349
350 rs_sta->last_partial_flush = jiffies;
351 rs_sta->last_flush = jiffies;
352 rs_sta->flush_time = IWL_RATE_FLUSH;
353 rs_sta->last_tx_packets = 0;
354 rs_sta->ibss_sta_added = 0;
355
356 init_timer(&rs_sta->rate_scale_flush);
357 rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
358 rs_sta->rate_scale_flush.function = &iwl3945_bg_rate_scale_flush;
359
360 for (i = 0; i < IWL_RATE_COUNT_3945; i++)
361 iwl3945_clear_window(&rs_sta->win[i]);
362
373 /* TODO: what is a good starting rate for STA? About middle? Maybe not 363 /* TODO: what is a good starting rate for STA? About middle? Maybe not
374 * the lowest or the highest rate.. Could consider using RSSI from 364 * the lowest or the highest rate.. Could consider using RSSI from
375 * previous packets? Need to have IEEE 802.1X auth succeed immediately 365 * previous packets? Need to have IEEE 802.1X auth succeed immediately
@@ -409,45 +399,11 @@ static void *rs_alloc_sta(void *iwl_priv, struct ieee80211_sta *sta, gfp_t gfp)
409{ 399{
410 struct iwl3945_rs_sta *rs_sta; 400 struct iwl3945_rs_sta *rs_sta;
411 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv; 401 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
412 struct iwl_priv *priv = iwl_priv; 402 struct iwl_priv *priv __maybe_unused = iwl_priv;
413 int i;
414
415 /*
416 * XXX: If it's using sta->drv_priv anyway, it might
417 * as well just put all the information there.
418 */
419 403
420 IWL_DEBUG_RATE(priv, "enter\n"); 404 IWL_DEBUG_RATE(priv, "enter\n");
421 405
422 rs_sta = kzalloc(sizeof(struct iwl3945_rs_sta), gfp); 406 rs_sta = &psta->rs_sta;
423 if (!rs_sta) {
424 IWL_DEBUG_RATE(priv, "leave: ENOMEM\n");
425 return NULL;
426 }
427
428 psta->rs_sta = rs_sta;
429
430 spin_lock_init(&rs_sta->lock);
431
432 rs_sta->priv = priv;
433
434 rs_sta->start_rate = IWL_RATE_INVALID;
435
436 /* default to just 802.11b */
437 rs_sta->expected_tpt = iwl3945_expected_tpt_b;
438
439 rs_sta->last_partial_flush = jiffies;
440 rs_sta->last_flush = jiffies;
441 rs_sta->flush_time = IWL_RATE_FLUSH;
442 rs_sta->last_tx_packets = 0;
443 rs_sta->ibss_sta_added = 0;
444
445 init_timer(&rs_sta->rate_scale_flush);
446 rs_sta->rate_scale_flush.data = (unsigned long)rs_sta;
447 rs_sta->rate_scale_flush.function = &iwl3945_bg_rate_scale_flush;
448
449 for (i = 0; i < IWL_RATE_COUNT_3945; i++)
450 iwl3945_clear_window(&rs_sta->win[i]);
451 407
452 IWL_DEBUG_RATE(priv, "leave\n"); 408 IWL_DEBUG_RATE(priv, "leave\n");
453 409
@@ -458,14 +414,11 @@ static void rs_free_sta(void *iwl_priv, struct ieee80211_sta *sta,
458 void *priv_sta) 414 void *priv_sta)
459{ 415{
460 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv; 416 struct iwl3945_sta_priv *psta = (void *) sta->drv_priv;
461 struct iwl3945_rs_sta *rs_sta = priv_sta; 417 struct iwl3945_rs_sta *rs_sta = &psta->rs_sta;
462 struct iwl_priv *priv __maybe_unused = rs_sta->priv; 418 struct iwl_priv *priv __maybe_unused = rs_sta->priv;
463 419
464 psta->rs_sta = NULL;
465
466 IWL_DEBUG_RATE(priv, "enter\n"); 420 IWL_DEBUG_RATE(priv, "enter\n");
467 del_timer_sync(&rs_sta->rate_scale_flush); 421 del_timer_sync(&rs_sta->rate_scale_flush);
468 kfree(rs_sta);
469 IWL_DEBUG_RATE(priv, "leave\n"); 422 IWL_DEBUG_RATE(priv, "leave\n");
470} 423}
471 424
@@ -960,14 +913,15 @@ void iwl3945_rate_scale_init(struct ieee80211_hw *hw, s32 sta_id)
960 913
961 rcu_read_lock(); 914 rcu_read_lock();
962 915
963 sta = ieee80211_find_sta(hw, priv->stations[sta_id].sta.sta.addr); 916 sta = ieee80211_find_sta(priv->vif,
917 priv->stations[sta_id].sta.sta.addr);
964 if (!sta) { 918 if (!sta) {
965 rcu_read_unlock(); 919 rcu_read_unlock();
966 return; 920 return;
967 } 921 }
968 922
969 psta = (void *) sta->drv_priv; 923 psta = (void *) sta->drv_priv;
970 rs_sta = psta->rs_sta; 924 rs_sta = &psta->rs_sta;
971 925
972 spin_lock_irqsave(&rs_sta->lock, flags); 926 spin_lock_irqsave(&rs_sta->lock, flags);
973 927
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.c b/drivers/net/wireless/iwlwifi/iwl-3945.c
index 231c833f6469..09a7bd2c0be4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.c
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.c
@@ -46,7 +46,8 @@
46#include "iwl-eeprom.h" 46#include "iwl-eeprom.h"
47#include "iwl-helpers.h" 47#include "iwl-helpers.h"
48#include "iwl-core.h" 48#include "iwl-core.h"
49#include "iwl-agn-rs.h" 49#include "iwl-led.h"
50#include "iwl-3945-led.h"
50 51
51#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \ 52#define IWL_DECLARE_RATE_INFO(r, ip, in, rp, rn, pp, np) \
52 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ 53 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
@@ -292,7 +293,7 @@ static void iwl3945_tx_queue_reclaim(struct iwl_priv *priv,
292static void iwl3945_rx_reply_tx(struct iwl_priv *priv, 293static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
293 struct iwl_rx_mem_buffer *rxb) 294 struct iwl_rx_mem_buffer *rxb)
294{ 295{
295 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 296 struct iwl_rx_packet *pkt = rxb_addr(rxb);
296 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 297 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
297 int txq_id = SEQ_TO_QUEUE(sequence); 298 int txq_id = SEQ_TO_QUEUE(sequence);
298 int index = SEQ_TO_INDEX(sequence); 299 int index = SEQ_TO_INDEX(sequence);
@@ -352,16 +353,12 @@ static void iwl3945_rx_reply_tx(struct iwl_priv *priv,
352void iwl3945_hw_rx_statistics(struct iwl_priv *priv, 353void iwl3945_hw_rx_statistics(struct iwl_priv *priv,
353 struct iwl_rx_mem_buffer *rxb) 354 struct iwl_rx_mem_buffer *rxb)
354{ 355{
355 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 356 struct iwl_rx_packet *pkt = rxb_addr(rxb);
356 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", 357 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
357 (int)sizeof(struct iwl3945_notif_statistics), 358 (int)sizeof(struct iwl3945_notif_statistics),
358 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK); 359 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
359 360
360 memcpy(&priv->statistics_39, pkt->u.raw, sizeof(priv->statistics_39)); 361 memcpy(&priv->statistics_39, pkt->u.raw, sizeof(priv->statistics_39));
361
362 iwl3945_led_background(priv);
363
364 priv->last_statistics_time = jiffies;
365} 362}
366 363
367/****************************************************************************** 364/******************************************************************************
@@ -544,14 +541,18 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
544 struct iwl_rx_mem_buffer *rxb, 541 struct iwl_rx_mem_buffer *rxb,
545 struct ieee80211_rx_status *stats) 542 struct ieee80211_rx_status *stats)
546{ 543{
547 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 544 struct iwl_rx_packet *pkt = rxb_addr(rxb);
548 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt); 545 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)IWL_RX_DATA(pkt);
549 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); 546 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
550 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); 547 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
551 short len = le16_to_cpu(rx_hdr->len); 548 u16 len = le16_to_cpu(rx_hdr->len);
549 struct sk_buff *skb;
550 int ret;
551 __le16 fc = hdr->frame_control;
552 552
553 /* We received data from the HW, so stop the watchdog */ 553 /* We received data from the HW, so stop the watchdog */
554 if (unlikely((len + IWL39_RX_FRAME_SIZE) > skb_tailroom(rxb->skb))) { 554 if (unlikely(len + IWL39_RX_FRAME_SIZE >
555 PAGE_SIZE << priv->hw_params.rx_page_order)) {
555 IWL_DEBUG_DROP(priv, "Corruption detected!\n"); 556 IWL_DEBUG_DROP(priv, "Corruption detected!\n");
556 return; 557 return;
557 } 558 }
@@ -563,24 +564,49 @@ static void iwl3945_pass_packet_to_mac80211(struct iwl_priv *priv,
563 return; 564 return;
564 } 565 }
565 566
566 skb_reserve(rxb->skb, (void *)rx_hdr->payload - (void *)pkt); 567 skb = alloc_skb(IWL_LINK_HDR_MAX, GFP_ATOMIC);
567 /* Set the size of the skb to the size of the frame */ 568 if (!skb) {
568 skb_put(rxb->skb, le16_to_cpu(rx_hdr->len)); 569 IWL_ERR(priv, "alloc_skb failed\n");
570 return;
571 }
569 572
570 if (!iwl3945_mod_params.sw_crypto) 573 if (!iwl3945_mod_params.sw_crypto)
571 iwl_set_decrypted_flag(priv, 574 iwl_set_decrypted_flag(priv,
572 (struct ieee80211_hdr *)rxb->skb->data, 575 (struct ieee80211_hdr *)rxb_addr(rxb),
573 le32_to_cpu(rx_end->status), stats); 576 le32_to_cpu(rx_end->status), stats);
574 577
575#ifdef CONFIG_IWLWIFI_LEDS 578 skb_add_rx_frag(skb, 0, rxb->page,
576 if (ieee80211_is_data(hdr->frame_control)) 579 (void *)rx_hdr->payload - (void *)pkt, len);
577 priv->rxtxpackets += len; 580
578#endif 581 /* mac80211 currently doesn't support paged SKB. Convert it to
579 iwl_update_stats(priv, false, hdr->frame_control, len); 582 * linear SKB for management frame and data frame requires
583 * software decryption or software defragementation. */
584 if (ieee80211_is_mgmt(fc) ||
585 ieee80211_has_protected(fc) ||
586 ieee80211_has_morefrags(fc) ||
587 le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
588 ret = skb_linearize(skb);
589 else
590 ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
591 0 : -ENOMEM;
592
593 if (ret) {
594 kfree_skb(skb);
595 goto out;
596 }
580 597
581 memcpy(IEEE80211_SKB_RXCB(rxb->skb), stats, sizeof(*stats)); 598 /*
582 ieee80211_rx_irqsafe(priv->hw, rxb->skb); 599 * XXX: We cannot touch the page and its virtual memory (pkt) after
583 rxb->skb = NULL; 600 * here. It might have already been freed by the above skb change.
601 */
602
603 iwl_update_stats(priv, false, fc, len);
604 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
605
606 ieee80211_rx(priv->hw, skb);
607 out:
608 priv->alloc_rxb_page--;
609 rxb->page = NULL;
584} 610}
585 611
586#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6) 612#define IWL_DELAY_NEXT_SCAN_AFTER_ASSOC (HZ*6)
@@ -590,7 +616,7 @@ static void iwl3945_rx_reply_rx(struct iwl_priv *priv,
590{ 616{
591 struct ieee80211_hdr *header; 617 struct ieee80211_hdr *header;
592 struct ieee80211_rx_status rx_status; 618 struct ieee80211_rx_status rx_status;
593 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 619 struct iwl_rx_packet *pkt = rxb_addr(rxb);
594 struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt); 620 struct iwl3945_rx_frame_stats *rx_stats = IWL_RX_STATS(pkt);
595 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt); 621 struct iwl3945_rx_frame_hdr *rx_hdr = IWL_RX_HDR(pkt);
596 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt); 622 struct iwl3945_rx_frame_end *rx_end = IWL_RX_END(pkt);
@@ -790,29 +816,31 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
790 u8 data_retry_limit; 816 u8 data_retry_limit;
791 __le32 tx_flags; 817 __le32 tx_flags;
792 __le16 fc = hdr->frame_control; 818 __le16 fc = hdr->frame_control;
793 struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload; 819 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
794 820
795 rate = iwl3945_rates[rate_index].plcp; 821 rate = iwl3945_rates[rate_index].plcp;
796 tx_flags = tx->tx_flags; 822 tx_flags = tx_cmd->tx_flags;
797 823
798 /* We need to figure out how to get the sta->supp_rates while 824 /* We need to figure out how to get the sta->supp_rates while
799 * in this running context */ 825 * in this running context */
800 rate_mask = IWL_RATES_MASK; 826 rate_mask = IWL_RATES_MASK;
801 827
828
829 /* Set retry limit on DATA packets and Probe Responses*/
830 if (ieee80211_is_probe_resp(fc))
831 data_retry_limit = 3;
832 else
833 data_retry_limit = IWL_DEFAULT_TX_RETRY;
834 tx_cmd->data_retry_limit = data_retry_limit;
835
802 if (tx_id >= IWL_CMD_QUEUE_NUM) 836 if (tx_id >= IWL_CMD_QUEUE_NUM)
803 rts_retry_limit = 3; 837 rts_retry_limit = 3;
804 else 838 else
805 rts_retry_limit = 7; 839 rts_retry_limit = 7;
806 840
807 if (ieee80211_is_probe_resp(fc)) { 841 if (data_retry_limit < rts_retry_limit)
808 data_retry_limit = 3; 842 rts_retry_limit = data_retry_limit;
809 if (data_retry_limit < rts_retry_limit) 843 tx_cmd->rts_retry_limit = rts_retry_limit;
810 rts_retry_limit = data_retry_limit;
811 } else
812 data_retry_limit = IWL_DEFAULT_TX_RETRY;
813
814 if (priv->data_retry_limit != -1)
815 data_retry_limit = priv->data_retry_limit;
816 844
817 if (ieee80211_is_mgmt(fc)) { 845 if (ieee80211_is_mgmt(fc)) {
818 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) { 846 switch (fc & cpu_to_le16(IEEE80211_FCTL_STYPE)) {
@@ -830,22 +858,20 @@ void iwl3945_hw_build_tx_cmd_rate(struct iwl_priv *priv,
830 } 858 }
831 } 859 }
832 860
833 tx->rts_retry_limit = rts_retry_limit; 861 tx_cmd->rate = rate;
834 tx->data_retry_limit = data_retry_limit; 862 tx_cmd->tx_flags = tx_flags;
835 tx->rate = rate;
836 tx->tx_flags = tx_flags;
837 863
838 /* OFDM */ 864 /* OFDM */
839 tx->supp_rates[0] = 865 tx_cmd->supp_rates[0] =
840 ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF; 866 ((rate_mask & IWL_OFDM_RATES_MASK) >> IWL_FIRST_OFDM_RATE) & 0xFF;
841 867
842 /* CCK */ 868 /* CCK */
843 tx->supp_rates[1] = (rate_mask & 0xF); 869 tx_cmd->supp_rates[1] = (rate_mask & 0xF);
844 870
845 IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X " 871 IWL_DEBUG_RATE(priv, "Tx sta id: %d, rate: %d (plcp), flags: 0x%4X "
846 "cck/ofdm mask: 0x%x/0x%x\n", sta_id, 872 "cck/ofdm mask: 0x%x/0x%x\n", sta_id,
847 tx->rate, le32_to_cpu(tx->tx_flags), 873 tx_cmd->rate, le32_to_cpu(tx_cmd->tx_flags),
848 tx->supp_rates[1], tx->supp_rates[0]); 874 tx_cmd->supp_rates[1], tx_cmd->supp_rates[0]);
849} 875}
850 876
851u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags) 877u8 iwl3945_sync_sta(struct iwl_priv *priv, int sta_id, u16 tx_rate, u8 flags)
@@ -961,6 +987,11 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
961 987
962 iwl3945_hw_txq_ctx_free(priv); 988 iwl3945_hw_txq_ctx_free(priv);
963 989
990 /* allocate tx queue structure */
991 rc = iwl_alloc_txq_mem(priv);
992 if (rc)
993 return rc;
994
964 /* Tx CMD queue */ 995 /* Tx CMD queue */
965 rc = iwl3945_tx_reset(priv); 996 rc = iwl3945_tx_reset(priv);
966 if (rc) 997 if (rc)
@@ -985,41 +1016,25 @@ static int iwl3945_txq_ctx_reset(struct iwl_priv *priv)
985 return rc; 1016 return rc;
986} 1017}
987 1018
1019
1020/*
1021 * Start up 3945's basic functionality after it has been reset
1022 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
1023 * NOTE: This does not load uCode nor start the embedded processor
1024 */
988static int iwl3945_apm_init(struct iwl_priv *priv) 1025static int iwl3945_apm_init(struct iwl_priv *priv)
989{ 1026{
990 int ret; 1027 int ret = iwl_apm_init(priv);
991
992 iwl_power_initialize(priv);
993
994 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
995 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
996 1028
997 /* disable L0s without affecting L1 :don't wait for ICH L0s bug W/A) */ 1029 /* Clear APMG (NIC's internal power management) interrupts */
998 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS, 1030 iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
999 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX); 1031 iwl_write_prph(priv, APMG_RTC_INT_STT_REG, 0xFFFFFFFF);
1000
1001 /* set "initialization complete" bit to move adapter
1002 * D0U* --> D0A* state */
1003 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1004
1005 ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
1006 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1007 if (ret < 0) {
1008 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
1009 goto out;
1010 }
1011
1012 /* enable DMA */
1013 iwl_write_prph(priv, APMG_CLK_CTRL_REG, APMG_CLK_VAL_DMA_CLK_RQT |
1014 APMG_CLK_VAL_BSM_CLK_RQT);
1015
1016 udelay(20);
1017 1032
1018 /* disable L1-Active */ 1033 /* Reset radio chip */
1019 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG, 1034 iwl_set_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
1020 APMG_PCIDEV_STT_VAL_L1_ACT_DIS); 1035 udelay(5);
1036 iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, APMG_PS_CTRL_VAL_RESET_REQ);
1021 1037
1022out:
1023 return ret; 1038 return ret;
1024} 1039}
1025 1040
@@ -1144,12 +1159,16 @@ void iwl3945_hw_txq_ctx_free(struct iwl_priv *priv)
1144 int txq_id; 1159 int txq_id;
1145 1160
1146 /* Tx queues */ 1161 /* Tx queues */
1147 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) 1162 if (priv->txq)
1148 if (txq_id == IWL_CMD_QUEUE_NUM) 1163 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
1149 iwl_cmd_queue_free(priv); 1164 txq_id++)
1150 else 1165 if (txq_id == IWL_CMD_QUEUE_NUM)
1151 iwl_tx_queue_free(priv, txq_id); 1166 iwl_cmd_queue_free(priv);
1167 else
1168 iwl_tx_queue_free(priv, txq_id);
1152 1169
1170 /* free tx queue structure */
1171 iwl_free_txq_mem(priv);
1153} 1172}
1154 1173
1155void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv) 1174void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
@@ -1158,6 +1177,7 @@ void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
1158 1177
1159 /* stop SCD */ 1178 /* stop SCD */
1160 iwl_write_prph(priv, ALM_SCD_MODE_REG, 0); 1179 iwl_write_prph(priv, ALM_SCD_MODE_REG, 0);
1180 iwl_write_prph(priv, ALM_SCD_TXFACT_REG, 0);
1161 1181
1162 /* reset TFD queues */ 1182 /* reset TFD queues */
1163 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) { 1183 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) {
@@ -1170,85 +1190,6 @@ void iwl3945_hw_txq_ctx_stop(struct iwl_priv *priv)
1170 iwl3945_hw_txq_ctx_free(priv); 1190 iwl3945_hw_txq_ctx_free(priv);
1171} 1191}
1172 1192
1173static int iwl3945_apm_stop_master(struct iwl_priv *priv)
1174{
1175 int ret = 0;
1176 unsigned long flags;
1177
1178 spin_lock_irqsave(&priv->lock, flags);
1179
1180 /* set stop master bit */
1181 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
1182
1183 iwl_poll_direct_bit(priv, CSR_RESET,
1184 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
1185
1186 if (ret < 0)
1187 goto out;
1188
1189out:
1190 spin_unlock_irqrestore(&priv->lock, flags);
1191 IWL_DEBUG_INFO(priv, "stop master\n");
1192
1193 return ret;
1194}
1195
1196static void iwl3945_apm_stop(struct iwl_priv *priv)
1197{
1198 unsigned long flags;
1199
1200 iwl3945_apm_stop_master(priv);
1201
1202 spin_lock_irqsave(&priv->lock, flags);
1203
1204 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
1205
1206 udelay(10);
1207 /* clear "init complete" move adapter D0A* --> D0U state */
1208 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1209 spin_unlock_irqrestore(&priv->lock, flags);
1210}
1211
1212static int iwl3945_apm_reset(struct iwl_priv *priv)
1213{
1214 iwl3945_apm_stop_master(priv);
1215
1216
1217 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
1218 udelay(10);
1219
1220 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1221
1222 iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
1223 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1224
1225 iwl_write_prph(priv, APMG_CLK_CTRL_REG,
1226 APMG_CLK_VAL_BSM_CLK_RQT);
1227
1228 iwl_write_prph(priv, APMG_RTC_INT_MSK_REG, 0x0);
1229 iwl_write_prph(priv, APMG_RTC_INT_STT_REG,
1230 0xFFFFFFFF);
1231
1232 /* enable DMA */
1233 iwl_write_prph(priv, APMG_CLK_EN_REG,
1234 APMG_CLK_VAL_DMA_CLK_RQT |
1235 APMG_CLK_VAL_BSM_CLK_RQT);
1236 udelay(10);
1237
1238 iwl_set_bits_prph(priv, APMG_PS_CTRL_REG,
1239 APMG_PS_CTRL_VAL_RESET_REQ);
1240 udelay(5);
1241 iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
1242 APMG_PS_CTRL_VAL_RESET_REQ);
1243
1244 /* Clear the 'host command active' bit... */
1245 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
1246
1247 wake_up_interruptible(&priv->wait_command_queue);
1248
1249 return 0;
1250}
1251
1252/** 1193/**
1253 * iwl3945_hw_reg_adjust_power_by_temp 1194 * iwl3945_hw_reg_adjust_power_by_temp
1254 * return index delta into power gain settings table 1195 * return index delta into power gain settings table
@@ -1857,7 +1798,7 @@ int iwl3945_hw_reg_set_txpower(struct iwl_priv *priv, s8 power)
1857static int iwl3945_send_rxon_assoc(struct iwl_priv *priv) 1798static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
1858{ 1799{
1859 int rc = 0; 1800 int rc = 0;
1860 struct iwl_rx_packet *res = NULL; 1801 struct iwl_rx_packet *pkt;
1861 struct iwl3945_rxon_assoc_cmd rxon_assoc; 1802 struct iwl3945_rxon_assoc_cmd rxon_assoc;
1862 struct iwl_host_cmd cmd = { 1803 struct iwl_host_cmd cmd = {
1863 .id = REPLY_RXON_ASSOC, 1804 .id = REPLY_RXON_ASSOC,
@@ -1886,14 +1827,14 @@ static int iwl3945_send_rxon_assoc(struct iwl_priv *priv)
1886 if (rc) 1827 if (rc)
1887 return rc; 1828 return rc;
1888 1829
1889 res = (struct iwl_rx_packet *)cmd.reply_skb->data; 1830 pkt = (struct iwl_rx_packet *)cmd.reply_page;
1890 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 1831 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
1891 IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n"); 1832 IWL_ERR(priv, "Bad return from REPLY_RXON_ASSOC command\n");
1892 rc = -EIO; 1833 rc = -EIO;
1893 } 1834 }
1894 1835
1895 priv->alloc_rxb_skb--; 1836 priv->alloc_rxb_page--;
1896 dev_kfree_skb_any(cmd.reply_skb); 1837 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
1897 1838
1898 return rc; 1839 return rc;
1899} 1840}
@@ -2041,12 +1982,6 @@ static int iwl3945_commit_rxon(struct iwl_priv *priv)
2041 return 0; 1982 return 0;
2042} 1983}
2043 1984
2044/* will add 3945 channel switch cmd handling later */
2045int iwl3945_hw_channel_switch(struct iwl_priv *priv, u16 channel)
2046{
2047 return 0;
2048}
2049
2050/** 1985/**
2051 * iwl3945_reg_txpower_periodic - called when time to check our temperature. 1986 * iwl3945_reg_txpower_periodic - called when time to check our temperature.
2052 * 1987 *
@@ -2556,11 +2491,10 @@ int iwl3945_hw_set_hw_params(struct iwl_priv *priv)
2556 } 2491 }
2557 2492
2558 /* Assign number of Usable TX queues */ 2493 /* Assign number of Usable TX queues */
2559 priv->hw_params.max_txq_num = IWL39_NUM_QUEUES; 2494 priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
2560 2495
2561 priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd); 2496 priv->hw_params.tfd_size = sizeof(struct iwl3945_tfd);
2562 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_3K; 2497 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_3K);
2563 priv->hw_params.max_pkt_size = 2342;
2564 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; 2498 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
2565 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; 2499 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
2566 priv->hw_params.max_stations = IWL3945_STATION_COUNT; 2500 priv->hw_params.max_stations = IWL3945_STATION_COUNT;
@@ -2843,8 +2777,7 @@ static struct iwl_lib_ops iwl3945_lib = {
2843 .dump_nic_error_log = iwl3945_dump_nic_error_log, 2777 .dump_nic_error_log = iwl3945_dump_nic_error_log,
2844 .apm_ops = { 2778 .apm_ops = {
2845 .init = iwl3945_apm_init, 2779 .init = iwl3945_apm_init,
2846 .reset = iwl3945_apm_reset, 2780 .stop = iwl_apm_stop,
2847 .stop = iwl3945_apm_stop,
2848 .config = iwl3945_nic_config, 2781 .config = iwl3945_nic_config,
2849 .set_pwr_src = iwl3945_set_pwr_src, 2782 .set_pwr_src = iwl3945_set_pwr_src,
2850 }, 2783 },
@@ -2873,6 +2806,7 @@ static struct iwl_lib_ops iwl3945_lib = {
2873static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = { 2806static struct iwl_hcmd_utils_ops iwl3945_hcmd_utils = {
2874 .get_hcmd_size = iwl3945_get_hcmd_size, 2807 .get_hcmd_size = iwl3945_get_hcmd_size,
2875 .build_addsta_hcmd = iwl3945_build_addsta_hcmd, 2808 .build_addsta_hcmd = iwl3945_build_addsta_hcmd,
2809 .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
2876}; 2810};
2877 2811
2878static struct iwl_ops iwl3945_ops = { 2812static struct iwl_ops iwl3945_ops = {
@@ -2880,6 +2814,7 @@ static struct iwl_ops iwl3945_ops = {
2880 .lib = &iwl3945_lib, 2814 .lib = &iwl3945_lib,
2881 .hcmd = &iwl3945_hcmd, 2815 .hcmd = &iwl3945_hcmd,
2882 .utils = &iwl3945_hcmd_utils, 2816 .utils = &iwl3945_hcmd_utils,
2817 .led = &iwl3945_led_ops,
2883}; 2818};
2884 2819
2885static struct iwl_cfg iwl3945_bg_cfg = { 2820static struct iwl_cfg iwl3945_bg_cfg = {
@@ -2891,9 +2826,14 @@ static struct iwl_cfg iwl3945_bg_cfg = {
2891 .eeprom_size = IWL3945_EEPROM_IMG_SIZE, 2826 .eeprom_size = IWL3945_EEPROM_IMG_SIZE,
2892 .eeprom_ver = EEPROM_3945_EEPROM_VERSION, 2827 .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
2893 .ops = &iwl3945_ops, 2828 .ops = &iwl3945_ops,
2829 .num_of_queues = IWL39_NUM_QUEUES,
2894 .mod_params = &iwl3945_mod_params, 2830 .mod_params = &iwl3945_mod_params,
2831 .pll_cfg_val = CSR39_ANA_PLL_CFG_VAL,
2832 .set_l0s = false,
2833 .use_bsm = true,
2895 .use_isr_legacy = true, 2834 .use_isr_legacy = true,
2896 .ht_greenfield_support = false, 2835 .ht_greenfield_support = false,
2836 .led_compensation = 64,
2897}; 2837};
2898 2838
2899static struct iwl_cfg iwl3945_abg_cfg = { 2839static struct iwl_cfg iwl3945_abg_cfg = {
@@ -2905,9 +2845,11 @@ static struct iwl_cfg iwl3945_abg_cfg = {
2905 .eeprom_size = IWL3945_EEPROM_IMG_SIZE, 2845 .eeprom_size = IWL3945_EEPROM_IMG_SIZE,
2906 .eeprom_ver = EEPROM_3945_EEPROM_VERSION, 2846 .eeprom_ver = EEPROM_3945_EEPROM_VERSION,
2907 .ops = &iwl3945_ops, 2847 .ops = &iwl3945_ops,
2848 .num_of_queues = IWL39_NUM_QUEUES,
2908 .mod_params = &iwl3945_mod_params, 2849 .mod_params = &iwl3945_mod_params,
2909 .use_isr_legacy = true, 2850 .use_isr_legacy = true,
2910 .ht_greenfield_support = false, 2851 .ht_greenfield_support = false,
2852 .led_compensation = 64,
2911}; 2853};
2912 2854
2913struct pci_device_id iwl3945_hw_card_ids[] = { 2855struct pci_device_id iwl3945_hw_card_ids[] = {
diff --git a/drivers/net/wireless/iwlwifi/iwl-3945.h b/drivers/net/wireless/iwlwifi/iwl-3945.h
index 21679bf3a1aa..2b0d65c5780a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-3945.h
+++ b/drivers/net/wireless/iwlwifi/iwl-3945.h
@@ -46,7 +46,7 @@ extern struct pci_device_id iwl3945_hw_card_ids[];
46#include "iwl-debug.h" 46#include "iwl-debug.h"
47#include "iwl-power.h" 47#include "iwl-power.h"
48#include "iwl-dev.h" 48#include "iwl-dev.h"
49#include "iwl-3945-led.h" 49#include "iwl-led.h"
50 50
51/* Highest firmware API version supported */ 51/* Highest firmware API version supported */
52#define IWL3945_UCODE_API_MAX 2 52#define IWL3945_UCODE_API_MAX 2
@@ -74,8 +74,41 @@ extern struct pci_device_id iwl3945_hw_card_ids[];
74/* Module parameters accessible from iwl-*.c */ 74/* Module parameters accessible from iwl-*.c */
75extern struct iwl_mod_params iwl3945_mod_params; 75extern struct iwl_mod_params iwl3945_mod_params;
76 76
77struct iwl3945_rate_scale_data {
78 u64 data;
79 s32 success_counter;
80 s32 success_ratio;
81 s32 counter;
82 s32 average_tpt;
83 unsigned long stamp;
84};
85
86struct iwl3945_rs_sta {
87 spinlock_t lock;
88 struct iwl_priv *priv;
89 s32 *expected_tpt;
90 unsigned long last_partial_flush;
91 unsigned long last_flush;
92 u32 flush_time;
93 u32 last_tx_packets;
94 u32 tx_packets;
95 u8 tgg;
96 u8 flush_pending;
97 u8 start_rate;
98 u8 ibss_sta_added;
99 struct timer_list rate_scale_flush;
100 struct iwl3945_rate_scale_data win[IWL_RATE_COUNT_3945];
101#ifdef CONFIG_MAC80211_DEBUGFS
102 struct dentry *rs_sta_dbgfs_stats_table_file;
103#endif
104
105 /* used to be in sta_info */
106 int last_txrate_idx;
107};
108
109
77struct iwl3945_sta_priv { 110struct iwl3945_sta_priv {
78 struct iwl3945_rs_sta *rs_sta; 111 struct iwl3945_rs_sta rs_sta;
79}; 112};
80 113
81enum iwl3945_antenna { 114enum iwl3945_antenna {
@@ -130,12 +163,6 @@ struct iwl3945_frame {
130#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ) 163#define SN_TO_SEQ(ssn) (((ssn) << 4) & IEEE80211_SCTL_SEQ)
131#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4) 164#define MAX_SN ((IEEE80211_SCTL_SEQ) >> 4)
132 165
133/*
134 * RX related structures and functions
135 */
136#define RX_FREE_BUFFERS 64
137#define RX_LOW_WATERMARK 8
138
139#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 166#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
140#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 167#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
141#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 168#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
@@ -280,8 +307,6 @@ extern void iwl3945_config_ap(struct iwl_priv *priv);
280 */ 307 */
281extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid); 308extern u8 iwl3945_hw_find_station(struct iwl_priv *priv, const u8 *bssid);
282 309
283extern int iwl3945_hw_channel_switch(struct iwl_priv *priv, u16 channel);
284
285/* 310/*
286 * Forward declare iwl-3945.c functions for iwl-base.c 311 * Forward declare iwl-3945.c functions for iwl-base.c
287 */ 312 */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
index b34322a32458..c606366b582c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
+++ b/drivers/net/wireless/iwlwifi/iwl-4965-hw.h
@@ -76,12 +76,9 @@
76 76
77/* 77/*
78 * uCode queue management definitions ... 78 * uCode queue management definitions ...
79 * Queue #4 is the command queue for 3945 and 4965; map it to Tx FIFO chnl 4.
80 * The first queue used for block-ack aggregation is #7 (4965 only). 79 * The first queue used for block-ack aggregation is #7 (4965 only).
81 * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7. 80 * All block-ack aggregation queues should map to Tx DMA/FIFO channel 7.
82 */ 81 */
83#define IWL_CMD_QUEUE_NUM 4
84#define IWL_CMD_FIFO_NUM 4
85#define IWL49_FIRST_AMPDU_QUEUE 7 82#define IWL49_FIRST_AMPDU_QUEUE 7
86 83
87/* Time constants */ 84/* Time constants */
diff --git a/drivers/net/wireless/iwlwifi/iwl-4965.c b/drivers/net/wireless/iwlwifi/iwl-4965.c
index a22a0501c190..1ff465ad40d8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-4965.c
+++ b/drivers/net/wireless/iwlwifi/iwl-4965.c
@@ -44,6 +44,7 @@
44#include "iwl-helpers.h" 44#include "iwl-helpers.h"
45#include "iwl-calib.h" 45#include "iwl-calib.h"
46#include "iwl-sta.h" 46#include "iwl-sta.h"
47#include "iwl-agn-led.h"
47 48
48static int iwl4965_send_tx_power(struct iwl_priv *priv); 49static int iwl4965_send_tx_power(struct iwl_priv *priv);
49static int iwl4965_hw_get_temperature(struct iwl_priv *priv); 50static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
@@ -61,8 +62,6 @@ static int iwl4965_hw_get_temperature(struct iwl_priv *priv);
61 62
62/* module parameters */ 63/* module parameters */
63static struct iwl_mod_params iwl4965_mod_params = { 64static struct iwl_mod_params iwl4965_mod_params = {
64 .num_of_queues = IWL49_NUM_QUEUES,
65 .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
66 .amsdu_size_8K = 1, 65 .amsdu_size_8K = 1,
67 .restart_fw = 1, 66 .restart_fw = 1,
68 /* the rest are 0 by default */ 67 /* the rest are 0 by default */
@@ -318,63 +317,13 @@ static void iwl4965_txq_set_sched(struct iwl_priv *priv, u32 mask)
318 iwl_write_prph(priv, IWL49_SCD_TXFACT, mask); 317 iwl_write_prph(priv, IWL49_SCD_TXFACT, mask);
319} 318}
320 319
321static int iwl4965_apm_init(struct iwl_priv *priv)
322{
323 int ret = 0;
324
325 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
326 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
327
328 /* disable L0s without affecting L1 :don't wait for ICH L0s bug W/A) */
329 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
330 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
331
332 /* set "initialization complete" bit to move adapter
333 * D0U* --> D0A* state */
334 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
335
336 /* wait for clock stabilization */
337 ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
338 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
339 if (ret < 0) {
340 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
341 goto out;
342 }
343
344 /* enable DMA */
345 iwl_write_prph(priv, APMG_CLK_CTRL_REG, APMG_CLK_VAL_DMA_CLK_RQT |
346 APMG_CLK_VAL_BSM_CLK_RQT);
347
348 udelay(20);
349
350 /* disable L1-Active */
351 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
352 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
353
354out:
355 return ret;
356}
357
358
359static void iwl4965_nic_config(struct iwl_priv *priv) 320static void iwl4965_nic_config(struct iwl_priv *priv)
360{ 321{
361 unsigned long flags; 322 unsigned long flags;
362 u16 radio_cfg; 323 u16 radio_cfg;
363 u16 lctl;
364 324
365 spin_lock_irqsave(&priv->lock, flags); 325 spin_lock_irqsave(&priv->lock, flags);
366 326
367 lctl = iwl_pcie_link_ctl(priv);
368
369 /* HW bug W/A - negligible power consumption */
370 /* L1-ASPM is enabled by BIOS */
371 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN)
372 /* L1-ASPM enabled: disable L0S */
373 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
374 else
375 /* L1-ASPM disabled: enable L0S */
376 iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
377
378 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); 327 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
379 328
380 /* write radio config values to register */ 329 /* write radio config values to register */
@@ -395,79 +344,6 @@ static void iwl4965_nic_config(struct iwl_priv *priv)
395 spin_unlock_irqrestore(&priv->lock, flags); 344 spin_unlock_irqrestore(&priv->lock, flags);
396} 345}
397 346
398static int iwl4965_apm_stop_master(struct iwl_priv *priv)
399{
400 unsigned long flags;
401
402 spin_lock_irqsave(&priv->lock, flags);
403
404 /* set stop master bit */
405 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
406
407 iwl_poll_direct_bit(priv, CSR_RESET,
408 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
409
410 spin_unlock_irqrestore(&priv->lock, flags);
411 IWL_DEBUG_INFO(priv, "stop master\n");
412
413 return 0;
414}
415
416static void iwl4965_apm_stop(struct iwl_priv *priv)
417{
418 unsigned long flags;
419
420 iwl4965_apm_stop_master(priv);
421
422 spin_lock_irqsave(&priv->lock, flags);
423
424 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
425
426 udelay(10);
427 /* clear "init complete" move adapter D0A* --> D0U state */
428 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
429 spin_unlock_irqrestore(&priv->lock, flags);
430}
431
432static int iwl4965_apm_reset(struct iwl_priv *priv)
433{
434 int ret = 0;
435
436 iwl4965_apm_stop_master(priv);
437
438
439 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
440
441 udelay(10);
442
443 /* FIXME: put here L1A -L0S w/a */
444
445 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
446
447 ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
448 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
449 if (ret < 0)
450 goto out;
451
452 udelay(10);
453
454 /* Enable DMA and BSM Clock */
455 iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT |
456 APMG_CLK_VAL_BSM_CLK_RQT);
457
458 udelay(10);
459
460 /* disable L1A */
461 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
462 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
463
464 clear_bit(STATUS_HCMD_ACTIVE, &priv->status);
465 wake_up_interruptible(&priv->wait_command_queue);
466
467out:
468 return ret;
469}
470
471/* Reset differential Rx gains in NIC to prepare for chain noise calibration. 347/* Reset differential Rx gains in NIC to prepare for chain noise calibration.
472 * Called after every association, but this runs only once! 348 * Called after every association, but this runs only once!
473 * ... once chain noise is calibrated the first time, it's good forever. */ 349 * ... once chain noise is calibrated the first time, it's good forever. */
@@ -495,14 +371,15 @@ static void iwl4965_chain_noise_reset(struct iwl_priv *priv)
495static void iwl4965_gain_computation(struct iwl_priv *priv, 371static void iwl4965_gain_computation(struct iwl_priv *priv,
496 u32 *average_noise, 372 u32 *average_noise,
497 u16 min_average_noise_antenna_i, 373 u16 min_average_noise_antenna_i,
498 u32 min_average_noise) 374 u32 min_average_noise,
375 u8 default_chain)
499{ 376{
500 int i, ret; 377 int i, ret;
501 struct iwl_chain_noise_data *data = &priv->chain_noise_data; 378 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
502 379
503 data->delta_gain_code[min_average_noise_antenna_i] = 0; 380 data->delta_gain_code[min_average_noise_antenna_i] = 0;
504 381
505 for (i = 0; i < NUM_RX_CHAINS; i++) { 382 for (i = default_chain; i < NUM_RX_CHAINS; i++) {
506 s32 delta_g = 0; 383 s32 delta_g = 0;
507 384
508 if (!(data->disconn_array[i]) && 385 if (!(data->disconn_array[i]) &&
@@ -556,18 +433,6 @@ static void iwl4965_gain_computation(struct iwl_priv *priv,
556 data->beacon_count = 0; 433 data->beacon_count = 0;
557} 434}
558 435
559static void iwl4965_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
560 __le32 *tx_flags)
561{
562 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
563 *tx_flags |= TX_CMD_FLG_RTS_MSK;
564 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
565 } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
566 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
567 *tx_flags |= TX_CMD_FLG_CTS_MSK;
568 }
569}
570
571static void iwl4965_bg_txpower_work(struct work_struct *work) 436static void iwl4965_bg_txpower_work(struct work_struct *work)
572{ 437{
573 struct iwl_priv *priv = container_of(work, struct iwl_priv, 438 struct iwl_priv *priv = container_of(work, struct iwl_priv,
@@ -662,7 +527,8 @@ static int iwl4965_alive_notify(struct iwl_priv *priv)
662 iwl_write_targ_mem(priv, a, 0); 527 iwl_write_targ_mem(priv, a, 0);
663 for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4) 528 for (; a < priv->scd_base_addr + IWL49_SCD_TRANSLATE_TBL_OFFSET; a += 4)
664 iwl_write_targ_mem(priv, a, 0); 529 iwl_write_targ_mem(priv, a, 0);
665 for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4) 530 for (; a < priv->scd_base_addr +
531 IWL49_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
666 iwl_write_targ_mem(priv, a, 0); 532 iwl_write_targ_mem(priv, a, 0);
667 533
668 /* Tel 4965 where to find Tx byte count tables */ 534 /* Tel 4965 where to find Tx byte count tables */
@@ -747,6 +613,10 @@ static struct iwl_sensitivity_ranges iwl4965_sensitivity = {
747 613
748 .nrg_th_cck = 100, 614 .nrg_th_cck = 100,
749 .nrg_th_ofdm = 100, 615 .nrg_th_ofdm = 100,
616
617 .barker_corr_th_min = 190,
618 .barker_corr_th_min_mrc = 390,
619 .nrg_th_cca = 62,
750}; 620};
751 621
752static void iwl4965_set_ct_threshold(struct iwl_priv *priv) 622static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
@@ -763,19 +633,16 @@ static void iwl4965_set_ct_threshold(struct iwl_priv *priv)
763 */ 633 */
764static int iwl4965_hw_set_hw_params(struct iwl_priv *priv) 634static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
765{ 635{
636 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
637 priv->cfg->mod_params->num_of_queues <= IWL49_NUM_QUEUES)
638 priv->cfg->num_of_queues =
639 priv->cfg->mod_params->num_of_queues;
766 640
767 if ((priv->cfg->mod_params->num_of_queues > IWL49_NUM_QUEUES) || 641 priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
768 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) {
769 IWL_ERR(priv,
770 "invalid queues_num, should be between %d and %d\n",
771 IWL_MIN_NUM_QUEUES, IWL49_NUM_QUEUES);
772 return -EINVAL;
773 }
774
775 priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues;
776 priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM; 642 priv->hw_params.dma_chnl_num = FH49_TCSR_CHNL_NUM;
777 priv->hw_params.scd_bc_tbls_size = 643 priv->hw_params.scd_bc_tbls_size =
778 IWL49_NUM_QUEUES * sizeof(struct iwl4965_scd_bc_tbl); 644 priv->cfg->num_of_queues *
645 sizeof(struct iwl4965_scd_bc_tbl);
779 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 646 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
780 priv->hw_params.max_stations = IWL4965_STATION_COUNT; 647 priv->hw_params.max_stations = IWL4965_STATION_COUNT;
781 priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID; 648 priv->hw_params.bcast_sta_id = IWL4965_BROADCAST_ID;
@@ -786,10 +653,10 @@ static int iwl4965_hw_set_hw_params(struct iwl_priv *priv)
786 653
787 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR; 654 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
788 655
789 priv->hw_params.tx_chains_num = 2; 656 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
790 priv->hw_params.rx_chains_num = 2; 657 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
791 priv->hw_params.valid_tx_ant = ANT_A | ANT_B; 658 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
792 priv->hw_params.valid_rx_ant = ANT_A | ANT_B; 659 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
793 if (priv->cfg->ops->lib->temp_ops.set_ct_kill) 660 if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
794 priv->cfg->ops->lib->temp_ops.set_ct_kill(priv); 661 priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
795 662
@@ -1566,14 +1433,13 @@ static int iwl4965_send_rxon_assoc(struct iwl_priv *priv)
1566 return ret; 1433 return ret;
1567} 1434}
1568 1435
1569#ifdef IEEE80211_CONF_CHANNEL_SWITCH
1570static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel) 1436static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1571{ 1437{
1572 int rc; 1438 int rc;
1573 u8 band = 0; 1439 u8 band = 0;
1574 bool is_ht40 = false; 1440 bool is_ht40 = false;
1575 u8 ctrl_chan_high = 0; 1441 u8 ctrl_chan_high = 0;
1576 struct iwl4965_channel_switch_cmd cmd = { 0 }; 1442 struct iwl4965_channel_switch_cmd cmd;
1577 const struct iwl_channel_info *ch_info; 1443 const struct iwl_channel_info *ch_info;
1578 1444
1579 band = priv->band == IEEE80211_BAND_2GHZ; 1445 band = priv->band == IEEE80211_BAND_2GHZ;
@@ -1594,8 +1460,11 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1594 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time); 1460 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
1595 if (ch_info) 1461 if (ch_info)
1596 cmd.expect_beacon = is_channel_radar(ch_info); 1462 cmd.expect_beacon = is_channel_radar(ch_info);
1597 else 1463 else {
1598 cmd.expect_beacon = 1; 1464 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
1465 priv->active_rxon.channel, channel);
1466 return -EFAULT;
1467 }
1599 1468
1600 rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_ht40, 1469 rc = iwl4965_fill_txpower_tbl(priv, band, channel, is_ht40,
1601 ctrl_chan_high, &cmd.tx_power); 1470 ctrl_chan_high, &cmd.tx_power);
@@ -1607,7 +1476,6 @@ static int iwl4965_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1607 rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd); 1476 rc = iwl_send_cmd_pdu(priv, REPLY_CHANNEL_SWITCH, sizeof(cmd), &cmd);
1608 return rc; 1477 return rc;
1609} 1478}
1610#endif
1611 1479
1612/** 1480/**
1613 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array 1481 * iwl4965_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
@@ -1804,11 +1672,13 @@ static int iwl4965_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
1804 u16 ssn_idx, u8 tx_fifo) 1672 u16 ssn_idx, u8 tx_fifo)
1805{ 1673{
1806 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || 1674 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
1807 (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) { 1675 (IWL49_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
1676 <= txq_id)) {
1808 IWL_WARN(priv, 1677 IWL_WARN(priv,
1809 "queue number out of range: %d, must be %d to %d\n", 1678 "queue number out of range: %d, must be %d to %d\n",
1810 txq_id, IWL49_FIRST_AMPDU_QUEUE, 1679 txq_id, IWL49_FIRST_AMPDU_QUEUE,
1811 IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1); 1680 IWL49_FIRST_AMPDU_QUEUE +
1681 priv->cfg->num_of_ampdu_queues - 1);
1812 return -EINVAL; 1682 return -EINVAL;
1813 } 1683 }
1814 1684
@@ -1869,11 +1739,13 @@ static int iwl4965_txq_agg_enable(struct iwl_priv *priv, int txq_id,
1869 u16 ra_tid; 1739 u16 ra_tid;
1870 1740
1871 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) || 1741 if ((IWL49_FIRST_AMPDU_QUEUE > txq_id) ||
1872 (IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES <= txq_id)) { 1742 (IWL49_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
1743 <= txq_id)) {
1873 IWL_WARN(priv, 1744 IWL_WARN(priv,
1874 "queue number out of range: %d, must be %d to %d\n", 1745 "queue number out of range: %d, must be %d to %d\n",
1875 txq_id, IWL49_FIRST_AMPDU_QUEUE, 1746 txq_id, IWL49_FIRST_AMPDU_QUEUE,
1876 IWL49_FIRST_AMPDU_QUEUE + IWL49_NUM_AMPDU_QUEUES - 1); 1747 IWL49_FIRST_AMPDU_QUEUE +
1748 priv->cfg->num_of_ampdu_queues - 1);
1877 return -EINVAL; 1749 return -EINVAL;
1878 } 1750 }
1879 1751
@@ -2077,7 +1949,7 @@ static int iwl4965_tx_status_reply_tx(struct iwl_priv *priv,
2077static void iwl4965_rx_reply_tx(struct iwl_priv *priv, 1949static void iwl4965_rx_reply_tx(struct iwl_priv *priv,
2078 struct iwl_rx_mem_buffer *rxb) 1950 struct iwl_rx_mem_buffer *rxb)
2079{ 1951{
2080 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1952 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2081 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 1953 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
2082 int txq_id = SEQ_TO_QUEUE(sequence); 1954 int txq_id = SEQ_TO_QUEUE(sequence);
2083 int index = SEQ_TO_INDEX(sequence); 1955 int index = SEQ_TO_INDEX(sequence);
@@ -2278,7 +2150,7 @@ static struct iwl_hcmd_utils_ops iwl4965_hcmd_utils = {
2278 .build_addsta_hcmd = iwl4965_build_addsta_hcmd, 2150 .build_addsta_hcmd = iwl4965_build_addsta_hcmd,
2279 .chain_noise_reset = iwl4965_chain_noise_reset, 2151 .chain_noise_reset = iwl4965_chain_noise_reset,
2280 .gain_computation = iwl4965_gain_computation, 2152 .gain_computation = iwl4965_gain_computation,
2281 .rts_tx_cmd_flag = iwl4965_rts_tx_cmd_flag, 2153 .rts_tx_cmd_flag = iwlcore_rts_tx_cmd_flag,
2282 .calc_rssi = iwl4965_calc_rssi, 2154 .calc_rssi = iwl4965_calc_rssi,
2283}; 2155};
2284 2156
@@ -2300,10 +2172,10 @@ static struct iwl_lib_ops iwl4965_lib = {
2300 .load_ucode = iwl4965_load_bsm, 2172 .load_ucode = iwl4965_load_bsm,
2301 .dump_nic_event_log = iwl_dump_nic_event_log, 2173 .dump_nic_event_log = iwl_dump_nic_event_log,
2302 .dump_nic_error_log = iwl_dump_nic_error_log, 2174 .dump_nic_error_log = iwl_dump_nic_error_log,
2175 .set_channel_switch = iwl4965_hw_channel_switch,
2303 .apm_ops = { 2176 .apm_ops = {
2304 .init = iwl4965_apm_init, 2177 .init = iwl_apm_init,
2305 .reset = iwl4965_apm_reset, 2178 .stop = iwl_apm_stop,
2306 .stop = iwl4965_apm_stop,
2307 .config = iwl4965_nic_config, 2179 .config = iwl4965_nic_config,
2308 .set_pwr_src = iwl_set_pwr_src, 2180 .set_pwr_src = iwl_set_pwr_src,
2309 }, 2181 },
@@ -2339,6 +2211,7 @@ static struct iwl_ops iwl4965_ops = {
2339 .lib = &iwl4965_lib, 2211 .lib = &iwl4965_lib,
2340 .hcmd = &iwl4965_hcmd, 2212 .hcmd = &iwl4965_hcmd,
2341 .utils = &iwl4965_hcmd_utils, 2213 .utils = &iwl4965_hcmd_utils,
2214 .led = &iwlagn_led_ops,
2342}; 2215};
2343 2216
2344struct iwl_cfg iwl4965_agn_cfg = { 2217struct iwl_cfg iwl4965_agn_cfg = {
@@ -2351,30 +2224,40 @@ struct iwl_cfg iwl4965_agn_cfg = {
2351 .eeprom_ver = EEPROM_4965_EEPROM_VERSION, 2224 .eeprom_ver = EEPROM_4965_EEPROM_VERSION,
2352 .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION, 2225 .eeprom_calib_ver = EEPROM_4965_TX_POWER_VERSION,
2353 .ops = &iwl4965_ops, 2226 .ops = &iwl4965_ops,
2227 .num_of_queues = IWL49_NUM_QUEUES,
2228 .num_of_ampdu_queues = IWL49_NUM_AMPDU_QUEUES,
2354 .mod_params = &iwl4965_mod_params, 2229 .mod_params = &iwl4965_mod_params,
2230 .valid_tx_ant = ANT_AB,
2231 .valid_rx_ant = ANT_AB,
2232 .pll_cfg_val = 0,
2233 .set_l0s = true,
2234 .use_bsm = true,
2355 .use_isr_legacy = true, 2235 .use_isr_legacy = true,
2356 .ht_greenfield_support = false, 2236 .ht_greenfield_support = false,
2357 .broken_powersave = true, 2237 .broken_powersave = true,
2238 .led_compensation = 61,
2239 .chain_noise_num_beacons = IWL4965_CAL_NUM_BEACONS,
2358}; 2240};
2359 2241
2360/* Module firmware */ 2242/* Module firmware */
2361MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX)); 2243MODULE_FIRMWARE(IWL4965_MODULE_FIRMWARE(IWL4965_UCODE_API_MAX));
2362 2244
2363module_param_named(antenna, iwl4965_mod_params.antenna, int, 0444); 2245module_param_named(antenna, iwl4965_mod_params.antenna, int, S_IRUGO);
2364MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); 2246MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
2365module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, 0444); 2247module_param_named(swcrypto, iwl4965_mod_params.sw_crypto, int, S_IRUGO);
2366MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])"); 2248MODULE_PARM_DESC(swcrypto, "using crypto in software (default 0 [hardware])");
2367module_param_named( 2249module_param_named(
2368 disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, 0444); 2250 disable_hw_scan, iwl4965_mod_params.disable_hw_scan, int, S_IRUGO);
2369MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)"); 2251MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
2370 2252
2371module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, 0444); 2253module_param_named(queues_num, iwl4965_mod_params.num_of_queues, int, S_IRUGO);
2372MODULE_PARM_DESC(queues_num, "number of hw queues."); 2254MODULE_PARM_DESC(queues_num, "number of hw queues.");
2373/* 11n */ 2255/* 11n */
2374module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, 0444); 2256module_param_named(11n_disable, iwl4965_mod_params.disable_11n, int, S_IRUGO);
2375MODULE_PARM_DESC(11n_disable, "disable 11n functionality"); 2257MODULE_PARM_DESC(11n_disable, "disable 11n functionality");
2376module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K, int, 0444); 2258module_param_named(amsdu_size_8K, iwl4965_mod_params.amsdu_size_8K,
2259 int, S_IRUGO);
2377MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size"); 2260MODULE_PARM_DESC(amsdu_size_8K, "enable 8K amsdu size");
2378 2261
2379module_param_named(fw_restart4965, iwl4965_mod_params.restart_fw, int, 0444); 2262module_param_named(fw_restart4965, iwl4965_mod_params.restart_fw, int, S_IRUGO);
2380MODULE_PARM_DESC(fw_restart4965, "restart firmware in case of error"); 2263MODULE_PARM_DESC(fw_restart4965, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlwifi/iwl-5000.c b/drivers/net/wireless/iwlwifi/iwl-5000.c
index 524e7e4c51d1..910217f0ad8a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-5000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-5000.c
@@ -42,6 +42,7 @@
42#include "iwl-io.h" 42#include "iwl-io.h"
43#include "iwl-sta.h" 43#include "iwl-sta.h"
44#include "iwl-helpers.h" 44#include "iwl-helpers.h"
45#include "iwl-agn-led.h"
45#include "iwl-5000-hw.h" 46#include "iwl-5000-hw.h"
46#include "iwl-6000-hw.h" 47#include "iwl-6000-hw.h"
47 48
@@ -71,157 +72,18 @@ static const u16 iwl5000_default_queue_to_tx_fifo[] = {
71 IWL_TX_FIFO_HCCA_2 72 IWL_TX_FIFO_HCCA_2
72}; 73};
73 74
74/* FIXME: same implementation as 4965 */ 75/* NIC configuration for 5000 series */
75static int iwl5000_apm_stop_master(struct iwl_priv *priv)
76{
77 unsigned long flags;
78
79 spin_lock_irqsave(&priv->lock, flags);
80
81 /* set stop master bit */
82 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
83
84 iwl_poll_direct_bit(priv, CSR_RESET,
85 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
86
87 spin_unlock_irqrestore(&priv->lock, flags);
88 IWL_DEBUG_INFO(priv, "stop master\n");
89
90 return 0;
91}
92
93
94int iwl5000_apm_init(struct iwl_priv *priv)
95{
96 int ret = 0;
97
98 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
99 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
100
101 /* disable L0s without affecting L1 :don't wait for ICH L0s bug W/A) */
102 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
103 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
104
105 /* Set FH wait threshold to maximum (HW error during stress W/A) */
106 iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
107
108 /* enable HAP INTA to move device L1a -> L0s */
109 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
110 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
111
112 if (priv->cfg->need_pll_cfg)
113 iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
114
115 /* set "initialization complete" bit to move adapter
116 * D0U* --> D0A* state */
117 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
118
119 /* wait for clock stabilization */
120 ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
121 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
122 if (ret < 0) {
123 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
124 return ret;
125 }
126
127 /* enable DMA */
128 iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
129
130 udelay(20);
131
132 /* disable L1-Active */
133 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
134 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
135
136 return ret;
137}
138
139/* FIXME: this is identical to 4965 */
140void iwl5000_apm_stop(struct iwl_priv *priv)
141{
142 unsigned long flags;
143
144 iwl5000_apm_stop_master(priv);
145
146 spin_lock_irqsave(&priv->lock, flags);
147
148 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
149
150 udelay(10);
151
152 /* clear "init complete" move adapter D0A* --> D0U state */
153 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
154
155 spin_unlock_irqrestore(&priv->lock, flags);
156}
157
158
159int iwl5000_apm_reset(struct iwl_priv *priv)
160{
161 int ret = 0;
162
163 iwl5000_apm_stop_master(priv);
164
165 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
166
167 udelay(10);
168
169
170 /* FIXME: put here L1A -L0S w/a */
171
172 if (priv->cfg->need_pll_cfg)
173 iwl_set_bit(priv, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);
174
175 /* set "initialization complete" bit to move adapter
176 * D0U* --> D0A* state */
177 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
178
179 /* wait for clock stabilization */
180 ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL,
181 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
182 if (ret < 0) {
183 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
184 goto out;
185 }
186
187 /* enable DMA */
188 iwl_write_prph(priv, APMG_CLK_EN_REG, APMG_CLK_VAL_DMA_CLK_RQT);
189
190 udelay(20);
191
192 /* disable L1-Active */
193 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
194 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
195out:
196
197 return ret;
198}
199
200
201/* NIC configuration for 5000 series and up */
202void iwl5000_nic_config(struct iwl_priv *priv) 76void iwl5000_nic_config(struct iwl_priv *priv)
203{ 77{
204 unsigned long flags; 78 unsigned long flags;
205 u16 radio_cfg; 79 u16 radio_cfg;
206 u16 lctl;
207 80
208 spin_lock_irqsave(&priv->lock, flags); 81 spin_lock_irqsave(&priv->lock, flags);
209 82
210 lctl = iwl_pcie_link_ctl(priv);
211
212 /* HW bug W/A */
213 /* L1-ASPM is enabled by BIOS */
214 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) == PCI_CFG_LINK_CTRL_VAL_L1_EN)
215 /* L1-APSM enabled: disable L0S */
216 iwl_set_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
217 else
218 /* L1-ASPM disabled: enable L0S */
219 iwl_clear_bit(priv, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_ENABLED);
220
221 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG); 83 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
222 84
223 /* write radio config values to register */ 85 /* write radio config values to register */
224 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) < EEPROM_5000_RF_CFG_TYPE_MAX) 86 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) < EEPROM_RF_CONFIG_TYPE_MAX)
225 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG, 87 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
226 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) | 88 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
227 EEPROM_RF_CFG_STEP_MSK(radio_cfg) | 89 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
@@ -301,19 +163,22 @@ u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv)
301static void iwl5000_gain_computation(struct iwl_priv *priv, 163static void iwl5000_gain_computation(struct iwl_priv *priv,
302 u32 average_noise[NUM_RX_CHAINS], 164 u32 average_noise[NUM_RX_CHAINS],
303 u16 min_average_noise_antenna_i, 165 u16 min_average_noise_antenna_i,
304 u32 min_average_noise) 166 u32 min_average_noise,
167 u8 default_chain)
305{ 168{
306 int i; 169 int i;
307 s32 delta_g; 170 s32 delta_g;
308 struct iwl_chain_noise_data *data = &priv->chain_noise_data; 171 struct iwl_chain_noise_data *data = &priv->chain_noise_data;
309 172
310 /* Find Gain Code for the antennas B and C */ 173 /*
311 for (i = 1; i < NUM_RX_CHAINS; i++) { 174 * Find Gain Code for the chains based on "default chain"
175 */
176 for (i = default_chain + 1; i < NUM_RX_CHAINS; i++) {
312 if ((data->disconn_array[i])) { 177 if ((data->disconn_array[i])) {
313 data->delta_gain_code[i] = 0; 178 data->delta_gain_code[i] = 0;
314 continue; 179 continue;
315 } 180 }
316 delta_g = (1000 * ((s32)average_noise[0] - 181 delta_g = (1000 * ((s32)average_noise[default_chain] -
317 (s32)average_noise[i])) / 1500; 182 (s32)average_noise[i])) / 1500;
318 /* bound gain by 2 bits value max, 3rd bit is sign */ 183 /* bound gain by 2 bits value max, 3rd bit is sign */
319 data->delta_gain_code[i] = 184 data->delta_gain_code[i] =
@@ -406,6 +271,10 @@ static struct iwl_sensitivity_ranges iwl5000_sensitivity = {
406 .auto_corr_max_cck_mrc = 400, 271 .auto_corr_max_cck_mrc = 400,
407 .nrg_th_cck = 95, 272 .nrg_th_cck = 95,
408 .nrg_th_ofdm = 95, 273 .nrg_th_ofdm = 95,
274
275 .barker_corr_th_min = 190,
276 .barker_corr_th_min_mrc = 390,
277 .nrg_th_cca = 62,
409}; 278};
410 279
411static struct iwl_sensitivity_ranges iwl5150_sensitivity = { 280static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
@@ -428,6 +297,10 @@ static struct iwl_sensitivity_ranges iwl5150_sensitivity = {
428 .auto_corr_max_cck_mrc = 400, 297 .auto_corr_max_cck_mrc = 400,
429 .nrg_th_cck = 95, 298 .nrg_th_cck = 95,
430 .nrg_th_ofdm = 95, 299 .nrg_th_ofdm = 95,
300
301 .barker_corr_th_min = 190,
302 .barker_corr_th_min_mrc = 390,
303 .nrg_th_cca = 62,
431}; 304};
432 305
433const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv, 306const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
@@ -492,7 +365,7 @@ static int iwl5000_send_calib_cfg(struct iwl_priv *priv)
492static void iwl5000_rx_calib_result(struct iwl_priv *priv, 365static void iwl5000_rx_calib_result(struct iwl_priv *priv,
493 struct iwl_rx_mem_buffer *rxb) 366 struct iwl_rx_mem_buffer *rxb)
494{ 367{
495 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 368 struct iwl_rx_packet *pkt = rxb_addr(rxb);
496 struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw; 369 struct iwl_calib_hdr *hdr = (struct iwl_calib_hdr *)pkt->u.raw;
497 int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 370 int len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
498 int index; 371 int index;
@@ -718,16 +591,6 @@ static void iwl5000_tx_queue_set_status(struct iwl_priv *priv,
718 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id); 591 scd_retry ? "BA" : "AC", txq_id, tx_fifo_id);
719} 592}
720 593
721static int iwl5000_send_wimax_coex(struct iwl_priv *priv)
722{
723 struct iwl_wimax_coex_cmd coex_cmd;
724
725 memset(&coex_cmd, 0, sizeof(coex_cmd));
726
727 return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
728 sizeof(coex_cmd), &coex_cmd);
729}
730
731int iwl5000_alive_notify(struct iwl_priv *priv) 594int iwl5000_alive_notify(struct iwl_priv *priv)
732{ 595{
733 u32 a; 596 u32 a;
@@ -745,7 +608,8 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
745 for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET; 608 for (; a < priv->scd_base_addr + IWL50_SCD_TRANSLATE_TBL_OFFSET;
746 a += 4) 609 a += 4)
747 iwl_write_targ_mem(priv, a, 0); 610 iwl_write_targ_mem(priv, a, 0);
748 for (; a < sizeof(u16) * priv->hw_params.max_txq_num; a += 4) 611 for (; a < priv->scd_base_addr +
612 IWL50_SCD_TRANSLATE_TBL_OFFSET_QUEUE(priv->hw_params.max_txq_num); a += 4)
749 iwl_write_targ_mem(priv, a, 0); 613 iwl_write_targ_mem(priv, a, 0);
750 614
751 iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR, 615 iwl_write_prph(priv, IWL50_SCD_DRAM_BASE_ADDR,
@@ -807,7 +671,7 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
807 spin_unlock_irqrestore(&priv->lock, flags); 671 spin_unlock_irqrestore(&priv->lock, flags);
808 672
809 673
810 iwl5000_send_wimax_coex(priv); 674 iwl_send_wimax_coex(priv);
811 675
812 iwl5000_set_Xtal_calib(priv); 676 iwl5000_set_Xtal_calib(priv);
813 iwl_send_calib_results(priv); 677 iwl_send_calib_results(priv);
@@ -817,32 +681,22 @@ int iwl5000_alive_notify(struct iwl_priv *priv)
817 681
818int iwl5000_hw_set_hw_params(struct iwl_priv *priv) 682int iwl5000_hw_set_hw_params(struct iwl_priv *priv)
819{ 683{
820 if ((priv->cfg->mod_params->num_of_queues > IWL50_NUM_QUEUES) || 684 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
821 (priv->cfg->mod_params->num_of_queues < IWL_MIN_NUM_QUEUES)) { 685 priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
822 IWL_ERR(priv, 686 priv->cfg->num_of_queues =
823 "invalid queues_num, should be between %d and %d\n", 687 priv->cfg->mod_params->num_of_queues;
824 IWL_MIN_NUM_QUEUES, IWL50_NUM_QUEUES);
825 return -EINVAL;
826 }
827 688
828 priv->hw_params.max_txq_num = priv->cfg->mod_params->num_of_queues; 689 priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
829 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM; 690 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
830 priv->hw_params.scd_bc_tbls_size = 691 priv->hw_params.scd_bc_tbls_size =
831 IWL50_NUM_QUEUES * sizeof(struct iwl5000_scd_bc_tbl); 692 priv->cfg->num_of_queues *
693 sizeof(struct iwl5000_scd_bc_tbl);
832 priv->hw_params.tfd_size = sizeof(struct iwl_tfd); 694 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
833 priv->hw_params.max_stations = IWL5000_STATION_COUNT; 695 priv->hw_params.max_stations = IWL5000_STATION_COUNT;
834 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID; 696 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
835 697
836 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) { 698 priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
837 case CSR_HW_REV_TYPE_6x00: 699 priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
838 case CSR_HW_REV_TYPE_6x50:
839 priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
840 priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
841 break;
842 default:
843 priv->hw_params.max_data_size = IWL50_RTC_DATA_SIZE;
844 priv->hw_params.max_inst_size = IWL50_RTC_INST_SIZE;
845 }
846 700
847 priv->hw_params.max_bsm_size = 0; 701 priv->hw_params.max_bsm_size = 0;
848 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) | 702 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
@@ -988,11 +842,13 @@ int iwl5000_txq_agg_enable(struct iwl_priv *priv, int txq_id,
988 u16 ra_tid; 842 u16 ra_tid;
989 843
990 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) || 844 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
991 (IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) { 845 (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
846 <= txq_id)) {
992 IWL_WARN(priv, 847 IWL_WARN(priv,
993 "queue number out of range: %d, must be %d to %d\n", 848 "queue number out of range: %d, must be %d to %d\n",
994 txq_id, IWL50_FIRST_AMPDU_QUEUE, 849 txq_id, IWL50_FIRST_AMPDU_QUEUE,
995 IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1); 850 IWL50_FIRST_AMPDU_QUEUE +
851 priv->cfg->num_of_ampdu_queues - 1);
996 return -EINVAL; 852 return -EINVAL;
997 } 853 }
998 854
@@ -1046,11 +902,13 @@ int iwl5000_txq_agg_disable(struct iwl_priv *priv, u16 txq_id,
1046 u16 ssn_idx, u8 tx_fifo) 902 u16 ssn_idx, u8 tx_fifo)
1047{ 903{
1048 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) || 904 if ((IWL50_FIRST_AMPDU_QUEUE > txq_id) ||
1049 (IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES <= txq_id)) { 905 (IWL50_FIRST_AMPDU_QUEUE + priv->cfg->num_of_ampdu_queues
906 <= txq_id)) {
1050 IWL_ERR(priv, 907 IWL_ERR(priv,
1051 "queue number out of range: %d, must be %d to %d\n", 908 "queue number out of range: %d, must be %d to %d\n",
1052 txq_id, IWL50_FIRST_AMPDU_QUEUE, 909 txq_id, IWL50_FIRST_AMPDU_QUEUE,
1053 IWL50_FIRST_AMPDU_QUEUE + IWL50_NUM_AMPDU_QUEUES - 1); 910 IWL50_FIRST_AMPDU_QUEUE +
911 priv->cfg->num_of_ampdu_queues - 1);
1054 return -EINVAL; 912 return -EINVAL;
1055 } 913 }
1056 914
@@ -1217,7 +1075,7 @@ static int iwl5000_tx_status_reply_tx(struct iwl_priv *priv,
1217static void iwl5000_rx_reply_tx(struct iwl_priv *priv, 1075static void iwl5000_rx_reply_tx(struct iwl_priv *priv,
1218 struct iwl_rx_mem_buffer *rxb) 1076 struct iwl_rx_mem_buffer *rxb)
1219{ 1077{
1220 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1078 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1221 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 1079 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1222 int txq_id = SEQ_TO_QUEUE(sequence); 1080 int txq_id = SEQ_TO_QUEUE(sequence);
1223 int index = SEQ_TO_INDEX(sequence); 1081 int index = SEQ_TO_INDEX(sequence);
@@ -1458,6 +1316,24 @@ int iwl5000_calc_rssi(struct iwl_priv *priv,
1458 return max_rssi - agc - IWL49_RSSI_OFFSET; 1316 return max_rssi - agc - IWL49_RSSI_OFFSET;
1459} 1317}
1460 1318
1319static int iwl5000_send_tx_ant_config(struct iwl_priv *priv, u8 valid_tx_ant)
1320{
1321 struct iwl_tx_ant_config_cmd tx_ant_cmd = {
1322 .valid = cpu_to_le32(valid_tx_ant),
1323 };
1324
1325 if (IWL_UCODE_API(priv->ucode_ver) > 1) {
1326 IWL_DEBUG_HC(priv, "select valid tx ant: %u\n", valid_tx_ant);
1327 return iwl_send_cmd_pdu(priv, TX_ANT_CONFIGURATION_CMD,
1328 sizeof(struct iwl_tx_ant_config_cmd),
1329 &tx_ant_cmd);
1330 } else {
1331 IWL_DEBUG_HC(priv, "TX_ANT_CONFIGURATION_CMD not supported\n");
1332 return -EOPNOTSUPP;
1333 }
1334}
1335
1336
1461#define IWL5000_UCODE_GET(item) \ 1337#define IWL5000_UCODE_GET(item) \
1462static u32 iwl5000_ucode_get_##item(const struct iwl_ucode_header *ucode,\ 1338static u32 iwl5000_ucode_get_##item(const struct iwl_ucode_header *ucode,\
1463 u32 api_ver) \ 1339 u32 api_ver) \
@@ -1496,10 +1372,41 @@ IWL5000_UCODE_GET(init_size);
1496IWL5000_UCODE_GET(init_data_size); 1372IWL5000_UCODE_GET(init_data_size);
1497IWL5000_UCODE_GET(boot_size); 1373IWL5000_UCODE_GET(boot_size);
1498 1374
1375static int iwl5000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
1376{
1377 struct iwl5000_channel_switch_cmd cmd;
1378 const struct iwl_channel_info *ch_info;
1379 struct iwl_host_cmd hcmd = {
1380 .id = REPLY_CHANNEL_SWITCH,
1381 .len = sizeof(cmd),
1382 .flags = CMD_SIZE_HUGE,
1383 .data = &cmd,
1384 };
1385
1386 IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
1387 priv->active_rxon.channel, channel);
1388 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
1389 cmd.channel = cpu_to_le16(channel);
1390 cmd.rxon_flags = priv->active_rxon.flags;
1391 cmd.rxon_filter_flags = priv->active_rxon.filter_flags;
1392 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
1393 ch_info = iwl_get_channel_info(priv, priv->band, channel);
1394 if (ch_info)
1395 cmd.expect_beacon = is_channel_radar(ch_info);
1396 else {
1397 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
1398 priv->active_rxon.channel, channel);
1399 return -EFAULT;
1400 }
1401
1402 return iwl_send_cmd_sync(priv, &hcmd);
1403}
1404
1499struct iwl_hcmd_ops iwl5000_hcmd = { 1405struct iwl_hcmd_ops iwl5000_hcmd = {
1500 .rxon_assoc = iwl5000_send_rxon_assoc, 1406 .rxon_assoc = iwl5000_send_rxon_assoc,
1501 .commit_rxon = iwl_commit_rxon, 1407 .commit_rxon = iwl_commit_rxon,
1502 .set_rxon_chain = iwl_set_rxon_chain, 1408 .set_rxon_chain = iwl_set_rxon_chain,
1409 .set_tx_ant = iwl5000_send_tx_ant_config,
1503}; 1410};
1504 1411
1505struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = { 1412struct iwl_hcmd_utils_ops iwl5000_hcmd_utils = {
@@ -1542,10 +1449,10 @@ struct iwl_lib_ops iwl5000_lib = {
1542 .alive_notify = iwl5000_alive_notify, 1449 .alive_notify = iwl5000_alive_notify,
1543 .send_tx_power = iwl5000_send_tx_power, 1450 .send_tx_power = iwl5000_send_tx_power,
1544 .update_chain_flags = iwl_update_chain_flags, 1451 .update_chain_flags = iwl_update_chain_flags,
1452 .set_channel_switch = iwl5000_hw_channel_switch,
1545 .apm_ops = { 1453 .apm_ops = {
1546 .init = iwl5000_apm_init, 1454 .init = iwl_apm_init,
1547 .reset = iwl5000_apm_reset, 1455 .stop = iwl_apm_stop,
1548 .stop = iwl5000_apm_stop,
1549 .config = iwl5000_nic_config, 1456 .config = iwl5000_nic_config,
1550 .set_pwr_src = iwl_set_pwr_src, 1457 .set_pwr_src = iwl_set_pwr_src,
1551 }, 1458 },
@@ -1594,10 +1501,10 @@ static struct iwl_lib_ops iwl5150_lib = {
1594 .alive_notify = iwl5000_alive_notify, 1501 .alive_notify = iwl5000_alive_notify,
1595 .send_tx_power = iwl5000_send_tx_power, 1502 .send_tx_power = iwl5000_send_tx_power,
1596 .update_chain_flags = iwl_update_chain_flags, 1503 .update_chain_flags = iwl_update_chain_flags,
1504 .set_channel_switch = iwl5000_hw_channel_switch,
1597 .apm_ops = { 1505 .apm_ops = {
1598 .init = iwl5000_apm_init, 1506 .init = iwl_apm_init,
1599 .reset = iwl5000_apm_reset, 1507 .stop = iwl_apm_stop,
1600 .stop = iwl5000_apm_stop,
1601 .config = iwl5000_nic_config, 1508 .config = iwl5000_nic_config,
1602 .set_pwr_src = iwl_set_pwr_src, 1509 .set_pwr_src = iwl_set_pwr_src,
1603 }, 1510 },
@@ -1626,11 +1533,12 @@ static struct iwl_lib_ops iwl5150_lib = {
1626 }, 1533 },
1627}; 1534};
1628 1535
1629struct iwl_ops iwl5000_ops = { 1536static struct iwl_ops iwl5000_ops = {
1630 .ucode = &iwl5000_ucode, 1537 .ucode = &iwl5000_ucode,
1631 .lib = &iwl5000_lib, 1538 .lib = &iwl5000_lib,
1632 .hcmd = &iwl5000_hcmd, 1539 .hcmd = &iwl5000_hcmd,
1633 .utils = &iwl5000_hcmd_utils, 1540 .utils = &iwl5000_hcmd_utils,
1541 .led = &iwlagn_led_ops,
1634}; 1542};
1635 1543
1636static struct iwl_ops iwl5150_ops = { 1544static struct iwl_ops iwl5150_ops = {
@@ -1638,11 +1546,10 @@ static struct iwl_ops iwl5150_ops = {
1638 .lib = &iwl5150_lib, 1546 .lib = &iwl5150_lib,
1639 .hcmd = &iwl5000_hcmd, 1547 .hcmd = &iwl5000_hcmd,
1640 .utils = &iwl5000_hcmd_utils, 1548 .utils = &iwl5000_hcmd_utils,
1549 .led = &iwlagn_led_ops,
1641}; 1550};
1642 1551
1643struct iwl_mod_params iwl50_mod_params = { 1552struct iwl_mod_params iwl50_mod_params = {
1644 .num_of_queues = IWL50_NUM_QUEUES,
1645 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1646 .amsdu_size_8K = 1, 1553 .amsdu_size_8K = 1,
1647 .restart_fw = 1, 1554 .restart_fw = 1,
1648 /* the rest are 0 by default */ 1555 /* the rest are 0 by default */
@@ -1659,11 +1566,17 @@ struct iwl_cfg iwl5300_agn_cfg = {
1659 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 1566 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1660 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 1567 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1661 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 1568 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1569 .num_of_queues = IWL50_NUM_QUEUES,
1570 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1662 .mod_params = &iwl50_mod_params, 1571 .mod_params = &iwl50_mod_params,
1663 .valid_tx_ant = ANT_ABC, 1572 .valid_tx_ant = ANT_ABC,
1664 .valid_rx_ant = ANT_ABC, 1573 .valid_rx_ant = ANT_ABC,
1665 .need_pll_cfg = true, 1574 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
1575 .set_l0s = true,
1576 .use_bsm = false,
1666 .ht_greenfield_support = true, 1577 .ht_greenfield_support = true,
1578 .led_compensation = 51,
1579 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1667}; 1580};
1668 1581
1669struct iwl_cfg iwl5100_bg_cfg = { 1582struct iwl_cfg iwl5100_bg_cfg = {
@@ -1676,11 +1589,17 @@ struct iwl_cfg iwl5100_bg_cfg = {
1676 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 1589 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1677 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 1590 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1678 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 1591 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1592 .num_of_queues = IWL50_NUM_QUEUES,
1593 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1679 .mod_params = &iwl50_mod_params, 1594 .mod_params = &iwl50_mod_params,
1680 .valid_tx_ant = ANT_B, 1595 .valid_tx_ant = ANT_B,
1681 .valid_rx_ant = ANT_AB, 1596 .valid_rx_ant = ANT_AB,
1682 .need_pll_cfg = true, 1597 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
1598 .set_l0s = true,
1599 .use_bsm = false,
1683 .ht_greenfield_support = true, 1600 .ht_greenfield_support = true,
1601 .led_compensation = 51,
1602 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1684}; 1603};
1685 1604
1686struct iwl_cfg iwl5100_abg_cfg = { 1605struct iwl_cfg iwl5100_abg_cfg = {
@@ -1693,11 +1612,17 @@ struct iwl_cfg iwl5100_abg_cfg = {
1693 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 1612 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1694 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 1613 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1695 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 1614 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1615 .num_of_queues = IWL50_NUM_QUEUES,
1616 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1696 .mod_params = &iwl50_mod_params, 1617 .mod_params = &iwl50_mod_params,
1697 .valid_tx_ant = ANT_B, 1618 .valid_tx_ant = ANT_B,
1698 .valid_rx_ant = ANT_AB, 1619 .valid_rx_ant = ANT_AB,
1699 .need_pll_cfg = true, 1620 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
1621 .set_l0s = true,
1622 .use_bsm = false,
1700 .ht_greenfield_support = true, 1623 .ht_greenfield_support = true,
1624 .led_compensation = 51,
1625 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1701}; 1626};
1702 1627
1703struct iwl_cfg iwl5100_agn_cfg = { 1628struct iwl_cfg iwl5100_agn_cfg = {
@@ -1710,11 +1635,17 @@ struct iwl_cfg iwl5100_agn_cfg = {
1710 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 1635 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1711 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 1636 .eeprom_ver = EEPROM_5000_EEPROM_VERSION,
1712 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 1637 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
1638 .num_of_queues = IWL50_NUM_QUEUES,
1639 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1713 .mod_params = &iwl50_mod_params, 1640 .mod_params = &iwl50_mod_params,
1714 .valid_tx_ant = ANT_B, 1641 .valid_tx_ant = ANT_B,
1715 .valid_rx_ant = ANT_AB, 1642 .valid_rx_ant = ANT_AB,
1716 .need_pll_cfg = true, 1643 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
1644 .set_l0s = true,
1645 .use_bsm = false,
1717 .ht_greenfield_support = true, 1646 .ht_greenfield_support = true,
1647 .led_compensation = 51,
1648 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1718}; 1649};
1719 1650
1720struct iwl_cfg iwl5350_agn_cfg = { 1651struct iwl_cfg iwl5350_agn_cfg = {
@@ -1727,11 +1658,17 @@ struct iwl_cfg iwl5350_agn_cfg = {
1727 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 1658 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1728 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 1659 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
1729 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 1660 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
1661 .num_of_queues = IWL50_NUM_QUEUES,
1662 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1730 .mod_params = &iwl50_mod_params, 1663 .mod_params = &iwl50_mod_params,
1731 .valid_tx_ant = ANT_ABC, 1664 .valid_tx_ant = ANT_ABC,
1732 .valid_rx_ant = ANT_ABC, 1665 .valid_rx_ant = ANT_ABC,
1733 .need_pll_cfg = true, 1666 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
1667 .set_l0s = true,
1668 .use_bsm = false,
1734 .ht_greenfield_support = true, 1669 .ht_greenfield_support = true,
1670 .led_compensation = 51,
1671 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1735}; 1672};
1736 1673
1737struct iwl_cfg iwl5150_agn_cfg = { 1674struct iwl_cfg iwl5150_agn_cfg = {
@@ -1744,24 +1681,31 @@ struct iwl_cfg iwl5150_agn_cfg = {
1744 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE, 1681 .eeprom_size = IWL_5000_EEPROM_IMG_SIZE,
1745 .eeprom_ver = EEPROM_5050_EEPROM_VERSION, 1682 .eeprom_ver = EEPROM_5050_EEPROM_VERSION,
1746 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION, 1683 .eeprom_calib_ver = EEPROM_5050_TX_POWER_VERSION,
1684 .num_of_queues = IWL50_NUM_QUEUES,
1685 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
1747 .mod_params = &iwl50_mod_params, 1686 .mod_params = &iwl50_mod_params,
1748 .valid_tx_ant = ANT_A, 1687 .valid_tx_ant = ANT_A,
1749 .valid_rx_ant = ANT_AB, 1688 .valid_rx_ant = ANT_AB,
1750 .need_pll_cfg = true, 1689 .pll_cfg_val = CSR50_ANA_PLL_CFG_VAL,
1690 .set_l0s = true,
1691 .use_bsm = false,
1751 .ht_greenfield_support = true, 1692 .ht_greenfield_support = true,
1693 .led_compensation = 51,
1694 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
1752}; 1695};
1753 1696
1754MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX)); 1697MODULE_FIRMWARE(IWL5000_MODULE_FIRMWARE(IWL5000_UCODE_API_MAX));
1755MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX)); 1698MODULE_FIRMWARE(IWL5150_MODULE_FIRMWARE(IWL5150_UCODE_API_MAX));
1756 1699
1757module_param_named(swcrypto50, iwl50_mod_params.sw_crypto, bool, 0444); 1700module_param_named(swcrypto50, iwl50_mod_params.sw_crypto, bool, S_IRUGO);
1758MODULE_PARM_DESC(swcrypto50, 1701MODULE_PARM_DESC(swcrypto50,
1759 "using software crypto engine (default 0 [hardware])\n"); 1702 "using software crypto engine (default 0 [hardware])\n");
1760module_param_named(queues_num50, iwl50_mod_params.num_of_queues, int, 0444); 1703module_param_named(queues_num50, iwl50_mod_params.num_of_queues, int, S_IRUGO);
1761MODULE_PARM_DESC(queues_num50, "number of hw queues in 50xx series"); 1704MODULE_PARM_DESC(queues_num50, "number of hw queues in 50xx series");
1762module_param_named(11n_disable50, iwl50_mod_params.disable_11n, int, 0444); 1705module_param_named(11n_disable50, iwl50_mod_params.disable_11n, int, S_IRUGO);
1763MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality"); 1706MODULE_PARM_DESC(11n_disable50, "disable 50XX 11n functionality");
1764module_param_named(amsdu_size_8K50, iwl50_mod_params.amsdu_size_8K, int, 0444); 1707module_param_named(amsdu_size_8K50, iwl50_mod_params.amsdu_size_8K,
1708 int, S_IRUGO);
1765MODULE_PARM_DESC(amsdu_size_8K50, "enable 8K amsdu size in 50XX series"); 1709MODULE_PARM_DESC(amsdu_size_8K50, "enable 8K amsdu size in 50XX series");
1766module_param_named(fw_restart50, iwl50_mod_params.restart_fw, int, 0444); 1710module_param_named(fw_restart50, iwl50_mod_params.restart_fw, int, S_IRUGO);
1767MODULE_PARM_DESC(fw_restart50, "restart firmware in case of error"); 1711MODULE_PARM_DESC(fw_restart50, "restart firmware in case of error");
diff --git a/drivers/net/wireless/iwlwifi/iwl-6000.c b/drivers/net/wireless/iwlwifi/iwl-6000.c
index 1473452ba22f..70e117f8d0c4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-6000.c
+++ b/drivers/net/wireless/iwlwifi/iwl-6000.c
@@ -44,14 +44,16 @@
44#include "iwl-sta.h" 44#include "iwl-sta.h"
45#include "iwl-helpers.h" 45#include "iwl-helpers.h"
46#include "iwl-5000-hw.h" 46#include "iwl-5000-hw.h"
47#include "iwl-6000-hw.h"
48#include "iwl-agn-led.h"
47 49
48/* Highest firmware API version supported */ 50/* Highest firmware API version supported */
49#define IWL6000_UCODE_API_MAX 4 51#define IWL6000_UCODE_API_MAX 4
50#define IWL6050_UCODE_API_MAX 4 52#define IWL6050_UCODE_API_MAX 4
51 53
52/* Lowest firmware API version supported */ 54/* Lowest firmware API version supported */
53#define IWL6000_UCODE_API_MIN 1 55#define IWL6000_UCODE_API_MIN 4
54#define IWL6050_UCODE_API_MIN 1 56#define IWL6050_UCODE_API_MIN 4
55 57
56#define IWL6000_FW_PRE "iwlwifi-6000-" 58#define IWL6000_FW_PRE "iwlwifi-6000-"
57#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode" 59#define _IWL6000_MODULE_FIRMWARE(api) IWL6000_FW_PRE #api ".ucode"
@@ -71,7 +73,21 @@ static void iwl6000_set_ct_threshold(struct iwl_priv *priv)
71/* NIC configuration for 6000 series */ 73/* NIC configuration for 6000 series */
72static void iwl6000_nic_config(struct iwl_priv *priv) 74static void iwl6000_nic_config(struct iwl_priv *priv)
73{ 75{
74 iwl5000_nic_config(priv); 76 u16 radio_cfg;
77
78 radio_cfg = iwl_eeprom_query16(priv, EEPROM_RADIO_CONFIG);
79
80 /* write radio config values to register */
81 if (EEPROM_RF_CFG_TYPE_MSK(radio_cfg) <= EEPROM_RF_CONFIG_TYPE_MAX)
82 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
83 EEPROM_RF_CFG_TYPE_MSK(radio_cfg) |
84 EEPROM_RF_CFG_STEP_MSK(radio_cfg) |
85 EEPROM_RF_CFG_DASH_MSK(radio_cfg));
86
87 /* set CSR_HW_CONFIG_REG for uCode use */
88 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
89 CSR_HW_IF_CONFIG_REG_BIT_RADIO_SI |
90 CSR_HW_IF_CONFIG_REG_BIT_MAC_SI);
75 91
76 /* no locking required for register write */ 92 /* no locking required for register write */
77 if (priv->cfg->pa_type == IWL_PA_HYBRID) { 93 if (priv->cfg->pa_type == IWL_PA_HYBRID) {
@@ -86,8 +102,109 @@ static void iwl6000_nic_config(struct iwl_priv *priv)
86 /* else do nothing, uCode configured */ 102 /* else do nothing, uCode configured */
87} 103}
88 104
105static struct iwl_sensitivity_ranges iwl6000_sensitivity = {
106 .min_nrg_cck = 97,
107 .max_nrg_cck = 0, /* not used, set to 0 */
108 .auto_corr_min_ofdm = 80,
109 .auto_corr_min_ofdm_mrc = 128,
110 .auto_corr_min_ofdm_x1 = 105,
111 .auto_corr_min_ofdm_mrc_x1 = 192,
112
113 .auto_corr_max_ofdm = 145,
114 .auto_corr_max_ofdm_mrc = 232,
115 .auto_corr_max_ofdm_x1 = 145,
116 .auto_corr_max_ofdm_mrc_x1 = 232,
117
118 .auto_corr_min_cck = 125,
119 .auto_corr_max_cck = 175,
120 .auto_corr_min_cck_mrc = 160,
121 .auto_corr_max_cck_mrc = 310,
122 .nrg_th_cck = 97,
123 .nrg_th_ofdm = 100,
124
125 .barker_corr_th_min = 190,
126 .barker_corr_th_min_mrc = 390,
127 .nrg_th_cca = 62,
128};
129
130static int iwl6000_hw_set_hw_params(struct iwl_priv *priv)
131{
132 if (priv->cfg->mod_params->num_of_queues >= IWL_MIN_NUM_QUEUES &&
133 priv->cfg->mod_params->num_of_queues <= IWL50_NUM_QUEUES)
134 priv->cfg->num_of_queues =
135 priv->cfg->mod_params->num_of_queues;
136
137 priv->hw_params.max_txq_num = priv->cfg->num_of_queues;
138 priv->hw_params.dma_chnl_num = FH50_TCSR_CHNL_NUM;
139 priv->hw_params.scd_bc_tbls_size =
140 priv->cfg->num_of_queues *
141 sizeof(struct iwl5000_scd_bc_tbl);
142 priv->hw_params.tfd_size = sizeof(struct iwl_tfd);
143 priv->hw_params.max_stations = IWL5000_STATION_COUNT;
144 priv->hw_params.bcast_sta_id = IWL5000_BROADCAST_ID;
145
146 priv->hw_params.max_data_size = IWL60_RTC_DATA_SIZE;
147 priv->hw_params.max_inst_size = IWL60_RTC_INST_SIZE;
148
149 priv->hw_params.max_bsm_size = 0;
150 priv->hw_params.ht40_channel = BIT(IEEE80211_BAND_2GHZ) |
151 BIT(IEEE80211_BAND_5GHZ);
152 priv->hw_params.rx_wrt_ptr_reg = FH_RSCSR_CHNL0_WPTR;
153
154 priv->hw_params.tx_chains_num = num_of_ant(priv->cfg->valid_tx_ant);
155 priv->hw_params.rx_chains_num = num_of_ant(priv->cfg->valid_rx_ant);
156 priv->hw_params.valid_tx_ant = priv->cfg->valid_tx_ant;
157 priv->hw_params.valid_rx_ant = priv->cfg->valid_rx_ant;
158
159 if (priv->cfg->ops->lib->temp_ops.set_ct_kill)
160 priv->cfg->ops->lib->temp_ops.set_ct_kill(priv);
161
162 /* Set initial sensitivity parameters */
163 /* Set initial calibration set */
164 priv->hw_params.sens = &iwl6000_sensitivity;
165 priv->hw_params.calib_init_cfg =
166 BIT(IWL_CALIB_XTAL) |
167 BIT(IWL_CALIB_LO) |
168 BIT(IWL_CALIB_TX_IQ) |
169 BIT(IWL_CALIB_TX_IQ_PERD) |
170 BIT(IWL_CALIB_BASE_BAND);
171
172 return 0;
173}
174
175static int iwl6000_hw_channel_switch(struct iwl_priv *priv, u16 channel)
176{
177 struct iwl6000_channel_switch_cmd cmd;
178 const struct iwl_channel_info *ch_info;
179 struct iwl_host_cmd hcmd = {
180 .id = REPLY_CHANNEL_SWITCH,
181 .len = sizeof(cmd),
182 .flags = CMD_SIZE_HUGE,
183 .data = &cmd,
184 };
185
186 IWL_DEBUG_11H(priv, "channel switch from %d to %d\n",
187 priv->active_rxon.channel, channel);
188
189 cmd.band = priv->band == IEEE80211_BAND_2GHZ;
190 cmd.channel = cpu_to_le16(channel);
191 cmd.rxon_flags = priv->active_rxon.flags;
192 cmd.rxon_filter_flags = priv->active_rxon.filter_flags;
193 cmd.switch_time = cpu_to_le32(priv->ucode_beacon_time);
194 ch_info = iwl_get_channel_info(priv, priv->band, channel);
195 if (ch_info)
196 cmd.expect_beacon = is_channel_radar(ch_info);
197 else {
198 IWL_ERR(priv, "invalid channel switch from %u to %u\n",
199 priv->active_rxon.channel, channel);
200 return -EFAULT;
201 }
202
203 return iwl_send_cmd_sync(priv, &hcmd);
204}
205
89static struct iwl_lib_ops iwl6000_lib = { 206static struct iwl_lib_ops iwl6000_lib = {
90 .set_hw_params = iwl5000_hw_set_hw_params, 207 .set_hw_params = iwl6000_hw_set_hw_params,
91 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl, 208 .txq_update_byte_cnt_tbl = iwl5000_txq_update_byte_cnt_tbl,
92 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl, 209 .txq_inval_byte_cnt_tbl = iwl5000_txq_inval_byte_cnt_tbl,
93 .txq_set_sched = iwl5000_txq_set_sched, 210 .txq_set_sched = iwl5000_txq_set_sched,
@@ -106,10 +223,10 @@ static struct iwl_lib_ops iwl6000_lib = {
106 .alive_notify = iwl5000_alive_notify, 223 .alive_notify = iwl5000_alive_notify,
107 .send_tx_power = iwl5000_send_tx_power, 224 .send_tx_power = iwl5000_send_tx_power,
108 .update_chain_flags = iwl_update_chain_flags, 225 .update_chain_flags = iwl_update_chain_flags,
226 .set_channel_switch = iwl6000_hw_channel_switch,
109 .apm_ops = { 227 .apm_ops = {
110 .init = iwl5000_apm_init, 228 .init = iwl_apm_init,
111 .reset = iwl5000_apm_reset, 229 .stop = iwl_apm_stop,
112 .stop = iwl5000_apm_stop,
113 .config = iwl6000_nic_config, 230 .config = iwl6000_nic_config,
114 .set_pwr_src = iwl_set_pwr_src, 231 .set_pwr_src = iwl_set_pwr_src,
115 }, 232 },
@@ -139,18 +256,27 @@ static struct iwl_lib_ops iwl6000_lib = {
139 }, 256 },
140}; 257};
141 258
142static struct iwl_hcmd_utils_ops iwl6000_hcmd_utils = { 259static struct iwl_ops iwl6000_ops = {
260 .ucode = &iwl5000_ucode,
261 .lib = &iwl6000_lib,
262 .hcmd = &iwl5000_hcmd,
263 .utils = &iwl5000_hcmd_utils,
264 .led = &iwlagn_led_ops,
265};
266
267static struct iwl_hcmd_utils_ops iwl6050_hcmd_utils = {
143 .get_hcmd_size = iwl5000_get_hcmd_size, 268 .get_hcmd_size = iwl5000_get_hcmd_size,
144 .build_addsta_hcmd = iwl5000_build_addsta_hcmd, 269 .build_addsta_hcmd = iwl5000_build_addsta_hcmd,
145 .rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag, 270 .rts_tx_cmd_flag = iwl5000_rts_tx_cmd_flag,
146 .calc_rssi = iwl5000_calc_rssi, 271 .calc_rssi = iwl5000_calc_rssi,
147}; 272};
148 273
149static struct iwl_ops iwl6000_ops = { 274static struct iwl_ops iwl6050_ops = {
150 .ucode = &iwl5000_ucode, 275 .ucode = &iwl5000_ucode,
151 .lib = &iwl6000_lib, 276 .lib = &iwl6000_lib,
152 .hcmd = &iwl5000_hcmd, 277 .hcmd = &iwl5000_hcmd,
153 .utils = &iwl6000_hcmd_utils, 278 .utils = &iwl6050_hcmd_utils,
279 .led = &iwlagn_led_ops,
154}; 280};
155 281
156 282
@@ -165,17 +291,84 @@ struct iwl_cfg iwl6000h_2agn_cfg = {
165 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 291 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
166 .ops = &iwl6000_ops, 292 .ops = &iwl6000_ops,
167 .eeprom_size = OTP_LOW_IMAGE_SIZE, 293 .eeprom_size = OTP_LOW_IMAGE_SIZE,
168 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 294 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
169 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 295 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
296 .num_of_queues = IWL50_NUM_QUEUES,
297 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
170 .mod_params = &iwl50_mod_params, 298 .mod_params = &iwl50_mod_params,
171 .valid_tx_ant = ANT_AB, 299 .valid_tx_ant = ANT_AB,
172 .valid_rx_ant = ANT_AB, 300 .valid_rx_ant = ANT_AB,
173 .need_pll_cfg = false, 301 .pll_cfg_val = 0,
302 .set_l0s = true,
303 .use_bsm = false,
174 .pa_type = IWL_PA_HYBRID, 304 .pa_type = IWL_PA_HYBRID,
175 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 305 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
176 .shadow_ram_support = true, 306 .shadow_ram_support = true,
177 .ht_greenfield_support = true, 307 .ht_greenfield_support = true,
308 .led_compensation = 51,
178 .use_rts_for_ht = true, /* use rts/cts protection */ 309 .use_rts_for_ht = true, /* use rts/cts protection */
310 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
311 .supports_idle = true,
312 .adv_thermal_throttle = true,
313 .support_ct_kill_exit = true,
314};
315
316struct iwl_cfg iwl6000h_2abg_cfg = {
317 .name = "6000 Series 2x2 ABG",
318 .fw_name_pre = IWL6000_FW_PRE,
319 .ucode_api_max = IWL6000_UCODE_API_MAX,
320 .ucode_api_min = IWL6000_UCODE_API_MIN,
321 .sku = IWL_SKU_A|IWL_SKU_G,
322 .ops = &iwl6000_ops,
323 .eeprom_size = OTP_LOW_IMAGE_SIZE,
324 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
325 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
326 .num_of_queues = IWL50_NUM_QUEUES,
327 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
328 .mod_params = &iwl50_mod_params,
329 .valid_tx_ant = ANT_AB,
330 .valid_rx_ant = ANT_AB,
331 .pll_cfg_val = 0,
332 .set_l0s = true,
333 .use_bsm = false,
334 .pa_type = IWL_PA_HYBRID,
335 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
336 .shadow_ram_support = true,
337 .ht_greenfield_support = true,
338 .led_compensation = 51,
339 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
340 .supports_idle = true,
341 .adv_thermal_throttle = true,
342 .support_ct_kill_exit = true,
343};
344
345struct iwl_cfg iwl6000h_2bg_cfg = {
346 .name = "6000 Series 2x2 BG",
347 .fw_name_pre = IWL6000_FW_PRE,
348 .ucode_api_max = IWL6000_UCODE_API_MAX,
349 .ucode_api_min = IWL6000_UCODE_API_MIN,
350 .sku = IWL_SKU_G,
351 .ops = &iwl6000_ops,
352 .eeprom_size = OTP_LOW_IMAGE_SIZE,
353 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
354 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
355 .num_of_queues = IWL50_NUM_QUEUES,
356 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
357 .mod_params = &iwl50_mod_params,
358 .valid_tx_ant = ANT_AB,
359 .valid_rx_ant = ANT_AB,
360 .pll_cfg_val = 0,
361 .set_l0s = true,
362 .use_bsm = false,
363 .pa_type = IWL_PA_HYBRID,
364 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
365 .shadow_ram_support = true,
366 .ht_greenfield_support = true,
367 .led_compensation = 51,
368 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
369 .supports_idle = true,
370 .adv_thermal_throttle = true,
371 .support_ct_kill_exit = true,
179}; 372};
180 373
181/* 374/*
@@ -189,17 +382,84 @@ struct iwl_cfg iwl6000i_2agn_cfg = {
189 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 382 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
190 .ops = &iwl6000_ops, 383 .ops = &iwl6000_ops,
191 .eeprom_size = OTP_LOW_IMAGE_SIZE, 384 .eeprom_size = OTP_LOW_IMAGE_SIZE,
192 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 385 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
193 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 386 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
387 .num_of_queues = IWL50_NUM_QUEUES,
388 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
194 .mod_params = &iwl50_mod_params, 389 .mod_params = &iwl50_mod_params,
195 .valid_tx_ant = ANT_BC, 390 .valid_tx_ant = ANT_BC,
196 .valid_rx_ant = ANT_BC, 391 .valid_rx_ant = ANT_BC,
197 .need_pll_cfg = false, 392 .pll_cfg_val = 0,
393 .set_l0s = true,
394 .use_bsm = false,
198 .pa_type = IWL_PA_INTERNAL, 395 .pa_type = IWL_PA_INTERNAL,
199 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 396 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
200 .shadow_ram_support = true, 397 .shadow_ram_support = true,
201 .ht_greenfield_support = true, 398 .ht_greenfield_support = true,
399 .led_compensation = 51,
202 .use_rts_for_ht = true, /* use rts/cts protection */ 400 .use_rts_for_ht = true, /* use rts/cts protection */
401 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
402 .supports_idle = true,
403 .adv_thermal_throttle = true,
404 .support_ct_kill_exit = true,
405};
406
407struct iwl_cfg iwl6000i_2abg_cfg = {
408 .name = "6000 Series 2x2 ABG",
409 .fw_name_pre = IWL6000_FW_PRE,
410 .ucode_api_max = IWL6000_UCODE_API_MAX,
411 .ucode_api_min = IWL6000_UCODE_API_MIN,
412 .sku = IWL_SKU_A|IWL_SKU_G,
413 .ops = &iwl6000_ops,
414 .eeprom_size = OTP_LOW_IMAGE_SIZE,
415 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
416 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
417 .num_of_queues = IWL50_NUM_QUEUES,
418 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
419 .mod_params = &iwl50_mod_params,
420 .valid_tx_ant = ANT_BC,
421 .valid_rx_ant = ANT_BC,
422 .pll_cfg_val = 0,
423 .set_l0s = true,
424 .use_bsm = false,
425 .pa_type = IWL_PA_INTERNAL,
426 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
427 .shadow_ram_support = true,
428 .ht_greenfield_support = true,
429 .led_compensation = 51,
430 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
431 .supports_idle = true,
432 .adv_thermal_throttle = true,
433 .support_ct_kill_exit = true,
434};
435
436struct iwl_cfg iwl6000i_2bg_cfg = {
437 .name = "6000 Series 2x2 BG",
438 .fw_name_pre = IWL6000_FW_PRE,
439 .ucode_api_max = IWL6000_UCODE_API_MAX,
440 .ucode_api_min = IWL6000_UCODE_API_MIN,
441 .sku = IWL_SKU_G,
442 .ops = &iwl6000_ops,
443 .eeprom_size = OTP_LOW_IMAGE_SIZE,
444 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
445 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
446 .num_of_queues = IWL50_NUM_QUEUES,
447 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
448 .mod_params = &iwl50_mod_params,
449 .valid_tx_ant = ANT_BC,
450 .valid_rx_ant = ANT_BC,
451 .pll_cfg_val = 0,
452 .set_l0s = true,
453 .use_bsm = false,
454 .pa_type = IWL_PA_INTERNAL,
455 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
456 .shadow_ram_support = true,
457 .ht_greenfield_support = true,
458 .led_compensation = 51,
459 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
460 .supports_idle = true,
461 .adv_thermal_throttle = true,
462 .support_ct_kill_exit = true,
203}; 463};
204 464
205struct iwl_cfg iwl6050_2agn_cfg = { 465struct iwl_cfg iwl6050_2agn_cfg = {
@@ -208,19 +468,60 @@ struct iwl_cfg iwl6050_2agn_cfg = {
208 .ucode_api_max = IWL6050_UCODE_API_MAX, 468 .ucode_api_max = IWL6050_UCODE_API_MAX,
209 .ucode_api_min = IWL6050_UCODE_API_MIN, 469 .ucode_api_min = IWL6050_UCODE_API_MIN,
210 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 470 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
211 .ops = &iwl6000_ops, 471 .ops = &iwl6050_ops,
212 .eeprom_size = OTP_LOW_IMAGE_SIZE, 472 .eeprom_size = OTP_LOW_IMAGE_SIZE,
213 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 473 .eeprom_ver = EEPROM_6050_EEPROM_VERSION,
214 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 474 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
475 .num_of_queues = IWL50_NUM_QUEUES,
476 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
215 .mod_params = &iwl50_mod_params, 477 .mod_params = &iwl50_mod_params,
216 .valid_tx_ant = ANT_AB, 478 .valid_tx_ant = ANT_AB,
217 .valid_rx_ant = ANT_AB, 479 .valid_rx_ant = ANT_AB,
218 .need_pll_cfg = false, 480 .pll_cfg_val = 0,
481 .set_l0s = true,
482 .use_bsm = false,
219 .pa_type = IWL_PA_SYSTEM, 483 .pa_type = IWL_PA_SYSTEM,
220 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 484 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
221 .shadow_ram_support = true, 485 .shadow_ram_support = true,
222 .ht_greenfield_support = true, 486 .ht_greenfield_support = true,
487 .led_compensation = 51,
223 .use_rts_for_ht = true, /* use rts/cts protection */ 488 .use_rts_for_ht = true, /* use rts/cts protection */
489 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
490 .supports_idle = true,
491 .adv_thermal_throttle = true,
492 .support_ct_kill_exit = true,
493 .support_sm_ps = true,
494 .support_wimax_coexist = true,
495};
496
497struct iwl_cfg iwl6050_2abg_cfg = {
498 .name = "6050 Series 2x2 ABG",
499 .fw_name_pre = IWL6050_FW_PRE,
500 .ucode_api_max = IWL6050_UCODE_API_MAX,
501 .ucode_api_min = IWL6050_UCODE_API_MIN,
502 .sku = IWL_SKU_A|IWL_SKU_G,
503 .ops = &iwl6050_ops,
504 .eeprom_size = OTP_LOW_IMAGE_SIZE,
505 .eeprom_ver = EEPROM_6050_EEPROM_VERSION,
506 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
507 .num_of_queues = IWL50_NUM_QUEUES,
508 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
509 .mod_params = &iwl50_mod_params,
510 .valid_tx_ant = ANT_AB,
511 .valid_rx_ant = ANT_AB,
512 .pll_cfg_val = 0,
513 .set_l0s = true,
514 .use_bsm = false,
515 .pa_type = IWL_PA_SYSTEM,
516 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
517 .shadow_ram_support = true,
518 .ht_greenfield_support = true,
519 .led_compensation = 51,
520 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
521 .supports_idle = true,
522 .adv_thermal_throttle = true,
523 .support_ct_kill_exit = true,
524 .support_wimax_coexist = true,
224}; 525};
225 526
226struct iwl_cfg iwl6000_3agn_cfg = { 527struct iwl_cfg iwl6000_3agn_cfg = {
@@ -231,17 +532,26 @@ struct iwl_cfg iwl6000_3agn_cfg = {
231 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 532 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
232 .ops = &iwl6000_ops, 533 .ops = &iwl6000_ops,
233 .eeprom_size = OTP_LOW_IMAGE_SIZE, 534 .eeprom_size = OTP_LOW_IMAGE_SIZE,
234 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 535 .eeprom_ver = EEPROM_6000_EEPROM_VERSION,
235 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 536 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
537 .num_of_queues = IWL50_NUM_QUEUES,
538 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
236 .mod_params = &iwl50_mod_params, 539 .mod_params = &iwl50_mod_params,
237 .valid_tx_ant = ANT_ABC, 540 .valid_tx_ant = ANT_ABC,
238 .valid_rx_ant = ANT_ABC, 541 .valid_rx_ant = ANT_ABC,
239 .need_pll_cfg = false, 542 .pll_cfg_val = 0,
543 .set_l0s = true,
544 .use_bsm = false,
240 .pa_type = IWL_PA_SYSTEM, 545 .pa_type = IWL_PA_SYSTEM,
241 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 546 .max_ll_items = OTP_MAX_LL_ITEMS_6x00,
242 .shadow_ram_support = true, 547 .shadow_ram_support = true,
243 .ht_greenfield_support = true, 548 .ht_greenfield_support = true,
549 .led_compensation = 51,
244 .use_rts_for_ht = true, /* use rts/cts protection */ 550 .use_rts_for_ht = true, /* use rts/cts protection */
551 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
552 .supports_idle = true,
553 .adv_thermal_throttle = true,
554 .support_ct_kill_exit = true,
245}; 555};
246 556
247struct iwl_cfg iwl6050_3agn_cfg = { 557struct iwl_cfg iwl6050_3agn_cfg = {
@@ -250,19 +560,30 @@ struct iwl_cfg iwl6050_3agn_cfg = {
250 .ucode_api_max = IWL6050_UCODE_API_MAX, 560 .ucode_api_max = IWL6050_UCODE_API_MAX,
251 .ucode_api_min = IWL6050_UCODE_API_MIN, 561 .ucode_api_min = IWL6050_UCODE_API_MIN,
252 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N, 562 .sku = IWL_SKU_A|IWL_SKU_G|IWL_SKU_N,
253 .ops = &iwl6000_ops, 563 .ops = &iwl6050_ops,
254 .eeprom_size = OTP_LOW_IMAGE_SIZE, 564 .eeprom_size = OTP_LOW_IMAGE_SIZE,
255 .eeprom_ver = EEPROM_5000_EEPROM_VERSION, 565 .eeprom_ver = EEPROM_6050_EEPROM_VERSION,
256 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION, 566 .eeprom_calib_ver = EEPROM_5000_TX_POWER_VERSION,
567 .num_of_queues = IWL50_NUM_QUEUES,
568 .num_of_ampdu_queues = IWL50_NUM_AMPDU_QUEUES,
257 .mod_params = &iwl50_mod_params, 569 .mod_params = &iwl50_mod_params,
258 .valid_tx_ant = ANT_ABC, 570 .valid_tx_ant = ANT_ABC,
259 .valid_rx_ant = ANT_ABC, 571 .valid_rx_ant = ANT_ABC,
260 .need_pll_cfg = false, 572 .pll_cfg_val = 0,
573 .set_l0s = true,
574 .use_bsm = false,
261 .pa_type = IWL_PA_SYSTEM, 575 .pa_type = IWL_PA_SYSTEM,
262 .max_ll_items = OTP_MAX_LL_ITEMS_6x00, 576 .max_ll_items = OTP_MAX_LL_ITEMS_6x50,
263 .shadow_ram_support = true, 577 .shadow_ram_support = true,
264 .ht_greenfield_support = true, 578 .ht_greenfield_support = true,
579 .led_compensation = 51,
265 .use_rts_for_ht = true, /* use rts/cts protection */ 580 .use_rts_for_ht = true, /* use rts/cts protection */
581 .chain_noise_num_beacons = IWL_CAL_NUM_BEACONS,
582 .supports_idle = true,
583 .adv_thermal_throttle = true,
584 .support_ct_kill_exit = true,
585 .support_sm_ps = true,
586 .support_wimax_coexist = true,
266}; 587};
267 588
268MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX)); 589MODULE_FIRMWARE(IWL6000_MODULE_FIRMWARE(IWL6000_UCODE_API_MAX));
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.c b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
new file mode 100644
index 000000000000..3bccba20f6da
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.c
@@ -0,0 +1,85 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/init.h>
30#include <linux/pci.h>
31#include <linux/dma-mapping.h>
32#include <linux/delay.h>
33#include <linux/skbuff.h>
34#include <linux/netdevice.h>
35#include <linux/wireless.h>
36#include <net/mac80211.h>
37#include <linux/etherdevice.h>
38#include <asm/unaligned.h>
39
40#include "iwl-commands.h"
41#include "iwl-dev.h"
42#include "iwl-core.h"
43#include "iwl-io.h"
44#include "iwl-agn-led.h"
45
46/* Send led command */
47static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd)
48{
49 struct iwl_host_cmd cmd = {
50 .id = REPLY_LEDS_CMD,
51 .len = sizeof(struct iwl_led_cmd),
52 .data = led_cmd,
53 .flags = CMD_ASYNC,
54 .callback = NULL,
55 };
56 u32 reg;
57
58 reg = iwl_read32(priv, CSR_LED_REG);
59 if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
60 iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
61
62 return iwl_send_cmd(priv, &cmd);
63}
64
65/* Set led register off */
66static int iwl_led_on_reg(struct iwl_priv *priv)
67{
68 IWL_DEBUG_LED(priv, "led on\n");
69 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
70 return 0;
71}
72
73/* Set led register off */
74static int iwl_led_off_reg(struct iwl_priv *priv)
75{
76 IWL_DEBUG_LED(priv, "LED Reg off\n");
77 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF);
78 return 0;
79}
80
81const struct iwl_led_ops iwlagn_led_ops = {
82 .cmd = iwl_send_led_cmd,
83 .on = iwl_led_on_reg,
84 .off = iwl_led_off_reg,
85};
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-led.h b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
new file mode 100644
index 000000000000..ab55f92a161d
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-led.h
@@ -0,0 +1,32 @@
1/******************************************************************************
2 *
3 * Copyright(c) 2003 - 2009 Intel Corporation. All rights reserved.
4 *
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of version 2 of the GNU General Public License as
7 * published by the Free Software Foundation.
8 *
9 * This program is distributed in the hope that it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
12 * more details.
13 *
14 * You should have received a copy of the GNU General Public License along with
15 * this program; if not, write to the Free Software Foundation, Inc.,
16 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
17 *
18 * The full GNU General Public License is included in this distribution in the
19 * file called LICENSE.
20 *
21 * Contact Information:
22 * Intel Linux Wireless <ilw@linux.intel.com>
23 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
24 *
25 *****************************************************************************/
26
27#ifndef __iwl_agn_led_h__
28#define __iwl_agn_led_h__
29
/* LED ops table for AGN devices; defined in iwl-agn-led.c */
30extern const struct iwl_led_ops iwlagn_led_ops;
31
32#endif /* __iwl_agn_led_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
index 81726ee32858..43edd8fd4405 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.c
@@ -75,104 +75,6 @@ static const u8 ant_toggle_lookup[] = {
75 /*ANT_ABC -> */ ANT_ABC, 75 /*ANT_ABC -> */ ANT_ABC,
76}; 76};
77 77
78/**
79 * struct iwl_rate_scale_data -- tx success history for one rate
80 */
81struct iwl_rate_scale_data {
82 u64 data; /* bitmap of successful frames */
83 s32 success_counter; /* number of frames successful */
84 s32 success_ratio; /* per-cent * 128 */
85 s32 counter; /* number of frames attempted */
86 s32 average_tpt; /* success ratio * expected throughput */
87 unsigned long stamp;
88};
89
90/**
91 * struct iwl_scale_tbl_info -- tx params and success history for all rates
92 *
93 * There are two of these in struct iwl_lq_sta,
94 * one for "active", and one for "search".
95 */
96struct iwl_scale_tbl_info {
97 enum iwl_table_type lq_type;
98 u8 ant_type;
99 u8 is_SGI; /* 1 = short guard interval */
100 u8 is_ht40; /* 1 = 40 MHz channel width */
101 u8 is_dup; /* 1 = duplicated data streams */
102 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
103 u8 max_search; /* maximun number of tables we can search */
104 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
105 u32 current_rate; /* rate_n_flags, uCode API format */
106 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
107};
108
109struct iwl_traffic_load {
110 unsigned long time_stamp; /* age of the oldest statistics */
111 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
112 * slice */
113 u32 total; /* total num of packets during the
114 * last TID_MAX_TIME_DIFF */
115 u8 queue_count; /* number of queues that has
116 * been used since the last cleanup */
117 u8 head; /* start of the circular buffer */
118};
119
120/**
121 * struct iwl_lq_sta -- driver's rate scaling private structure
122 *
123 * Pointer to this gets passed back and forth between driver and mac80211.
124 */
125struct iwl_lq_sta {
126 u8 active_tbl; /* index of active table, range 0-1 */
127 u8 enable_counter; /* indicates HT mode */
128 u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
129 u8 search_better_tbl; /* 1: currently trying alternate mode */
130 s32 last_tpt;
131
132 /* The following determine when to search for a new mode */
133 u32 table_count_limit;
134 u32 max_failure_limit; /* # failed frames before new search */
135 u32 max_success_limit; /* # successful frames before new search */
136 u32 table_count;
137 u32 total_failed; /* total failed frames, any/all rates */
138 u32 total_success; /* total successful frames, any/all rates */
139 u64 flush_timer; /* time staying in mode before new search */
140
141 u8 action_counter; /* # mode-switch actions tried */
142 u8 is_green;
143 u8 is_dup;
144 enum ieee80211_band band;
145 u8 ibss_sta_added;
146
147 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
148 u32 supp_rates;
149 u16 active_legacy_rate;
150 u16 active_siso_rate;
151 u16 active_mimo2_rate;
152 u16 active_mimo3_rate;
153 u16 active_rate_basic;
154 s8 max_rate_idx; /* Max rate set by user */
155 u8 missed_rate_counter;
156
157 struct iwl_link_quality_cmd lq;
158 struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
159 struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
160 u8 tx_agg_tid_en;
161#ifdef CONFIG_MAC80211_DEBUGFS
162 struct dentry *rs_sta_dbgfs_scale_table_file;
163 struct dentry *rs_sta_dbgfs_stats_table_file;
164 struct dentry *rs_sta_dbgfs_rate_scale_data_file;
165 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
166 u32 dbg_fixed_rate;
167#endif
168 struct iwl_priv *drv;
169
170 /* used to be in sta_info */
171 int last_txrate_idx;
172 /* last tx rate_n_flags */
173 u32 last_rate_n_flags;
174};
175
176static void rs_rate_scale_perform(struct iwl_priv *priv, 78static void rs_rate_scale_perform(struct iwl_priv *priv,
177 struct sk_buff *skb, 79 struct sk_buff *skb,
178 struct ieee80211_sta *sta, 80 struct ieee80211_sta *sta,
@@ -190,84 +92,78 @@ static void rs_dbgfs_set_mcs(struct iwl_lq_sta *lq_sta,
190{} 92{}
191#endif 93#endif
192 94
193/* 95/**
194 * Expected throughput metrics for following rates: 96 * The following tables contain the expected throughput metrics for all rates
195 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits 97 *
196 * "G" is the only table that supports CCK (the first 4 rates). 98 * 1, 2, 5.5, 11, 6, 9, 12, 18, 24, 36, 48, 54, 60 MBits
99 *
100 * where invalid entries are zeros.
101 *
102 * CCK rates are only valid in legacy table and will only be used in G
103 * (2.4 GHz) band.
197 */ 104 */
198 105
199static s32 expected_tpt_A[IWL_RATE_COUNT] = { 106static s32 expected_tpt_legacy[IWL_RATE_COUNT] = {
200 0, 0, 0, 0, 40, 57, 72, 98, 121, 154, 177, 186, 186 107 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 0
201};
202
203static s32 expected_tpt_G[IWL_RATE_COUNT] = {
204 7, 13, 35, 58, 40, 57, 72, 98, 121, 154, 177, 186, 186
205};
206
207static s32 expected_tpt_siso20MHz[IWL_RATE_COUNT] = {
208 0, 0, 0, 0, 42, 42, 76, 102, 124, 159, 183, 193, 202
209};
210
211static s32 expected_tpt_siso20MHzSGI[IWL_RATE_COUNT] = {
212 0, 0, 0, 0, 46, 46, 82, 110, 132, 168, 192, 202, 211
213};
214
215static s32 expected_tpt_mimo2_20MHz[IWL_RATE_COUNT] = {
216 0, 0, 0, 0, 74, 74, 123, 155, 179, 214, 236, 244, 251
217};
218
219static s32 expected_tpt_mimo2_20MHzSGI[IWL_RATE_COUNT] = {
220 0, 0, 0, 0, 81, 81, 131, 164, 188, 222, 243, 251, 257
221}; 108};
222 109
223static s32 expected_tpt_siso40MHz[IWL_RATE_COUNT] = { 110static s32 expected_tpt_siso20MHz[4][IWL_RATE_COUNT] = {
224 0, 0, 0, 0, 77, 77, 127, 160, 184, 220, 242, 250, 257 111 {0, 0, 0, 0, 42, 0, 76, 102, 124, 158, 183, 193, 202}, /* Norm */
112 {0, 0, 0, 0, 46, 0, 82, 110, 132, 167, 192, 202, 210}, /* SGI */
113 {0, 0, 0, 0, 48, 0, 93, 135, 176, 251, 319, 351, 381}, /* AGG */
114 {0, 0, 0, 0, 53, 0, 102, 149, 193, 275, 348, 381, 413}, /* AGG+SGI */
225}; 115};
226 116
227static s32 expected_tpt_siso40MHzSGI[IWL_RATE_COUNT] = { 117static s32 expected_tpt_siso40MHz[4][IWL_RATE_COUNT] = {
228 0, 0, 0, 0, 83, 83, 135, 169, 193, 229, 250, 257, 264 118 {0, 0, 0, 0, 77, 0, 127, 160, 184, 220, 242, 250, 257}, /* Norm */
119 {0, 0, 0, 0, 83, 0, 135, 169, 193, 229, 250, 257, 264}, /* SGI */
120 {0, 0, 0, 0, 96, 0, 182, 259, 328, 451, 553, 598, 640}, /* AGG */
121 {0, 0, 0, 0, 106, 0, 199, 282, 357, 487, 593, 640, 683}, /* AGG+SGI */
229}; 122};
230 123
231static s32 expected_tpt_mimo2_40MHz[IWL_RATE_COUNT] = { 124static s32 expected_tpt_mimo2_20MHz[4][IWL_RATE_COUNT] = {
232 0, 0, 0, 0, 123, 123, 182, 214, 235, 264, 279, 285, 289 125 {0, 0, 0, 0, 74, 0, 123, 155, 179, 213, 235, 243, 250}, /* Norm */
126 {0, 0, 0, 0, 81, 0, 131, 164, 187, 221, 242, 250, 256}, /* SGI */
127 {0, 0, 0, 0, 92, 0, 175, 250, 317, 436, 534, 578, 619}, /* AGG */
128 {0, 0, 0, 0, 102, 0, 192, 273, 344, 470, 573, 619, 660}, /* AGG+SGI*/
233}; 129};
234 130
235static s32 expected_tpt_mimo2_40MHzSGI[IWL_RATE_COUNT] = { 131static s32 expected_tpt_mimo2_40MHz[4][IWL_RATE_COUNT] = {
236 0, 0, 0, 0, 131, 131, 191, 222, 242, 270, 284, 289, 293 132 {0, 0, 0, 0, 123, 0, 182, 214, 235, 264, 279, 285, 289}, /* Norm */
133 {0, 0, 0, 0, 131, 0, 191, 222, 242, 270, 284, 289, 293}, /* SGI */
134 {0, 0, 0, 0, 180, 0, 327, 446, 545, 708, 828, 878, 922}, /* AGG */
135 {0, 0, 0, 0, 197, 0, 355, 481, 584, 752, 872, 922, 966}, /* AGG+SGI */
237}; 136};
238 137
239/* Expected throughput metric MIMO3 */ 138static s32 expected_tpt_mimo3_20MHz[4][IWL_RATE_COUNT] = {
240static s32 expected_tpt_mimo3_20MHz[IWL_RATE_COUNT] = { 139 {0, 0, 0, 0, 99, 0, 153, 186, 208, 239, 256, 263, 268}, /* Norm */
241 0, 0, 0, 0, 99, 99, 153, 186, 208, 239, 256, 263, 268 140 {0, 0, 0, 0, 106, 0, 162, 194, 215, 246, 262, 268, 273}, /* SGI */
141 {0, 0, 0, 0, 134, 0, 249, 346, 431, 574, 685, 732, 775}, /* AGG */
142 {0, 0, 0, 0, 148, 0, 272, 376, 465, 614, 727, 775, 818}, /* AGG+SGI */
242}; 143};
243 144
244static s32 expected_tpt_mimo3_20MHzSGI[IWL_RATE_COUNT] = { 145static s32 expected_tpt_mimo3_40MHz[4][IWL_RATE_COUNT] = {
245 0, 0, 0, 0, 106, 106, 162, 194, 215, 246, 262, 268, 273 146 {0, 0, 0, 0, 152, 0, 211, 239, 255, 279, 290, 294, 297}, /* Norm */
246}; 147 {0, 0, 0, 0, 160, 0, 219, 245, 261, 284, 294, 297, 300}, /* SGI */
247 148 {0, 0, 0, 0, 254, 0, 443, 584, 695, 868, 984, 1030, 1070}, /* AGG */
248static s32 expected_tpt_mimo3_40MHz[IWL_RATE_COUNT] = { 149 {0, 0, 0, 0, 277, 0, 478, 624, 737, 911, 1026, 1070, 1109}, /* AGG+SGI */
249 0, 0, 0, 0, 152, 152, 211, 239, 255, 279, 290, 294, 297
250};
251
252static s32 expected_tpt_mimo3_40MHzSGI[IWL_RATE_COUNT] = {
253 0, 0, 0, 0, 160, 160, 219, 245, 261, 284, 294, 297, 300
254}; 150};
255 151
256/* mbps, mcs */ 152/* mbps, mcs */
257const static struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = { 153const static struct iwl_rate_mcs_info iwl_rate_mcs[IWL_RATE_COUNT] = {
258 {"1", ""}, 154 { "1", "BPSK DSSS"},
259 {"2", ""}, 155 { "2", "QPSK DSSS"},
260 {"5.5", ""}, 156 {"5.5", "BPSK CCK"},
261 {"11", ""}, 157 { "11", "QPSK CCK"},
262 {"6", "BPSK 1/2"}, 158 { "6", "BPSK 1/2"},
263 {"9", "BPSK 1/2"}, 159 { "9", "BPSK 1/2"},
264 {"12", "QPSK 1/2"}, 160 { "12", "QPSK 1/2"},
265 {"18", "QPSK 3/4"}, 161 { "18", "QPSK 3/4"},
266 {"24", "16QAM 1/2"}, 162 { "24", "16QAM 1/2"},
267 {"36", "16QAM 3/4"}, 163 { "36", "16QAM 3/4"},
268 {"48", "64QAM 2/3"}, 164 { "48", "64QAM 2/3"},
269 {"54", "64QAM 3/4"}, 165 { "54", "64QAM 3/4"},
270 {"60", "64QAM 5/6"} 166 { "60", "64QAM 5/6"},
271}; 167};
272 168
273#define MCS_INDEX_PER_STREAM (8) 169#define MCS_INDEX_PER_STREAM (8)
@@ -444,7 +340,7 @@ static inline int get_num_of_ant_from_rate(u32 rate_n_flags)
444 * packets. 340 * packets.
445 */ 341 */
446static int rs_collect_tx_data(struct iwl_rate_scale_data *windows, 342static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
447 int scale_index, s32 tpt, int retries, 343 int scale_index, s32 tpt, int attempts,
448 int successes) 344 int successes)
449{ 345{
450 struct iwl_rate_scale_data *window = NULL; 346 struct iwl_rate_scale_data *window = NULL;
@@ -454,7 +350,7 @@ static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
454 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT) 350 if (scale_index < 0 || scale_index >= IWL_RATE_COUNT)
455 return -EINVAL; 351 return -EINVAL;
456 352
457 /* Select data for current tx bit rate */ 353 /* Select window for current tx bit rate */
458 window = &(windows[scale_index]); 354 window = &(windows[scale_index]);
459 355
460 /* 356 /*
@@ -465,7 +361,7 @@ static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
465 * subtract "1" from the success counter (this is the main reason 361 * subtract "1" from the success counter (this is the main reason
466 * we keep these bitmaps!). 362 * we keep these bitmaps!).
467 */ 363 */
468 while (retries > 0) { 364 while (attempts > 0) {
469 if (window->counter >= IWL_RATE_MAX_WINDOW) { 365 if (window->counter >= IWL_RATE_MAX_WINDOW) {
470 366
471 /* remove earliest */ 367 /* remove earliest */
@@ -480,17 +376,17 @@ static int rs_collect_tx_data(struct iwl_rate_scale_data *windows,
480 /* Increment frames-attempted counter */ 376 /* Increment frames-attempted counter */
481 window->counter++; 377 window->counter++;
482 378
483 /* Shift bitmap by one frame (throw away oldest history), 379 /* Shift bitmap by one frame to throw away oldest history */
484 * OR in "1", and increment "success" if this
485 * frame was successful. */
486 window->data <<= 1; 380 window->data <<= 1;
381
382 /* Mark the most recent #successes attempts as successful */
487 if (successes > 0) { 383 if (successes > 0) {
488 window->success_counter++; 384 window->success_counter++;
489 window->data |= 0x1; 385 window->data |= 0x1;
490 successes--; 386 successes--;
491 } 387 }
492 388
493 retries--; 389 attempts--;
494 } 390 }
495 391
496 /* Calculate current success ratio, avoid divide-by-0! */ 392 /* Calculate current success ratio, avoid divide-by-0! */
@@ -671,7 +567,7 @@ static int rs_toggle_antenna(u32 valid_ant, u32 *rate_n_flags,
671 * there are no non-GF stations present in the BSS. 567 * there are no non-GF stations present in the BSS.
672 */ 568 */
673static inline u8 rs_use_green(struct ieee80211_sta *sta, 569static inline u8 rs_use_green(struct ieee80211_sta *sta,
674 struct iwl_ht_info *ht_conf) 570 struct iwl_ht_config *ht_conf)
675{ 571{
676 return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) && 572 return (sta->ht_cap.cap & IEEE80211_HT_CAP_GRN_FLD) &&
677 !(ht_conf->non_GF_STA_present); 573 !(ht_conf->non_GF_STA_present);
@@ -821,27 +717,45 @@ out:
821} 717}
822 718
823/* 719/*
720 * Simple function to compare two rate scale table types
721 */
722static bool table_type_matches(struct iwl_scale_tbl_info *a,
723 struct iwl_scale_tbl_info *b)
724{
725 return (a->lq_type == b->lq_type) && (a->ant_type == b->ant_type) &&
726 (a->is_SGI == b->is_SGI);
727}
728/*
729 * Static function to get the expected throughput from an iwl_scale_tbl_info
730 * that wraps a NULL pointer check
731 */
732static s32 get_expected_tpt(struct iwl_scale_tbl_info *tbl, int rs_index)
733{
734 if (tbl->expected_tpt)
735 return tbl->expected_tpt[rs_index];
736 return 0;
737}
738
739/*
824 * mac80211 sends us Tx status 740 * mac80211 sends us Tx status
825 */ 741 */
826static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband, 742static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
827 struct ieee80211_sta *sta, void *priv_sta, 743 struct ieee80211_sta *sta, void *priv_sta,
828 struct sk_buff *skb) 744 struct sk_buff *skb)
829{ 745{
830 int status; 746 int legacy_success;
831 u8 retries; 747 int retries;
832 int rs_index, mac_index, index = 0; 748 int rs_index, mac_index, i;
833 struct iwl_lq_sta *lq_sta = priv_sta; 749 struct iwl_lq_sta *lq_sta = priv_sta;
834 struct iwl_link_quality_cmd *table; 750 struct iwl_link_quality_cmd *table;
835 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 751 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
836 struct iwl_priv *priv = (struct iwl_priv *)priv_r; 752 struct iwl_priv *priv = (struct iwl_priv *)priv_r;
837 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 753 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
838 struct iwl_rate_scale_data *window = NULL; 754 struct iwl_rate_scale_data *window = NULL;
839 struct iwl_rate_scale_data *search_win = NULL;
840 enum mac80211_rate_control_flags mac_flags; 755 enum mac80211_rate_control_flags mac_flags;
841 u32 tx_rate; 756 u32 tx_rate;
842 struct iwl_scale_tbl_info tbl_type; 757 struct iwl_scale_tbl_info tbl_type;
843 struct iwl_scale_tbl_info *curr_tbl, *search_tbl; 758 struct iwl_scale_tbl_info *curr_tbl, *other_tbl;
844 u8 active_index = 0;
845 s32 tpt = 0; 759 s32 tpt = 0;
846 760
847 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n"); 761 IWL_DEBUG_RATE_LIMIT(priv, "get frame ack response, update rate scale window\n");
@@ -850,30 +764,14 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
850 info->flags & IEEE80211_TX_CTL_NO_ACK) 764 info->flags & IEEE80211_TX_CTL_NO_ACK)
851 return; 765 return;
852 766
853 /* This packet was aggregated but doesn't carry rate scale info */ 767 /* This packet was aggregated but doesn't carry status info */
854 if ((info->flags & IEEE80211_TX_CTL_AMPDU) && 768 if ((info->flags & IEEE80211_TX_CTL_AMPDU) &&
855 !(info->flags & IEEE80211_TX_STAT_AMPDU)) 769 !(info->flags & IEEE80211_TX_STAT_AMPDU))
856 return; 770 return;
857 771
858 if (info->flags & IEEE80211_TX_STAT_AMPDU)
859 retries = 0;
860 else
861 retries = info->status.rates[0].count - 1;
862
863 if (retries > 15)
864 retries = 15;
865
866 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) && 772 if ((priv->iw_mode == NL80211_IFTYPE_ADHOC) &&
867 !lq_sta->ibss_sta_added) 773 !lq_sta->ibss_sta_added)
868 goto out; 774 return;
869
870 table = &lq_sta->lq;
871 active_index = lq_sta->active_tbl;
872
873 curr_tbl = &(lq_sta->lq_info[active_index]);
874 search_tbl = &(lq_sta->lq_info[(1 - active_index)]);
875 window = (struct iwl_rate_scale_data *)&(curr_tbl->win[0]);
876 search_win = (struct iwl_rate_scale_data *)&(search_tbl->win[0]);
877 775
878 /* 776 /*
879 * Ignore this Tx frame response if its initial rate doesn't match 777 * Ignore this Tx frame response if its initial rate doesn't match
@@ -883,6 +781,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
883 * to check "search" mode, or a prior "search" mode after we've moved 781 * to check "search" mode, or a prior "search" mode after we've moved
884 * to a new "search" mode (which might become the new "active" mode). 782 * to a new "search" mode (which might become the new "active" mode).
885 */ 783 */
784 table = &lq_sta->lq;
886 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags); 785 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
887 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index); 786 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index);
888 if (priv->band == IEEE80211_BAND_5GHZ) 787 if (priv->band == IEEE80211_BAND_5GHZ)
@@ -901,7 +800,7 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
901 if (priv->band == IEEE80211_BAND_2GHZ) 800 if (priv->band == IEEE80211_BAND_2GHZ)
902 mac_index += IWL_FIRST_OFDM_RATE; 801 mac_index += IWL_FIRST_OFDM_RATE;
903 } 802 }
904 803 /* Here we actually compare this rate to the latest LQ command */
905 if ((mac_index < 0) || 804 if ((mac_index < 0) ||
906 (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) || 805 (tbl_type.is_SGI != !!(mac_flags & IEEE80211_TX_RC_SHORT_GI)) ||
907 (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) || 806 (tbl_type.is_ht40 != !!(mac_flags & IEEE80211_TX_RC_40_MHZ_WIDTH)) ||
@@ -911,124 +810,106 @@ static void rs_tx_status(void *priv_r, struct ieee80211_supported_band *sband,
911 (!!(tx_rate & RATE_MCS_GF_MSK) != !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) || 810 (!!(tx_rate & RATE_MCS_GF_MSK) != !!(mac_flags & IEEE80211_TX_RC_GREEN_FIELD)) ||
912 (rs_index != mac_index)) { 811 (rs_index != mac_index)) {
913 IWL_DEBUG_RATE(priv, "initial rate %d does not match %d (0x%x)\n", mac_index, rs_index, tx_rate); 812 IWL_DEBUG_RATE(priv, "initial rate %d does not match %d (0x%x)\n", mac_index, rs_index, tx_rate);
914 /* the last LQ command could failed so the LQ in ucode not 813 /*
915 * the same in driver sync up 814 * Since rates mis-match, the last LQ command may have failed.
815 * After IWL_MISSED_RATE_MAX mis-matches, resync the uCode with
816 * ... driver.
916 */ 817 */
917 lq_sta->missed_rate_counter++; 818 lq_sta->missed_rate_counter++;
918 if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) { 819 if (lq_sta->missed_rate_counter > IWL_MISSED_RATE_MAX) {
919 lq_sta->missed_rate_counter = 0; 820 lq_sta->missed_rate_counter = 0;
920 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC); 821 iwl_send_lq_cmd(priv, &lq_sta->lq, CMD_ASYNC);
921 } 822 }
922 goto out; 823 /* Regardless, ignore this status info for outdated rate */
824 return;
825 } else
826 /* Rate did match, so reset the missed_rate_counter */
827 lq_sta->missed_rate_counter = 0;
828
829 /* Figure out if rate scale algorithm is in active or search table */
830 if (table_type_matches(&tbl_type,
831 &(lq_sta->lq_info[lq_sta->active_tbl]))) {
832 curr_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
833 other_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
834 } else if (table_type_matches(&tbl_type,
835 &lq_sta->lq_info[1 - lq_sta->active_tbl])) {
836 curr_tbl = &(lq_sta->lq_info[1 - lq_sta->active_tbl]);
837 other_tbl = &(lq_sta->lq_info[lq_sta->active_tbl]);
838 } else {
839 IWL_DEBUG_RATE(priv, "Neither active nor search matches tx rate\n");
840 return;
923 } 841 }
842 window = (struct iwl_rate_scale_data *)&(curr_tbl->win[0]);
924 843
925 lq_sta->missed_rate_counter = 0; 844 /*
926 /* Update frame history window with "failure" for each Tx retry. */ 845 * Updating the frame history depends on whether packets were
927 while (retries) { 846 * aggregated.
928 /* Look up the rate and other info used for each tx attempt. 847 *
929 * Each tx attempt steps one entry deeper in the rate table. */ 848 * For aggregation, all packets were transmitted at the same rate, the
930 tx_rate = le32_to_cpu(table->rs_table[index].rate_n_flags); 849 * first index into rate scale table.
931 rs_get_tbl_info_from_mcs(tx_rate, priv->band, 850 */
932 &tbl_type, &rs_index); 851 if (info->flags & IEEE80211_TX_STAT_AMPDU) {
933 852 tx_rate = le32_to_cpu(table->rs_table[0].rate_n_flags);
934 /* If type matches "search" table, 853 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type,
935 * add failure to "search" history */ 854 &rs_index);
936 if ((tbl_type.lq_type == search_tbl->lq_type) && 855 tpt = get_expected_tpt(curr_tbl, rs_index);
937 (tbl_type.ant_type == search_tbl->ant_type) && 856 rs_collect_tx_data(window, rs_index, tpt,
938 (tbl_type.is_SGI == search_tbl->is_SGI)) { 857 info->status.ampdu_ack_len,
939 if (search_tbl->expected_tpt) 858 info->status.ampdu_ack_map);
940 tpt = search_tbl->expected_tpt[rs_index]; 859
941 else 860 /* Update success/fail counts if not searching for new mode */
942 tpt = 0; 861 if (lq_sta->stay_in_tbl) {
943 rs_collect_tx_data(search_win, rs_index, tpt, 1, 0); 862 lq_sta->total_success += info->status.ampdu_ack_map;
944 863 lq_sta->total_failed += (info->status.ampdu_ack_len -
945 /* Else if type matches "current/active" table, 864 info->status.ampdu_ack_map);
946 * add failure to "current/active" history */
947 } else if ((tbl_type.lq_type == curr_tbl->lq_type) &&
948 (tbl_type.ant_type == curr_tbl->ant_type) &&
949 (tbl_type.is_SGI == curr_tbl->is_SGI)) {
950 if (curr_tbl->expected_tpt)
951 tpt = curr_tbl->expected_tpt[rs_index];
952 else
953 tpt = 0;
954 rs_collect_tx_data(window, rs_index, tpt, 1, 0);
955 } 865 }
956 866 } else {
957 /* If not searching for a new mode, increment failed counter
958 * ... this helps determine when to start searching again */
959 if (lq_sta->stay_in_tbl)
960 lq_sta->total_failed++;
961 --retries;
962 index++;
963
964 }
965
966 /* 867 /*
967 * Find (by rate) the history window to update with final Tx attempt; 868 * For legacy, update frame history with for each Tx retry.
968 * if Tx was successful first try, use original rate,
969 * else look up the rate that was, finally, successful.
970 */ 869 */
971 tx_rate = le32_to_cpu(table->rs_table[index].rate_n_flags); 870 retries = info->status.rates[0].count - 1;
972 lq_sta->last_rate_n_flags = tx_rate; 871 /* HW doesn't send more than 15 retries */
973 rs_get_tbl_info_from_mcs(tx_rate, priv->band, &tbl_type, &rs_index); 872 retries = min(retries, 15);
974 873
975 /* Update frame history window with "success" if Tx got ACKed ... */ 874 /* The last transmission may have been successful */
976 status = !!(info->flags & IEEE80211_TX_STAT_ACK); 875 legacy_success = !!(info->flags & IEEE80211_TX_STAT_ACK);
977 876 /* Collect data for each rate used during failed TX attempts */
978 /* If type matches "search" table, 877 for (i = 0; i <= retries; ++i) {
979 * add final tx status to "search" history */ 878 tx_rate = le32_to_cpu(table->rs_table[i].rate_n_flags);
980 if ((tbl_type.lq_type == search_tbl->lq_type) && 879 rs_get_tbl_info_from_mcs(tx_rate, priv->band,
981 (tbl_type.ant_type == search_tbl->ant_type) && 880 &tbl_type, &rs_index);
982 (tbl_type.is_SGI == search_tbl->is_SGI)) { 881 /*
983 if (search_tbl->expected_tpt) 882 * Only collect stats if retried rate is in the same RS
984 tpt = search_tbl->expected_tpt[rs_index]; 883 * table as active/search.
985 else 884 */
986 tpt = 0; 885 if (table_type_matches(&tbl_type, curr_tbl))
987 if (info->flags & IEEE80211_TX_STAT_AMPDU) 886 tpt = get_expected_tpt(curr_tbl, rs_index);
988 rs_collect_tx_data(search_win, rs_index, tpt, 887 else if (table_type_matches(&tbl_type, other_tbl))
989 info->status.ampdu_ack_len, 888 tpt = get_expected_tpt(other_tbl, rs_index);
990 info->status.ampdu_ack_map); 889 else
991 else 890 continue;
992 rs_collect_tx_data(search_win, rs_index, tpt,
993 1, status);
994 /* Else if type matches "current/active" table,
995 * add final tx status to "current/active" history */
996 } else if ((tbl_type.lq_type == curr_tbl->lq_type) &&
997 (tbl_type.ant_type == curr_tbl->ant_type) &&
998 (tbl_type.is_SGI == curr_tbl->is_SGI)) {
999 if (curr_tbl->expected_tpt)
1000 tpt = curr_tbl->expected_tpt[rs_index];
1001 else
1002 tpt = 0;
1003 if (info->flags & IEEE80211_TX_STAT_AMPDU)
1004 rs_collect_tx_data(window, rs_index, tpt,
1005 info->status.ampdu_ack_len,
1006 info->status.ampdu_ack_map);
1007 else
1008 rs_collect_tx_data(window, rs_index, tpt,
1009 1, status);
1010 }
1011 891
1012 /* If not searching for new mode, increment success/failed counter 892 /* Constants mean 1 transmission, 0 successes */
1013 * ... these help determine when to start searching again */ 893 if (i < retries)
1014 if (lq_sta->stay_in_tbl) { 894 rs_collect_tx_data(window, rs_index, tpt, 1,
1015 if (info->flags & IEEE80211_TX_STAT_AMPDU) { 895 0);
1016 lq_sta->total_success += info->status.ampdu_ack_map;
1017 lq_sta->total_failed +=
1018 (info->status.ampdu_ack_len - info->status.ampdu_ack_map);
1019 } else {
1020 if (status)
1021 lq_sta->total_success++;
1022 else 896 else
1023 lq_sta->total_failed++; 897 rs_collect_tx_data(window, rs_index, tpt, 1,
898 legacy_success);
899 }
900
901 /* Update success/fail counts if not searching for new mode */
902 if (lq_sta->stay_in_tbl) {
903 lq_sta->total_success += legacy_success;
904 lq_sta->total_failed += retries + (1 - legacy_success);
1024 } 905 }
1025 } 906 }
907 /* The last TX rate is cached in lq_sta; it's set in if/else above */
908 lq_sta->last_rate_n_flags = tx_rate;
1026 909
1027 /* See if there's a better rate or modulation mode to try. */ 910 /* See if there's a better rate or modulation mode to try. */
1028 if (sta && sta->supp_rates[sband->band]) 911 if (sta && sta->supp_rates[sband->band])
1029 rs_rate_scale_perform(priv, skb, sta, lq_sta); 912 rs_rate_scale_perform(priv, skb, sta, lq_sta);
1030out:
1031 return;
1032} 913}
1033 914
1034/* 915/*
@@ -1066,43 +947,45 @@ static void rs_set_stay_in_table(struct iwl_priv *priv, u8 is_legacy,
1066static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta, 947static void rs_set_expected_tpt_table(struct iwl_lq_sta *lq_sta,
1067 struct iwl_scale_tbl_info *tbl) 948 struct iwl_scale_tbl_info *tbl)
1068{ 949{
950 /* Used to choose among HT tables */
951 s32 (*ht_tbl_pointer)[IWL_RATE_COUNT];
952
953 /* Check for invalid LQ type */
954 if (WARN_ON_ONCE(!is_legacy(tbl->lq_type) && !is_Ht(tbl->lq_type))) {
955 tbl->expected_tpt = expected_tpt_legacy;
956 return;
957 }
958
959 /* Legacy rates have only one table */
1069 if (is_legacy(tbl->lq_type)) { 960 if (is_legacy(tbl->lq_type)) {
1070 if (!is_a_band(tbl->lq_type)) 961 tbl->expected_tpt = expected_tpt_legacy;
1071 tbl->expected_tpt = expected_tpt_G; 962 return;
1072 else 963 }
1073 tbl->expected_tpt = expected_tpt_A; 964
1074 } else if (is_siso(tbl->lq_type)) { 965 /* Choose among many HT tables depending on number of streams
1075 if (tbl->is_ht40 && !lq_sta->is_dup) 966 * (SISO/MIMO2/MIMO3), channel width (20/40), SGI, and aggregation
1076 if (tbl->is_SGI) 967 * status */
1077 tbl->expected_tpt = expected_tpt_siso40MHzSGI; 968 if (is_siso(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1078 else 969 ht_tbl_pointer = expected_tpt_siso20MHz;
1079 tbl->expected_tpt = expected_tpt_siso40MHz; 970 else if (is_siso(tbl->lq_type))
1080 else if (tbl->is_SGI) 971 ht_tbl_pointer = expected_tpt_siso40MHz;
1081 tbl->expected_tpt = expected_tpt_siso20MHzSGI; 972 else if (is_mimo2(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1082 else 973 ht_tbl_pointer = expected_tpt_mimo2_20MHz;
1083 tbl->expected_tpt = expected_tpt_siso20MHz; 974 else if (is_mimo2(tbl->lq_type))
1084 } else if (is_mimo2(tbl->lq_type)) { 975 ht_tbl_pointer = expected_tpt_mimo2_40MHz;
1085 if (tbl->is_ht40 && !lq_sta->is_dup) 976 else if (is_mimo3(tbl->lq_type) && (!tbl->is_ht40 || lq_sta->is_dup))
1086 if (tbl->is_SGI) 977 ht_tbl_pointer = expected_tpt_mimo3_20MHz;
1087 tbl->expected_tpt = expected_tpt_mimo2_40MHzSGI; 978 else /* if (is_mimo3(tbl->lq_type)) <-- must be true */
1088 else 979 ht_tbl_pointer = expected_tpt_mimo3_40MHz;
1089 tbl->expected_tpt = expected_tpt_mimo2_40MHz; 980
1090 else if (tbl->is_SGI) 981 if (!tbl->is_SGI && !lq_sta->is_agg) /* Normal */
1091 tbl->expected_tpt = expected_tpt_mimo2_20MHzSGI; 982 tbl->expected_tpt = ht_tbl_pointer[0];
1092 else 983 else if (tbl->is_SGI && !lq_sta->is_agg) /* SGI */
1093 tbl->expected_tpt = expected_tpt_mimo2_20MHz; 984 tbl->expected_tpt = ht_tbl_pointer[1];
1094 } else if (is_mimo3(tbl->lq_type)) { 985 else if (!tbl->is_SGI && lq_sta->is_agg) /* AGG */
1095 if (tbl->is_ht40 && !lq_sta->is_dup) 986 tbl->expected_tpt = ht_tbl_pointer[2];
1096 if (tbl->is_SGI) 987 else /* AGG+SGI */
1097 tbl->expected_tpt = expected_tpt_mimo3_40MHzSGI; 988 tbl->expected_tpt = ht_tbl_pointer[3];
1098 else
1099 tbl->expected_tpt = expected_tpt_mimo3_40MHz;
1100 else if (tbl->is_SGI)
1101 tbl->expected_tpt = expected_tpt_mimo3_20MHzSGI;
1102 else
1103 tbl->expected_tpt = expected_tpt_mimo3_20MHz;
1104 } else
1105 tbl->expected_tpt = expected_tpt_G;
1106} 989}
1107 990
1108/* 991/*
@@ -2077,6 +1960,14 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2077 lq_sta->supp_rates = sta->supp_rates[lq_sta->band]; 1960 lq_sta->supp_rates = sta->supp_rates[lq_sta->band];
2078 1961
2079 tid = rs_tl_add_packet(lq_sta, hdr); 1962 tid = rs_tl_add_packet(lq_sta, hdr);
1963 if ((tid != MAX_TID_COUNT) && (lq_sta->tx_agg_tid_en & (1 << tid))) {
1964 tid_data = &priv->stations[lq_sta->lq.sta_id].tid[tid];
1965 if (tid_data->agg.state == IWL_AGG_OFF)
1966 lq_sta->is_agg = 0;
1967 else
1968 lq_sta->is_agg = 1;
1969 } else
1970 lq_sta->is_agg = 0;
2080 1971
2081 /* 1972 /*
2082 * Select rate-scale / modulation-mode table to work with in 1973 * Select rate-scale / modulation-mode table to work with in
@@ -2177,10 +2068,10 @@ static void rs_rate_scale_perform(struct iwl_priv *priv,
2177 2068
2178 goto out; 2069 goto out;
2179 } 2070 }
2180
2181 /* Else we have enough samples; calculate estimate of 2071 /* Else we have enough samples; calculate estimate of
2182 * actual average throughput */ 2072 * actual average throughput */
2183 2073
2074 /* Sanity-check TPT calculations */
2184 BUG_ON(window->average_tpt != ((window->success_ratio * 2075 BUG_ON(window->average_tpt != ((window->success_ratio *
2185 tbl->expected_tpt[index] + 64) / 128)); 2076 tbl->expected_tpt[index] + 64) / 128));
2186 2077
@@ -2584,22 +2475,13 @@ static void *rs_alloc_sta(void *priv_rate, struct ieee80211_sta *sta,
2584 gfp_t gfp) 2475 gfp_t gfp)
2585{ 2476{
2586 struct iwl_lq_sta *lq_sta; 2477 struct iwl_lq_sta *lq_sta;
2478 struct iwl_station_priv *sta_priv = (struct iwl_station_priv *) sta->drv_priv;
2587 struct iwl_priv *priv; 2479 struct iwl_priv *priv;
2588 int i, j;
2589 2480
2590 priv = (struct iwl_priv *)priv_rate; 2481 priv = (struct iwl_priv *)priv_rate;
2591 IWL_DEBUG_RATE(priv, "create station rate scale window\n"); 2482 IWL_DEBUG_RATE(priv, "create station rate scale window\n");
2592 2483
2593 lq_sta = kzalloc(sizeof(struct iwl_lq_sta), gfp); 2484 lq_sta = &sta_priv->lq_sta;
2594
2595 if (lq_sta == NULL)
2596 return NULL;
2597 lq_sta->lq.sta_id = 0xff;
2598
2599
2600 for (j = 0; j < LQ_SIZE; j++)
2601 for (i = 0; i < IWL_RATE_COUNT; i++)
2602 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
2603 2485
2604 return lq_sta; 2486 return lq_sta;
2605} 2487}
@@ -2613,6 +2495,12 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
2613 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 2495 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2614 struct iwl_lq_sta *lq_sta = priv_sta; 2496 struct iwl_lq_sta *lq_sta = priv_sta;
2615 2497
2498 lq_sta->lq.sta_id = 0xff;
2499
2500 for (j = 0; j < LQ_SIZE; j++)
2501 for (i = 0; i < IWL_RATE_COUNT; i++)
2502 rs_rate_scale_clear_window(&lq_sta->lq_info[j].win[i]);
2503
2616 lq_sta->flush_timer = 0; 2504 lq_sta->flush_timer = 0;
2617 lq_sta->supp_rates = sta->supp_rates[sband->band]; 2505 lq_sta->supp_rates = sta->supp_rates[sband->band];
2618 for (j = 0; j < LQ_SIZE; j++) 2506 for (j = 0; j < LQ_SIZE; j++)
@@ -2690,6 +2578,7 @@ static void rs_rate_init(void *priv_r, struct ieee80211_supported_band *sband,
2690 lq_sta->last_txrate_idx = rate_lowest_index(sband, sta); 2578 lq_sta->last_txrate_idx = rate_lowest_index(sband, sta);
2691 if (sband->band == IEEE80211_BAND_5GHZ) 2579 if (sband->band == IEEE80211_BAND_5GHZ)
2692 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE; 2580 lq_sta->last_txrate_idx += IWL_FIRST_OFDM_RATE;
2581 lq_sta->is_agg = 0;
2693 2582
2694 rs_initialize_lq(priv, conf, sta, lq_sta); 2583 rs_initialize_lq(priv, conf, sta, lq_sta);
2695} 2584}
@@ -2808,7 +2697,7 @@ static void rs_fill_link_cmd(struct iwl_priv *priv,
2808 repeat_rate--; 2697 repeat_rate--;
2809 } 2698 }
2810 2699
2811 lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_MAX; 2700 lq_cmd->agg_params.agg_frame_cnt_limit = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
2812 lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF; 2701 lq_cmd->agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
2813 lq_cmd->agg_params.agg_time_limit = 2702 lq_cmd->agg_params.agg_time_limit =
2814 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF); 2703 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
@@ -2827,11 +2716,9 @@ static void rs_free(void *priv_rate)
2827static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta, 2716static void rs_free_sta(void *priv_r, struct ieee80211_sta *sta,
2828 void *priv_sta) 2717 void *priv_sta)
2829{ 2718{
2830 struct iwl_lq_sta *lq_sta = priv_sta;
2831 struct iwl_priv *priv __maybe_unused = priv_r; 2719 struct iwl_priv *priv __maybe_unused = priv_r;
2832 2720
2833 IWL_DEBUG_RATE(priv, "enter\n"); 2721 IWL_DEBUG_RATE(priv, "enter\n");
2834 kfree(lq_sta);
2835 IWL_DEBUG_RATE(priv, "leave\n"); 2722 IWL_DEBUG_RATE(priv, "leave\n");
2836} 2723}
2837 2724
@@ -2942,8 +2829,9 @@ static ssize_t rs_sta_dbgfs_scale_table_read(struct file *file,
2942 ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3")); 2829 ((is_mimo2(tbl->lq_type)) ? "MIMO2" : "MIMO3"));
2943 desc += sprintf(buff+desc, " %s", 2830 desc += sprintf(buff+desc, " %s",
2944 (tbl->is_ht40) ? "40MHz" : "20MHz"); 2831 (tbl->is_ht40) ? "40MHz" : "20MHz");
2945 desc += sprintf(buff+desc, " %s %s\n", (tbl->is_SGI) ? "SGI" : "", 2832 desc += sprintf(buff+desc, " %s %s %s\n", (tbl->is_SGI) ? "SGI" : "",
2946 (lq_sta->is_green) ? "GF enabled" : ""); 2833 (lq_sta->is_green) ? "GF enabled" : "",
2834 (lq_sta->is_agg) ? "AGG on" : "");
2947 } 2835 }
2948 desc += sprintf(buff+desc, "last tx rate=0x%X\n", 2836 desc += sprintf(buff+desc, "last tx rate=0x%X\n",
2949 lq_sta->last_rate_n_flags); 2837 lq_sta->last_rate_n_flags);
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
index 9fac530cfb7e..affc0c5a2f2c 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-rs.h
@@ -54,6 +54,7 @@ struct iwl3945_rate_info {
54 u8 prev_table_rs; /* prev in rate table cmd */ 54 u8 prev_table_rs; /* prev in rate table cmd */
55}; 55};
56 56
57
57/* 58/*
58 * These serve as indexes into 59 * These serve as indexes into
59 * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT]; 60 * struct iwl_rate_info iwl_rates[IWL_RATE_COUNT];
@@ -335,6 +336,106 @@ struct iwl_rate_mcs_info {
335 char mcs[IWL_MAX_MCS_DISPLAY_SIZE]; 336 char mcs[IWL_MAX_MCS_DISPLAY_SIZE];
336}; 337};
337 338
339/**
340 * struct iwl_rate_scale_data -- tx success history for one rate
341 */
342struct iwl_rate_scale_data {
343 u64 data; /* bitmap of successful frames */
344 s32 success_counter; /* number of frames successful */
345 s32 success_ratio; /* per-cent * 128 */
346 s32 counter; /* number of frames attempted */
347 s32 average_tpt; /* success ratio * expected throughput */
348 unsigned long stamp;
349};
350
351/**
352 * struct iwl_scale_tbl_info -- tx params and success history for all rates
353 *
354 * There are two of these in struct iwl_lq_sta,
355 * one for "active", and one for "search".
356 */
357struct iwl_scale_tbl_info {
358 enum iwl_table_type lq_type;
359 u8 ant_type;
360 u8 is_SGI; /* 1 = short guard interval */
361 u8 is_ht40; /* 1 = 40 MHz channel width */
362 u8 is_dup; /* 1 = duplicated data streams */
363 u8 action; /* change modulation; IWL_[LEGACY/SISO/MIMO]_SWITCH_* */
364 u8 max_search; /* maximun number of tables we can search */
365 s32 *expected_tpt; /* throughput metrics; expected_tpt_G, etc. */
366 u32 current_rate; /* rate_n_flags, uCode API format */
367 struct iwl_rate_scale_data win[IWL_RATE_COUNT]; /* rate histories */
368};
369
370struct iwl_traffic_load {
371 unsigned long time_stamp; /* age of the oldest statistics */
372 u32 packet_count[TID_QUEUE_MAX_SIZE]; /* packet count in this time
373 * slice */
374 u32 total; /* total num of packets during the
375 * last TID_MAX_TIME_DIFF */
376 u8 queue_count; /* number of queues that has
377 * been used since the last cleanup */
378 u8 head; /* start of the circular buffer */
379};
380
381/**
382 * struct iwl_lq_sta -- driver's rate scaling private structure
383 *
384 * Pointer to this gets passed back and forth between driver and mac80211.
385 */
386struct iwl_lq_sta {
387 u8 active_tbl; /* index of active table, range 0-1 */
388 u8 enable_counter; /* indicates HT mode */
389 u8 stay_in_tbl; /* 1: disallow, 0: allow search for new mode */
390 u8 search_better_tbl; /* 1: currently trying alternate mode */
391 s32 last_tpt;
392
393 /* The following determine when to search for a new mode */
394 u32 table_count_limit;
395 u32 max_failure_limit; /* # failed frames before new search */
396 u32 max_success_limit; /* # successful frames before new search */
397 u32 table_count;
398 u32 total_failed; /* total failed frames, any/all rates */
399 u32 total_success; /* total successful frames, any/all rates */
400 u64 flush_timer; /* time staying in mode before new search */
401
402 u8 action_counter; /* # mode-switch actions tried */
403 u8 is_green;
404 u8 is_dup;
405 enum ieee80211_band band;
406 u8 ibss_sta_added;
407
408 /* The following are bitmaps of rates; IWL_RATE_6M_MASK, etc. */
409 u32 supp_rates;
410 u16 active_legacy_rate;
411 u16 active_siso_rate;
412 u16 active_mimo2_rate;
413 u16 active_mimo3_rate;
414 u16 active_rate_basic;
415 s8 max_rate_idx; /* Max rate set by user */
416 u8 missed_rate_counter;
417
418 struct iwl_link_quality_cmd lq;
419 struct iwl_scale_tbl_info lq_info[LQ_SIZE]; /* "active", "search" */
420 struct iwl_traffic_load load[TID_MAX_LOAD_COUNT];
421 u8 tx_agg_tid_en;
422#ifdef CONFIG_MAC80211_DEBUGFS
423 struct dentry *rs_sta_dbgfs_scale_table_file;
424 struct dentry *rs_sta_dbgfs_stats_table_file;
425 struct dentry *rs_sta_dbgfs_rate_scale_data_file;
426 struct dentry *rs_sta_dbgfs_tx_agg_tid_en_file;
427 u32 dbg_fixed_rate;
428#endif
429 struct iwl_priv *drv;
430
431 /* used to be in sta_info */
432 int last_txrate_idx;
433 /* last tx rate_n_flags */
434 u32 last_rate_n_flags;
435 /* packets destined for this STA are aggregated */
436 u8 is_agg;
437};
438
338static inline u8 num_of_ant(u8 mask) 439static inline u8 num_of_ant(u8 mask)
339{ 440{
340 return !!((mask) & ANT_A) + 441 return !!((mask) & ANT_A) +
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn.c b/drivers/net/wireless/iwlwifi/iwl-agn.c
index 877af443aac0..b5fe8f87aa7e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn.c
@@ -190,11 +190,7 @@ int iwl_commit_rxon(struct iwl_priv *priv)
190 priv->start_calib = 0; 190 priv->start_calib = 0;
191 191
192 /* Add the broadcast address so we can send broadcast frames */ 192 /* Add the broadcast address so we can send broadcast frames */
193 if (iwl_rxon_add_station(priv, iwl_bcast_addr, 0) == 193 iwl_add_bcast_station(priv);
194 IWL_INVALID_STATION) {
195 IWL_ERR(priv, "Error adding BROADCAST address for transmit.\n");
196 return -EIO;
197 }
198 194
199 /* If we have set the ASSOC_MSK and we are in BSS mode then 195 /* If we have set the ASSOC_MSK and we are in BSS mode then
200 * add the IWL_AP_ID to the station rate table */ 196 * add the IWL_AP_ID to the station rate table */
@@ -524,7 +520,7 @@ int iwl_hw_tx_queue_init(struct iwl_priv *priv,
524static void iwl_rx_reply_alive(struct iwl_priv *priv, 520static void iwl_rx_reply_alive(struct iwl_priv *priv,
525 struct iwl_rx_mem_buffer *rxb) 521 struct iwl_rx_mem_buffer *rxb)
526{ 522{
527 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 523 struct iwl_rx_packet *pkt = rxb_addr(rxb);
528 struct iwl_alive_resp *palive; 524 struct iwl_alive_resp *palive;
529 struct delayed_work *pwork; 525 struct delayed_work *pwork;
530 526
@@ -610,7 +606,7 @@ static void iwl_rx_beacon_notif(struct iwl_priv *priv,
610 struct iwl_rx_mem_buffer *rxb) 606 struct iwl_rx_mem_buffer *rxb)
611{ 607{
612#ifdef CONFIG_IWLWIFI_DEBUG 608#ifdef CONFIG_IWLWIFI_DEBUG
613 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 609 struct iwl_rx_packet *pkt = rxb_addr(rxb);
614 struct iwl4965_beacon_notif *beacon = 610 struct iwl4965_beacon_notif *beacon =
615 (struct iwl4965_beacon_notif *)pkt->u.raw; 611 (struct iwl4965_beacon_notif *)pkt->u.raw;
616 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags); 612 u8 rate = iwl_hw_get_rate(beacon->beacon_notify_hdr.rate_n_flags);
@@ -634,7 +630,7 @@ static void iwl_rx_beacon_notif(struct iwl_priv *priv,
634static void iwl_rx_card_state_notif(struct iwl_priv *priv, 630static void iwl_rx_card_state_notif(struct iwl_priv *priv,
635 struct iwl_rx_mem_buffer *rxb) 631 struct iwl_rx_mem_buffer *rxb)
636{ 632{
637 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 633 struct iwl_rx_packet *pkt = rxb_addr(rxb);
638 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); 634 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
639 unsigned long status = priv->status; 635 unsigned long status = priv->status;
640 636
@@ -769,7 +765,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
769 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i); 765 IWL_DEBUG_RX(priv, "r = %d, i = %d\n", r, i);
770 766
771 /* calculate total frames need to be restock after handling RX */ 767 /* calculate total frames need to be restock after handling RX */
772 total_empty = r - priv->rxq.write_actual; 768 total_empty = r - rxq->write_actual;
773 if (total_empty < 0) 769 if (total_empty < 0)
774 total_empty += RX_QUEUE_SIZE; 770 total_empty += RX_QUEUE_SIZE;
775 771
@@ -786,10 +782,13 @@ void iwl_rx_handle(struct iwl_priv *priv)
786 782
787 rxq->queue[i] = NULL; 783 rxq->queue[i] = NULL;
788 784
789 pci_unmap_single(priv->pci_dev, rxb->real_dma_addr, 785 pci_unmap_page(priv->pci_dev, rxb->page_dma,
790 priv->hw_params.rx_buf_size + 256, 786 PAGE_SIZE << priv->hw_params.rx_page_order,
791 PCI_DMA_FROMDEVICE); 787 PCI_DMA_FROMDEVICE);
792 pkt = (struct iwl_rx_packet *)rxb->skb->data; 788 pkt = rxb_addr(rxb);
789
790 trace_iwlwifi_dev_rx(priv, pkt,
791 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
793 792
794 /* Reclaim a command buffer only if this packet is a response 793 /* Reclaim a command buffer only if this packet is a response
795 * to a (driver-originated) command. 794 * to a (driver-originated) command.
@@ -811,8 +810,8 @@ void iwl_rx_handle(struct iwl_priv *priv)
811 if (priv->rx_handlers[pkt->hdr.cmd]) { 810 if (priv->rx_handlers[pkt->hdr.cmd]) {
812 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, 811 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r,
813 i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 812 i, get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
814 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
815 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; 813 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
814 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
816 } else { 815 } else {
817 /* No handling needed */ 816 /* No handling needed */
818 IWL_DEBUG_RX(priv, 817 IWL_DEBUG_RX(priv,
@@ -821,35 +820,45 @@ void iwl_rx_handle(struct iwl_priv *priv)
821 pkt->hdr.cmd); 820 pkt->hdr.cmd);
822 } 821 }
823 822
823 /*
824 * XXX: After here, we should always check rxb->page
825 * against NULL before touching it or its virtual
826 * memory (pkt). Because some rx_handler might have
827 * already taken or freed the pages.
828 */
829
824 if (reclaim) { 830 if (reclaim) {
825 /* Invoke any callbacks, transfer the skb to caller, and 831 /* Invoke any callbacks, transfer the buffer to caller,
826 * fire off the (possibly) blocking iwl_send_cmd() 832 * and fire off the (possibly) blocking iwl_send_cmd()
827 * as we reclaim the driver command queue */ 833 * as we reclaim the driver command queue */
828 if (rxb && rxb->skb) 834 if (rxb->page)
829 iwl_tx_cmd_complete(priv, rxb); 835 iwl_tx_cmd_complete(priv, rxb);
830 else 836 else
831 IWL_WARN(priv, "Claim null rxb?\n"); 837 IWL_WARN(priv, "Claim null rxb?\n");
832 } 838 }
833 839
834 /* For now we just don't re-use anything. We can tweak this 840 /* Reuse the page if possible. For notification packets and
835 * later to try and re-use notification packets and SKBs that 841 * SKBs that fail to Rx correctly, add them back into the
836 * fail to Rx correctly */ 842 * rx_free list for reuse later. */
837 if (rxb->skb != NULL) {
838 priv->alloc_rxb_skb--;
839 dev_kfree_skb_any(rxb->skb);
840 rxb->skb = NULL;
841 }
842
843 spin_lock_irqsave(&rxq->lock, flags); 843 spin_lock_irqsave(&rxq->lock, flags);
844 list_add_tail(&rxb->list, &priv->rxq.rx_used); 844 if (rxb->page != NULL) {
845 rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
846 0, PAGE_SIZE << priv->hw_params.rx_page_order,
847 PCI_DMA_FROMDEVICE);
848 list_add_tail(&rxb->list, &rxq->rx_free);
849 rxq->free_count++;
850 } else
851 list_add_tail(&rxb->list, &rxq->rx_used);
852
845 spin_unlock_irqrestore(&rxq->lock, flags); 853 spin_unlock_irqrestore(&rxq->lock, flags);
854
846 i = (i + 1) & RX_QUEUE_MASK; 855 i = (i + 1) & RX_QUEUE_MASK;
847 /* If there are a lot of unused frames, 856 /* If there are a lot of unused frames,
848 * restock the Rx queue so ucode wont assert. */ 857 * restock the Rx queue so ucode wont assert. */
849 if (fill_rx) { 858 if (fill_rx) {
850 count++; 859 count++;
851 if (count >= 8) { 860 if (count >= 8) {
852 priv->rxq.read = i; 861 rxq->read = i;
853 iwl_rx_replenish_now(priv); 862 iwl_rx_replenish_now(priv);
854 count = 0; 863 count = 0;
855 } 864 }
@@ -857,7 +866,7 @@ void iwl_rx_handle(struct iwl_priv *priv)
857 } 866 }
858 867
859 /* Backtrack one entry */ 868 /* Backtrack one entry */
860 priv->rxq.read = i; 869 rxq->read = i;
861 if (fill_rx) 870 if (fill_rx)
862 iwl_rx_replenish_now(priv); 871 iwl_rx_replenish_now(priv);
863 else 872 else
@@ -877,6 +886,7 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
877 u32 inta, handled = 0; 886 u32 inta, handled = 0;
878 u32 inta_fh; 887 u32 inta_fh;
879 unsigned long flags; 888 unsigned long flags;
889 u32 i;
880#ifdef CONFIG_IWLWIFI_DEBUG 890#ifdef CONFIG_IWLWIFI_DEBUG
881 u32 inta_mask; 891 u32 inta_mask;
882#endif 892#endif
@@ -904,6 +914,8 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
904 } 914 }
905#endif 915#endif
906 916
917 spin_unlock_irqrestore(&priv->lock, flags);
918
907 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not 919 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
908 * atomic, make sure that inta covers all the interrupts that 920 * atomic, make sure that inta covers all the interrupts that
909 * we've discovered, even if FH interrupt came in just after 921 * we've discovered, even if FH interrupt came in just after
@@ -925,8 +937,6 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
925 937
926 handled |= CSR_INT_BIT_HW_ERR; 938 handled |= CSR_INT_BIT_HW_ERR;
927 939
928 spin_unlock_irqrestore(&priv->lock, flags);
929
930 return; 940 return;
931 } 941 }
932 942
@@ -994,19 +1004,17 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
994 handled |= CSR_INT_BIT_SW_ERR; 1004 handled |= CSR_INT_BIT_SW_ERR;
995 } 1005 }
996 1006
997 /* uCode wakes up after power-down sleep */ 1007 /*
1008 * uCode wakes up after power-down sleep.
1009 * Tell device about any new tx or host commands enqueued,
1010 * and about any Rx buffers made available while asleep.
1011 */
998 if (inta & CSR_INT_BIT_WAKEUP) { 1012 if (inta & CSR_INT_BIT_WAKEUP) {
999 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n"); 1013 IWL_DEBUG_ISR(priv, "Wakeup interrupt\n");
1000 iwl_rx_queue_update_write_ptr(priv, &priv->rxq); 1014 iwl_rx_queue_update_write_ptr(priv, &priv->rxq);
1001 iwl_txq_update_write_ptr(priv, &priv->txq[0]); 1015 for (i = 0; i < priv->hw_params.max_txq_num; i++)
1002 iwl_txq_update_write_ptr(priv, &priv->txq[1]); 1016 iwl_txq_update_write_ptr(priv, &priv->txq[i]);
1003 iwl_txq_update_write_ptr(priv, &priv->txq[2]);
1004 iwl_txq_update_write_ptr(priv, &priv->txq[3]);
1005 iwl_txq_update_write_ptr(priv, &priv->txq[4]);
1006 iwl_txq_update_write_ptr(priv, &priv->txq[5]);
1007
1008 priv->isr_stats.wakeup++; 1017 priv->isr_stats.wakeup++;
1009
1010 handled |= CSR_INT_BIT_WAKEUP; 1018 handled |= CSR_INT_BIT_WAKEUP;
1011 } 1019 }
1012 1020
@@ -1016,14 +1024,16 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
1016 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) { 1024 if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1017 iwl_rx_handle(priv); 1025 iwl_rx_handle(priv);
1018 priv->isr_stats.rx++; 1026 priv->isr_stats.rx++;
1027 iwl_leds_background(priv);
1019 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX); 1028 handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1020 } 1029 }
1021 1030
1031 /* This "Tx" DMA channel is used only for loading uCode */
1022 if (inta & CSR_INT_BIT_FH_TX) { 1032 if (inta & CSR_INT_BIT_FH_TX) {
1023 IWL_DEBUG_ISR(priv, "Tx interrupt\n"); 1033 IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
1024 priv->isr_stats.tx++; 1034 priv->isr_stats.tx++;
1025 handled |= CSR_INT_BIT_FH_TX; 1035 handled |= CSR_INT_BIT_FH_TX;
1026 /* FH finished to write, send event */ 1036 /* Wake up uCode load routine, now that load is complete */
1027 priv->ucode_write_complete = 1; 1037 priv->ucode_write_complete = 1;
1028 wake_up_interruptible(&priv->wait_command_queue); 1038 wake_up_interruptible(&priv->wait_command_queue);
1029 } 1039 }
@@ -1053,7 +1063,6 @@ static void iwl_irq_tasklet_legacy(struct iwl_priv *priv)
1053 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); 1063 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
1054 } 1064 }
1055#endif 1065#endif
1056 spin_unlock_irqrestore(&priv->lock, flags);
1057} 1066}
1058 1067
1059/* tasklet for iwlagn interrupt */ 1068/* tasklet for iwlagn interrupt */
@@ -1083,6 +1092,9 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1083 inta, inta_mask); 1092 inta, inta_mask);
1084 } 1093 }
1085#endif 1094#endif
1095
1096 spin_unlock_irqrestore(&priv->lock, flags);
1097
1086 /* saved interrupt in inta variable now we can reset priv->inta */ 1098 /* saved interrupt in inta variable now we can reset priv->inta */
1087 priv->inta = 0; 1099 priv->inta = 0;
1088 1100
@@ -1098,8 +1110,6 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1098 1110
1099 handled |= CSR_INT_BIT_HW_ERR; 1111 handled |= CSR_INT_BIT_HW_ERR;
1100 1112
1101 spin_unlock_irqrestore(&priv->lock, flags);
1102
1103 return; 1113 return;
1104 } 1114 }
1105 1115
@@ -1217,14 +1227,16 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1217 CSR_INT_PERIODIC_ENA); 1227 CSR_INT_PERIODIC_ENA);
1218 1228
1219 priv->isr_stats.rx++; 1229 priv->isr_stats.rx++;
1230 iwl_leds_background(priv);
1220 } 1231 }
1221 1232
1233 /* This "Tx" DMA channel is used only for loading uCode */
1222 if (inta & CSR_INT_BIT_FH_TX) { 1234 if (inta & CSR_INT_BIT_FH_TX) {
1223 iwl_write32(priv, CSR_FH_INT_STATUS, CSR49_FH_INT_TX_MASK); 1235 iwl_write32(priv, CSR_FH_INT_STATUS, CSR49_FH_INT_TX_MASK);
1224 IWL_DEBUG_ISR(priv, "Tx interrupt\n"); 1236 IWL_DEBUG_ISR(priv, "uCode load interrupt\n");
1225 priv->isr_stats.tx++; 1237 priv->isr_stats.tx++;
1226 handled |= CSR_INT_BIT_FH_TX; 1238 handled |= CSR_INT_BIT_FH_TX;
1227 /* FH finished to write, send event */ 1239 /* Wake up uCode load routine, now that load is complete */
1228 priv->ucode_write_complete = 1; 1240 priv->ucode_write_complete = 1;
1229 wake_up_interruptible(&priv->wait_command_queue); 1241 wake_up_interruptible(&priv->wait_command_queue);
1230 } 1242 }
@@ -1239,14 +1251,10 @@ static void iwl_irq_tasklet(struct iwl_priv *priv)
1239 inta & ~priv->inta_mask); 1251 inta & ~priv->inta_mask);
1240 } 1252 }
1241 1253
1242
1243 /* Re-enable all interrupts */ 1254 /* Re-enable all interrupts */
1244 /* only Re-enable if diabled by irq */ 1255 /* only Re-enable if diabled by irq */
1245 if (test_bit(STATUS_INT_ENABLED, &priv->status)) 1256 if (test_bit(STATUS_INT_ENABLED, &priv->status))
1246 iwl_enable_interrupts(priv); 1257 iwl_enable_interrupts(priv);
1247
1248 spin_unlock_irqrestore(&priv->lock, flags);
1249
1250} 1258}
1251 1259
1252 1260
@@ -1366,6 +1374,14 @@ static int iwl_read_ucode(struct iwl_priv *priv)
1366 IWL_UCODE_API(priv->ucode_ver), 1374 IWL_UCODE_API(priv->ucode_ver),
1367 IWL_UCODE_SERIAL(priv->ucode_ver)); 1375 IWL_UCODE_SERIAL(priv->ucode_ver));
1368 1376
1377 snprintf(priv->hw->wiphy->fw_version,
1378 sizeof(priv->hw->wiphy->fw_version),
1379 "%u.%u.%u.%u",
1380 IWL_UCODE_MAJOR(priv->ucode_ver),
1381 IWL_UCODE_MINOR(priv->ucode_ver),
1382 IWL_UCODE_API(priv->ucode_ver),
1383 IWL_UCODE_SERIAL(priv->ucode_ver));
1384
1369 if (build) 1385 if (build)
1370 IWL_DEBUG_INFO(priv, "Build %u\n", build); 1386 IWL_DEBUG_INFO(priv, "Build %u\n", build);
1371 1387
@@ -1610,6 +1626,9 @@ void iwl_dump_nic_error_log(struct iwl_priv *priv)
1610 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32)); 1626 line = iwl_read_targ_mem(priv, base + 9 * sizeof(u32));
1611 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32)); 1627 time = iwl_read_targ_mem(priv, base + 11 * sizeof(u32));
1612 1628
1629 trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, data2, line,
1630 blink1, blink2, ilink1, ilink2);
1631
1613 IWL_ERR(priv, "Desc Time " 1632 IWL_ERR(priv, "Desc Time "
1614 "data1 data2 line\n"); 1633 "data1 data2 line\n");
1615 IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n", 1634 IWL_ERR(priv, "%-28s (#%02d) %010u 0x%08X 0x%08X %u\n",
@@ -1658,12 +1677,14 @@ static void iwl_print_event_log(struct iwl_priv *priv, u32 start_idx,
1658 ptr += sizeof(u32); 1677 ptr += sizeof(u32);
1659 if (mode == 0) { 1678 if (mode == 0) {
1660 /* data, ev */ 1679 /* data, ev */
1680 trace_iwlwifi_dev_ucode_event(priv, 0, time, ev);
1661 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev); 1681 IWL_ERR(priv, "EVT_LOG:0x%08x:%04u\n", time, ev);
1662 } else { 1682 } else {
1663 data = iwl_read_targ_mem(priv, ptr); 1683 data = iwl_read_targ_mem(priv, ptr);
1664 ptr += sizeof(u32); 1684 ptr += sizeof(u32);
1665 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n", 1685 IWL_ERR(priv, "EVT_LOGT:%010u:0x%08x:%04u\n",
1666 time, data, ev); 1686 time, data, ev);
1687 trace_iwlwifi_dev_ucode_event(priv, time, data, ev);
1667 } 1688 }
1668 } 1689 }
1669} 1690}
@@ -1762,6 +1783,10 @@ static void iwl_alive_start(struct iwl_priv *priv)
1762 priv->active_rate = priv->rates_mask; 1783 priv->active_rate = priv->rates_mask;
1763 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK; 1784 priv->active_rate_basic = priv->rates_mask & IWL_BASIC_RATES_MASK;
1764 1785
1786 /* Configure Tx antenna selection based on H/W config */
1787 if (priv->cfg->ops->hcmd->set_tx_ant)
1788 priv->cfg->ops->hcmd->set_tx_ant(priv, priv->cfg->valid_tx_ant);
1789
1765 if (iwl_is_associated(priv)) { 1790 if (iwl_is_associated(priv)) {
1766 struct iwl_rxon_cmd *active_rxon = 1791 struct iwl_rxon_cmd *active_rxon =
1767 (struct iwl_rxon_cmd *)&priv->active_rxon; 1792 (struct iwl_rxon_cmd *)&priv->active_rxon;
@@ -1789,7 +1814,7 @@ static void iwl_alive_start(struct iwl_priv *priv)
1789 /* At this point, the NIC is initialized and operational */ 1814 /* At this point, the NIC is initialized and operational */
1790 iwl_rf_kill_ct_config(priv); 1815 iwl_rf_kill_ct_config(priv);
1791 1816
1792 iwl_leds_register(priv); 1817 iwl_leds_init(priv);
1793 1818
1794 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); 1819 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
1795 set_bit(STATUS_READY, &priv->status); 1820 set_bit(STATUS_READY, &priv->status);
@@ -1827,8 +1852,6 @@ static void __iwl_down(struct iwl_priv *priv)
1827 if (!exit_pending) 1852 if (!exit_pending)
1828 set_bit(STATUS_EXIT_PENDING, &priv->status); 1853 set_bit(STATUS_EXIT_PENDING, &priv->status);
1829 1854
1830 iwl_leds_unregister(priv);
1831
1832 iwl_clear_stations_table(priv); 1855 iwl_clear_stations_table(priv);
1833 1856
1834 /* Unblock any waiting calls */ 1857 /* Unblock any waiting calls */
@@ -1889,11 +1912,9 @@ static void __iwl_down(struct iwl_priv *priv)
1889 1912
1890 udelay(5); 1913 udelay(5);
1891 1914
1892 /* FIXME: apm_ops.suspend(priv) */ 1915 /* Stop the device, and put it in low power state */
1893 if (exit_pending) 1916 priv->cfg->ops->lib->apm_ops.stop(priv);
1894 priv->cfg->ops->lib->apm_ops.stop(priv); 1917
1895 else
1896 priv->cfg->ops->lib->apm_ops.reset(priv);
1897 exit: 1918 exit:
1898 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); 1919 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
1899 1920
@@ -2280,6 +2301,69 @@ void iwl_post_associate(struct iwl_priv *priv)
2280 2301
2281#define UCODE_READY_TIMEOUT (4 * HZ) 2302#define UCODE_READY_TIMEOUT (4 * HZ)
2282 2303
2304/*
2305 * Not a mac80211 entry point function, but it fits in with all the
2306 * other mac80211 functions grouped here.
2307 */
2308static int iwl_setup_mac(struct iwl_priv *priv)
2309{
2310 int ret;
2311 struct ieee80211_hw *hw = priv->hw;
2312 hw->rate_control_algorithm = "iwl-agn-rs";
2313
2314 /* Tell mac80211 our characteristics */
2315 hw->flags = IEEE80211_HW_SIGNAL_DBM |
2316 IEEE80211_HW_NOISE_DBM |
2317 IEEE80211_HW_AMPDU_AGGREGATION |
2318 IEEE80211_HW_SPECTRUM_MGMT;
2319
2320 if (!priv->cfg->broken_powersave)
2321 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
2322 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
2323
2324 hw->sta_data_size = sizeof(struct iwl_station_priv);
2325 hw->wiphy->interface_modes =
2326 BIT(NL80211_IFTYPE_STATION) |
2327 BIT(NL80211_IFTYPE_ADHOC);
2328
2329 hw->wiphy->custom_regulatory = true;
2330
2331 /* Firmware does not support this */
2332 hw->wiphy->disable_beacon_hints = true;
2333
2334 /*
2335 * For now, disable PS by default because it affects
2336 * RX performance significantly.
2337 */
2338 hw->wiphy->ps_default = false;
2339
2340 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
2341 /* we create the 802.11 header and a zero-length SSID element */
2342 hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
2343
2344 /* Default value; 4 EDCA QOS priorities */
2345 hw->queues = 4;
2346
2347 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
2348
2349 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
2350 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
2351 &priv->bands[IEEE80211_BAND_2GHZ];
2352 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
2353 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
2354 &priv->bands[IEEE80211_BAND_5GHZ];
2355
2356 ret = ieee80211_register_hw(priv->hw);
2357 if (ret) {
2358 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
2359 return ret;
2360 }
2361 priv->mac80211_registered = 1;
2362
2363 return 0;
2364}
2365
2366
2283static int iwl_mac_start(struct ieee80211_hw *hw) 2367static int iwl_mac_start(struct ieee80211_hw *hw)
2284{ 2368{
2285 struct iwl_priv *priv = hw->priv; 2369 struct iwl_priv *priv = hw->priv;
@@ -2327,6 +2411,8 @@ static int iwl_mac_start(struct ieee80211_hw *hw)
2327 } 2411 }
2328 } 2412 }
2329 2413
2414 iwl_led_start(priv);
2415
2330out: 2416out:
2331 priv->is_open = 1; 2417 priv->is_open = 1;
2332 IWL_DEBUG_MAC80211(priv, "leave\n"); 2418 IWL_DEBUG_MAC80211(priv, "leave\n");
@@ -2434,7 +2520,7 @@ void iwl_config_ap(struct iwl_priv *priv)
2434 spin_lock_irqsave(&priv->lock, flags); 2520 spin_lock_irqsave(&priv->lock, flags);
2435 iwl_activate_qos(priv, 1); 2521 iwl_activate_qos(priv, 1);
2436 spin_unlock_irqrestore(&priv->lock, flags); 2522 spin_unlock_irqrestore(&priv->lock, flags);
2437 iwl_rxon_add_station(priv, iwl_bcast_addr, 0); 2523 iwl_add_bcast_station(priv);
2438 } 2524 }
2439 iwl_send_beacon_cmd(priv); 2525 iwl_send_beacon_cmd(priv);
2440 2526
@@ -2798,6 +2884,40 @@ static ssize_t show_statistics(struct device *d,
2798 2884
2799static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL); 2885static DEVICE_ATTR(statistics, S_IRUGO, show_statistics, NULL);
2800 2886
2887static ssize_t show_rts_ht_protection(struct device *d,
2888 struct device_attribute *attr, char *buf)
2889{
2890 struct iwl_priv *priv = dev_get_drvdata(d);
2891
2892 return sprintf(buf, "%s\n",
2893 priv->cfg->use_rts_for_ht ? "RTS/CTS" : "CTS-to-self");
2894}
2895
2896static ssize_t store_rts_ht_protection(struct device *d,
2897 struct device_attribute *attr,
2898 const char *buf, size_t count)
2899{
2900 struct iwl_priv *priv = dev_get_drvdata(d);
2901 unsigned long val;
2902 int ret;
2903
2904 ret = strict_strtoul(buf, 10, &val);
2905 if (ret)
2906 IWL_INFO(priv, "Input is not in decimal form.\n");
2907 else {
2908 if (!iwl_is_associated(priv))
2909 priv->cfg->use_rts_for_ht = val ? true : false;
2910 else
2911 IWL_ERR(priv, "Sta associated with AP - "
2912 "Change protection mechanism is not allowed\n");
2913 ret = count;
2914 }
2915 return ret;
2916}
2917
2918static DEVICE_ATTR(rts_ht_protection, S_IWUSR | S_IRUGO,
2919 show_rts_ht_protection, store_rts_ht_protection);
2920
2801 2921
2802/***************************************************************************** 2922/*****************************************************************************
2803 * 2923 *
@@ -2848,12 +2968,107 @@ static void iwl_cancel_deferred_work(struct iwl_priv *priv)
2848 del_timer_sync(&priv->statistics_periodic); 2968 del_timer_sync(&priv->statistics_periodic);
2849} 2969}
2850 2970
2971static void iwl_init_hw_rates(struct iwl_priv *priv,
2972 struct ieee80211_rate *rates)
2973{
2974 int i;
2975
2976 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
2977 rates[i].bitrate = iwl_rates[i].ieee * 5;
2978 rates[i].hw_value = i; /* Rate scaling will work on indexes */
2979 rates[i].hw_value_short = i;
2980 rates[i].flags = 0;
2981 if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
2982 /*
2983 * If CCK != 1M then set short preamble rate flag.
2984 */
2985 rates[i].flags |=
2986 (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
2987 0 : IEEE80211_RATE_SHORT_PREAMBLE;
2988 }
2989 }
2990}
2991
2992static int iwl_init_drv(struct iwl_priv *priv)
2993{
2994 int ret;
2995
2996 priv->ibss_beacon = NULL;
2997
2998 spin_lock_init(&priv->lock);
2999 spin_lock_init(&priv->sta_lock);
3000 spin_lock_init(&priv->hcmd_lock);
3001
3002 INIT_LIST_HEAD(&priv->free_frames);
3003
3004 mutex_init(&priv->mutex);
3005
3006 /* Clear the driver's (not device's) station table */
3007 iwl_clear_stations_table(priv);
3008
3009 priv->ieee_channels = NULL;
3010 priv->ieee_rates = NULL;
3011 priv->band = IEEE80211_BAND_2GHZ;
3012
3013 priv->iw_mode = NL80211_IFTYPE_STATION;
3014 if (priv->cfg->support_sm_ps)
3015 priv->current_ht_config.sm_ps = WLAN_HT_CAP_SM_PS_DYNAMIC;
3016 else
3017 priv->current_ht_config.sm_ps = WLAN_HT_CAP_SM_PS_DISABLED;
3018
3019 /* Choose which receivers/antennas to use */
3020 if (priv->cfg->ops->hcmd->set_rxon_chain)
3021 priv->cfg->ops->hcmd->set_rxon_chain(priv);
3022
3023 iwl_init_scan_params(priv);
3024
3025 iwl_reset_qos(priv);
3026
3027 priv->qos_data.qos_active = 0;
3028 priv->qos_data.qos_cap.val = 0;
3029
3030 priv->rates_mask = IWL_RATES_MASK;
3031 /* Set the tx_power_user_lmt to the lowest power level
3032 * this value will get overwritten by channel max power avg
3033 * from eeprom */
3034 priv->tx_power_user_lmt = IWL_TX_POWER_TARGET_POWER_MIN;
3035
3036 ret = iwl_init_channel_map(priv);
3037 if (ret) {
3038 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
3039 goto err;
3040 }
3041
3042 ret = iwlcore_init_geos(priv);
3043 if (ret) {
3044 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
3045 goto err_free_channel_map;
3046 }
3047 iwl_init_hw_rates(priv, priv->ieee_rates);
3048
3049 return 0;
3050
3051err_free_channel_map:
3052 iwl_free_channel_map(priv);
3053err:
3054 return ret;
3055}
3056
3057static void iwl_uninit_drv(struct iwl_priv *priv)
3058{
3059 iwl_calib_free_results(priv);
3060 iwlcore_free_geos(priv);
3061 iwl_free_channel_map(priv);
3062 kfree(priv->scan);
3063}
3064
2851static struct attribute *iwl_sysfs_entries[] = { 3065static struct attribute *iwl_sysfs_entries[] = {
2852 &dev_attr_flags.attr, 3066 &dev_attr_flags.attr,
2853 &dev_attr_filter_flags.attr, 3067 &dev_attr_filter_flags.attr,
2854 &dev_attr_statistics.attr, 3068 &dev_attr_statistics.attr,
2855 &dev_attr_temperature.attr, 3069 &dev_attr_temperature.attr,
2856 &dev_attr_tx_power.attr, 3070 &dev_attr_tx_power.attr,
3071 &dev_attr_rts_ht_protection.attr,
2857#ifdef CONFIG_IWLWIFI_DEBUG 3072#ifdef CONFIG_IWLWIFI_DEBUG
2858 &dev_attr_debug_level.attr, 3073 &dev_attr_debug_level.attr,
2859#endif 3074#endif
@@ -2989,12 +3204,6 @@ static int iwl_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
2989 goto out_iounmap; 3204 goto out_iounmap;
2990 } 3205 }
2991 3206
2992 /* amp init */
2993 err = priv->cfg->ops->lib->apm_ops.init(priv);
2994 if (err < 0) {
2995 IWL_ERR(priv, "Failed to init APMG\n");
2996 goto out_iounmap;
2997 }
2998 /***************** 3207 /*****************
2999 * 4. Read EEPROM 3208 * 4. Read EEPROM
3000 *****************/ 3209 *****************/
@@ -3140,6 +3349,15 @@ static void __devexit iwl_pci_remove(struct pci_dev *pdev)
3140 iwl_down(priv); 3349 iwl_down(priv);
3141 } 3350 }
3142 3351
3352 /*
3353 * Make sure device is reset to low power before unloading driver.
3354 * This may be redundant with iwl_down(), but there are paths to
3355 * run iwl_down() without calling apm_ops.stop(), and there are
3356 * paths to avoid running iwl_down() at all before leaving driver.
3357 * This (inexpensive) call *makes sure* device is reset.
3358 */
3359 priv->cfg->ops->lib->apm_ops.stop(priv);
3360
3143 iwl_tt_exit(priv); 3361 iwl_tt_exit(priv);
3144 3362
3145 /* make sure we flush any pending irq or 3363 /* make sure we flush any pending irq or
@@ -3219,20 +3437,51 @@ static struct pci_device_id iwl_hw_card_ids[] = {
3219/* 5150 Wifi/WiMax */ 3437/* 5150 Wifi/WiMax */
3220 {IWL_PCI_DEVICE(0x423C, PCI_ANY_ID, iwl5150_agn_cfg)}, 3438 {IWL_PCI_DEVICE(0x423C, PCI_ANY_ID, iwl5150_agn_cfg)},
3221 {IWL_PCI_DEVICE(0x423D, PCI_ANY_ID, iwl5150_agn_cfg)}, 3439 {IWL_PCI_DEVICE(0x423D, PCI_ANY_ID, iwl5150_agn_cfg)},
3222/* 6000/6050 Series */ 3440
3223 {IWL_PCI_DEVICE(0x008D, PCI_ANY_ID, iwl6000h_2agn_cfg)}, 3441/* 6x00 Series */
3224 {IWL_PCI_DEVICE(0x008E, PCI_ANY_ID, iwl6000h_2agn_cfg)}, 3442 {IWL_PCI_DEVICE(0x008D, 0x1301, iwl6000h_2agn_cfg)},
3225 {IWL_PCI_DEVICE(0x422B, PCI_ANY_ID, iwl6000_3agn_cfg)}, 3443 {IWL_PCI_DEVICE(0x008D, 0x1321, iwl6000h_2agn_cfg)},
3226 {IWL_PCI_DEVICE(0x422C, PCI_ANY_ID, iwl6000i_2agn_cfg)}, 3444 {IWL_PCI_DEVICE(0x008D, 0x1326, iwl6000h_2abg_cfg)},
3227 {IWL_PCI_DEVICE(0x4238, PCI_ANY_ID, iwl6000_3agn_cfg)}, 3445 {IWL_PCI_DEVICE(0x008D, 0x1306, iwl6000h_2abg_cfg)},
3228 {IWL_PCI_DEVICE(0x4239, PCI_ANY_ID, iwl6000i_2agn_cfg)}, 3446 {IWL_PCI_DEVICE(0x008D, 0x1307, iwl6000h_2bg_cfg)},
3229 {IWL_PCI_DEVICE(0x0086, PCI_ANY_ID, iwl6050_3agn_cfg)}, 3447 {IWL_PCI_DEVICE(0x008E, 0x1311, iwl6000h_2agn_cfg)},
3230 {IWL_PCI_DEVICE(0x0087, PCI_ANY_ID, iwl6050_2agn_cfg)}, 3448 {IWL_PCI_DEVICE(0x008E, 0x1316, iwl6000h_2abg_cfg)},
3231 {IWL_PCI_DEVICE(0x0088, PCI_ANY_ID, iwl6050_3agn_cfg)}, 3449
3232 {IWL_PCI_DEVICE(0x0089, PCI_ANY_ID, iwl6050_2agn_cfg)}, 3450 {IWL_PCI_DEVICE(0x422B, 0x1101, iwl6000_3agn_cfg)},
3451 {IWL_PCI_DEVICE(0x422B, 0x1121, iwl6000_3agn_cfg)},
3452 {IWL_PCI_DEVICE(0x422C, 0x1301, iwl6000i_2agn_cfg)},
3453 {IWL_PCI_DEVICE(0x422C, 0x1306, iwl6000i_2abg_cfg)},
3454 {IWL_PCI_DEVICE(0x422C, 0x1307, iwl6000i_2bg_cfg)},
3455 {IWL_PCI_DEVICE(0x422C, 0x1321, iwl6000i_2agn_cfg)},
3456 {IWL_PCI_DEVICE(0x422C, 0x1326, iwl6000i_2abg_cfg)},
3457 {IWL_PCI_DEVICE(0x4238, 0x1111, iwl6000_3agn_cfg)},
3458 {IWL_PCI_DEVICE(0x4239, 0x1311, iwl6000i_2agn_cfg)},
3459 {IWL_PCI_DEVICE(0x4239, 0x1316, iwl6000i_2abg_cfg)},
3460
3461/* 6x50 WiFi/WiMax Series */
3462 {IWL_PCI_DEVICE(0x0086, 0x1101, iwl6050_3agn_cfg)},
3463 {IWL_PCI_DEVICE(0x0086, 0x1121, iwl6050_3agn_cfg)},
3464 {IWL_PCI_DEVICE(0x0087, 0x1301, iwl6050_2agn_cfg)},
3465 {IWL_PCI_DEVICE(0x0087, 0x1306, iwl6050_2abg_cfg)},
3466 {IWL_PCI_DEVICE(0x0087, 0x1321, iwl6050_2agn_cfg)},
3467 {IWL_PCI_DEVICE(0x0087, 0x1326, iwl6050_2abg_cfg)},
3468 {IWL_PCI_DEVICE(0x0088, 0x1111, iwl6050_3agn_cfg)},
3469 {IWL_PCI_DEVICE(0x0089, 0x1311, iwl6050_2agn_cfg)},
3470 {IWL_PCI_DEVICE(0x0089, 0x1316, iwl6050_2abg_cfg)},
3471
3233/* 1000 Series WiFi */ 3472/* 1000 Series WiFi */
3234 {IWL_PCI_DEVICE(0x0083, PCI_ANY_ID, iwl1000_bgn_cfg)}, 3473 {IWL_PCI_DEVICE(0x0083, 0x1205, iwl1000_bgn_cfg)},
3235 {IWL_PCI_DEVICE(0x0084, PCI_ANY_ID, iwl1000_bgn_cfg)}, 3474 {IWL_PCI_DEVICE(0x0083, 0x1305, iwl1000_bgn_cfg)},
3475 {IWL_PCI_DEVICE(0x0083, 0x1225, iwl1000_bgn_cfg)},
3476 {IWL_PCI_DEVICE(0x0083, 0x1325, iwl1000_bgn_cfg)},
3477 {IWL_PCI_DEVICE(0x0084, 0x1215, iwl1000_bgn_cfg)},
3478 {IWL_PCI_DEVICE(0x0084, 0x1315, iwl1000_bgn_cfg)},
3479 {IWL_PCI_DEVICE(0x0083, 0x1206, iwl1000_bg_cfg)},
3480 {IWL_PCI_DEVICE(0x0083, 0x1306, iwl1000_bg_cfg)},
3481 {IWL_PCI_DEVICE(0x0083, 0x1226, iwl1000_bg_cfg)},
3482 {IWL_PCI_DEVICE(0x0083, 0x1326, iwl1000_bg_cfg)},
3483 {IWL_PCI_DEVICE(0x0084, 0x1216, iwl1000_bg_cfg)},
3484 {IWL_PCI_DEVICE(0x0084, 0x1316, iwl1000_bg_cfg)},
3236#endif /* CONFIG_IWL5000 */ 3485#endif /* CONFIG_IWL5000 */
3237 3486
3238 {0} 3487 {0}
@@ -3287,9 +3536,9 @@ module_exit(iwl_exit);
3287module_init(iwl_init); 3536module_init(iwl_init);
3288 3537
3289#ifdef CONFIG_IWLWIFI_DEBUG 3538#ifdef CONFIG_IWLWIFI_DEBUG
3290module_param_named(debug50, iwl_debug_level, uint, 0444); 3539module_param_named(debug50, iwl_debug_level, uint, S_IRUGO);
3291MODULE_PARM_DESC(debug50, "50XX debug output mask (deprecated)"); 3540MODULE_PARM_DESC(debug50, "50XX debug output mask (deprecated)");
3292module_param_named(debug, iwl_debug_level, uint, 0644); 3541module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
3293MODULE_PARM_DESC(debug, "debug output mask"); 3542MODULE_PARM_DESC(debug, "debug output mask");
3294#endif 3543#endif
3295 3544
diff --git a/drivers/net/wireless/iwlwifi/iwl-calib.c b/drivers/net/wireless/iwlwifi/iwl-calib.c
index c4b565a2de94..d994de7438d8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-calib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-calib.c
@@ -132,6 +132,7 @@ void iwl_calib_free_results(struct iwl_priv *priv)
132 priv->calib_results[i].buf_len = 0; 132 priv->calib_results[i].buf_len = 0;
133 } 133 }
134} 134}
135EXPORT_SYMBOL(iwl_calib_free_results);
135 136
136/***************************************************************************** 137/*****************************************************************************
137 * RUNTIME calibrations framework 138 * RUNTIME calibrations framework
@@ -447,11 +448,11 @@ static int iwl_sensitivity_write(struct iwl_priv *priv)
447 cpu_to_le16((u16)data->nrg_th_ofdm); 448 cpu_to_le16((u16)data->nrg_th_ofdm);
448 449
449 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] = 450 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_INDEX] =
450 cpu_to_le16(190); 451 cpu_to_le16(data->barker_corr_th_min);
451 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] = 452 cmd.table[HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX] =
452 cpu_to_le16(390); 453 cpu_to_le16(data->barker_corr_th_min_mrc);
453 cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] = 454 cmd.table[HD_OFDM_ENERGY_TH_IN_INDEX] =
454 cpu_to_le16(62); 455 cpu_to_le16(data->nrg_th_cca);
455 456
456 IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n", 457 IWL_DEBUG_CALIB(priv, "ofdm: ac %u mrc %u x1 %u mrc_x1 %u thresh %u\n",
457 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc, 458 data->auto_corr_ofdm, data->auto_corr_ofdm_mrc,
@@ -516,7 +517,7 @@ void iwl_init_sensitivity(struct iwl_priv *priv)
516 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++) 517 for (i = 0; i < NRG_NUM_PREV_STAT_L; i++)
517 data->nrg_silence_rssi[i] = 0; 518 data->nrg_silence_rssi[i] = 0;
518 519
519 data->auto_corr_ofdm = 90; 520 data->auto_corr_ofdm = ranges->auto_corr_min_ofdm;
520 data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc; 521 data->auto_corr_ofdm_mrc = ranges->auto_corr_min_ofdm_mrc;
521 data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1; 522 data->auto_corr_ofdm_x1 = ranges->auto_corr_min_ofdm_x1;
522 data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1; 523 data->auto_corr_ofdm_mrc_x1 = ranges->auto_corr_min_ofdm_mrc_x1;
@@ -524,6 +525,9 @@ void iwl_init_sensitivity(struct iwl_priv *priv)
524 data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc; 525 data->auto_corr_cck_mrc = ranges->auto_corr_min_cck_mrc;
525 data->nrg_th_cck = ranges->nrg_th_cck; 526 data->nrg_th_cck = ranges->nrg_th_cck;
526 data->nrg_th_ofdm = ranges->nrg_th_ofdm; 527 data->nrg_th_ofdm = ranges->nrg_th_ofdm;
528 data->barker_corr_th_min = ranges->barker_corr_th_min;
529 data->barker_corr_th_min_mrc = ranges->barker_corr_th_min_mrc;
530 data->nrg_th_cca = ranges->nrg_th_cca;
527 531
528 data->last_bad_plcp_cnt_ofdm = 0; 532 data->last_bad_plcp_cnt_ofdm = 0;
529 data->last_fa_cnt_ofdm = 0; 533 data->last_fa_cnt_ofdm = 0;
@@ -643,6 +647,15 @@ void iwl_sensitivity_calibration(struct iwl_priv *priv,
643} 647}
644EXPORT_SYMBOL(iwl_sensitivity_calibration); 648EXPORT_SYMBOL(iwl_sensitivity_calibration);
645 649
650static inline u8 find_first_chain(u8 mask)
651{
652 if (mask & ANT_A)
653 return CHAIN_A;
654 if (mask & ANT_B)
655 return CHAIN_B;
656 return CHAIN_C;
657}
658
646/* 659/*
647 * Accumulate 20 beacons of signal and noise statistics for each of 660 * Accumulate 20 beacons of signal and noise statistics for each of
648 * 3 receivers/antennas/rx-chains, then figure out: 661 * 3 receivers/antennas/rx-chains, then figure out:
@@ -675,14 +688,17 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
675 u8 num_tx_chains; 688 u8 num_tx_chains;
676 unsigned long flags; 689 unsigned long flags;
677 struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general); 690 struct statistics_rx_non_phy *rx_info = &(stat_resp->rx.general);
691 u8 first_chain;
678 692
679 if (priv->disable_chain_noise_cal) 693 if (priv->disable_chain_noise_cal)
680 return; 694 return;
681 695
682 data = &(priv->chain_noise_data); 696 data = &(priv->chain_noise_data);
683 697
684 /* Accumulate just the first 20 beacons after the first association, 698 /*
685 * then we're done forever. */ 699 * Accumulate just the first "chain_noise_num_beacons" after
700 * the first association, then we're done forever.
701 */
686 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) { 702 if (data->state != IWL_CHAIN_NOISE_ACCUMULATE) {
687 if (data->state == IWL_CHAIN_NOISE_ALIVE) 703 if (data->state == IWL_CHAIN_NOISE_ALIVE)
688 IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n"); 704 IWL_DEBUG_CALIB(priv, "Wait for noise calib reset\n");
@@ -710,7 +726,10 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
710 return; 726 return;
711 } 727 }
712 728
713 /* Accumulate beacon statistics values across 20 beacons */ 729 /*
730 * Accumulate beacon statistics values across
731 * "chain_noise_num_beacons"
732 */
714 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) & 733 chain_noise_a = le32_to_cpu(rx_info->beacon_silence_rssi_a) &
715 IN_BAND_FILTER; 734 IN_BAND_FILTER;
716 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) & 735 chain_noise_b = le32_to_cpu(rx_info->beacon_silence_rssi_b) &
@@ -741,16 +760,19 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
741 IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n", 760 IWL_DEBUG_CALIB(priv, "chain_noise: a %d b %d c %d\n",
742 chain_noise_a, chain_noise_b, chain_noise_c); 761 chain_noise_a, chain_noise_b, chain_noise_c);
743 762
744 /* If this is the 20th beacon, determine: 763 /* If this is the "chain_noise_num_beacons", determine:
745 * 1) Disconnected antennas (using signal strengths) 764 * 1) Disconnected antennas (using signal strengths)
746 * 2) Differential gain (using silence noise) to balance receivers */ 765 * 2) Differential gain (using silence noise) to balance receivers */
747 if (data->beacon_count != CAL_NUM_OF_BEACONS) 766 if (data->beacon_count != priv->cfg->chain_noise_num_beacons)
748 return; 767 return;
749 768
750 /* Analyze signal for disconnected antenna */ 769 /* Analyze signal for disconnected antenna */
751 average_sig[0] = (data->chain_signal_a) / CAL_NUM_OF_BEACONS; 770 average_sig[0] =
752 average_sig[1] = (data->chain_signal_b) / CAL_NUM_OF_BEACONS; 771 (data->chain_signal_a) / priv->cfg->chain_noise_num_beacons;
753 average_sig[2] = (data->chain_signal_c) / CAL_NUM_OF_BEACONS; 772 average_sig[1] =
773 (data->chain_signal_b) / priv->cfg->chain_noise_num_beacons;
774 average_sig[2] =
775 (data->chain_signal_c) / priv->cfg->chain_noise_num_beacons;
754 776
755 if (average_sig[0] >= average_sig[1]) { 777 if (average_sig[0] >= average_sig[1]) {
756 max_average_sig = average_sig[0]; 778 max_average_sig = average_sig[0];
@@ -803,13 +825,17 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
803 /* there is a Tx antenna connected */ 825 /* there is a Tx antenna connected */
804 break; 826 break;
805 if (num_tx_chains == priv->hw_params.tx_chains_num && 827 if (num_tx_chains == priv->hw_params.tx_chains_num &&
806 data->disconn_array[i]) { 828 data->disconn_array[i]) {
807 /* This is the last TX antenna and is also 829 /*
808 * disconnected connect it anyway */ 830 * If all chains are disconnected
809 data->disconn_array[i] = 0; 831 * connect the first valid tx chain
810 active_chains |= ant_msk; 832 */
811 IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected W/A - " 833 first_chain =
812 "declare %d as connected\n", i); 834 find_first_chain(priv->cfg->valid_tx_ant);
835 data->disconn_array[first_chain] = 0;
836 active_chains |= BIT(first_chain);
837 IWL_DEBUG_CALIB(priv, "All Tx chains are disconnected W/A - declare %d as connected\n",
838 first_chain);
813 break; 839 break;
814 } 840 }
815 } 841 }
@@ -820,9 +846,12 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
820 active_chains); 846 active_chains);
821 847
822 /* Analyze noise for rx balance */ 848 /* Analyze noise for rx balance */
823 average_noise[0] = ((data->chain_noise_a)/CAL_NUM_OF_BEACONS); 849 average_noise[0] =
824 average_noise[1] = ((data->chain_noise_b)/CAL_NUM_OF_BEACONS); 850 ((data->chain_noise_a) / priv->cfg->chain_noise_num_beacons);
825 average_noise[2] = ((data->chain_noise_c)/CAL_NUM_OF_BEACONS); 851 average_noise[1] =
852 ((data->chain_noise_b) / priv->cfg->chain_noise_num_beacons);
853 average_noise[2] =
854 ((data->chain_noise_c) / priv->cfg->chain_noise_num_beacons);
826 855
827 for (i = 0; i < NUM_RX_CHAINS; i++) { 856 for (i = 0; i < NUM_RX_CHAINS; i++) {
828 if (!(data->disconn_array[i]) && 857 if (!(data->disconn_array[i]) &&
@@ -843,7 +872,8 @@ void iwl_chain_noise_calibration(struct iwl_priv *priv,
843 872
844 if (priv->cfg->ops->utils->gain_computation) 873 if (priv->cfg->ops->utils->gain_computation)
845 priv->cfg->ops->utils->gain_computation(priv, average_noise, 874 priv->cfg->ops->utils->gain_computation(priv, average_noise,
846 min_average_noise_antenna_i, min_average_noise); 875 min_average_noise_antenna_i, min_average_noise,
876 find_first_chain(priv->cfg->valid_rx_ant));
847 877
848 /* Some power changes may have been made during the calibration. 878 /* Some power changes may have been made during the calibration.
849 * Update and commit the RXON 879 * Update and commit the RXON
diff --git a/drivers/net/wireless/iwlwifi/iwl-commands.h b/drivers/net/wireless/iwlwifi/iwl-commands.h
index 4afaf773aeac..b62c90ec9e1e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-commands.h
+++ b/drivers/net/wireless/iwlwifi/iwl-commands.h
@@ -109,11 +109,12 @@ enum {
109 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */ 109 REPLY_TX_LINK_QUALITY_CMD = 0x4e, /* 4965 only */
110 110
111 /* WiMAX coexistence */ 111 /* WiMAX coexistence */
112 COEX_PRIORITY_TABLE_CMD = 0x5a, /*5000 only */ 112 COEX_PRIORITY_TABLE_CMD = 0x5a, /* for 5000 series and up */
113 COEX_MEDIUM_NOTIFICATION = 0x5b, 113 COEX_MEDIUM_NOTIFICATION = 0x5b,
114 COEX_EVENT_CMD = 0x5c, 114 COEX_EVENT_CMD = 0x5c,
115 115
116 /* Calibration */ 116 /* Calibration */
117 TEMPERATURE_NOTIFICATION = 0x62,
117 CALIBRATION_CFG_CMD = 0x65, 118 CALIBRATION_CFG_CMD = 0x65,
118 CALIBRATION_RES_NOTIFICATION = 0x66, 119 CALIBRATION_RES_NOTIFICATION = 0x66,
119 CALIBRATION_COMPLETE_NOTIFICATION = 0x67, 120 CALIBRATION_COMPLETE_NOTIFICATION = 0x67,
@@ -148,7 +149,7 @@ enum {
148 QUIET_NOTIFICATION = 0x96, /* not used */ 149 QUIET_NOTIFICATION = 0x96, /* not used */
149 REPLY_TX_PWR_TABLE_CMD = 0x97, 150 REPLY_TX_PWR_TABLE_CMD = 0x97,
150 REPLY_TX_POWER_DBM_CMD_V1 = 0x98, /* old version of API */ 151 REPLY_TX_POWER_DBM_CMD_V1 = 0x98, /* old version of API */
151 TX_ANT_CONFIGURATION_CMD = 0x98, /* not used */ 152 TX_ANT_CONFIGURATION_CMD = 0x98,
152 MEASURE_ABORT_NOTIFICATION = 0x99, /* not used */ 153 MEASURE_ABORT_NOTIFICATION = 0x99, /* not used */
153 154
154 /* Bluetooth device coexistence config command */ 155 /* Bluetooth device coexistence config command */
@@ -353,6 +354,9 @@ struct iwl3945_power_per_rate {
353#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32 354#define POWER_TABLE_NUM_HT_OFDM_ENTRIES 32
354#define POWER_TABLE_CCK_ENTRY 32 355#define POWER_TABLE_CCK_ENTRY 32
355 356
357#define IWL_PWR_NUM_HT_OFDM_ENTRIES 24
358#define IWL_PWR_CCK_ENTRIES 2
359
356/** 360/**
357 * union iwl4965_tx_power_dual_stream 361 * union iwl4965_tx_power_dual_stream
358 * 362 *
@@ -411,6 +415,16 @@ struct iwl5000_tx_power_dbm_cmd {
411 u8 reserved; 415 u8 reserved;
412} __attribute__ ((packed)); 416} __attribute__ ((packed));
413 417
418/**
419 * Command TX_ANT_CONFIGURATION_CMD = 0x98
420 * This command is used to configure valid Tx antenna.
421 * By default uCode concludes the valid antenna according to the radio flavor.
422 * This command enables the driver to override/modify this conclusion.
423 */
424struct iwl_tx_ant_config_cmd {
425 __le32 valid;
426} __attribute__ ((packed));
427
414/****************************************************************************** 428/******************************************************************************
415 * (0a) 429 * (0a)
416 * Alive and Error Commands & Responses: 430 * Alive and Error Commands & Responses:
@@ -793,7 +807,7 @@ struct iwl3945_channel_switch_cmd {
793 struct iwl3945_power_per_rate power[IWL_MAX_RATES]; 807 struct iwl3945_power_per_rate power[IWL_MAX_RATES];
794} __attribute__ ((packed)); 808} __attribute__ ((packed));
795 809
796struct iwl_channel_switch_cmd { 810struct iwl4965_channel_switch_cmd {
797 u8 band; 811 u8 band;
798 u8 expect_beacon; 812 u8 expect_beacon;
799 __le16 channel; 813 __le16 channel;
@@ -803,6 +817,48 @@ struct iwl_channel_switch_cmd {
803 struct iwl4965_tx_power_db tx_power; 817 struct iwl4965_tx_power_db tx_power;
804} __attribute__ ((packed)); 818} __attribute__ ((packed));
805 819
820/**
821 * struct iwl5000_channel_switch_cmd
822 * @band: 0- 5.2GHz, 1- 2.4GHz
823 * @expect_beacon: 0- resume transmits after channel switch
824 * 1- wait for beacon to resume transmits
825 * @channel: new channel number
826 * @rxon_flags: Rx on flags
827 * @rxon_filter_flags: filtering parameters
828 * @switch_time: switch time in extended beacon format
829 * @reserved: reserved bytes
830 */
831struct iwl5000_channel_switch_cmd {
832 u8 band;
833 u8 expect_beacon;
834 __le16 channel;
835 __le32 rxon_flags;
836 __le32 rxon_filter_flags;
837 __le32 switch_time;
838 __le32 reserved[2][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
839} __attribute__ ((packed));
840
841/**
842 * struct iwl6000_channel_switch_cmd
843 * @band: 0- 5.2GHz, 1- 2.4GHz
844 * @expect_beacon: 0- resume transmits after channel switch
845 * 1- wait for beacon to resume transmits
846 * @channel: new channel number
847 * @rxon_flags: Rx on flags
848 * @rxon_filter_flags: filtering parameters
849 * @switch_time: switch time in extended beacon format
850 * @reserved: reserved bytes
851 */
852struct iwl6000_channel_switch_cmd {
853 u8 band;
854 u8 expect_beacon;
855 __le16 channel;
856 __le32 rxon_flags;
857 __le32 rxon_filter_flags;
858 __le32 switch_time;
859 __le32 reserved[3][IWL_PWR_NUM_HT_OFDM_ENTRIES + IWL_PWR_CCK_ENTRIES];
860} __attribute__ ((packed));
861
806/* 862/*
807 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command) 863 * CHANNEL_SWITCH_NOTIFICATION = 0x73 (notification only, not a command)
808 */ 864 */
@@ -2162,6 +2218,19 @@ struct iwl_link_quality_cmd {
2162 __le32 reserved2; 2218 __le32 reserved2;
2163} __attribute__ ((packed)); 2219} __attribute__ ((packed));
2164 2220
2221#define BT_COEX_DISABLE (0x0)
2222#define BT_COEX_MODE_2W (0x1)
2223#define BT_COEX_MODE_3W (0x2)
2224#define BT_COEX_MODE_4W (0x3)
2225
2226#define BT_LEAD_TIME_MIN (0x0)
2227#define BT_LEAD_TIME_DEF (0x1E)
2228#define BT_LEAD_TIME_MAX (0xFF)
2229
2230#define BT_MAX_KILL_MIN (0x1)
2231#define BT_MAX_KILL_DEF (0x5)
2232#define BT_MAX_KILL_MAX (0xFF)
2233
2165/* 2234/*
2166 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response) 2235 * REPLY_BT_CONFIG = 0x9b (command, has simple generic response)
2167 * 2236 *
@@ -3237,12 +3306,6 @@ struct iwl_missed_beacon_notif {
3237 * Lower values mean higher energy; this means making sure that the value 3306 * Lower values mean higher energy; this means making sure that the value
3238 * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy". 3307 * in HD_MIN_ENERGY_CCK_DET_INDEX is at or *above* "Max cck energy".
3239 * 3308 *
3240 * Driver should set the following entries to fixed values:
3241 *
3242 * HD_MIN_ENERGY_OFDM_DET_INDEX 100
3243 * HD_BARKER_CORR_TH_ADD_MIN_INDEX 190
3244 * HD_BARKER_CORR_TH_ADD_MIN_MRC_INDEX 390
3245 * HD_OFDM_ENERGY_TH_IN_INDEX 62
3246 */ 3309 */
3247 3310
3248/* 3311/*
@@ -3440,30 +3503,134 @@ struct iwl_led_cmd {
3440} __attribute__ ((packed)); 3503} __attribute__ ((packed));
3441 3504
3442/* 3505/*
3443 * Coexistence WIFI/WIMAX Command 3506 * station priority table entries
3444 * COEX_PRIORITY_TABLE_CMD = 0x5a 3507 * also used as potential "events" value for both
3445 * 3508 * COEX_MEDIUM_NOTIFICATION and COEX_EVENT_CMD
3509 */
3510
3511/*
3512 * COEX events entry flag masks
3513 * RP - Requested Priority
3514 * WP - Win Medium Priority: priority assigned when the contention has been won
3515 */
3516#define COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG (0x1)
3517#define COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG (0x2)
3518#define COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG (0x4)
3519
3520#define COEX_CU_UNASSOC_IDLE_RP 4
3521#define COEX_CU_UNASSOC_MANUAL_SCAN_RP 4
3522#define COEX_CU_UNASSOC_AUTO_SCAN_RP 4
3523#define COEX_CU_CALIBRATION_RP 4
3524#define COEX_CU_PERIODIC_CALIBRATION_RP 4
3525#define COEX_CU_CONNECTION_ESTAB_RP 4
3526#define COEX_CU_ASSOCIATED_IDLE_RP 4
3527#define COEX_CU_ASSOC_MANUAL_SCAN_RP 4
3528#define COEX_CU_ASSOC_AUTO_SCAN_RP 4
3529#define COEX_CU_ASSOC_ACTIVE_LEVEL_RP 4
3530#define COEX_CU_RF_ON_RP 6
3531#define COEX_CU_RF_OFF_RP 4
3532#define COEX_CU_STAND_ALONE_DEBUG_RP 6
3533#define COEX_CU_IPAN_ASSOC_LEVEL_RP 4
3534#define COEX_CU_RSRVD1_RP 4
3535#define COEX_CU_RSRVD2_RP 4
3536
3537#define COEX_CU_UNASSOC_IDLE_WP 3
3538#define COEX_CU_UNASSOC_MANUAL_SCAN_WP 3
3539#define COEX_CU_UNASSOC_AUTO_SCAN_WP 3
3540#define COEX_CU_CALIBRATION_WP 3
3541#define COEX_CU_PERIODIC_CALIBRATION_WP 3
3542#define COEX_CU_CONNECTION_ESTAB_WP 3
3543#define COEX_CU_ASSOCIATED_IDLE_WP 3
3544#define COEX_CU_ASSOC_MANUAL_SCAN_WP 3
3545#define COEX_CU_ASSOC_AUTO_SCAN_WP 3
3546#define COEX_CU_ASSOC_ACTIVE_LEVEL_WP 3
3547#define COEX_CU_RF_ON_WP 3
3548#define COEX_CU_RF_OFF_WP 3
3549#define COEX_CU_STAND_ALONE_DEBUG_WP 6
3550#define COEX_CU_IPAN_ASSOC_LEVEL_WP 3
3551#define COEX_CU_RSRVD1_WP 3
3552#define COEX_CU_RSRVD2_WP 3
3553
3554#define COEX_UNASSOC_IDLE_FLAGS 0
3555#define COEX_UNASSOC_MANUAL_SCAN_FLAGS \
3556 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3557 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3558#define COEX_UNASSOC_AUTO_SCAN_FLAGS \
3559 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3560 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3561#define COEX_CALIBRATION_FLAGS \
3562 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3563 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3564#define COEX_PERIODIC_CALIBRATION_FLAGS 0
3565/*
3566 * COEX_CONNECTION_ESTAB:
3567 * we need DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from network.
3568 */
3569#define COEX_CONNECTION_ESTAB_FLAGS \
3570 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3571 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG | \
3572 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG)
3573#define COEX_ASSOCIATED_IDLE_FLAGS 0
3574#define COEX_ASSOC_MANUAL_SCAN_FLAGS \
3575 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3576 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3577#define COEX_ASSOC_AUTO_SCAN_FLAGS \
3578 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3579 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3580#define COEX_ASSOC_ACTIVE_LEVEL_FLAGS 0
3581#define COEX_RF_ON_FLAGS 0
3582#define COEX_RF_OFF_FLAGS 0
3583#define COEX_STAND_ALONE_DEBUG_FLAGS \
3584 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3585 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG)
3586#define COEX_IPAN_ASSOC_LEVEL_FLAGS \
3587 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3588 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG | \
3589 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG)
3590#define COEX_RSRVD1_FLAGS 0
3591#define COEX_RSRVD2_FLAGS 0
3592/*
3593 * COEX_CU_RF_ON is the event wrapping all radio ownership.
3594 * We need DELAY_MEDIUM_FREE_NTFY to let WiMAX disconnect from network.
3446 */ 3595 */
3596#define COEX_CU_RF_ON_FLAGS \
3597 (COEX_EVT_FLAG_MEDIUM_FREE_NTFY_FLG | \
3598 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_FLG | \
3599 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_FLG)
3600
3601
3447enum { 3602enum {
3603 /* un-association part */
3448 COEX_UNASSOC_IDLE = 0, 3604 COEX_UNASSOC_IDLE = 0,
3449 COEX_UNASSOC_MANUAL_SCAN = 1, 3605 COEX_UNASSOC_MANUAL_SCAN = 1,
3450 COEX_UNASSOC_AUTO_SCAN = 2, 3606 COEX_UNASSOC_AUTO_SCAN = 2,
3607 /* calibration */
3451 COEX_CALIBRATION = 3, 3608 COEX_CALIBRATION = 3,
3452 COEX_PERIODIC_CALIBRATION = 4, 3609 COEX_PERIODIC_CALIBRATION = 4,
3610 /* connection */
3453 COEX_CONNECTION_ESTAB = 5, 3611 COEX_CONNECTION_ESTAB = 5,
3612 /* association part */
3454 COEX_ASSOCIATED_IDLE = 6, 3613 COEX_ASSOCIATED_IDLE = 6,
3455 COEX_ASSOC_MANUAL_SCAN = 7, 3614 COEX_ASSOC_MANUAL_SCAN = 7,
3456 COEX_ASSOC_AUTO_SCAN = 8, 3615 COEX_ASSOC_AUTO_SCAN = 8,
3457 COEX_ASSOC_ACTIVE_LEVEL = 9, 3616 COEX_ASSOC_ACTIVE_LEVEL = 9,
3617 /* RF ON/OFF */
3458 COEX_RF_ON = 10, 3618 COEX_RF_ON = 10,
3459 COEX_RF_OFF = 11, 3619 COEX_RF_OFF = 11,
3460 COEX_STAND_ALONE_DEBUG = 12, 3620 COEX_STAND_ALONE_DEBUG = 12,
3621 /* IPAN */
3461 COEX_IPAN_ASSOC_LEVEL = 13, 3622 COEX_IPAN_ASSOC_LEVEL = 13,
3623 /* reserved */
3462 COEX_RSRVD1 = 14, 3624 COEX_RSRVD1 = 14,
3463 COEX_RSRVD2 = 15, 3625 COEX_RSRVD2 = 15,
3464 COEX_NUM_OF_EVENTS = 16 3626 COEX_NUM_OF_EVENTS = 16
3465}; 3627};
3466 3628
3629/*
3630 * Coexistence WIFI/WIMAX Command
3631 * COEX_PRIORITY_TABLE_CMD = 0x5a
3632 *
3633 */
3467struct iwl_wimax_coex_event_entry { 3634struct iwl_wimax_coex_event_entry {
3468 u8 request_prio; 3635 u8 request_prio;
3469 u8 win_medium_prio; 3636 u8 win_medium_prio;
@@ -3488,6 +3655,55 @@ struct iwl_wimax_coex_cmd {
3488 struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS]; 3655 struct iwl_wimax_coex_event_entry sta_prio[COEX_NUM_OF_EVENTS];
3489} __attribute__ ((packed)); 3656} __attribute__ ((packed));
3490 3657
3658/*
3659 * Coexistence MEDIUM NOTIFICATION
3660 * COEX_MEDIUM_NOTIFICATION = 0x5b
3661 *
3662 * notification from uCode to host to indicate medium changes
3663 *
3664 */
3665/*
3666 * status field
3667 * bit 0 - 2: medium status
3668 * bit 3: medium change indication
3669 * bit 4 - 31: reserved
3670 */
3671/* status option values, (0 - 2 bits) */
3672#define COEX_MEDIUM_BUSY (0x0) /* radio belongs to WiMAX */
3673#define COEX_MEDIUM_ACTIVE (0x1) /* radio belongs to WiFi */
3674#define COEX_MEDIUM_PRE_RELEASE (0x2) /* received radio release */
3675#define COEX_MEDIUM_MSK (0x7)
3676
3677/* send notification status (1 bit) */
3678#define COEX_MEDIUM_CHANGED (0x8)
3679#define COEX_MEDIUM_CHANGED_MSK (0x8)
3680#define COEX_MEDIUM_SHIFT (3)
3681
3682struct iwl_coex_medium_notification {
3683 __le32 status;
3684 __le32 events;
3685} __attribute__ ((packed));
3686
3687/*
3688 * Coexistence EVENT Command
3689 * COEX_EVENT_CMD = 0x5c
3690 *
3691 * send from host to uCode for coex event request.
3692 */
3693/* flags options */
3694#define COEX_EVENT_REQUEST_MSK (0x1)
3695
3696struct iwl_coex_event_cmd {
3697 u8 flags;
3698 u8 event;
3699 __le16 reserved;
3700} __attribute__ ((packed));
3701
3702struct iwl_coex_event_resp {
3703 __le32 status;
3704} __attribute__ ((packed));
3705
3706
3491/****************************************************************************** 3707/******************************************************************************
3492 * (13) 3708 * (13)
3493 * Union of all expected notifications/responses: 3709 * Union of all expected notifications/responses:
@@ -3495,6 +3711,16 @@ struct iwl_wimax_coex_cmd {
3495 *****************************************************************************/ 3711 *****************************************************************************/
3496 3712
3497struct iwl_rx_packet { 3713struct iwl_rx_packet {
3714 /*
3715 * The first 4 bytes of the RX frame header contain both the RX frame
3716 * size and some flags.
3717 * Bit fields:
3718 * 31: flag flush RB request
3719 * 30: flag ignore TC (terminal counter) request
3720 * 29: flag fast IRQ request
3721 * 28-14: Reserved
3722 * 13-00: RX frame size
3723 */
3498 __le32 len_n_flags; 3724 __le32 len_n_flags;
3499 struct iwl_cmd_header hdr; 3725 struct iwl_cmd_header hdr;
3500 union { 3726 union {
@@ -3514,6 +3740,8 @@ struct iwl_rx_packet {
3514 struct iwl_notif_statistics stats; 3740 struct iwl_notif_statistics stats;
3515 struct iwl_compressed_ba_resp compressed_ba; 3741 struct iwl_compressed_ba_resp compressed_ba;
3516 struct iwl_missed_beacon_notif missed_beacon; 3742 struct iwl_missed_beacon_notif missed_beacon;
3743 struct iwl_coex_medium_notification coex_medium_notif;
3744 struct iwl_coex_event_resp coex_event;
3517 __le32 status; 3745 __le32 status;
3518 u8 raw[0]; 3746 u8 raw[0];
3519 } u; 3747 } u;
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.c b/drivers/net/wireless/iwlwifi/iwl-core.c
index 484d5c1a7312..e0b5b4aef41d 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.c
+++ b/drivers/net/wireless/iwlwifi/iwl-core.c
@@ -46,6 +46,37 @@ MODULE_VERSION(IWLWIFI_VERSION);
46MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR); 46MODULE_AUTHOR(DRV_COPYRIGHT " " DRV_AUTHOR);
47MODULE_LICENSE("GPL"); 47MODULE_LICENSE("GPL");
48 48
49static struct iwl_wimax_coex_event_entry cu_priorities[COEX_NUM_OF_EVENTS] = {
50 {COEX_CU_UNASSOC_IDLE_RP, COEX_CU_UNASSOC_IDLE_WP,
51 0, COEX_UNASSOC_IDLE_FLAGS},
52 {COEX_CU_UNASSOC_MANUAL_SCAN_RP, COEX_CU_UNASSOC_MANUAL_SCAN_WP,
53 0, COEX_UNASSOC_MANUAL_SCAN_FLAGS},
54 {COEX_CU_UNASSOC_AUTO_SCAN_RP, COEX_CU_UNASSOC_AUTO_SCAN_WP,
55 0, COEX_UNASSOC_AUTO_SCAN_FLAGS},
56 {COEX_CU_CALIBRATION_RP, COEX_CU_CALIBRATION_WP,
57 0, COEX_CALIBRATION_FLAGS},
58 {COEX_CU_PERIODIC_CALIBRATION_RP, COEX_CU_PERIODIC_CALIBRATION_WP,
59 0, COEX_PERIODIC_CALIBRATION_FLAGS},
60 {COEX_CU_CONNECTION_ESTAB_RP, COEX_CU_CONNECTION_ESTAB_WP,
61 0, COEX_CONNECTION_ESTAB_FLAGS},
62 {COEX_CU_ASSOCIATED_IDLE_RP, COEX_CU_ASSOCIATED_IDLE_WP,
63 0, COEX_ASSOCIATED_IDLE_FLAGS},
64 {COEX_CU_ASSOC_MANUAL_SCAN_RP, COEX_CU_ASSOC_MANUAL_SCAN_WP,
65 0, COEX_ASSOC_MANUAL_SCAN_FLAGS},
66 {COEX_CU_ASSOC_AUTO_SCAN_RP, COEX_CU_ASSOC_AUTO_SCAN_WP,
67 0, COEX_ASSOC_AUTO_SCAN_FLAGS},
68 {COEX_CU_ASSOC_ACTIVE_LEVEL_RP, COEX_CU_ASSOC_ACTIVE_LEVEL_WP,
69 0, COEX_ASSOC_ACTIVE_LEVEL_FLAGS},
70 {COEX_CU_RF_ON_RP, COEX_CU_RF_ON_WP, 0, COEX_CU_RF_ON_FLAGS},
71 {COEX_CU_RF_OFF_RP, COEX_CU_RF_OFF_WP, 0, COEX_RF_OFF_FLAGS},
72 {COEX_CU_STAND_ALONE_DEBUG_RP, COEX_CU_STAND_ALONE_DEBUG_WP,
73 0, COEX_STAND_ALONE_DEBUG_FLAGS},
74 {COEX_CU_IPAN_ASSOC_LEVEL_RP, COEX_CU_IPAN_ASSOC_LEVEL_WP,
75 0, COEX_IPAN_ASSOC_LEVEL_FLAGS},
76 {COEX_CU_RSRVD1_RP, COEX_CU_RSRVD1_WP, 0, COEX_RSRVD1_FLAGS},
77 {COEX_CU_RSRVD2_RP, COEX_CU_RSRVD2_WP, 0, COEX_RSRVD2_FLAGS}
78};
79
49#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \ 80#define IWL_DECLARE_RATE_INFO(r, s, ip, in, rp, rn, pp, np) \
50 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \ 81 [IWL_RATE_##r##M_INDEX] = { IWL_RATE_##r##M_PLCP, \
51 IWL_RATE_SISO_##s##M_PLCP, \ 82 IWL_RATE_SISO_##s##M_PLCP, \
@@ -414,8 +445,12 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
414 if (priv->cfg->ht_greenfield_support) 445 if (priv->cfg->ht_greenfield_support)
415 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD; 446 ht_info->cap |= IEEE80211_HT_CAP_GRN_FLD;
416 ht_info->cap |= IEEE80211_HT_CAP_SGI_20; 447 ht_info->cap |= IEEE80211_HT_CAP_SGI_20;
417 ht_info->cap |= (IEEE80211_HT_CAP_SM_PS & 448 if (priv->cfg->support_sm_ps)
418 (WLAN_HT_CAP_SM_PS_DISABLED << 2)); 449 ht_info->cap |= (IEEE80211_HT_CAP_SM_PS &
450 (WLAN_HT_CAP_SM_PS_DYNAMIC << 2));
451 else
452 ht_info->cap |= (IEEE80211_HT_CAP_SM_PS &
453 (WLAN_HT_CAP_SM_PS_DISABLED << 2));
419 454
420 max_bit_rate = MAX_BIT_RATE_20_MHZ; 455 max_bit_rate = MAX_BIT_RATE_20_MHZ;
421 if (priv->hw_params.ht40_channel & BIT(band)) { 456 if (priv->hw_params.ht40_channel & BIT(band)) {
@@ -451,28 +486,6 @@ static void iwlcore_init_ht_hw_capab(const struct iwl_priv *priv,
451 } 486 }
452} 487}
453 488
454static void iwlcore_init_hw_rates(struct iwl_priv *priv,
455 struct ieee80211_rate *rates)
456{
457 int i;
458
459 for (i = 0; i < IWL_RATE_COUNT_LEGACY; i++) {
460 rates[i].bitrate = iwl_rates[i].ieee * 5;
461 rates[i].hw_value = i; /* Rate scaling will work on indexes */
462 rates[i].hw_value_short = i;
463 rates[i].flags = 0;
464 if ((i >= IWL_FIRST_CCK_RATE) && (i <= IWL_LAST_CCK_RATE)) {
465 /*
466 * If CCK != 1M then set short preamble rate flag.
467 */
468 rates[i].flags |=
469 (iwl_rates[i].plcp == IWL_RATE_1M_PLCP) ?
470 0 : IEEE80211_RATE_SHORT_PREAMBLE;
471 }
472 }
473}
474
475
476/** 489/**
477 * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom 490 * iwlcore_init_geos - Initialize mac80211's geo/channel info based from eeprom
478 */ 491 */
@@ -604,11 +617,27 @@ void iwlcore_free_geos(struct iwl_priv *priv)
604} 617}
605EXPORT_SYMBOL(iwlcore_free_geos); 618EXPORT_SYMBOL(iwlcore_free_geos);
606 619
620/*
621 * iwlcore_rts_tx_cmd_flag: Set rts/cts. 3945 and 4965 only share this
622 * function.
623 */
624void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
625 __le32 *tx_flags)
626{
627 if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_RTS_CTS) {
628 *tx_flags |= TX_CMD_FLG_RTS_MSK;
629 *tx_flags &= ~TX_CMD_FLG_CTS_MSK;
630 } else if (info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
631 *tx_flags &= ~TX_CMD_FLG_RTS_MSK;
632 *tx_flags |= TX_CMD_FLG_CTS_MSK;
633 }
634}
635EXPORT_SYMBOL(iwlcore_rts_tx_cmd_flag);
636
607static bool is_single_rx_stream(struct iwl_priv *priv) 637static bool is_single_rx_stream(struct iwl_priv *priv)
608{ 638{
609 return !priv->current_ht_config.is_ht || 639 return !priv->current_ht_config.is_ht ||
610 ((priv->current_ht_config.mcs.rx_mask[1] == 0) && 640 priv->current_ht_config.single_chain_sufficient;
611 (priv->current_ht_config.mcs.rx_mask[2] == 0));
612} 641}
613 642
614static u8 iwl_is_channel_extension(struct iwl_priv *priv, 643static u8 iwl_is_channel_extension(struct iwl_priv *priv,
@@ -634,10 +663,9 @@ static u8 iwl_is_channel_extension(struct iwl_priv *priv,
634u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv, 663u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
635 struct ieee80211_sta_ht_cap *sta_ht_inf) 664 struct ieee80211_sta_ht_cap *sta_ht_inf)
636{ 665{
637 struct iwl_ht_info *iwl_ht_conf = &priv->current_ht_config; 666 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
638 667
639 if ((!iwl_ht_conf->is_ht) || 668 if (!ht_conf->is_ht || !ht_conf->is_40mhz)
640 (iwl_ht_conf->supported_chan_width != IWL_CHANNEL_WIDTH_40MHZ))
641 return 0; 669 return 0;
642 670
643 /* We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40 671 /* We do not check for IEEE80211_HT_CAP_SUP_WIDTH_20_40
@@ -653,7 +681,7 @@ u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
653#endif 681#endif
654 return iwl_is_channel_extension(priv, priv->band, 682 return iwl_is_channel_extension(priv, priv->band,
655 le16_to_cpu(priv->staging_rxon.channel), 683 le16_to_cpu(priv->staging_rxon.channel),
656 iwl_ht_conf->extension_chan_offset); 684 ht_conf->extension_chan_offset);
657} 685}
658EXPORT_SYMBOL(iwl_is_ht40_tx_allowed); 686EXPORT_SYMBOL(iwl_is_ht40_tx_allowed);
659 687
@@ -877,11 +905,11 @@ u8 iwl_rate_get_lowest_plcp(struct iwl_priv *priv)
877} 905}
878EXPORT_SYMBOL(iwl_rate_get_lowest_plcp); 906EXPORT_SYMBOL(iwl_rate_get_lowest_plcp);
879 907
880void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info) 908void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf)
881{ 909{
882 struct iwl_rxon_cmd *rxon = &priv->staging_rxon; 910 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
883 911
884 if (!ht_info->is_ht) { 912 if (!ht_conf->is_ht) {
885 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK | 913 rxon->flags &= ~(RXON_FLG_CHANNEL_MODE_MSK |
886 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK | 914 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK |
887 RXON_FLG_HT40_PROT_MSK | 915 RXON_FLG_HT40_PROT_MSK |
@@ -892,7 +920,7 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
892 /* FIXME: if the definition of ht_protection changed, the "translation" 920 /* FIXME: if the definition of ht_protection changed, the "translation"
893 * will be needed for rxon->flags 921 * will be needed for rxon->flags
894 */ 922 */
895 rxon->flags |= cpu_to_le32(ht_info->ht_protection << RXON_FLG_HT_OPERATING_MODE_POS); 923 rxon->flags |= cpu_to_le32(ht_conf->ht_protection << RXON_FLG_HT_OPERATING_MODE_POS);
896 924
897 /* Set up channel bandwidth: 925 /* Set up channel bandwidth:
898 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */ 926 * 20 MHz only, 20/40 mixed or pure 40 if ht40 ok */
@@ -901,10 +929,10 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
901 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); 929 RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
902 if (iwl_is_ht40_tx_allowed(priv, NULL)) { 930 if (iwl_is_ht40_tx_allowed(priv, NULL)) {
903 /* pure ht40 */ 931 /* pure ht40 */
904 if (ht_info->ht_protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) { 932 if (ht_conf->ht_protection == IEEE80211_HT_OP_MODE_PROTECTION_20MHZ) {
905 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40; 933 rxon->flags |= RXON_FLG_CHANNEL_MODE_PURE_40;
906 /* Note: control channel is opposite of extension channel */ 934 /* Note: control channel is opposite of extension channel */
907 switch (ht_info->extension_chan_offset) { 935 switch (ht_conf->extension_chan_offset) {
908 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 936 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
909 rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK; 937 rxon->flags &= ~RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK;
910 break; 938 break;
@@ -914,7 +942,7 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
914 } 942 }
915 } else { 943 } else {
916 /* Note: control channel is opposite of extension channel */ 944 /* Note: control channel is opposite of extension channel */
917 switch (ht_info->extension_chan_offset) { 945 switch (ht_conf->extension_chan_offset) {
918 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE: 946 case IEEE80211_HT_PARAM_CHA_SEC_ABOVE:
919 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK); 947 rxon->flags &= ~(RXON_FLG_CTRL_CHANNEL_LOC_HI_MSK);
920 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED; 948 rxon->flags |= RXON_FLG_CHANNEL_MODE_MIXED;
@@ -937,14 +965,10 @@ void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info)
937 if (priv->cfg->ops->hcmd->set_rxon_chain) 965 if (priv->cfg->ops->hcmd->set_rxon_chain)
938 priv->cfg->ops->hcmd->set_rxon_chain(priv); 966 priv->cfg->ops->hcmd->set_rxon_chain(priv);
939 967
940 IWL_DEBUG_ASSOC(priv, "supported HT rate 0x%X 0x%X 0x%X " 968 IWL_DEBUG_ASSOC(priv, "rxon flags 0x%X operation mode :0x%X "
941 "rxon flags 0x%X operation mode :0x%X "
942 "extension channel offset 0x%x\n", 969 "extension channel offset 0x%x\n",
943 ht_info->mcs.rx_mask[0], 970 le32_to_cpu(rxon->flags), ht_conf->ht_protection,
944 ht_info->mcs.rx_mask[1], 971 ht_conf->extension_chan_offset);
945 ht_info->mcs.rx_mask[2],
946 le32_to_cpu(rxon->flags), ht_info->ht_protection,
947 ht_info->extension_chan_offset);
948 return; 972 return;
949} 973}
950EXPORT_SYMBOL(iwl_set_rxon_ht); 974EXPORT_SYMBOL(iwl_set_rxon_ht);
@@ -954,45 +978,53 @@ EXPORT_SYMBOL(iwl_set_rxon_ht);
954#define IWL_NUM_IDLE_CHAINS_DUAL 2 978#define IWL_NUM_IDLE_CHAINS_DUAL 2
955#define IWL_NUM_IDLE_CHAINS_SINGLE 1 979#define IWL_NUM_IDLE_CHAINS_SINGLE 1
956 980
957/* Determine how many receiver/antenna chains to use. 981/*
958 * More provides better reception via diversity. Fewer saves power. 982 * Determine how many receiver/antenna chains to use.
983 *
984 * More provides better reception via diversity. Fewer saves power
985 * at the expense of throughput, but only when not in powersave to
986 * start with.
987 *
959 * MIMO (dual stream) requires at least 2, but works better with 3. 988 * MIMO (dual stream) requires at least 2, but works better with 3.
960 * This does not determine *which* chains to use, just how many. 989 * This does not determine *which* chains to use, just how many.
961 */ 990 */
962static int iwl_get_active_rx_chain_count(struct iwl_priv *priv) 991static int iwl_get_active_rx_chain_count(struct iwl_priv *priv)
963{ 992{
964 bool is_single = is_single_rx_stream(priv);
965 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
966
967 /* # of Rx chains to use when expecting MIMO. */ 993 /* # of Rx chains to use when expecting MIMO. */
968 if (is_single || (!is_cam && (priv->current_ht_config.sm_ps == 994 if (is_single_rx_stream(priv))
969 WLAN_HT_CAP_SM_PS_STATIC)))
970 return IWL_NUM_RX_CHAINS_SINGLE; 995 return IWL_NUM_RX_CHAINS_SINGLE;
971 else 996 else
972 return IWL_NUM_RX_CHAINS_MULTIPLE; 997 return IWL_NUM_RX_CHAINS_MULTIPLE;
973} 998}
974 999
1000/*
1001 * When we are in power saving mode, unless device support spatial
1002 * multiplexing power save, use the active count for rx chain count.
1003 */
975static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt) 1004static int iwl_get_idle_rx_chain_count(struct iwl_priv *priv, int active_cnt)
976{ 1005{
977 int idle_cnt; 1006 int idle_cnt = active_cnt;
978 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status); 1007 bool is_cam = !test_bit(STATUS_POWER_PMI, &priv->status);
979 /* # Rx chains when idling and maybe trying to save power */ 1008
980 switch (priv->current_ht_config.sm_ps) { 1009 if (priv->cfg->support_sm_ps) {
981 case WLAN_HT_CAP_SM_PS_STATIC: 1010 /* # Rx chains when idling and maybe trying to save power */
982 case WLAN_HT_CAP_SM_PS_DYNAMIC: 1011 switch (priv->current_ht_config.sm_ps) {
983 idle_cnt = (is_cam) ? IWL_NUM_IDLE_CHAINS_DUAL : 1012 case WLAN_HT_CAP_SM_PS_STATIC:
984 IWL_NUM_IDLE_CHAINS_SINGLE; 1013 case WLAN_HT_CAP_SM_PS_DYNAMIC:
985 break; 1014 idle_cnt = (is_cam) ? IWL_NUM_IDLE_CHAINS_DUAL :
986 case WLAN_HT_CAP_SM_PS_DISABLED: 1015 IWL_NUM_IDLE_CHAINS_SINGLE;
987 idle_cnt = (is_cam) ? active_cnt : IWL_NUM_IDLE_CHAINS_SINGLE; 1016 break;
988 break; 1017 case WLAN_HT_CAP_SM_PS_DISABLED:
989 case WLAN_HT_CAP_SM_PS_INVALID: 1018 idle_cnt = (is_cam) ? active_cnt :
990 default: 1019 IWL_NUM_IDLE_CHAINS_SINGLE;
991 IWL_ERR(priv, "invalid mimo ps mode %d\n", 1020 break;
992 priv->current_ht_config.sm_ps); 1021 case WLAN_HT_CAP_SM_PS_INVALID:
993 WARN_ON(1); 1022 default:
994 idle_cnt = -1; 1023 IWL_ERR(priv, "invalid sm_ps mode %d\n",
995 break; 1024 priv->current_ht_config.sm_ps);
1025 WARN_ON(1);
1026 break;
1027 }
996 } 1028 }
997 return idle_cnt; 1029 return idle_cnt;
998} 1030}
@@ -1004,7 +1036,7 @@ static u8 iwl_count_chain_bitmap(u32 chain_bitmap)
1004 res = (chain_bitmap & BIT(0)) >> 0; 1036 res = (chain_bitmap & BIT(0)) >> 0;
1005 res += (chain_bitmap & BIT(1)) >> 1; 1037 res += (chain_bitmap & BIT(1)) >> 1;
1006 res += (chain_bitmap & BIT(2)) >> 2; 1038 res += (chain_bitmap & BIT(2)) >> 2;
1007 res += (chain_bitmap & BIT(4)) >> 4; 1039 res += (chain_bitmap & BIT(3)) >> 3;
1008 return res; 1040 return res;
1009} 1041}
1010 1042
@@ -1280,13 +1312,18 @@ static void iwl_set_rate(struct iwl_priv *priv)
1280 1312
1281void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) 1313void iwl_rx_csa(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1282{ 1314{
1283 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1315 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1284 struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon; 1316 struct iwl_rxon_cmd *rxon = (void *)&priv->active_rxon;
1285 struct iwl_csa_notification *csa = &(pkt->u.csa_notif); 1317 struct iwl_csa_notification *csa = &(pkt->u.csa_notif);
1286 IWL_DEBUG_11H(priv, "CSA notif: channel %d, status %d\n", 1318
1287 le16_to_cpu(csa->channel), le32_to_cpu(csa->status)); 1319 if (!le32_to_cpu(csa->status)) {
1288 rxon->channel = csa->channel; 1320 rxon->channel = csa->channel;
1289 priv->staging_rxon.channel = csa->channel; 1321 priv->staging_rxon.channel = csa->channel;
1322 IWL_DEBUG_11H(priv, "CSA notif: channel %d\n",
1323 le16_to_cpu(csa->channel));
1324 } else
1325 IWL_ERR(priv, "CSA notif (fail) : channel %d\n",
1326 le16_to_cpu(csa->channel));
1290} 1327}
1291EXPORT_SYMBOL(iwl_rx_csa); 1328EXPORT_SYMBOL(iwl_rx_csa);
1292 1329
@@ -1345,6 +1382,160 @@ void iwl_irq_handle_error(struct iwl_priv *priv)
1345} 1382}
1346EXPORT_SYMBOL(iwl_irq_handle_error); 1383EXPORT_SYMBOL(iwl_irq_handle_error);
1347 1384
1385int iwl_apm_stop_master(struct iwl_priv *priv)
1386{
1387 int ret = 0;
1388
1389 /* stop device's busmaster DMA activity */
1390 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);
1391
1392 ret = iwl_poll_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_MASTER_DISABLED,
1393 CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
1394 if (ret)
1395 IWL_WARN(priv, "Master Disable Timed Out, 100 usec\n");
1396
1397 IWL_DEBUG_INFO(priv, "stop master\n");
1398
1399 return ret;
1400}
1401EXPORT_SYMBOL(iwl_apm_stop_master);
1402
1403void iwl_apm_stop(struct iwl_priv *priv)
1404{
1405 IWL_DEBUG_INFO(priv, "Stop card, put in low power state\n");
1406
1407 /* Stop device's DMA activity */
1408 iwl_apm_stop_master(priv);
1409
1410 /* Reset the entire device */
1411 iwl_set_bit(priv, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
1412
1413 udelay(10);
1414
1415 /*
1416 * Clear "initialization complete" bit to move adapter from
1417 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
1418 */
1419 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1420}
1421EXPORT_SYMBOL(iwl_apm_stop);
1422
1423
1424/*
1425 * Start up NIC's basic functionality after it has been reset
1426 * (e.g. after platform boot, or shutdown via iwl_apm_stop())
1427 * NOTE: This does not load uCode nor start the embedded processor
1428 */
1429int iwl_apm_init(struct iwl_priv *priv)
1430{
1431 int ret = 0;
1432 u16 lctl;
1433
1434 IWL_DEBUG_INFO(priv, "Init card's basic functions\n");
1435
1436 /*
1437 * Use "set_bit" below rather than "write", to preserve any hardware
1438 * bits already set by default after reset.
1439 */
1440
1441 /* Disable L0S exit timer (platform NMI Work/Around) */
1442 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1443 CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);
1444
1445 /*
1446 * Disable L0s without affecting L1;
1447 * don't wait for ICH L0s (ICH bug W/A)
1448 */
1449 iwl_set_bit(priv, CSR_GIO_CHICKEN_BITS,
1450 CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);
1451
1452 /* Set FH wait threshold to maximum (HW error during stress W/A) */
1453 iwl_set_bit(priv, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);
1454
1455 /*
1456 * Enable HAP INTA (interrupt from management bus) to
1457 * wake device's PCI Express link L1a -> L0s
1458 * NOTE: This is no-op for 3945 (non-existant bit)
1459 */
1460 iwl_set_bit(priv, CSR_HW_IF_CONFIG_REG,
1461 CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);
1462
1463 /*
1464 * HW bug W/A for instability in PCIe bus L0->L0S->L1 transition.
1465 * Check if BIOS (or OS) enabled L1-ASPM on this device.
1466 * If so (likely), disable L0S, so device moves directly L0->L1;
1467 * costs negligible amount of power savings.
1468 * If not (unlikely), enable L0S, so there is at least some
1469 * power savings, even without L1.
1470 */
1471 if (priv->cfg->set_l0s) {
1472 lctl = iwl_pcie_link_ctl(priv);
1473 if ((lctl & PCI_CFG_LINK_CTRL_VAL_L1_EN) ==
1474 PCI_CFG_LINK_CTRL_VAL_L1_EN) {
1475 /* L1-ASPM enabled; disable(!) L0S */
1476 iwl_set_bit(priv, CSR_GIO_REG,
1477 CSR_GIO_REG_VAL_L0S_ENABLED);
1478 IWL_DEBUG_POWER(priv, "L1 Enabled; Disabling L0S\n");
1479 } else {
1480 /* L1-ASPM disabled; enable(!) L0S */
1481 iwl_clear_bit(priv, CSR_GIO_REG,
1482 CSR_GIO_REG_VAL_L0S_ENABLED);
1483 IWL_DEBUG_POWER(priv, "L1 Disabled; Enabling L0S\n");
1484 }
1485 }
1486
1487 /* Configure analog phase-lock-loop before activating to D0A */
1488 if (priv->cfg->pll_cfg_val)
1489 iwl_set_bit(priv, CSR_ANA_PLL_CFG, priv->cfg->pll_cfg_val);
1490
1491 /*
1492 * Set "initialization complete" bit to move adapter from
1493 * D0U* --> D0A* (powered-up active) state.
1494 */
1495 iwl_set_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
1496
1497 /*
1498 * Wait for clock stabilization; once stabilized, access to
1499 * device-internal resources is supported, e.g. iwl_write_prph()
1500 * and accesses to uCode SRAM.
1501 */
1502 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
1503 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
1504 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 25000);
1505 if (ret < 0) {
1506 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
1507 goto out;
1508 }
1509
1510 /*
1511 * Enable DMA and BSM (if used) clocks, wait for them to stabilize.
1512 * BSM (Boostrap State Machine) is only in 3945 and 4965;
1513 * later devices (i.e. 5000 and later) have non-volatile SRAM,
1514 * and don't need BSM to restore data after power-saving sleep.
1515 *
1516 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0" bits
1517 * do not disable clocks. This preserves any hardware bits already
1518 * set by default in "CLK_CTRL_REG" after reset.
1519 */
1520 if (priv->cfg->use_bsm)
1521 iwl_write_prph(priv, APMG_CLK_EN_REG,
1522 APMG_CLK_VAL_DMA_CLK_RQT | APMG_CLK_VAL_BSM_CLK_RQT);
1523 else
1524 iwl_write_prph(priv, APMG_CLK_EN_REG,
1525 APMG_CLK_VAL_DMA_CLK_RQT);
1526 udelay(20);
1527
1528 /* Disable L1-Active */
1529 iwl_set_bits_prph(priv, APMG_PCIDEV_STT_REG,
1530 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
1531
1532out:
1533 return ret;
1534}
1535EXPORT_SYMBOL(iwl_apm_init);
1536
1537
1538
1348void iwl_configure_filter(struct ieee80211_hw *hw, 1539void iwl_configure_filter(struct ieee80211_hw *hw,
1349 unsigned int changed_flags, 1540 unsigned int changed_flags,
1350 unsigned int *total_flags, 1541 unsigned int *total_flags,
@@ -1392,73 +1583,14 @@ void iwl_configure_filter(struct ieee80211_hw *hw,
1392} 1583}
1393EXPORT_SYMBOL(iwl_configure_filter); 1584EXPORT_SYMBOL(iwl_configure_filter);
1394 1585
1395int iwl_setup_mac(struct iwl_priv *priv)
1396{
1397 int ret;
1398 struct ieee80211_hw *hw = priv->hw;
1399 hw->rate_control_algorithm = "iwl-agn-rs";
1400
1401 /* Tell mac80211 our characteristics */
1402 hw->flags = IEEE80211_HW_SIGNAL_DBM |
1403 IEEE80211_HW_NOISE_DBM |
1404 IEEE80211_HW_AMPDU_AGGREGATION |
1405 IEEE80211_HW_SPECTRUM_MGMT;
1406
1407 if (!priv->cfg->broken_powersave)
1408 hw->flags |= IEEE80211_HW_SUPPORTS_PS |
1409 IEEE80211_HW_SUPPORTS_DYNAMIC_PS;
1410
1411 hw->wiphy->interface_modes =
1412 BIT(NL80211_IFTYPE_STATION) |
1413 BIT(NL80211_IFTYPE_ADHOC);
1414
1415 hw->wiphy->custom_regulatory = true;
1416
1417 /* Firmware does not support this */
1418 hw->wiphy->disable_beacon_hints = true;
1419
1420 /*
1421 * For now, disable PS by default because it affects
1422 * RX performance significantly.
1423 */
1424 hw->wiphy->ps_default = false;
1425
1426 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;
1427 /* we create the 802.11 header and a zero-length SSID element */
1428 hw->wiphy->max_scan_ie_len = IWL_MAX_PROBE_REQUEST - 24 - 2;
1429
1430 /* Default value; 4 EDCA QOS priorities */
1431 hw->queues = 4;
1432
1433 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;
1434
1435 if (priv->bands[IEEE80211_BAND_2GHZ].n_channels)
1436 priv->hw->wiphy->bands[IEEE80211_BAND_2GHZ] =
1437 &priv->bands[IEEE80211_BAND_2GHZ];
1438 if (priv->bands[IEEE80211_BAND_5GHZ].n_channels)
1439 priv->hw->wiphy->bands[IEEE80211_BAND_5GHZ] =
1440 &priv->bands[IEEE80211_BAND_5GHZ];
1441
1442 ret = ieee80211_register_hw(priv->hw);
1443 if (ret) {
1444 IWL_ERR(priv, "Failed to register hw (error %d)\n", ret);
1445 return ret;
1446 }
1447 priv->mac80211_registered = 1;
1448
1449 return 0;
1450}
1451EXPORT_SYMBOL(iwl_setup_mac);
1452
1453int iwl_set_hw_params(struct iwl_priv *priv) 1586int iwl_set_hw_params(struct iwl_priv *priv)
1454{ 1587{
1455 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE; 1588 priv->hw_params.max_rxq_size = RX_QUEUE_SIZE;
1456 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG; 1589 priv->hw_params.max_rxq_log = RX_QUEUE_SIZE_LOG;
1457 if (priv->cfg->mod_params->amsdu_size_8K) 1590 if (priv->cfg->mod_params->amsdu_size_8K)
1458 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_8K; 1591 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_8K);
1459 else 1592 else
1460 priv->hw_params.rx_buf_size = IWL_RX_BUF_SIZE_4K; 1593 priv->hw_params.rx_page_order = get_order(IWL_RX_BUF_SIZE_4K);
1461 priv->hw_params.max_pkt_size = priv->hw_params.rx_buf_size - 256;
1462 1594
1463 priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL; 1595 priv->hw_params.max_beacon_itrvl = IWL_MAX_UCODE_BEACON_INTERVAL;
1464 1596
@@ -1470,71 +1602,6 @@ int iwl_set_hw_params(struct iwl_priv *priv)
1470} 1602}
1471EXPORT_SYMBOL(iwl_set_hw_params); 1603EXPORT_SYMBOL(iwl_set_hw_params);
1472 1604
1473int iwl_init_drv(struct iwl_priv *priv)
1474{
1475 int ret;
1476
1477 priv->ibss_beacon = NULL;
1478
1479 spin_lock_init(&priv->lock);
1480 spin_lock_init(&priv->sta_lock);
1481 spin_lock_init(&priv->hcmd_lock);
1482
1483 INIT_LIST_HEAD(&priv->free_frames);
1484
1485 mutex_init(&priv->mutex);
1486
1487 /* Clear the driver's (not device's) station table */
1488 iwl_clear_stations_table(priv);
1489
1490 priv->data_retry_limit = -1;
1491 priv->ieee_channels = NULL;
1492 priv->ieee_rates = NULL;
1493 priv->band = IEEE80211_BAND_2GHZ;
1494
1495 priv->iw_mode = NL80211_IFTYPE_STATION;
1496
1497 priv->current_ht_config.sm_ps = WLAN_HT_CAP_SM_PS_DISABLED;
1498
1499 /* Choose which receivers/antennas to use */
1500 if (priv->cfg->ops->hcmd->set_rxon_chain)
1501 priv->cfg->ops->hcmd->set_rxon_chain(priv);
1502
1503 iwl_init_scan_params(priv);
1504
1505 iwl_reset_qos(priv);
1506
1507 priv->qos_data.qos_active = 0;
1508 priv->qos_data.qos_cap.val = 0;
1509
1510 priv->rates_mask = IWL_RATES_MASK;
1511 /* Set the tx_power_user_lmt to the lowest power level
1512 * this value will get overwritten by channel max power avg
1513 * from eeprom */
1514 priv->tx_power_user_lmt = IWL_TX_POWER_TARGET_POWER_MIN;
1515
1516 ret = iwl_init_channel_map(priv);
1517 if (ret) {
1518 IWL_ERR(priv, "initializing regulatory failed: %d\n", ret);
1519 goto err;
1520 }
1521
1522 ret = iwlcore_init_geos(priv);
1523 if (ret) {
1524 IWL_ERR(priv, "initializing geos failed: %d\n", ret);
1525 goto err_free_channel_map;
1526 }
1527 iwlcore_init_hw_rates(priv, priv->ieee_rates);
1528
1529 return 0;
1530
1531err_free_channel_map:
1532 iwl_free_channel_map(priv);
1533err:
1534 return ret;
1535}
1536EXPORT_SYMBOL(iwl_init_drv);
1537
1538int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force) 1605int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1539{ 1606{
1540 int ret = 0; 1607 int ret = 0;
@@ -1582,15 +1649,6 @@ int iwl_set_tx_power(struct iwl_priv *priv, s8 tx_power, bool force)
1582} 1649}
1583EXPORT_SYMBOL(iwl_set_tx_power); 1650EXPORT_SYMBOL(iwl_set_tx_power);
1584 1651
1585void iwl_uninit_drv(struct iwl_priv *priv)
1586{
1587 iwl_calib_free_results(priv);
1588 iwlcore_free_geos(priv);
1589 iwl_free_channel_map(priv);
1590 kfree(priv->scan);
1591}
1592EXPORT_SYMBOL(iwl_uninit_drv);
1593
1594#define ICT_COUNT (PAGE_SIZE/sizeof(u32)) 1652#define ICT_COUNT (PAGE_SIZE/sizeof(u32))
1595 1653
1596/* Free dram table */ 1654/* Free dram table */
@@ -1914,9 +1972,9 @@ EXPORT_SYMBOL(iwl_isr_legacy);
1914int iwl_send_bt_config(struct iwl_priv *priv) 1972int iwl_send_bt_config(struct iwl_priv *priv)
1915{ 1973{
1916 struct iwl_bt_cmd bt_cmd = { 1974 struct iwl_bt_cmd bt_cmd = {
1917 .flags = 3, 1975 .flags = BT_COEX_MODE_4W,
1918 .lead_time = 0xAA, 1976 .lead_time = BT_LEAD_TIME_DEF,
1919 .max_kill = 1, 1977 .max_kill = BT_MAX_KILL_DEF,
1920 .kill_ack_mask = 0, 1978 .kill_ack_mask = 0,
1921 .kill_cts_mask = 0, 1979 .kill_cts_mask = 0,
1922 }; 1980 };
@@ -2076,10 +2134,7 @@ void iwl_rf_kill_ct_config(struct iwl_priv *priv)
2076 spin_unlock_irqrestore(&priv->lock, flags); 2134 spin_unlock_irqrestore(&priv->lock, flags);
2077 priv->thermal_throttle.ct_kill_toggle = false; 2135 priv->thermal_throttle.ct_kill_toggle = false;
2078 2136
2079 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) { 2137 if (priv->cfg->support_ct_kill_exit) {
2080 case CSR_HW_REV_TYPE_1000:
2081 case CSR_HW_REV_TYPE_6x00:
2082 case CSR_HW_REV_TYPE_6x50:
2083 adv_cmd.critical_temperature_enter = 2138 adv_cmd.critical_temperature_enter =
2084 cpu_to_le32(priv->hw_params.ct_kill_threshold); 2139 cpu_to_le32(priv->hw_params.ct_kill_threshold);
2085 adv_cmd.critical_temperature_exit = 2140 adv_cmd.critical_temperature_exit =
@@ -2096,8 +2151,7 @@ void iwl_rf_kill_ct_config(struct iwl_priv *priv)
2096 "exit is %d\n", 2151 "exit is %d\n",
2097 priv->hw_params.ct_kill_threshold, 2152 priv->hw_params.ct_kill_threshold,
2098 priv->hw_params.ct_kill_exit_threshold); 2153 priv->hw_params.ct_kill_exit_threshold);
2099 break; 2154 } else {
2100 default:
2101 cmd.critical_temperature_R = 2155 cmd.critical_temperature_R =
2102 cpu_to_le32(priv->hw_params.ct_kill_threshold); 2156 cpu_to_le32(priv->hw_params.ct_kill_threshold);
2103 2157
@@ -2110,7 +2164,6 @@ void iwl_rf_kill_ct_config(struct iwl_priv *priv)
2110 "succeeded, " 2164 "succeeded, "
2111 "critical temperature is %d\n", 2165 "critical temperature is %d\n",
2112 priv->hw_params.ct_kill_threshold); 2166 priv->hw_params.ct_kill_threshold);
2113 break;
2114 } 2167 }
2115} 2168}
2116EXPORT_SYMBOL(iwl_rf_kill_ct_config); 2169EXPORT_SYMBOL(iwl_rf_kill_ct_config);
@@ -2142,7 +2195,7 @@ void iwl_rx_pm_sleep_notif(struct iwl_priv *priv,
2142 struct iwl_rx_mem_buffer *rxb) 2195 struct iwl_rx_mem_buffer *rxb)
2143{ 2196{
2144#ifdef CONFIG_IWLWIFI_DEBUG 2197#ifdef CONFIG_IWLWIFI_DEBUG
2145 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 2198 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2146 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif); 2199 struct iwl_sleep_notification *sleep = &(pkt->u.sleep_notif);
2147 IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n", 2200 IWL_DEBUG_RX(priv, "sleep mode: %d, src: %d\n",
2148 sleep->pm_sleep_mode, sleep->pm_wakeup_src); 2201 sleep->pm_sleep_mode, sleep->pm_wakeup_src);
@@ -2153,7 +2206,7 @@ EXPORT_SYMBOL(iwl_rx_pm_sleep_notif);
2153void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv, 2206void iwl_rx_pm_debug_statistics_notif(struct iwl_priv *priv,
2154 struct iwl_rx_mem_buffer *rxb) 2207 struct iwl_rx_mem_buffer *rxb)
2155{ 2208{
2156 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 2209 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2157 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK; 2210 u32 len = le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK;
2158 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled " 2211 IWL_DEBUG_RADIO(priv, "Dumping %d bytes of unhandled "
2159 "notification for %s:\n", len, 2212 "notification for %s:\n", len,
@@ -2165,7 +2218,7 @@ EXPORT_SYMBOL(iwl_rx_pm_debug_statistics_notif);
2165void iwl_rx_reply_error(struct iwl_priv *priv, 2218void iwl_rx_reply_error(struct iwl_priv *priv,
2166 struct iwl_rx_mem_buffer *rxb) 2219 struct iwl_rx_mem_buffer *rxb)
2167{ 2220{
2168 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 2221 struct iwl_rx_packet *pkt = rxb_addr(rxb);
2169 2222
2170 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) " 2223 IWL_ERR(priv, "Error Reply type 0x%08X cmd %s (0x%02X) "
2171 "seq 0x%04X ser 0x%08X\n", 2224 "seq 0x%04X ser 0x%08X\n",
@@ -2227,42 +2280,64 @@ int iwl_mac_conf_tx(struct ieee80211_hw *hw, u16 queue,
2227EXPORT_SYMBOL(iwl_mac_conf_tx); 2280EXPORT_SYMBOL(iwl_mac_conf_tx);
2228 2281
2229static void iwl_ht_conf(struct iwl_priv *priv, 2282static void iwl_ht_conf(struct iwl_priv *priv,
2230 struct ieee80211_bss_conf *bss_conf) 2283 struct ieee80211_bss_conf *bss_conf)
2231{ 2284{
2232 struct ieee80211_sta_ht_cap *ht_conf; 2285 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2233 struct iwl_ht_info *iwl_conf = &priv->current_ht_config;
2234 struct ieee80211_sta *sta; 2286 struct ieee80211_sta *sta;
2235 2287
2236 IWL_DEBUG_MAC80211(priv, "enter: \n"); 2288 IWL_DEBUG_MAC80211(priv, "enter: \n");
2237 2289
2238 if (!iwl_conf->is_ht) 2290 if (!ht_conf->is_ht)
2239 return; 2291 return;
2240 2292
2293 ht_conf->ht_protection =
2294 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
2295 ht_conf->non_GF_STA_present =
2296 !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
2241 2297
2242 /* 2298 ht_conf->single_chain_sufficient = false;
2243 * It is totally wrong to base global information on something
2244 * that is valid only when associated, alas, this driver works
2245 * that way and I don't know how to fix it.
2246 */
2247 2299
2248 rcu_read_lock(); 2300 switch (priv->iw_mode) {
2249 sta = ieee80211_find_sta(priv->hw, priv->bssid); 2301 case NL80211_IFTYPE_STATION:
2250 if (!sta) { 2302 rcu_read_lock();
2303 sta = ieee80211_find_sta(priv->vif, priv->bssid);
2304 if (sta) {
2305 struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2306 int maxstreams;
2307
2308 maxstreams = (ht_cap->mcs.tx_params &
2309 IEEE80211_HT_MCS_TX_MAX_STREAMS_MASK)
2310 >> IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT;
2311 maxstreams += 1;
2312
2313 ht_conf->sm_ps =
2314 (u8)((ht_cap->cap & IEEE80211_HT_CAP_SM_PS)
2315 >> 2);
2316 IWL_DEBUG_MAC80211(priv, "sm_ps: 0x%x\n",
2317 ht_conf->sm_ps);
2318
2319 if ((ht_cap->mcs.rx_mask[1] == 0) &&
2320 (ht_cap->mcs.rx_mask[2] == 0))
2321 ht_conf->single_chain_sufficient = true;
2322 if (maxstreams <= 1)
2323 ht_conf->single_chain_sufficient = true;
2324 } else {
2325 /*
2326 * If at all, this can only happen through a race
2327 * when the AP disconnects us while we're still
2328 * setting up the connection, in that case mac80211
2329 * will soon tell us about that.
2330 */
2331 ht_conf->single_chain_sufficient = true;
2332 }
2251 rcu_read_unlock(); 2333 rcu_read_unlock();
2252 return; 2334 break;
2335 case NL80211_IFTYPE_ADHOC:
2336 ht_conf->single_chain_sufficient = true;
2337 break;
2338 default:
2339 break;
2253 } 2340 }
2254 ht_conf = &sta->ht_cap;
2255
2256 iwl_conf->sm_ps = (u8)((ht_conf->cap & IEEE80211_HT_CAP_SM_PS) >> 2);
2257
2258 memcpy(&iwl_conf->mcs, &ht_conf->mcs, 16);
2259
2260 iwl_conf->ht_protection =
2261 bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_PROTECTION;
2262 iwl_conf->non_GF_STA_present =
2263 !!(bss_conf->ht_operation_mode & IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT);
2264
2265 rcu_read_unlock();
2266 2341
2267 IWL_DEBUG_MAC80211(priv, "leave\n"); 2342 IWL_DEBUG_MAC80211(priv, "leave\n");
2268} 2343}
@@ -2386,6 +2461,8 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
2386 priv->timestamp = bss_conf->timestamp; 2461 priv->timestamp = bss_conf->timestamp;
2387 priv->assoc_capability = bss_conf->assoc_capability; 2462 priv->assoc_capability = bss_conf->assoc_capability;
2388 2463
2464 iwl_led_associate(priv);
2465
2389 /* 2466 /*
2390 * We have just associated, don't start scan too early 2467 * We have just associated, don't start scan too early
2391 * leave time for EAPOL exchange to complete. 2468 * leave time for EAPOL exchange to complete.
@@ -2396,9 +2473,10 @@ void iwl_bss_info_changed(struct ieee80211_hw *hw,
2396 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC; 2473 IWL_DELAY_NEXT_SCAN_AFTER_ASSOC;
2397 if (!iwl_is_rfkill(priv)) 2474 if (!iwl_is_rfkill(priv))
2398 priv->cfg->ops->lib->post_associate(priv); 2475 priv->cfg->ops->lib->post_associate(priv);
2399 } else 2476 } else {
2400 priv->assoc_id = 0; 2477 priv->assoc_id = 0;
2401 2478 iwl_led_disassociate(priv);
2479 }
2402 } 2480 }
2403 2481
2404 if (changes && iwl_is_associated(priv) && priv->assoc_id) { 2482 if (changes && iwl_is_associated(priv) && priv->assoc_id) {
@@ -2569,7 +2647,7 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2569 struct iwl_priv *priv = hw->priv; 2647 struct iwl_priv *priv = hw->priv;
2570 const struct iwl_channel_info *ch_info; 2648 const struct iwl_channel_info *ch_info;
2571 struct ieee80211_conf *conf = &hw->conf; 2649 struct ieee80211_conf *conf = &hw->conf;
2572 struct iwl_ht_info *ht_conf = &priv->current_ht_config; 2650 struct iwl_ht_config *ht_conf = &priv->current_ht_config;
2573 unsigned long flags = 0; 2651 unsigned long flags = 0;
2574 int ret = 0; 2652 int ret = 0;
2575 u16 ch; 2653 u16 ch;
@@ -2611,6 +2689,14 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2611 goto set_ch_out; 2689 goto set_ch_out;
2612 } 2690 }
2613 2691
2692 if (iwl_is_associated(priv) &&
2693 (le16_to_cpu(priv->active_rxon.channel) != ch) &&
2694 priv->cfg->ops->lib->set_channel_switch) {
2695 ret = priv->cfg->ops->lib->set_channel_switch(priv,
2696 ch);
2697 goto out;
2698 }
2699
2614 spin_lock_irqsave(&priv->lock, flags); 2700 spin_lock_irqsave(&priv->lock, flags);
2615 2701
2616 /* Configure HT40 channels */ 2702 /* Configure HT40 channels */
@@ -2619,21 +2705,18 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2619 if (conf_is_ht40_minus(conf)) { 2705 if (conf_is_ht40_minus(conf)) {
2620 ht_conf->extension_chan_offset = 2706 ht_conf->extension_chan_offset =
2621 IEEE80211_HT_PARAM_CHA_SEC_BELOW; 2707 IEEE80211_HT_PARAM_CHA_SEC_BELOW;
2622 ht_conf->supported_chan_width = 2708 ht_conf->is_40mhz = true;
2623 IWL_CHANNEL_WIDTH_40MHZ;
2624 } else if (conf_is_ht40_plus(conf)) { 2709 } else if (conf_is_ht40_plus(conf)) {
2625 ht_conf->extension_chan_offset = 2710 ht_conf->extension_chan_offset =
2626 IEEE80211_HT_PARAM_CHA_SEC_ABOVE; 2711 IEEE80211_HT_PARAM_CHA_SEC_ABOVE;
2627 ht_conf->supported_chan_width = 2712 ht_conf->is_40mhz = true;
2628 IWL_CHANNEL_WIDTH_40MHZ;
2629 } else { 2713 } else {
2630 ht_conf->extension_chan_offset = 2714 ht_conf->extension_chan_offset =
2631 IEEE80211_HT_PARAM_CHA_SEC_NONE; 2715 IEEE80211_HT_PARAM_CHA_SEC_NONE;
2632 ht_conf->supported_chan_width = 2716 ht_conf->is_40mhz = false;
2633 IWL_CHANNEL_WIDTH_20MHZ;
2634 } 2717 }
2635 } else 2718 } else
2636 ht_conf->supported_chan_width = IWL_CHANNEL_WIDTH_20MHZ; 2719 ht_conf->is_40mhz = false;
2637 /* Default to no protection. Protection mode will later be set 2720 /* Default to no protection. Protection mode will later be set
2638 * from BSS config in iwl_ht_conf */ 2721 * from BSS config in iwl_ht_conf */
2639 ht_conf->ht_protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE; 2722 ht_conf->ht_protection = IEEE80211_HT_OP_MODE_PROTECTION_NONE;
@@ -2655,7 +2738,8 @@ int iwl_mac_config(struct ieee80211_hw *hw, u32 changed)
2655 iwl_set_rate(priv); 2738 iwl_set_rate(priv);
2656 } 2739 }
2657 2740
2658 if (changed & IEEE80211_CONF_CHANGE_PS) { 2741 if (changed & (IEEE80211_CONF_CHANGE_PS |
2742 IEEE80211_CONF_CHANGE_IDLE)) {
2659 ret = iwl_power_update_mode(priv, false); 2743 ret = iwl_power_update_mode(priv, false);
2660 if (ret) 2744 if (ret)
2661 IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n"); 2745 IWL_DEBUG_MAC80211(priv, "Error setting sleep level\n");
@@ -2739,7 +2823,7 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
2739 IWL_DEBUG_MAC80211(priv, "enter\n"); 2823 IWL_DEBUG_MAC80211(priv, "enter\n");
2740 2824
2741 spin_lock_irqsave(&priv->lock, flags); 2825 spin_lock_irqsave(&priv->lock, flags);
2742 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_info)); 2826 memset(&priv->current_ht_config, 0, sizeof(struct iwl_ht_config));
2743 spin_unlock_irqrestore(&priv->lock, flags); 2827 spin_unlock_irqrestore(&priv->lock, flags);
2744 2828
2745 iwl_reset_qos(priv); 2829 iwl_reset_qos(priv);
@@ -2791,6 +2875,55 @@ void iwl_mac_reset_tsf(struct ieee80211_hw *hw)
2791} 2875}
2792EXPORT_SYMBOL(iwl_mac_reset_tsf); 2876EXPORT_SYMBOL(iwl_mac_reset_tsf);
2793 2877
2878int iwl_alloc_txq_mem(struct iwl_priv *priv)
2879{
2880 if (!priv->txq)
2881 priv->txq = kzalloc(
2882 sizeof(struct iwl_tx_queue) * priv->cfg->num_of_queues,
2883 GFP_KERNEL);
2884 if (!priv->txq) {
2885 IWL_ERR(priv, "Not enough memory for txq \n");
2886 return -ENOMEM;
2887 }
2888 return 0;
2889}
2890EXPORT_SYMBOL(iwl_alloc_txq_mem);
2891
2892void iwl_free_txq_mem(struct iwl_priv *priv)
2893{
2894 kfree(priv->txq);
2895 priv->txq = NULL;
2896}
2897EXPORT_SYMBOL(iwl_free_txq_mem);
2898
2899int iwl_send_wimax_coex(struct iwl_priv *priv)
2900{
2901 struct iwl_wimax_coex_cmd uninitialized_var(coex_cmd);
2902
2903 if (priv->cfg->support_wimax_coexist) {
2904 /* UnMask wake up src at associated sleep */
2905 coex_cmd.flags |= COEX_FLAGS_ASSOC_WA_UNMASK_MSK;
2906
2907 /* UnMask wake up src at unassociated sleep */
2908 coex_cmd.flags |= COEX_FLAGS_UNASSOC_WA_UNMASK_MSK;
2909 memcpy(coex_cmd.sta_prio, cu_priorities,
2910 sizeof(struct iwl_wimax_coex_event_entry) *
2911 COEX_NUM_OF_EVENTS);
2912
2913 /* enabling the coexistence feature */
2914 coex_cmd.flags |= COEX_FLAGS_COEX_ENABLE_MSK;
2915
2916 /* enabling the priorities tables */
2917 coex_cmd.flags |= COEX_FLAGS_STA_TABLE_VALID_MSK;
2918 } else {
2919 /* coexistence is disabled */
2920 memset(&coex_cmd, 0, sizeof(coex_cmd));
2921 }
2922 return iwl_send_cmd_pdu(priv, COEX_PRIORITY_TABLE_CMD,
2923 sizeof(coex_cmd), &coex_cmd);
2924}
2925EXPORT_SYMBOL(iwl_send_wimax_coex);
2926
2794#ifdef CONFIG_IWLWIFI_DEBUGFS 2927#ifdef CONFIG_IWLWIFI_DEBUGFS
2795 2928
2796#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES) 2929#define IWL_TRAFFIC_DUMP_SIZE (IWL_TRAFFIC_ENTRY_SIZE * IWL_TRAFFIC_ENTRIES)
diff --git a/drivers/net/wireless/iwlwifi/iwl-core.h b/drivers/net/wireless/iwlwifi/iwl-core.h
index 7754538c2194..9574d8f33537 100644
--- a/drivers/net/wireless/iwlwifi/iwl-core.h
+++ b/drivers/net/wireless/iwlwifi/iwl-core.h
@@ -89,6 +89,7 @@ struct iwl_hcmd_ops {
89 int (*rxon_assoc)(struct iwl_priv *priv); 89 int (*rxon_assoc)(struct iwl_priv *priv);
90 int (*commit_rxon)(struct iwl_priv *priv); 90 int (*commit_rxon)(struct iwl_priv *priv);
91 void (*set_rxon_chain)(struct iwl_priv *priv); 91 void (*set_rxon_chain)(struct iwl_priv *priv);
92 int (*set_tx_ant)(struct iwl_priv *priv, u8 valid_tx_ant);
92}; 93};
93 94
94struct iwl_hcmd_utils_ops { 95struct iwl_hcmd_utils_ops {
@@ -97,7 +98,8 @@ struct iwl_hcmd_utils_ops {
97 void (*gain_computation)(struct iwl_priv *priv, 98 void (*gain_computation)(struct iwl_priv *priv,
98 u32 *average_noise, 99 u32 *average_noise,
99 u16 min_average_noise_antennat_i, 100 u16 min_average_noise_antennat_i,
100 u32 min_average_noise); 101 u32 min_average_noise,
102 u8 default_chain);
101 void (*chain_noise_reset)(struct iwl_priv *priv); 103 void (*chain_noise_reset)(struct iwl_priv *priv);
102 void (*rts_tx_cmd_flag)(struct ieee80211_tx_info *info, 104 void (*rts_tx_cmd_flag)(struct ieee80211_tx_info *info,
103 __le32 *tx_flags); 105 __le32 *tx_flags);
@@ -107,7 +109,6 @@ struct iwl_hcmd_utils_ops {
107 109
108struct iwl_apm_ops { 110struct iwl_apm_ops {
109 int (*init)(struct iwl_priv *priv); 111 int (*init)(struct iwl_priv *priv);
110 int (*reset)(struct iwl_priv *priv);
111 void (*stop)(struct iwl_priv *priv); 112 void (*stop)(struct iwl_priv *priv);
112 void (*config)(struct iwl_priv *priv); 113 void (*config)(struct iwl_priv *priv);
113 int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src); 114 int (*set_pwr_src)(struct iwl_priv *priv, enum iwl_pwr_src src);
@@ -168,6 +169,7 @@ struct iwl_lib_ops {
168 int (*load_ucode)(struct iwl_priv *priv); 169 int (*load_ucode)(struct iwl_priv *priv);
169 void (*dump_nic_event_log)(struct iwl_priv *priv); 170 void (*dump_nic_event_log)(struct iwl_priv *priv);
170 void (*dump_nic_error_log)(struct iwl_priv *priv); 171 void (*dump_nic_error_log)(struct iwl_priv *priv);
172 int (*set_channel_switch)(struct iwl_priv *priv, u16 channel);
171 /* power management */ 173 /* power management */
172 struct iwl_apm_ops apm_ops; 174 struct iwl_apm_ops apm_ops;
173 175
@@ -185,18 +187,24 @@ struct iwl_lib_ops {
185 struct iwl_temp_ops temp_ops; 187 struct iwl_temp_ops temp_ops;
186}; 188};
187 189
190struct iwl_led_ops {
191 int (*cmd)(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd);
192 int (*on)(struct iwl_priv *priv);
193 int (*off)(struct iwl_priv *priv);
194};
195
188struct iwl_ops { 196struct iwl_ops {
189 const struct iwl_ucode_ops *ucode; 197 const struct iwl_ucode_ops *ucode;
190 const struct iwl_lib_ops *lib; 198 const struct iwl_lib_ops *lib;
191 const struct iwl_hcmd_ops *hcmd; 199 const struct iwl_hcmd_ops *hcmd;
192 const struct iwl_hcmd_utils_ops *utils; 200 const struct iwl_hcmd_utils_ops *utils;
201 const struct iwl_led_ops *led;
193}; 202};
194 203
195struct iwl_mod_params { 204struct iwl_mod_params {
196 int sw_crypto; /* def: 0 = using hardware encryption */ 205 int sw_crypto; /* def: 0 = using hardware encryption */
197 int disable_hw_scan; /* def: 0 = use h/w scan */ 206 int disable_hw_scan; /* def: 0 = use h/w scan */
198 int num_of_queues; /* def: HW dependent */ 207 int num_of_queues; /* def: HW dependent */
199 int num_of_ampdu_queues;/* def: HW dependent */
200 int disable_11n; /* def: 0 = 11n capabilities enabled */ 208 int disable_11n; /* def: 0 = 11n capabilities enabled */
201 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */ 209 int amsdu_size_8K; /* def: 1 = enable 8K amsdu size */
202 int antenna; /* def: 0 = both antennas (use diversity) */ 210 int antenna; /* def: 0 = both antennas (use diversity) */
@@ -213,7 +221,15 @@ struct iwl_mod_params {
213 * @pa_type: used by 6000 series only to identify the type of Power Amplifier 221 * @pa_type: used by 6000 series only to identify the type of Power Amplifier
214 * @max_ll_items: max number of OTP blocks 222 * @max_ll_items: max number of OTP blocks
215 * @shadow_ram_support: shadow support for OTP memory 223 * @shadow_ram_support: shadow support for OTP memory
224 * @led_compensation: compensate on the led on/off time per HW according
225 * to the deviation to achieve the desired led frequency.
226 * The detail algorithm is described in iwl-led.c
216 * @use_rts_for_ht: use rts/cts protection for HT traffic 227 * @use_rts_for_ht: use rts/cts protection for HT traffic
228 * @chain_noise_num_beacons: number of beacons used to compute chain noise
229 * @adv_thermal_throttle: support advance thermal throttle
230 * @support_ct_kill_exit: support ct kill exit condition
231 * @support_sm_ps: support spatial multiplexing power save
232 * @support_wimax_coexist: support wimax/wifi co-exist
217 * 233 *
218 * We enable the driver to be backward compatible wrt API version. The 234 * We enable the driver to be backward compatible wrt API version. The
219 * driver specifies which APIs it supports (with @ucode_api_max being the 235 * driver specifies which APIs it supports (with @ucode_api_max being the
@@ -245,18 +261,32 @@ struct iwl_cfg {
245 int eeprom_size; 261 int eeprom_size;
246 u16 eeprom_ver; 262 u16 eeprom_ver;
247 u16 eeprom_calib_ver; 263 u16 eeprom_calib_ver;
264 int num_of_queues; /* def: HW dependent */
265 int num_of_ampdu_queues;/* def: HW dependent */
248 const struct iwl_ops *ops; 266 const struct iwl_ops *ops;
249 const struct iwl_mod_params *mod_params; 267 const struct iwl_mod_params *mod_params;
250 u8 valid_tx_ant; 268 u8 valid_tx_ant;
251 u8 valid_rx_ant; 269 u8 valid_rx_ant;
252 bool need_pll_cfg; 270
271 /* for iwl_apm_init() */
272 u32 pll_cfg_val;
273 bool set_l0s;
274 bool use_bsm;
275
253 bool use_isr_legacy; 276 bool use_isr_legacy;
254 enum iwl_pa_type pa_type; 277 enum iwl_pa_type pa_type;
255 const u16 max_ll_items; 278 const u16 max_ll_items;
256 const bool shadow_ram_support; 279 const bool shadow_ram_support;
257 const bool ht_greenfield_support; 280 const bool ht_greenfield_support;
281 u16 led_compensation;
258 const bool broken_powersave; 282 const bool broken_powersave;
259 bool use_rts_for_ht; 283 bool use_rts_for_ht;
284 int chain_noise_num_beacons;
285 const bool supports_idle;
286 bool adv_thermal_throttle;
287 bool support_ct_kill_exit;
288 bool support_sm_ps;
289 const bool support_wimax_coexist;
260}; 290};
261 291
262/*************************** 292/***************************
@@ -275,7 +305,7 @@ int iwl_check_rxon_cmd(struct iwl_priv *priv);
275int iwl_full_rxon_required(struct iwl_priv *priv); 305int iwl_full_rxon_required(struct iwl_priv *priv);
276void iwl_set_rxon_chain(struct iwl_priv *priv); 306void iwl_set_rxon_chain(struct iwl_priv *priv);
277int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch); 307int iwl_set_rxon_channel(struct iwl_priv *priv, struct ieee80211_channel *ch);
278void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_info *ht_info); 308void iwl_set_rxon_ht(struct iwl_priv *priv, struct iwl_ht_config *ht_conf);
279u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv, 309u8 iwl_is_ht40_tx_allowed(struct iwl_priv *priv,
280 struct ieee80211_sta_ht_cap *sta_ht_inf); 310 struct ieee80211_sta_ht_cap *sta_ht_inf);
281void iwl_set_flags_for_band(struct iwl_priv *priv, enum ieee80211_band band); 311void iwl_set_flags_for_band(struct iwl_priv *priv, enum ieee80211_band band);
@@ -289,10 +319,7 @@ void iwl_configure_filter(struct ieee80211_hw *hw,
289 unsigned int changed_flags, 319 unsigned int changed_flags,
290 unsigned int *total_flags, u64 multicast); 320 unsigned int *total_flags, u64 multicast);
291int iwl_hw_nic_init(struct iwl_priv *priv); 321int iwl_hw_nic_init(struct iwl_priv *priv);
292int iwl_setup_mac(struct iwl_priv *priv);
293int iwl_set_hw_params(struct iwl_priv *priv); 322int iwl_set_hw_params(struct iwl_priv *priv);
294int iwl_init_drv(struct iwl_priv *priv);
295void iwl_uninit_drv(struct iwl_priv *priv);
296bool iwl_is_monitor_mode(struct iwl_priv *priv); 323bool iwl_is_monitor_mode(struct iwl_priv *priv);
297void iwl_post_associate(struct iwl_priv *priv); 324void iwl_post_associate(struct iwl_priv *priv);
298void iwl_bss_info_changed(struct ieee80211_hw *hw, 325void iwl_bss_info_changed(struct ieee80211_hw *hw,
@@ -311,6 +338,11 @@ void iwl_config_ap(struct iwl_priv *priv);
311int iwl_mac_get_tx_stats(struct ieee80211_hw *hw, 338int iwl_mac_get_tx_stats(struct ieee80211_hw *hw,
312 struct ieee80211_tx_queue_stats *stats); 339 struct ieee80211_tx_queue_stats *stats);
313void iwl_mac_reset_tsf(struct ieee80211_hw *hw); 340void iwl_mac_reset_tsf(struct ieee80211_hw *hw);
341int iwl_alloc_txq_mem(struct iwl_priv *priv);
342void iwl_free_txq_mem(struct iwl_priv *priv);
343void iwlcore_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
344 __le32 *tx_flags);
345int iwl_send_wimax_coex(struct iwl_priv *priv);
314#ifdef CONFIG_IWLWIFI_DEBUGFS 346#ifdef CONFIG_IWLWIFI_DEBUGFS
315int iwl_alloc_traffic_mem(struct iwl_priv *priv); 347int iwl_alloc_traffic_mem(struct iwl_priv *priv);
316void iwl_free_traffic_mem(struct iwl_priv *priv); 348void iwl_free_traffic_mem(struct iwl_priv *priv);
@@ -511,7 +543,7 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv, u8 id, u16 len,
511 const void *data, 543 const void *data,
512 void (*callback)(struct iwl_priv *priv, 544 void (*callback)(struct iwl_priv *priv,
513 struct iwl_device_cmd *cmd, 545 struct iwl_device_cmd *cmd,
514 struct sk_buff *skb)); 546 struct iwl_rx_packet *pkt));
515 547
516int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd); 548int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd);
517 549
@@ -571,6 +603,7 @@ void iwlcore_free_geos(struct iwl_priv *priv);
571#define STATUS_HCMD_SYNC_ACTIVE 1 /* sync host command in progress */ 603#define STATUS_HCMD_SYNC_ACTIVE 1 /* sync host command in progress */
572#define STATUS_INT_ENABLED 2 604#define STATUS_INT_ENABLED 2
573#define STATUS_RF_KILL_HW 3 605#define STATUS_RF_KILL_HW 3
606#define STATUS_CT_KILL 4
574#define STATUS_INIT 5 607#define STATUS_INIT 5
575#define STATUS_ALIVE 6 608#define STATUS_ALIVE 6
576#define STATUS_READY 7 609#define STATUS_READY 7
@@ -615,6 +648,11 @@ static inline int iwl_is_rfkill(struct iwl_priv *priv)
615 return iwl_is_rfkill_hw(priv); 648 return iwl_is_rfkill_hw(priv);
616} 649}
617 650
651static inline int iwl_is_ctkill(struct iwl_priv *priv)
652{
653 return test_bit(STATUS_CT_KILL, &priv->status);
654}
655
618static inline int iwl_is_ready_rf(struct iwl_priv *priv) 656static inline int iwl_is_ready_rf(struct iwl_priv *priv)
619{ 657{
620 658
@@ -636,6 +674,9 @@ extern void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
636 struct iwl_rx_mem_buffer *rxb); 674 struct iwl_rx_mem_buffer *rxb);
637void iwl_rx_reply_compressed_ba(struct iwl_priv *priv, 675void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
638 struct iwl_rx_mem_buffer *rxb); 676 struct iwl_rx_mem_buffer *rxb);
677void iwl_apm_stop(struct iwl_priv *priv);
678int iwl_apm_stop_master(struct iwl_priv *priv);
679int iwl_apm_init(struct iwl_priv *priv);
639 680
640void iwl_setup_rxon_timing(struct iwl_priv *priv); 681void iwl_setup_rxon_timing(struct iwl_priv *priv);
641static inline int iwl_send_rxon_assoc(struct iwl_priv *priv) 682static inline int iwl_send_rxon_assoc(struct iwl_priv *priv)
@@ -655,5 +696,4 @@ static inline const struct ieee80211_supported_band *iwl_get_hw_mode(
655{ 696{
656 return priv->hw->wiphy->bands[band]; 697 return priv->hw->wiphy->bands[band];
657} 698}
658
659#endif /* __iwl_core_h__ */ 699#endif /* __iwl_core_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-csr.h b/drivers/net/wireless/iwlwifi/iwl-csr.h
index 06437d13e73e..b6ed5a3147a1 100644
--- a/drivers/net/wireless/iwlwifi/iwl-csr.h
+++ b/drivers/net/wireless/iwlwifi/iwl-csr.h
@@ -109,8 +109,9 @@
109 * Bit fields: 109 * Bit fields:
110 * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step 110 * 3-2: 0 = A, 1 = B, 2 = C, 3 = D step
111 */ 111 */
112#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C) 112#define CSR_HW_REV_WA_REG (CSR_BASE+0x22C)
113#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240) 113#define CSR_DBG_HPET_MEM_REG (CSR_BASE+0x240)
114#define CSR_DBG_LINK_PWR_MGMT_REG (CSR_BASE+0x250)
114 115
115/* Bits for CSR_HW_IF_CONFIG_REG */ 116/* Bits for CSR_HW_IF_CONFIG_REG */
116#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010) 117#define CSR49_HW_IF_CONFIG_REG_BIT_4965_R (0x00000010)
@@ -195,6 +196,7 @@
195#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080) 196#define CSR_RESET_REG_FLAG_SW_RESET (0x00000080)
196#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100) 197#define CSR_RESET_REG_FLAG_MASTER_DISABLED (0x00000100)
197#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200) 198#define CSR_RESET_REG_FLAG_STOP_MASTER (0x00000200)
199#define CSR_RESET_LINK_PWR_MGMT_DISABLED (0x80000000)
198 200
199/* GP (general purpose) CONTROL */ 201/* GP (general purpose) CONTROL */
200#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001) 202#define CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY (0x00000001)
@@ -230,12 +232,22 @@
230 232
231/* EEPROM GP */ 233/* EEPROM GP */
232#define CSR_EEPROM_GP_VALID_MSK (0x00000007) 234#define CSR_EEPROM_GP_VALID_MSK (0x00000007)
233#define CSR_EEPROM_GP_BAD_SIGNATURE (0x00000000)
234#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180) 235#define CSR_EEPROM_GP_IF_OWNER_MSK (0x00000180)
235#define CSR_OTP_GP_REG_DEVICE_SELECT (0x00010000) /* 0 - EEPROM, 1 - OTP */ 236#define CSR_OTP_GP_REG_DEVICE_SELECT (0x00010000) /* 0 - EEPROM, 1 - OTP */
236#define CSR_OTP_GP_REG_OTP_ACCESS_MODE (0x00020000) /* 0 - absolute, 1 - relative */ 237#define CSR_OTP_GP_REG_OTP_ACCESS_MODE (0x00020000) /* 0 - absolute, 1 - relative */
237#define CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK (0x00100000) /* bit 20 */ 238#define CSR_OTP_GP_REG_ECC_CORR_STATUS_MSK (0x00100000) /* bit 20 */
238#define CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK (0x00200000) /* bit 21 */ 239#define CSR_OTP_GP_REG_ECC_UNCORR_STATUS_MSK (0x00200000) /* bit 21 */
240#define CSR_GP_REG_POWER_SAVE_STATUS_MSK (0x03000000) /* bit 24/25 */
241#define CSR_GP_REG_NO_POWER_SAVE (0x00000000)
242#define CSR_GP_REG_MAC_POWER_SAVE (0x01000000)
243#define CSR_GP_REG_PHY_POWER_SAVE (0x02000000)
244#define CSR_GP_REG_POWER_SAVE_ERROR (0x03000000)
245
246/* EEPROM signature */
247#define CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP (0x00000000)
248#define CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP (0x00000001)
249#define CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K (0x00000002)
250#define CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K (0x00000004)
239 251
240/* CSR GIO */ 252/* CSR GIO */
241#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002) 253#define CSR_GIO_REG_VAL_L0S_ENABLED (0x00000002)
diff --git a/drivers/net/wireless/iwlwifi/iwl-debug.h b/drivers/net/wireless/iwlwifi/iwl-debug.h
index cbc62904655d..96c92eab692a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debug.h
+++ b/drivers/net/wireless/iwlwifi/iwl-debug.h
@@ -84,9 +84,7 @@ struct iwl_debugfs {
84 struct dentry *file_interrupt; 84 struct dentry *file_interrupt;
85 struct dentry *file_qos; 85 struct dentry *file_qos;
86 struct dentry *file_thermal_throttling; 86 struct dentry *file_thermal_throttling;
87#ifdef CONFIG_IWLWIFI_LEDS
88 struct dentry *file_led; 87 struct dentry *file_led;
89#endif
90 struct dentry *file_disable_ht40; 88 struct dentry *file_disable_ht40;
91 struct dentry *file_sleep_level_override; 89 struct dentry *file_sleep_level_override;
92 struct dentry *file_current_sleep_command; 90 struct dentry *file_current_sleep_command;
@@ -108,6 +106,7 @@ struct iwl_debugfs {
108 struct dentry *file_sensitivity; 106 struct dentry *file_sensitivity;
109 struct dentry *file_chain_noise; 107 struct dentry *file_chain_noise;
110 struct dentry *file_tx_power; 108 struct dentry *file_tx_power;
109 struct dentry *file_power_save_status;
111 } dbgfs_debug_files; 110 } dbgfs_debug_files;
112 u32 sram_offset; 111 u32 sram_offset;
113 u32 sram_len; 112 u32 sram_len;
diff --git a/drivers/net/wireless/iwlwifi/iwl-debugfs.c b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
index a198bcf61022..8784911fd56e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-debugfs.c
+++ b/drivers/net/wireless/iwlwifi/iwl-debugfs.c
@@ -383,6 +383,7 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
383 int pos = 0, ofs = 0, buf_size = 0; 383 int pos = 0, ofs = 0, buf_size = 0;
384 const u8 *ptr; 384 const u8 *ptr;
385 char *buf; 385 char *buf;
386 u16 eeprom_ver;
386 size_t eeprom_len = priv->cfg->eeprom_size; 387 size_t eeprom_len = priv->cfg->eeprom_size;
387 buf_size = 4 * eeprom_len + 256; 388 buf_size = 4 * eeprom_len + 256;
388 389
@@ -403,9 +404,11 @@ static ssize_t iwl_dbgfs_nvm_read(struct file *file,
403 IWL_ERR(priv, "Can not allocate Buffer\n"); 404 IWL_ERR(priv, "Can not allocate Buffer\n");
404 return -ENOMEM; 405 return -ENOMEM;
405 } 406 }
406 pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s\n", 407 eeprom_ver = iwl_eeprom_query16(priv, EEPROM_VERSION);
408 pos += scnprintf(buf + pos, buf_size - pos, "NVM Type: %s, "
409 "version: 0x%x\n",
407 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) 410 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
408 ? "OTP" : "EEPROM"); 411 ? "OTP" : "EEPROM", eeprom_ver);
409 for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) { 412 for (ofs = 0 ; ofs < eeprom_len ; ofs += 16) {
410 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs); 413 pos += scnprintf(buf + pos, buf_size - pos, "0x%.4x ", ofs);
411 hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos, 414 hex_dump_to_buffer(ptr + ofs, 16 , 16, 2, buf + pos,
@@ -532,6 +535,8 @@ static ssize_t iwl_dbgfs_status_read(struct file *file,
532 test_bit(STATUS_INT_ENABLED, &priv->status)); 535 test_bit(STATUS_INT_ENABLED, &priv->status));
533 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n", 536 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_RF_KILL_HW:\t %d\n",
534 test_bit(STATUS_RF_KILL_HW, &priv->status)); 537 test_bit(STATUS_RF_KILL_HW, &priv->status));
538 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_CT_KILL:\t\t %d\n",
539 test_bit(STATUS_CT_KILL, &priv->status));
535 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n", 540 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_INIT:\t\t %d\n",
536 test_bit(STATUS_INIT, &priv->status)); 541 test_bit(STATUS_INIT, &priv->status));
537 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n", 542 pos += scnprintf(buf + pos, bufsz - pos, "STATUS_ALIVE:\t\t %d\n",
@@ -672,7 +677,6 @@ static ssize_t iwl_dbgfs_qos_read(struct file *file, char __user *user_buf,
672 return ret; 677 return ret;
673} 678}
674 679
675#ifdef CONFIG_IWLWIFI_LEDS
676static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf, 680static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
677 size_t count, loff_t *ppos) 681 size_t count, loff_t *ppos)
678{ 682{
@@ -697,7 +701,6 @@ static ssize_t iwl_dbgfs_led_read(struct file *file, char __user *user_buf,
697 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 701 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
698 return ret; 702 return ret;
699} 703}
700#endif
701 704
702static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file, 705static ssize_t iwl_dbgfs_thermal_throttling_read(struct file *file,
703 char __user *user_buf, 706 char __user *user_buf,
@@ -798,15 +801,20 @@ static ssize_t iwl_dbgfs_sleep_level_override_write(struct file *file,
798 * valid here. However, let's not confuse them and present 801 * valid here. However, let's not confuse them and present
799 * IWL_POWER_INDEX_1 as "1", not "0". 802 * IWL_POWER_INDEX_1 as "1", not "0".
800 */ 803 */
801 if (value > 0) 804 if (value == 0)
805 return -EINVAL;
806 else if (value > 0)
802 value -= 1; 807 value -= 1;
803 808
804 if (value != -1 && (value < 0 || value >= IWL_POWER_NUM)) 809 if (value != -1 && (value < 0 || value >= IWL_POWER_NUM))
805 return -EINVAL; 810 return -EINVAL;
806 811
812 if (!iwl_is_ready_rf(priv))
813 return -EAGAIN;
814
807 priv->power_data.debug_sleep_level_override = value; 815 priv->power_data.debug_sleep_level_override = value;
808 816
809 iwl_power_update_mode(priv, false); 817 iwl_power_update_mode(priv, true);
810 818
811 return count; 819 return count;
812} 820}
@@ -861,9 +869,7 @@ DEBUGFS_READ_FILE_OPS(channels);
861DEBUGFS_READ_FILE_OPS(status); 869DEBUGFS_READ_FILE_OPS(status);
862DEBUGFS_READ_WRITE_FILE_OPS(interrupt); 870DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
863DEBUGFS_READ_FILE_OPS(qos); 871DEBUGFS_READ_FILE_OPS(qos);
864#ifdef CONFIG_IWLWIFI_LEDS
865DEBUGFS_READ_FILE_OPS(led); 872DEBUGFS_READ_FILE_OPS(led);
866#endif
867DEBUGFS_READ_FILE_OPS(thermal_throttling); 873DEBUGFS_READ_FILE_OPS(thermal_throttling);
868DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40); 874DEBUGFS_READ_WRITE_FILE_OPS(disable_ht40);
869DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override); 875DEBUGFS_READ_WRITE_FILE_OPS(sleep_level_override);
@@ -881,10 +887,14 @@ static ssize_t iwl_dbgfs_traffic_log_read(struct file *file,
881 struct iwl_rx_queue *rxq = &priv->rxq; 887 struct iwl_rx_queue *rxq = &priv->rxq;
882 char *buf; 888 char *buf;
883 int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) + 889 int bufsz = ((IWL_TRAFFIC_ENTRIES * IWL_TRAFFIC_ENTRY_SIZE * 64) * 2) +
884 (IWL_MAX_NUM_QUEUES * 32 * 8) + 400; 890 (priv->cfg->num_of_queues * 32 * 8) + 400;
885 const u8 *ptr; 891 const u8 *ptr;
886 ssize_t ret; 892 ssize_t ret;
887 893
894 if (!priv->txq) {
895 IWL_ERR(priv, "txq not ready\n");
896 return -EAGAIN;
897 }
888 buf = kzalloc(bufsz, GFP_KERNEL); 898 buf = kzalloc(bufsz, GFP_KERNEL);
889 if (!buf) { 899 if (!buf) {
890 IWL_ERR(priv, "Can not allocate buffer\n"); 900 IWL_ERR(priv, "Can not allocate buffer\n");
@@ -976,8 +986,12 @@ static ssize_t iwl_dbgfs_tx_queue_read(struct file *file,
976 int pos = 0; 986 int pos = 0;
977 int cnt; 987 int cnt;
978 int ret; 988 int ret;
979 const size_t bufsz = sizeof(char) * 60 * IWL_MAX_NUM_QUEUES; 989 const size_t bufsz = sizeof(char) * 60 * priv->cfg->num_of_queues;
980 990
991 if (!priv->txq) {
992 IWL_ERR(priv, "txq not ready\n");
993 return -EAGAIN;
994 }
981 buf = kzalloc(bufsz, GFP_KERNEL); 995 buf = kzalloc(bufsz, GFP_KERNEL);
982 if (!buf) 996 if (!buf)
983 return -ENOMEM; 997 return -ENOMEM;
@@ -1068,10 +1082,10 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
1068 sizeof(struct statistics_rx_non_phy) * 20 + 1082 sizeof(struct statistics_rx_non_phy) * 20 +
1069 sizeof(struct statistics_rx_ht_phy) * 20 + 400; 1083 sizeof(struct statistics_rx_ht_phy) * 20 + 400;
1070 ssize_t ret; 1084 ssize_t ret;
1071 struct statistics_rx_phy *ofdm; 1085 struct statistics_rx_phy *ofdm, *accum_ofdm;
1072 struct statistics_rx_phy *cck; 1086 struct statistics_rx_phy *cck, *accum_cck;
1073 struct statistics_rx_non_phy *general; 1087 struct statistics_rx_non_phy *general, *accum_general;
1074 struct statistics_rx_ht_phy *ht; 1088 struct statistics_rx_ht_phy *ht, *accum_ht;
1075 1089
1076 if (!iwl_is_alive(priv)) 1090 if (!iwl_is_alive(priv))
1077 return -EAGAIN; 1091 return -EAGAIN;
@@ -1100,155 +1114,268 @@ static ssize_t iwl_dbgfs_ucode_rx_stats_read(struct file *file,
1100 cck = &priv->statistics.rx.cck; 1114 cck = &priv->statistics.rx.cck;
1101 general = &priv->statistics.rx.general; 1115 general = &priv->statistics.rx.general;
1102 ht = &priv->statistics.rx.ofdm_ht; 1116 ht = &priv->statistics.rx.ofdm_ht;
1117 accum_ofdm = &priv->accum_statistics.rx.ofdm;
1118 accum_cck = &priv->accum_statistics.rx.cck;
1119 accum_general = &priv->accum_statistics.rx.general;
1120 accum_ht = &priv->accum_statistics.rx.ofdm_ht;
1103 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); 1121 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
1104 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM:\n"); 1122 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM:\n");
1105 pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt: %u\n", 1123 pos += scnprintf(buf + pos, bufsz - pos,
1106 le32_to_cpu(ofdm->ina_cnt)); 1124 "\t\t\tcurrent\t\t\taccumulative\n");
1107 pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt: %u\n", 1125 pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt:\t\t%u\t\t\t%u\n",
1108 le32_to_cpu(ofdm->fina_cnt)); 1126 le32_to_cpu(ofdm->ina_cnt), accum_ofdm->ina_cnt);
1109 pos += scnprintf(buf + pos, bufsz - pos, "plcp_err: %u\n", 1127 pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt:\t\t%u\t\t\t%u\n",
1110 le32_to_cpu(ofdm->plcp_err)); 1128 le32_to_cpu(ofdm->fina_cnt), accum_ofdm->fina_cnt);
1111 pos += scnprintf(buf + pos, bufsz - pos, "crc32_err: %u\n", 1129 pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
1112 le32_to_cpu(ofdm->crc32_err)); 1130 le32_to_cpu(ofdm->plcp_err), accum_ofdm->plcp_err);
1113 pos += scnprintf(buf + pos, bufsz - pos, "overrun_err: %u\n", 1131 pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
1114 le32_to_cpu(ofdm->overrun_err)); 1132 le32_to_cpu(ofdm->crc32_err), accum_ofdm->crc32_err);
1115 pos += scnprintf(buf + pos, bufsz - pos, "early_overrun_err: %u\n", 1133 pos += scnprintf(buf + pos, bufsz - pos,
1116 le32_to_cpu(ofdm->early_overrun_err)); 1134 "overrun_err:\t\t%u\t\t\t%u\n",
1117 pos += scnprintf(buf + pos, bufsz - pos, "crc32_good: %u\n", 1135 le32_to_cpu(ofdm->overrun_err),
1118 le32_to_cpu(ofdm->crc32_good)); 1136 accum_ofdm->overrun_err);
1119 pos += scnprintf(buf + pos, bufsz - pos, "false_alarm_cnt: %u\n", 1137 pos += scnprintf(buf + pos, bufsz - pos,
1120 le32_to_cpu(ofdm->false_alarm_cnt)); 1138 "early_overrun_err:\t%u\t\t\t%u\n",
1121 pos += scnprintf(buf + pos, bufsz - pos, "fina_sync_err_cnt: %u\n", 1139 le32_to_cpu(ofdm->early_overrun_err),
1122 le32_to_cpu(ofdm->fina_sync_err_cnt)); 1140 accum_ofdm->early_overrun_err);
1123 pos += scnprintf(buf + pos, bufsz - pos, "sfd_timeout: %u\n", 1141 pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
1124 le32_to_cpu(ofdm->sfd_timeout)); 1142 le32_to_cpu(ofdm->crc32_good),
1125 pos += scnprintf(buf + pos, bufsz - pos, "fina_timeout: %u\n", 1143 accum_ofdm->crc32_good);
1126 le32_to_cpu(ofdm->fina_timeout)); 1144 pos += scnprintf(buf + pos, bufsz - pos,
1127 pos += scnprintf(buf + pos, bufsz - pos, "unresponded_rts: %u\n", 1145 "false_alarm_cnt:\t%u\t\t\t%u\n",
1128 le32_to_cpu(ofdm->unresponded_rts)); 1146 le32_to_cpu(ofdm->false_alarm_cnt),
1129 pos += scnprintf(buf + pos, bufsz - pos, 1147 accum_ofdm->false_alarm_cnt);
1130 "rxe_frame_limit_overrun: %u\n", 1148 pos += scnprintf(buf + pos, bufsz - pos,
1131 le32_to_cpu(ofdm->rxe_frame_limit_overrun)); 1149 "fina_sync_err_cnt:\t%u\t\t\t%u\n",
1132 pos += scnprintf(buf + pos, bufsz - pos, "sent_ack_cnt: %u\n", 1150 le32_to_cpu(ofdm->fina_sync_err_cnt),
1133 le32_to_cpu(ofdm->sent_ack_cnt)); 1151 accum_ofdm->fina_sync_err_cnt);
1134 pos += scnprintf(buf + pos, bufsz - pos, "sent_cts_cnt: %u\n", 1152 pos += scnprintf(buf + pos, bufsz - pos,
1135 le32_to_cpu(ofdm->sent_cts_cnt)); 1153 "sfd_timeout:\t\t%u\t\t\t%u\n",
1136 pos += scnprintf(buf + pos, bufsz - pos, "sent_ba_rsp_cnt: %u\n", 1154 le32_to_cpu(ofdm->sfd_timeout),
1137 le32_to_cpu(ofdm->sent_ba_rsp_cnt)); 1155 accum_ofdm->sfd_timeout);
1138 pos += scnprintf(buf + pos, bufsz - pos, "dsp_self_kill: %u\n", 1156 pos += scnprintf(buf + pos, bufsz - pos,
1139 le32_to_cpu(ofdm->dsp_self_kill)); 1157 "fina_timeout:\t\t%u\t\t\t%u\n",
1140 pos += scnprintf(buf + pos, bufsz - pos, "mh_format_err: %u\n", 1158 le32_to_cpu(ofdm->fina_timeout),
1141 le32_to_cpu(ofdm->mh_format_err)); 1159 accum_ofdm->fina_timeout);
1142 pos += scnprintf(buf + pos, bufsz - pos, "re_acq_main_rssi_sum: %u\n", 1160 pos += scnprintf(buf + pos, bufsz - pos,
1143 le32_to_cpu(ofdm->re_acq_main_rssi_sum)); 1161 "unresponded_rts:\t%u\t\t\t%u\n",
1162 le32_to_cpu(ofdm->unresponded_rts),
1163 accum_ofdm->unresponded_rts);
1164 pos += scnprintf(buf + pos, bufsz - pos,
1165 "rxe_frame_lmt_ovrun:\t%u\t\t\t%u\n",
1166 le32_to_cpu(ofdm->rxe_frame_limit_overrun),
1167 accum_ofdm->rxe_frame_limit_overrun);
1168 pos += scnprintf(buf + pos, bufsz - pos,
1169 "sent_ack_cnt:\t\t%u\t\t\t%u\n",
1170 le32_to_cpu(ofdm->sent_ack_cnt),
1171 accum_ofdm->sent_ack_cnt);
1172 pos += scnprintf(buf + pos, bufsz - pos,
1173 "sent_cts_cnt:\t\t%u\t\t\t%u\n",
1174 le32_to_cpu(ofdm->sent_cts_cnt),
1175 accum_ofdm->sent_cts_cnt);
1176 pos += scnprintf(buf + pos, bufsz - pos,
1177 "sent_ba_rsp_cnt:\t%u\t\t\t%u\n",
1178 le32_to_cpu(ofdm->sent_ba_rsp_cnt),
1179 accum_ofdm->sent_ba_rsp_cnt);
1180 pos += scnprintf(buf + pos, bufsz - pos,
1181 "dsp_self_kill:\t\t%u\t\t\t%u\n",
1182 le32_to_cpu(ofdm->dsp_self_kill),
1183 accum_ofdm->dsp_self_kill);
1184 pos += scnprintf(buf + pos, bufsz - pos,
1185 "mh_format_err:\t\t%u\t\t\t%u\n",
1186 le32_to_cpu(ofdm->mh_format_err),
1187 accum_ofdm->mh_format_err);
1188 pos += scnprintf(buf + pos, bufsz - pos,
1189 "re_acq_main_rssi_sum:\t%u\t\t\t%u\n",
1190 le32_to_cpu(ofdm->re_acq_main_rssi_sum),
1191 accum_ofdm->re_acq_main_rssi_sum);
1144 1192
1145 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - CCK:\n"); 1193 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - CCK:\n");
1146 pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt: %u\n", 1194 pos += scnprintf(buf + pos, bufsz - pos,
1147 le32_to_cpu(cck->ina_cnt)); 1195 "\t\t\tcurrent\t\t\taccumulative\n");
1148 pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt: %u\n", 1196 pos += scnprintf(buf + pos, bufsz - pos, "ina_cnt:\t\t%u\t\t\t%u\n",
1149 le32_to_cpu(cck->fina_cnt)); 1197 le32_to_cpu(cck->ina_cnt), accum_cck->ina_cnt);
1150 pos += scnprintf(buf + pos, bufsz - pos, "plcp_err: %u\n", 1198 pos += scnprintf(buf + pos, bufsz - pos, "fina_cnt:\t\t%u\t\t\t%u\n",
1151 le32_to_cpu(cck->plcp_err)); 1199 le32_to_cpu(cck->fina_cnt), accum_cck->fina_cnt);
1152 pos += scnprintf(buf + pos, bufsz - pos, "crc32_err: %u\n", 1200 pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
1153 le32_to_cpu(cck->crc32_err)); 1201 le32_to_cpu(cck->plcp_err), accum_cck->plcp_err);
1154 pos += scnprintf(buf + pos, bufsz - pos, "overrun_err: %u\n", 1202 pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
1155 le32_to_cpu(cck->overrun_err)); 1203 le32_to_cpu(cck->crc32_err), accum_cck->crc32_err);
1156 pos += scnprintf(buf + pos, bufsz - pos, "early_overrun_err: %u\n", 1204 pos += scnprintf(buf + pos, bufsz - pos,
1157 le32_to_cpu(cck->early_overrun_err)); 1205 "overrun_err:\t\t%u\t\t\t%u\n",
1158 pos += scnprintf(buf + pos, bufsz - pos, "crc32_good: %u\n", 1206 le32_to_cpu(cck->overrun_err),
1159 le32_to_cpu(cck->crc32_good)); 1207 accum_cck->overrun_err);
1160 pos += scnprintf(buf + pos, bufsz - pos, "false_alarm_cnt: %u\n", 1208 pos += scnprintf(buf + pos, bufsz - pos,
1161 le32_to_cpu(cck->false_alarm_cnt)); 1209 "early_overrun_err:\t%u\t\t\t%u\n",
1162 pos += scnprintf(buf + pos, bufsz - pos, "fina_sync_err_cnt: %u\n", 1210 le32_to_cpu(cck->early_overrun_err),
1163 le32_to_cpu(cck->fina_sync_err_cnt)); 1211 accum_cck->early_overrun_err);
1164 pos += scnprintf(buf + pos, bufsz - pos, "sfd_timeout: %u\n", 1212 pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
1165 le32_to_cpu(cck->sfd_timeout)); 1213 le32_to_cpu(cck->crc32_good), accum_cck->crc32_good);
1166 pos += scnprintf(buf + pos, bufsz - pos, "fina_timeout: %u\n", 1214 pos += scnprintf(buf + pos, bufsz - pos,
1167 le32_to_cpu(cck->fina_timeout)); 1215 "false_alarm_cnt:\t%u\t\t\t%u\n",
1168 pos += scnprintf(buf + pos, bufsz - pos, "unresponded_rts: %u\n", 1216 le32_to_cpu(cck->false_alarm_cnt),
1169 le32_to_cpu(cck->unresponded_rts)); 1217 accum_cck->false_alarm_cnt);
1170 pos += scnprintf(buf + pos, bufsz - pos, 1218 pos += scnprintf(buf + pos, bufsz - pos,
1171 "rxe_frame_limit_overrun: %u\n", 1219 "fina_sync_err_cnt:\t%u\t\t\t%u\n",
1172 le32_to_cpu(cck->rxe_frame_limit_overrun)); 1220 le32_to_cpu(cck->fina_sync_err_cnt),
1173 pos += scnprintf(buf + pos, bufsz - pos, "sent_ack_cnt: %u\n", 1221 accum_cck->fina_sync_err_cnt);
1174 le32_to_cpu(cck->sent_ack_cnt)); 1222 pos += scnprintf(buf + pos, bufsz - pos,
1175 pos += scnprintf(buf + pos, bufsz - pos, "sent_cts_cnt: %u\n", 1223 "sfd_timeout:\t\t%u\t\t\t%u\n",
1176 le32_to_cpu(cck->sent_cts_cnt)); 1224 le32_to_cpu(cck->sfd_timeout),
1177 pos += scnprintf(buf + pos, bufsz - pos, "sent_ba_rsp_cnt: %u\n", 1225 accum_cck->sfd_timeout);
1178 le32_to_cpu(cck->sent_ba_rsp_cnt)); 1226 pos += scnprintf(buf + pos, bufsz - pos,
1179 pos += scnprintf(buf + pos, bufsz - pos, "dsp_self_kill: %u\n", 1227 "fina_timeout:\t\t%u\t\t\t%u\n",
1180 le32_to_cpu(cck->dsp_self_kill)); 1228 le32_to_cpu(cck->fina_timeout),
1181 pos += scnprintf(buf + pos, bufsz - pos, "mh_format_err: %u\n", 1229 accum_cck->fina_timeout);
1182 le32_to_cpu(cck->mh_format_err)); 1230 pos += scnprintf(buf + pos, bufsz - pos,
1183 pos += scnprintf(buf + pos, bufsz - pos, "re_acq_main_rssi_sum: %u\n", 1231 "unresponded_rts:\t%u\t\t\t%u\n",
1184 le32_to_cpu(cck->re_acq_main_rssi_sum)); 1232 le32_to_cpu(cck->unresponded_rts),
1233 accum_cck->unresponded_rts);
1234 pos += scnprintf(buf + pos, bufsz - pos,
1235 "rxe_frame_lmt_ovrun:\t%u\t\t\t%u\n",
1236 le32_to_cpu(cck->rxe_frame_limit_overrun),
1237 accum_cck->rxe_frame_limit_overrun);
1238 pos += scnprintf(buf + pos, bufsz - pos,
1239 "sent_ack_cnt:\t\t%u\t\t\t%u\n",
1240 le32_to_cpu(cck->sent_ack_cnt),
1241 accum_cck->sent_ack_cnt);
1242 pos += scnprintf(buf + pos, bufsz - pos,
1243 "sent_cts_cnt:\t\t%u\t\t\t%u\n",
1244 le32_to_cpu(cck->sent_cts_cnt),
1245 accum_cck->sent_cts_cnt);
1246 pos += scnprintf(buf + pos, bufsz - pos,
1247 "sent_ba_rsp_cnt:\t%u\t\t\t%u\n",
1248 le32_to_cpu(cck->sent_ba_rsp_cnt),
1249 accum_cck->sent_ba_rsp_cnt);
1250 pos += scnprintf(buf + pos, bufsz - pos,
1251 "dsp_self_kill:\t\t%u\t\t\t%u\n",
1252 le32_to_cpu(cck->dsp_self_kill),
1253 accum_cck->dsp_self_kill);
1254 pos += scnprintf(buf + pos, bufsz - pos,
1255 "mh_format_err:\t\t%u\t\t\t%u\n",
1256 le32_to_cpu(cck->mh_format_err),
1257 accum_cck->mh_format_err);
1258 pos += scnprintf(buf + pos, bufsz - pos,
1259 "re_acq_main_rssi_sum:\t%u\t\t\t%u\n",
1260 le32_to_cpu(cck->re_acq_main_rssi_sum),
1261 accum_cck->re_acq_main_rssi_sum);
1185 1262
1186 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - GENERAL:\n"); 1263 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - GENERAL:\n");
1187 pos += scnprintf(buf + pos, bufsz - pos, "bogus_cts: %u\n", 1264 pos += scnprintf(buf + pos, bufsz - pos,
1188 le32_to_cpu(general->bogus_cts)); 1265 "\t\t\tcurrent\t\t\taccumulative\n");
1189 pos += scnprintf(buf + pos, bufsz - pos, "bogus_ack: %u\n", 1266 pos += scnprintf(buf + pos, bufsz - pos, "bogus_cts:\t\t%u\t\t\t%u\n",
1190 le32_to_cpu(general->bogus_ack)); 1267 le32_to_cpu(general->bogus_cts),
1191 pos += scnprintf(buf + pos, bufsz - pos, "non_bssid_frames: %u\n", 1268 accum_general->bogus_cts);
1192 le32_to_cpu(general->non_bssid_frames)); 1269 pos += scnprintf(buf + pos, bufsz - pos, "bogus_ack:\t\t%u\t\t\t%u\n",
1193 pos += scnprintf(buf + pos, bufsz - pos, "filtered_frames: %u\n", 1270 le32_to_cpu(general->bogus_ack),
1194 le32_to_cpu(general->filtered_frames)); 1271 accum_general->bogus_ack);
1195 pos += scnprintf(buf + pos, bufsz - pos, "non_channel_beacons: %u\n", 1272 pos += scnprintf(buf + pos, bufsz - pos,
1196 le32_to_cpu(general->non_channel_beacons)); 1273 "non_bssid_frames:\t%u\t\t\t%u\n",
1197 pos += scnprintf(buf + pos, bufsz - pos, "channel_beacons: %u\n", 1274 le32_to_cpu(general->non_bssid_frames),
1198 le32_to_cpu(general->channel_beacons)); 1275 accum_general->non_bssid_frames);
1199 pos += scnprintf(buf + pos, bufsz - pos, "num_missed_bcon: %u\n", 1276 pos += scnprintf(buf + pos, bufsz - pos,
1200 le32_to_cpu(general->num_missed_bcon)); 1277 "filtered_frames:\t%u\t\t\t%u\n",
1201 pos += scnprintf(buf + pos, bufsz - pos, 1278 le32_to_cpu(general->filtered_frames),
1202 "adc_rx_saturation_time: %u\n", 1279 accum_general->filtered_frames);
1203 le32_to_cpu(general->adc_rx_saturation_time)); 1280 pos += scnprintf(buf + pos, bufsz - pos,
1204 pos += scnprintf(buf + pos, bufsz - pos, 1281 "non_channel_beacons:\t%u\t\t\t%u\n",
1205 "ina_detection_search_time: %u\n", 1282 le32_to_cpu(general->non_channel_beacons),
1206 le32_to_cpu(general->ina_detection_search_time)); 1283 accum_general->non_channel_beacons);
1207 pos += scnprintf(buf + pos, bufsz - pos, "beacon_silence_rssi_a: %u\n", 1284 pos += scnprintf(buf + pos, bufsz - pos,
1208 le32_to_cpu(general->beacon_silence_rssi_a)); 1285 "channel_beacons:\t%u\t\t\t%u\n",
1209 pos += scnprintf(buf + pos, bufsz - pos, "beacon_silence_rssi_b: %u\n", 1286 le32_to_cpu(general->channel_beacons),
1210 le32_to_cpu(general->beacon_silence_rssi_b)); 1287 accum_general->channel_beacons);
1211 pos += scnprintf(buf + pos, bufsz - pos, "beacon_silence_rssi_c: %u\n", 1288 pos += scnprintf(buf + pos, bufsz - pos,
1212 le32_to_cpu(general->beacon_silence_rssi_c)); 1289 "num_missed_bcon:\t%u\t\t\t%u\n",
1213 pos += scnprintf(buf + pos, bufsz - pos, 1290 le32_to_cpu(general->num_missed_bcon),
1214 "interference_data_flag: %u\n", 1291 accum_general->num_missed_bcon);
1215 le32_to_cpu(general->interference_data_flag)); 1292 pos += scnprintf(buf + pos, bufsz - pos,
1216 pos += scnprintf(buf + pos, bufsz - pos, "channel_load: %u\n", 1293 "adc_rx_saturation_time:\t%u\t\t\t%u\n",
1217 le32_to_cpu(general->channel_load)); 1294 le32_to_cpu(general->adc_rx_saturation_time),
1218 pos += scnprintf(buf + pos, bufsz - pos, "dsp_false_alarms: %u\n", 1295 accum_general->adc_rx_saturation_time);
1219 le32_to_cpu(general->dsp_false_alarms)); 1296 pos += scnprintf(buf + pos, bufsz - pos,
1220 pos += scnprintf(buf + pos, bufsz - pos, "beacon_rssi_a: %u\n", 1297 "ina_detect_search_tm:\t%u\t\t\t%u\n",
1221 le32_to_cpu(general->beacon_rssi_a)); 1298 le32_to_cpu(general->ina_detection_search_time),
1222 pos += scnprintf(buf + pos, bufsz - pos, "beacon_rssi_b: %u\n", 1299 accum_general->ina_detection_search_time);
1223 le32_to_cpu(general->beacon_rssi_b)); 1300 pos += scnprintf(buf + pos, bufsz - pos,
1224 pos += scnprintf(buf + pos, bufsz - pos, "beacon_rssi_c: %u\n", 1301 "beacon_silence_rssi_a:\t%u\t\t\t%u\n",
1225 le32_to_cpu(general->beacon_rssi_c)); 1302 le32_to_cpu(general->beacon_silence_rssi_a),
1226 pos += scnprintf(buf + pos, bufsz - pos, "beacon_energy_a: %u\n", 1303 accum_general->beacon_silence_rssi_a);
1227 le32_to_cpu(general->beacon_energy_a)); 1304 pos += scnprintf(buf + pos, bufsz - pos,
1228 pos += scnprintf(buf + pos, bufsz - pos, "beacon_energy_b: %u\n", 1305 "beacon_silence_rssi_b:\t%u\t\t\t%u\n",
1229 le32_to_cpu(general->beacon_energy_b)); 1306 le32_to_cpu(general->beacon_silence_rssi_b),
1230 pos += scnprintf(buf + pos, bufsz - pos, "beacon_energy_c: %u\n", 1307 accum_general->beacon_silence_rssi_b);
1231 le32_to_cpu(general->beacon_energy_c)); 1308 pos += scnprintf(buf + pos, bufsz - pos,
1309 "beacon_silence_rssi_c:\t%u\t\t\t%u\n",
1310 le32_to_cpu(general->beacon_silence_rssi_c),
1311 accum_general->beacon_silence_rssi_c);
1312 pos += scnprintf(buf + pos, bufsz - pos,
1313 "interference_data_flag:\t%u\t\t\t%u\n",
1314 le32_to_cpu(general->interference_data_flag),
1315 accum_general->interference_data_flag);
1316 pos += scnprintf(buf + pos, bufsz - pos,
1317 "channel_load:\t\t%u\t\t\t%u\n",
1318 le32_to_cpu(general->channel_load),
1319 accum_general->channel_load);
1320 pos += scnprintf(buf + pos, bufsz - pos,
1321 "dsp_false_alarms:\t%u\t\t\t%u\n",
1322 le32_to_cpu(general->dsp_false_alarms),
1323 accum_general->dsp_false_alarms);
1324 pos += scnprintf(buf + pos, bufsz - pos,
1325 "beacon_rssi_a:\t\t%u\t\t\t%u\n",
1326 le32_to_cpu(general->beacon_rssi_a),
1327 accum_general->beacon_rssi_a);
1328 pos += scnprintf(buf + pos, bufsz - pos,
1329 "beacon_rssi_b:\t\t%u\t\t\t%u\n",
1330 le32_to_cpu(general->beacon_rssi_b),
1331 accum_general->beacon_rssi_b);
1332 pos += scnprintf(buf + pos, bufsz - pos,
1333 "beacon_rssi_c:\t\t%u\t\t\t%u\n",
1334 le32_to_cpu(general->beacon_rssi_c),
1335 accum_general->beacon_rssi_c);
1336 pos += scnprintf(buf + pos, bufsz - pos,
1337 "beacon_energy_a:\t%u\t\t\t%u\n",
1338 le32_to_cpu(general->beacon_energy_a),
1339 accum_general->beacon_energy_a);
1340 pos += scnprintf(buf + pos, bufsz - pos,
1341 "beacon_energy_b:\t%u\t\t\t%u\n",
1342 le32_to_cpu(general->beacon_energy_b),
1343 accum_general->beacon_energy_b);
1344 pos += scnprintf(buf + pos, bufsz - pos,
1345 "beacon_energy_c:\t%u\t\t\t%u\n",
1346 le32_to_cpu(general->beacon_energy_c),
1347 accum_general->beacon_energy_c);
1232 1348
1233 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n"); 1349 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Rx - OFDM_HT:\n");
1234 pos += scnprintf(buf + pos, bufsz - pos, "plcp_err: %u\n", 1350 pos += scnprintf(buf + pos, bufsz - pos,
1235 le32_to_cpu(ht->plcp_err)); 1351 "\t\t\tcurrent\t\t\taccumulative\n");
1236 pos += scnprintf(buf + pos, bufsz - pos, "overrun_err: %u\n", 1352 pos += scnprintf(buf + pos, bufsz - pos, "plcp_err:\t\t%u\t\t\t%u\n",
1237 le32_to_cpu(ht->overrun_err)); 1353 le32_to_cpu(ht->plcp_err), accum_ht->plcp_err);
1238 pos += scnprintf(buf + pos, bufsz - pos, "early_overrun_err: %u\n", 1354 pos += scnprintf(buf + pos, bufsz - pos,
1239 le32_to_cpu(ht->early_overrun_err)); 1355 "overrun_err:\t\t%u\t\t\t%u\n",
1240 pos += scnprintf(buf + pos, bufsz - pos, "crc32_good: %u\n", 1356 le32_to_cpu(ht->overrun_err), accum_ht->overrun_err);
1241 le32_to_cpu(ht->crc32_good)); 1357 pos += scnprintf(buf + pos, bufsz - pos,
1242 pos += scnprintf(buf + pos, bufsz - pos, "crc32_err: %u\n", 1358 "early_overrun_err:\t%u\t\t\t%u\n",
1243 le32_to_cpu(ht->crc32_err)); 1359 le32_to_cpu(ht->early_overrun_err),
1244 pos += scnprintf(buf + pos, bufsz - pos, "mh_format_err: %u\n", 1360 accum_ht->early_overrun_err);
1245 le32_to_cpu(ht->mh_format_err)); 1361 pos += scnprintf(buf + pos, bufsz - pos, "crc32_good:\t\t%u\t\t\t%u\n",
1246 pos += scnprintf(buf + pos, bufsz - pos, "agg_crc32_good: %u\n", 1362 le32_to_cpu(ht->crc32_good), accum_ht->crc32_good);
1247 le32_to_cpu(ht->agg_crc32_good)); 1363 pos += scnprintf(buf + pos, bufsz - pos, "crc32_err:\t\t%u\t\t\t%u\n",
1248 pos += scnprintf(buf + pos, bufsz - pos, "agg_mpdu_cnt: %u\n", 1364 le32_to_cpu(ht->crc32_err), accum_ht->crc32_err);
1249 le32_to_cpu(ht->agg_mpdu_cnt)); 1365 pos += scnprintf(buf + pos, bufsz - pos,
1250 pos += scnprintf(buf + pos, bufsz - pos, "agg_cnt: %u\n", 1366 "mh_format_err:\t\t%u\t\t\t%u\n",
1251 le32_to_cpu(ht->agg_cnt)); 1367 le32_to_cpu(ht->mh_format_err),
1368 accum_ht->mh_format_err);
1369 pos += scnprintf(buf + pos, bufsz - pos,
1370 "agg_crc32_good:\t\t%u\t\t\t%u\n",
1371 le32_to_cpu(ht->agg_crc32_good),
1372 accum_ht->agg_crc32_good);
1373 pos += scnprintf(buf + pos, bufsz - pos,
1374 "agg_mpdu_cnt:\t\t%u\t\t\t%u\n",
1375 le32_to_cpu(ht->agg_mpdu_cnt),
1376 accum_ht->agg_mpdu_cnt);
1377 pos += scnprintf(buf + pos, bufsz - pos, "agg_cnt:\t\t%u\t\t\t%u\n",
1378 le32_to_cpu(ht->agg_cnt), accum_ht->agg_cnt);
1252 1379
1253 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1380 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1254 kfree(buf); 1381 kfree(buf);
@@ -1264,7 +1391,7 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
1264 char *buf; 1391 char *buf;
1265 int bufsz = (sizeof(struct statistics_tx) * 24) + 250; 1392 int bufsz = (sizeof(struct statistics_tx) * 24) + 250;
1266 ssize_t ret; 1393 ssize_t ret;
1267 struct statistics_tx *tx; 1394 struct statistics_tx *tx, *accum_tx;
1268 1395
1269 if (!iwl_is_alive(priv)) 1396 if (!iwl_is_alive(priv))
1270 return -EAGAIN; 1397 return -EAGAIN;
@@ -1290,62 +1417,107 @@ static ssize_t iwl_dbgfs_ucode_tx_stats_read(struct file *file,
1290 * might not reflect the current uCode activity 1417 * might not reflect the current uCode activity
1291 */ 1418 */
1292 tx = &priv->statistics.tx; 1419 tx = &priv->statistics.tx;
1420 accum_tx = &priv->accum_statistics.tx;
1293 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); 1421 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
1294 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Tx:\n"); 1422 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_Tx:\n");
1295 pos += scnprintf(buf + pos, bufsz - pos, "preamble: %u\n", 1423 pos += scnprintf(buf + pos, bufsz - pos,
1296 le32_to_cpu(tx->preamble_cnt)); 1424 "\t\t\tcurrent\t\t\taccumulative\n");
1297 pos += scnprintf(buf + pos, bufsz - pos, "rx_detected_cnt: %u\n", 1425 pos += scnprintf(buf + pos, bufsz - pos, "preamble:\t\t\t%u\t\t\t%u\n",
1298 le32_to_cpu(tx->rx_detected_cnt)); 1426 le32_to_cpu(tx->preamble_cnt),
1299 pos += scnprintf(buf + pos, bufsz - pos, "bt_prio_defer_cnt: %u\n", 1427 accum_tx->preamble_cnt);
1300 le32_to_cpu(tx->bt_prio_defer_cnt)); 1428 pos += scnprintf(buf + pos, bufsz - pos,
1301 pos += scnprintf(buf + pos, bufsz - pos, "bt_prio_kill_cnt: %u\n", 1429 "rx_detected_cnt:\t\t%u\t\t\t%u\n",
1302 le32_to_cpu(tx->bt_prio_kill_cnt)); 1430 le32_to_cpu(tx->rx_detected_cnt),
1303 pos += scnprintf(buf + pos, bufsz - pos, "few_bytes_cnt: %u\n", 1431 accum_tx->rx_detected_cnt);
1304 le32_to_cpu(tx->few_bytes_cnt)); 1432 pos += scnprintf(buf + pos, bufsz - pos,
1305 pos += scnprintf(buf + pos, bufsz - pos, "cts_timeout: %u\n", 1433 "bt_prio_defer_cnt:\t\t%u\t\t\t%u\n",
1306 le32_to_cpu(tx->cts_timeout)); 1434 le32_to_cpu(tx->bt_prio_defer_cnt),
1307 pos += scnprintf(buf + pos, bufsz - pos, "ack_timeout: %u\n", 1435 accum_tx->bt_prio_defer_cnt);
1308 le32_to_cpu(tx->ack_timeout)); 1436 pos += scnprintf(buf + pos, bufsz - pos,
1309 pos += scnprintf(buf + pos, bufsz - pos, "expected_ack_cnt: %u\n", 1437 "bt_prio_kill_cnt:\t\t%u\t\t\t%u\n",
1310 le32_to_cpu(tx->expected_ack_cnt)); 1438 le32_to_cpu(tx->bt_prio_kill_cnt),
1311 pos += scnprintf(buf + pos, bufsz - pos, "actual_ack_cnt: %u\n", 1439 accum_tx->bt_prio_kill_cnt);
1312 le32_to_cpu(tx->actual_ack_cnt)); 1440 pos += scnprintf(buf + pos, bufsz - pos,
1313 pos += scnprintf(buf + pos, bufsz - pos, "dump_msdu_cnt: %u\n", 1441 "few_bytes_cnt:\t\t\t%u\t\t\t%u\n",
1314 le32_to_cpu(tx->dump_msdu_cnt)); 1442 le32_to_cpu(tx->few_bytes_cnt),
1315 pos += scnprintf(buf + pos, bufsz - pos, 1443 accum_tx->few_bytes_cnt);
1316 "burst_abort_next_frame_mismatch_cnt: %u\n", 1444 pos += scnprintf(buf + pos, bufsz - pos,
1317 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt)); 1445 "cts_timeout:\t\t\t%u\t\t\t%u\n",
1318 pos += scnprintf(buf + pos, bufsz - pos, 1446 le32_to_cpu(tx->cts_timeout), accum_tx->cts_timeout);
1319 "burst_abort_missing_next_frame_cnt: %u\n", 1447 pos += scnprintf(buf + pos, bufsz - pos,
1320 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt)); 1448 "ack_timeout:\t\t\t%u\t\t\t%u\n",
1321 pos += scnprintf(buf + pos, bufsz - pos, "cts_timeout_collision: %u\n", 1449 le32_to_cpu(tx->ack_timeout),
1322 le32_to_cpu(tx->cts_timeout_collision)); 1450 accum_tx->ack_timeout);
1323 pos += scnprintf(buf + pos, bufsz - pos, 1451 pos += scnprintf(buf + pos, bufsz - pos,
1324 "ack_or_ba_timeout_collision: %u\n", 1452 "expected_ack_cnt:\t\t%u\t\t\t%u\n",
1325 le32_to_cpu(tx->ack_or_ba_timeout_collision)); 1453 le32_to_cpu(tx->expected_ack_cnt),
1326 pos += scnprintf(buf + pos, bufsz - pos, "agg ba_timeout: %u\n", 1454 accum_tx->expected_ack_cnt);
1327 le32_to_cpu(tx->agg.ba_timeout)); 1455 pos += scnprintf(buf + pos, bufsz - pos,
1328 pos += scnprintf(buf + pos, bufsz - pos, 1456 "actual_ack_cnt:\t\t\t%u\t\t\t%u\n",
1329 "agg ba_reschedule_frames: %u\n", 1457 le32_to_cpu(tx->actual_ack_cnt),
1330 le32_to_cpu(tx->agg.ba_reschedule_frames)); 1458 accum_tx->actual_ack_cnt);
1331 pos += scnprintf(buf + pos, bufsz - pos, 1459 pos += scnprintf(buf + pos, bufsz - pos,
1332 "agg scd_query_agg_frame_cnt: %u\n", 1460 "dump_msdu_cnt:\t\t\t%u\t\t\t%u\n",
1333 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt)); 1461 le32_to_cpu(tx->dump_msdu_cnt),
1334 pos += scnprintf(buf + pos, bufsz - pos, "agg scd_query_no_agg: %u\n", 1462 accum_tx->dump_msdu_cnt);
1335 le32_to_cpu(tx->agg.scd_query_no_agg)); 1463 pos += scnprintf(buf + pos, bufsz - pos,
1336 pos += scnprintf(buf + pos, bufsz - pos, "agg scd_query_agg: %u\n", 1464 "abort_nxt_frame_mismatch:"
1337 le32_to_cpu(tx->agg.scd_query_agg)); 1465 "\t%u\t\t\t%u\n",
1338 pos += scnprintf(buf + pos, bufsz - pos, 1466 le32_to_cpu(tx->burst_abort_next_frame_mismatch_cnt),
1339 "agg scd_query_mismatch: %u\n", 1467 accum_tx->burst_abort_next_frame_mismatch_cnt);
1340 le32_to_cpu(tx->agg.scd_query_mismatch)); 1468 pos += scnprintf(buf + pos, bufsz - pos,
1341 pos += scnprintf(buf + pos, bufsz - pos, "agg frame_not_ready: %u\n", 1469 "abort_missing_nxt_frame:"
1342 le32_to_cpu(tx->agg.frame_not_ready)); 1470 "\t%u\t\t\t%u\n",
1343 pos += scnprintf(buf + pos, bufsz - pos, "agg underrun: %u\n", 1471 le32_to_cpu(tx->burst_abort_missing_next_frame_cnt),
1344 le32_to_cpu(tx->agg.underrun)); 1472 accum_tx->burst_abort_missing_next_frame_cnt);
1345 pos += scnprintf(buf + pos, bufsz - pos, "agg bt_prio_kill: %u\n", 1473 pos += scnprintf(buf + pos, bufsz - pos,
1346 le32_to_cpu(tx->agg.bt_prio_kill)); 1474 "cts_timeout_collision:\t\t%u\t\t\t%u\n",
1347 pos += scnprintf(buf + pos, bufsz - pos, "agg rx_ba_rsp_cnt: %u\n", 1475 le32_to_cpu(tx->cts_timeout_collision),
1348 le32_to_cpu(tx->agg.rx_ba_rsp_cnt)); 1476 accum_tx->cts_timeout_collision);
1477 pos += scnprintf(buf + pos, bufsz - pos,
1478 "ack_ba_timeout_collision:\t%u\t\t\t%u\n",
1479 le32_to_cpu(tx->ack_or_ba_timeout_collision),
1480 accum_tx->ack_or_ba_timeout_collision);
1481 pos += scnprintf(buf + pos, bufsz - pos,
1482 "agg ba_timeout:\t\t\t%u\t\t\t%u\n",
1483 le32_to_cpu(tx->agg.ba_timeout),
1484 accum_tx->agg.ba_timeout);
1485 pos += scnprintf(buf + pos, bufsz - pos,
1486 "agg ba_resched_frames:\t\t%u\t\t\t%u\n",
1487 le32_to_cpu(tx->agg.ba_reschedule_frames),
1488 accum_tx->agg.ba_reschedule_frames);
1489 pos += scnprintf(buf + pos, bufsz - pos,
1490 "agg scd_query_agg_frame:\t%u\t\t\t%u\n",
1491 le32_to_cpu(tx->agg.scd_query_agg_frame_cnt),
1492 accum_tx->agg.scd_query_agg_frame_cnt);
1493 pos += scnprintf(buf + pos, bufsz - pos,
1494 "agg scd_query_no_agg:\t\t%u\t\t\t%u\n",
1495 le32_to_cpu(tx->agg.scd_query_no_agg),
1496 accum_tx->agg.scd_query_no_agg);
1497 pos += scnprintf(buf + pos, bufsz - pos,
1498 "agg scd_query_agg:\t\t%u\t\t\t%u\n",
1499 le32_to_cpu(tx->agg.scd_query_agg),
1500 accum_tx->agg.scd_query_agg);
1501 pos += scnprintf(buf + pos, bufsz - pos,
1502 "agg scd_query_mismatch:\t\t%u\t\t\t%u\n",
1503 le32_to_cpu(tx->agg.scd_query_mismatch),
1504 accum_tx->agg.scd_query_mismatch);
1505 pos += scnprintf(buf + pos, bufsz - pos,
1506 "agg frame_not_ready:\t\t%u\t\t\t%u\n",
1507 le32_to_cpu(tx->agg.frame_not_ready),
1508 accum_tx->agg.frame_not_ready);
1509 pos += scnprintf(buf + pos, bufsz - pos,
1510 "agg underrun:\t\t\t%u\t\t\t%u\n",
1511 le32_to_cpu(tx->agg.underrun),
1512 accum_tx->agg.underrun);
1513 pos += scnprintf(buf + pos, bufsz - pos,
1514 "agg bt_prio_kill:\t\t%u\t\t\t%u\n",
1515 le32_to_cpu(tx->agg.bt_prio_kill),
1516 accum_tx->agg.bt_prio_kill);
1517 pos += scnprintf(buf + pos, bufsz - pos,
1518 "agg rx_ba_rsp_cnt:\t\t%u\t\t\t%u\n",
1519 le32_to_cpu(tx->agg.rx_ba_rsp_cnt),
1520 accum_tx->agg.rx_ba_rsp_cnt);
1349 1521
1350 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1522 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1351 kfree(buf); 1523 kfree(buf);
@@ -1361,9 +1533,9 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
1361 char *buf; 1533 char *buf;
1362 int bufsz = sizeof(struct statistics_general) * 4 + 250; 1534 int bufsz = sizeof(struct statistics_general) * 4 + 250;
1363 ssize_t ret; 1535 ssize_t ret;
1364 struct statistics_general *general; 1536 struct statistics_general *general, *accum_general;
1365 struct statistics_dbg *dbg; 1537 struct statistics_dbg *dbg, *accum_dbg;
1366 struct statistics_div *div; 1538 struct statistics_div *div, *accum_div;
1367 1539
1368 if (!iwl_is_alive(priv)) 1540 if (!iwl_is_alive(priv))
1369 return -EAGAIN; 1541 return -EAGAIN;
@@ -1391,34 +1563,53 @@ static ssize_t iwl_dbgfs_ucode_general_stats_read(struct file *file,
1391 general = &priv->statistics.general; 1563 general = &priv->statistics.general;
1392 dbg = &priv->statistics.general.dbg; 1564 dbg = &priv->statistics.general.dbg;
1393 div = &priv->statistics.general.div; 1565 div = &priv->statistics.general.div;
1566 accum_general = &priv->accum_statistics.general;
1567 accum_dbg = &priv->accum_statistics.general.dbg;
1568 accum_div = &priv->accum_statistics.general.div;
1394 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz); 1569 pos += iwl_dbgfs_statistics_flag(priv, buf, bufsz);
1395 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_General:\n"); 1570 pos += scnprintf(buf + pos, bufsz - pos, "Statistics_General:\n");
1396 pos += scnprintf(buf + pos, bufsz - pos, "temperature: %u\n", 1571 pos += scnprintf(buf + pos, bufsz - pos,
1572 "\t\t\tcurrent\t\t\taccumulative\n");
1573 pos += scnprintf(buf + pos, bufsz - pos, "temperature:\t\t\t%u\n",
1397 le32_to_cpu(general->temperature)); 1574 le32_to_cpu(general->temperature));
1398 pos += scnprintf(buf + pos, bufsz - pos, "temperature_m: %u\n", 1575 pos += scnprintf(buf + pos, bufsz - pos, "temperature_m:\t\t\t%u\n",
1399 le32_to_cpu(general->temperature_m)); 1576 le32_to_cpu(general->temperature_m));
1400 pos += scnprintf(buf + pos, bufsz - pos, "burst_check: %u\n", 1577 pos += scnprintf(buf + pos, bufsz - pos,
1401 le32_to_cpu(dbg->burst_check)); 1578 "burst_check:\t\t\t%u\t\t\t%u\n",
1402 pos += scnprintf(buf + pos, bufsz - pos, "burst_count: %u\n", 1579 le32_to_cpu(dbg->burst_check),
1403 le32_to_cpu(dbg->burst_count)); 1580 accum_dbg->burst_check);
1404 pos += scnprintf(buf + pos, bufsz - pos, "sleep_time: %u\n", 1581 pos += scnprintf(buf + pos, bufsz - pos,
1405 le32_to_cpu(general->sleep_time)); 1582 "burst_count:\t\t\t%u\t\t\t%u\n",
1406 pos += scnprintf(buf + pos, bufsz - pos, "slots_out: %u\n", 1583 le32_to_cpu(dbg->burst_count),
1407 le32_to_cpu(general->slots_out)); 1584 accum_dbg->burst_count);
1408 pos += scnprintf(buf + pos, bufsz - pos, "slots_idle: %u\n", 1585 pos += scnprintf(buf + pos, bufsz - pos,
1409 le32_to_cpu(general->slots_idle)); 1586 "sleep_time:\t\t\t%u\t\t\t%u\n",
1410 pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp: %u\n", 1587 le32_to_cpu(general->sleep_time),
1588 accum_general->sleep_time);
1589 pos += scnprintf(buf + pos, bufsz - pos,
1590 "slots_out:\t\t\t%u\t\t\t%u\n",
1591 le32_to_cpu(general->slots_out),
1592 accum_general->slots_out);
1593 pos += scnprintf(buf + pos, bufsz - pos,
1594 "slots_idle:\t\t\t%u\t\t\t%u\n",
1595 le32_to_cpu(general->slots_idle),
1596 accum_general->slots_idle);
1597 pos += scnprintf(buf + pos, bufsz - pos, "ttl_timestamp:\t\t\t%u\n",
1411 le32_to_cpu(general->ttl_timestamp)); 1598 le32_to_cpu(general->ttl_timestamp));
1412 pos += scnprintf(buf + pos, bufsz - pos, "tx_on_a: %u\n", 1599 pos += scnprintf(buf + pos, bufsz - pos, "tx_on_a:\t\t\t%u\t\t\t%u\n",
1413 le32_to_cpu(div->tx_on_a)); 1600 le32_to_cpu(div->tx_on_a), accum_div->tx_on_a);
1414 pos += scnprintf(buf + pos, bufsz - pos, "tx_on_b: %u\n", 1601 pos += scnprintf(buf + pos, bufsz - pos, "tx_on_b:\t\t\t%u\t\t\t%u\n",
1415 le32_to_cpu(div->tx_on_b)); 1602 le32_to_cpu(div->tx_on_b), accum_div->tx_on_b);
1416 pos += scnprintf(buf + pos, bufsz - pos, "exec_time: %u\n", 1603 pos += scnprintf(buf + pos, bufsz - pos,
1417 le32_to_cpu(div->exec_time)); 1604 "exec_time:\t\t\t%u\t\t\t%u\n",
1418 pos += scnprintf(buf + pos, bufsz - pos, "probe_time: %u\n", 1605 le32_to_cpu(div->exec_time), accum_div->exec_time);
1419 le32_to_cpu(div->probe_time)); 1606 pos += scnprintf(buf + pos, bufsz - pos,
1420 pos += scnprintf(buf + pos, bufsz - pos, "rx_enable_counter: %u\n", 1607 "probe_time:\t\t\t%u\t\t\t%u\n",
1421 le32_to_cpu(general->rx_enable_counter)); 1608 le32_to_cpu(div->probe_time), accum_div->probe_time);
1609 pos += scnprintf(buf + pos, bufsz - pos,
1610 "rx_enable_counter:\t\t%u\t\t\t%u\n",
1611 le32_to_cpu(general->rx_enable_counter),
1612 accum_general->rx_enable_counter);
1422 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1613 ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1423 kfree(buf); 1614 kfree(buf);
1424 return ret; 1615 return ret;
@@ -1614,6 +1805,29 @@ static ssize_t iwl_dbgfs_tx_power_read(struct file *file,
1614 return simple_read_from_buffer(user_buf, count, ppos, buf, pos); 1805 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1615} 1806}
1616 1807
1808static ssize_t iwl_dbgfs_power_save_status_read(struct file *file,
1809 char __user *user_buf,
1810 size_t count, loff_t *ppos)
1811{
1812 struct iwl_priv *priv = (struct iwl_priv *)file->private_data;
1813 char buf[60];
1814 int pos = 0;
1815 const size_t bufsz = sizeof(buf);
1816 u32 pwrsave_status;
1817
1818 pwrsave_status = iwl_read32(priv, CSR_GP_CNTRL) &
1819 CSR_GP_REG_POWER_SAVE_STATUS_MSK;
1820
1821 pos += scnprintf(buf + pos, bufsz - pos, "Power Save Status: ");
1822 pos += scnprintf(buf + pos, bufsz - pos, "%s\n",
1823 (pwrsave_status == CSR_GP_REG_NO_POWER_SAVE) ? "none" :
1824 (pwrsave_status == CSR_GP_REG_MAC_POWER_SAVE) ? "MAC" :
1825 (pwrsave_status == CSR_GP_REG_PHY_POWER_SAVE) ? "PHY" :
1826 "error");
1827
1828 return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
1829}
1830
1617DEBUGFS_READ_WRITE_FILE_OPS(rx_statistics); 1831DEBUGFS_READ_WRITE_FILE_OPS(rx_statistics);
1618DEBUGFS_READ_WRITE_FILE_OPS(tx_statistics); 1832DEBUGFS_READ_WRITE_FILE_OPS(tx_statistics);
1619DEBUGFS_READ_WRITE_FILE_OPS(traffic_log); 1833DEBUGFS_READ_WRITE_FILE_OPS(traffic_log);
@@ -1625,6 +1839,7 @@ DEBUGFS_READ_FILE_OPS(ucode_general_stats);
1625DEBUGFS_READ_FILE_OPS(sensitivity); 1839DEBUGFS_READ_FILE_OPS(sensitivity);
1626DEBUGFS_READ_FILE_OPS(chain_noise); 1840DEBUGFS_READ_FILE_OPS(chain_noise);
1627DEBUGFS_READ_FILE_OPS(tx_power); 1841DEBUGFS_READ_FILE_OPS(tx_power);
1842DEBUGFS_READ_FILE_OPS(power_save_status);
1628 1843
1629/* 1844/*
1630 * Create the debugfs files and directories 1845 * Create the debugfs files and directories
@@ -1661,9 +1876,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
1661 DEBUGFS_ADD_FILE(status, data); 1876 DEBUGFS_ADD_FILE(status, data);
1662 DEBUGFS_ADD_FILE(interrupt, data); 1877 DEBUGFS_ADD_FILE(interrupt, data);
1663 DEBUGFS_ADD_FILE(qos, data); 1878 DEBUGFS_ADD_FILE(qos, data);
1664#ifdef CONFIG_IWLWIFI_LEDS
1665 DEBUGFS_ADD_FILE(led, data); 1879 DEBUGFS_ADD_FILE(led, data);
1666#endif
1667 DEBUGFS_ADD_FILE(sleep_level_override, data); 1880 DEBUGFS_ADD_FILE(sleep_level_override, data);
1668 DEBUGFS_ADD_FILE(current_sleep_command, data); 1881 DEBUGFS_ADD_FILE(current_sleep_command, data);
1669 DEBUGFS_ADD_FILE(thermal_throttling, data); 1882 DEBUGFS_ADD_FILE(thermal_throttling, data);
@@ -1674,6 +1887,7 @@ int iwl_dbgfs_register(struct iwl_priv *priv, const char *name)
1674 DEBUGFS_ADD_FILE(rx_queue, debug); 1887 DEBUGFS_ADD_FILE(rx_queue, debug);
1675 DEBUGFS_ADD_FILE(tx_queue, debug); 1888 DEBUGFS_ADD_FILE(tx_queue, debug);
1676 DEBUGFS_ADD_FILE(tx_power, debug); 1889 DEBUGFS_ADD_FILE(tx_power, debug);
1890 DEBUGFS_ADD_FILE(power_save_status, debug);
1677 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) { 1891 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
1678 DEBUGFS_ADD_FILE(ucode_rx_stats, debug); 1892 DEBUGFS_ADD_FILE(ucode_rx_stats, debug);
1679 DEBUGFS_ADD_FILE(ucode_tx_stats, debug); 1893 DEBUGFS_ADD_FILE(ucode_tx_stats, debug);
@@ -1716,9 +1930,7 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
1716 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_status); 1930 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_status);
1717 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_interrupt); 1931 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_interrupt);
1718 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_qos); 1932 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_qos);
1719#ifdef CONFIG_IWLWIFI_LEDS
1720 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_led); 1933 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_led);
1721#endif
1722 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_thermal_throttling); 1934 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_thermal_throttling);
1723 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_disable_ht40); 1935 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_data_files.file_disable_ht40);
1724 DEBUGFS_REMOVE(priv->dbgfs->dir_data); 1936 DEBUGFS_REMOVE(priv->dbgfs->dir_data);
@@ -1728,6 +1940,7 @@ void iwl_dbgfs_unregister(struct iwl_priv *priv)
1728 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_rx_queue); 1940 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_rx_queue);
1729 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_queue); 1941 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_queue);
1730 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_power); 1942 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_tx_power);
1943 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.file_power_save_status);
1731 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) { 1944 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) != CSR_HW_REV_TYPE_3945) {
1732 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files. 1945 DEBUGFS_REMOVE(priv->dbgfs->dbgfs_debug_files.
1733 file_ucode_rx_stats); 1946 file_ucode_rx_stats);
diff --git a/drivers/net/wireless/iwlwifi/iwl-dev.h b/drivers/net/wireless/iwlwifi/iwl-dev.h
index 028d50599550..cb2642c18da4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-dev.h
+++ b/drivers/net/wireless/iwlwifi/iwl-dev.h
@@ -43,7 +43,6 @@
43#include "iwl-debug.h" 43#include "iwl-debug.h"
44#include "iwl-4965-hw.h" 44#include "iwl-4965-hw.h"
45#include "iwl-3945-hw.h" 45#include "iwl-3945-hw.h"
46#include "iwl-3945-led.h"
47#include "iwl-led.h" 46#include "iwl-led.h"
48#include "iwl-power.h" 47#include "iwl-power.h"
49#include "iwl-agn-rs.h" 48#include "iwl-agn-rs.h"
@@ -57,17 +56,22 @@ extern struct iwl_cfg iwl5100_bg_cfg;
57extern struct iwl_cfg iwl5100_abg_cfg; 56extern struct iwl_cfg iwl5100_abg_cfg;
58extern struct iwl_cfg iwl5150_agn_cfg; 57extern struct iwl_cfg iwl5150_agn_cfg;
59extern struct iwl_cfg iwl6000h_2agn_cfg; 58extern struct iwl_cfg iwl6000h_2agn_cfg;
59extern struct iwl_cfg iwl6000h_2abg_cfg;
60extern struct iwl_cfg iwl6000h_2bg_cfg;
60extern struct iwl_cfg iwl6000i_2agn_cfg; 61extern struct iwl_cfg iwl6000i_2agn_cfg;
62extern struct iwl_cfg iwl6000i_2abg_cfg;
63extern struct iwl_cfg iwl6000i_2bg_cfg;
61extern struct iwl_cfg iwl6000_3agn_cfg; 64extern struct iwl_cfg iwl6000_3agn_cfg;
62extern struct iwl_cfg iwl6050_2agn_cfg; 65extern struct iwl_cfg iwl6050_2agn_cfg;
66extern struct iwl_cfg iwl6050_2abg_cfg;
63extern struct iwl_cfg iwl6050_3agn_cfg; 67extern struct iwl_cfg iwl6050_3agn_cfg;
64extern struct iwl_cfg iwl1000_bgn_cfg; 68extern struct iwl_cfg iwl1000_bgn_cfg;
69extern struct iwl_cfg iwl1000_bg_cfg;
65 70
66struct iwl_tx_queue; 71struct iwl_tx_queue;
67 72
68/* shared structures from iwl-5000.c */ 73/* shared structures from iwl-5000.c */
69extern struct iwl_mod_params iwl50_mod_params; 74extern struct iwl_mod_params iwl50_mod_params;
70extern struct iwl_ops iwl5000_ops;
71extern struct iwl_ucode_ops iwl5000_ucode; 75extern struct iwl_ucode_ops iwl5000_ucode;
72extern struct iwl_lib_ops iwl5000_lib; 76extern struct iwl_lib_ops iwl5000_lib;
73extern struct iwl_hcmd_ops iwl5000_hcmd; 77extern struct iwl_hcmd_ops iwl5000_hcmd;
@@ -81,9 +85,6 @@ extern void iwl5000_rts_tx_cmd_flag(struct ieee80211_tx_info *info,
81 __le32 *tx_flags); 85 __le32 *tx_flags);
82extern int iwl5000_calc_rssi(struct iwl_priv *priv, 86extern int iwl5000_calc_rssi(struct iwl_priv *priv,
83 struct iwl_rx_phy_res *rx_resp); 87 struct iwl_rx_phy_res *rx_resp);
84extern int iwl5000_apm_init(struct iwl_priv *priv);
85extern void iwl5000_apm_stop(struct iwl_priv *priv);
86extern int iwl5000_apm_reset(struct iwl_priv *priv);
87extern void iwl5000_nic_config(struct iwl_priv *priv); 88extern void iwl5000_nic_config(struct iwl_priv *priv);
88extern u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv); 89extern u16 iwl5000_eeprom_calib_version(struct iwl_priv *priv);
89extern const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv, 90extern const u8 *iwl5000_eeprom_query_addr(const struct iwl_priv *priv,
@@ -144,12 +145,13 @@ extern void iwl5000_temperature(struct iwl_priv *priv);
144#define DEFAULT_LONG_RETRY_LIMIT 4U 145#define DEFAULT_LONG_RETRY_LIMIT 4U
145 146
146struct iwl_rx_mem_buffer { 147struct iwl_rx_mem_buffer {
147 dma_addr_t real_dma_addr; 148 dma_addr_t page_dma;
148 dma_addr_t aligned_dma_addr; 149 struct page *page;
149 struct sk_buff *skb;
150 struct list_head list; 150 struct list_head list;
151}; 151};
152 152
153#define rxb_addr(r) page_address(r->page)
154
153/* defined below */ 155/* defined below */
154struct iwl_device_cmd; 156struct iwl_device_cmd;
155 157
@@ -165,7 +167,7 @@ struct iwl_cmd_meta {
165 */ 167 */
166 void (*callback)(struct iwl_priv *priv, 168 void (*callback)(struct iwl_priv *priv,
167 struct iwl_device_cmd *cmd, 169 struct iwl_device_cmd *cmd,
168 struct sk_buff *skb); 170 struct iwl_rx_packet *pkt);
169 171
170 /* The CMD_SIZE_HUGE flag bit indicates that the command 172 /* The CMD_SIZE_HUGE flag bit indicates that the command
171 * structure is stored at the end of the shared queue memory. */ 173 * structure is stored at the end of the shared queue memory. */
@@ -321,6 +323,12 @@ struct iwl_channel_info {
321 * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */ 323 * queue, 2 (unused) HCCA queues, and 4 HT queues (one for each AC) */
322#define IWL_MIN_NUM_QUEUES 10 324#define IWL_MIN_NUM_QUEUES 10
323 325
326/*
327 * uCode queue management definitions ...
328 * Queue #4 is the command queue for 3945/4965/5x00/1000/6x00.
329 */
330#define IWL_CMD_QUEUE_NUM 4
331
324/* Power management (not Tx power) structures */ 332/* Power management (not Tx power) structures */
325 333
326enum iwl_pwr_src { 334enum iwl_pwr_src {
@@ -356,7 +364,14 @@ enum {
356 CMD_WANT_SKB = (1 << 2), 364 CMD_WANT_SKB = (1 << 2),
357}; 365};
358 366
359#define IWL_CMD_MAX_PAYLOAD 320 367#define DEF_CMD_PAYLOAD_SIZE 320
368
369/*
370 * IWL_LINK_HDR_MAX should include ieee80211_hdr, radiotap header,
371 * SNAP header and alignment. It should also be big enough for 802.11
372 * control frames.
373 */
374#define IWL_LINK_HDR_MAX 64
360 375
361/** 376/**
362 * struct iwl_device_cmd 377 * struct iwl_device_cmd
@@ -373,7 +388,8 @@ struct iwl_device_cmd {
373 u16 val16; 388 u16 val16;
374 u32 val32; 389 u32 val32;
375 struct iwl_tx_cmd tx; 390 struct iwl_tx_cmd tx;
376 u8 payload[IWL_CMD_MAX_PAYLOAD]; 391 struct iwl6000_channel_switch_cmd chswitch;
392 u8 payload[DEF_CMD_PAYLOAD_SIZE];
377 } __attribute__ ((packed)) cmd; 393 } __attribute__ ((packed)) cmd;
378} __attribute__ ((packed)); 394} __attribute__ ((packed));
379 395
@@ -382,21 +398,15 @@ struct iwl_device_cmd {
382 398
383struct iwl_host_cmd { 399struct iwl_host_cmd {
384 const void *data; 400 const void *data;
385 struct sk_buff *reply_skb; 401 unsigned long reply_page;
386 void (*callback)(struct iwl_priv *priv, 402 void (*callback)(struct iwl_priv *priv,
387 struct iwl_device_cmd *cmd, 403 struct iwl_device_cmd *cmd,
388 struct sk_buff *skb); 404 struct iwl_rx_packet *pkt);
389 u32 flags; 405 u32 flags;
390 u16 len; 406 u16 len;
391 u8 id; 407 u8 id;
392}; 408};
393 409
394/*
395 * RX related structures and functions
396 */
397#define RX_FREE_BUFFERS 64
398#define RX_LOW_WATERMARK 8
399
400#define SUP_RATE_11A_MAX_NUM_CHANNELS 8 410#define SUP_RATE_11A_MAX_NUM_CHANNELS 8
401#define SUP_RATE_11B_MAX_NUM_CHANNELS 4 411#define SUP_RATE_11B_MAX_NUM_CHANNELS 4
402#define SUP_RATE_11G_MAX_NUM_CHANNELS 12 412#define SUP_RATE_11G_MAX_NUM_CHANNELS 12
@@ -502,12 +512,12 @@ union iwl_ht_rate_supp {
502#define CFG_HT_MPDU_DENSITY_4USEC (0x5) 512#define CFG_HT_MPDU_DENSITY_4USEC (0x5)
503#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC 513#define CFG_HT_MPDU_DENSITY_DEF CFG_HT_MPDU_DENSITY_4USEC
504 514
505struct iwl_ht_info { 515struct iwl_ht_config {
506 /* self configuration data */ 516 /* self configuration data */
507 u8 is_ht; 517 bool is_ht;
508 u8 supported_chan_width; 518 bool is_40mhz;
519 bool single_chain_sufficient;
509 u8 sm_ps; 520 u8 sm_ps;
510 struct ieee80211_mcs_info mcs;
511 /* BSS related data */ 521 /* BSS related data */
512 u8 extension_chan_offset; 522 u8 extension_chan_offset;
513 u8 ht_protection; 523 u8 ht_protection;
@@ -561,6 +571,19 @@ struct iwl_station_entry {
561 struct iwl_hw_key keyinfo; 571 struct iwl_hw_key keyinfo;
562}; 572};
563 573
574/*
575 * iwl_station_priv: Driver's private station information
576 *
577 * When mac80211 creates a station it reserves some space (hw->sta_data_size)
578 * in the structure for use by driver. This structure is places in that
579 * space.
580 *
581 * At the moment use it for the station's rate scaling information.
582 */
583struct iwl_station_priv {
584 struct iwl_lq_sta lq_sta;
585};
586
564/* one for each uCode image (inst/data, boot/init/runtime) */ 587/* one for each uCode image (inst/data, boot/init/runtime) */
565struct fw_desc { 588struct fw_desc {
566 void *v_addr; /* access by driver */ 589 void *v_addr; /* access by driver */
@@ -622,6 +645,10 @@ struct iwl_sensitivity_ranges {
622 u16 auto_corr_max_cck_mrc; 645 u16 auto_corr_max_cck_mrc;
623 u16 auto_corr_min_cck; 646 u16 auto_corr_min_cck;
624 u16 auto_corr_min_cck_mrc; 647 u16 auto_corr_min_cck_mrc;
648
649 u16 barker_corr_th_min;
650 u16 barker_corr_th_min_mrc;
651 u16 nrg_th_cca;
625}; 652};
626 653
627 654
@@ -639,7 +666,7 @@ struct iwl_sensitivity_ranges {
639 * @valid_tx/rx_ant: usable antennas 666 * @valid_tx/rx_ant: usable antennas
640 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2) 667 * @max_rxq_size: Max # Rx frames in Rx queue (must be power-of-2)
641 * @max_rxq_log: Log-base-2 of max_rxq_size 668 * @max_rxq_log: Log-base-2 of max_rxq_size
642 * @rx_buf_size: Rx buffer size 669 * @rx_page_order: Rx buffer page order
643 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR 670 * @rx_wrt_ptr_reg: FH{39}_RSCSR_CHNL0_WPTR
644 * @max_stations: 671 * @max_stations:
645 * @bcast_sta_id: 672 * @bcast_sta_id:
@@ -662,9 +689,8 @@ struct iwl_hw_params {
662 u8 valid_rx_ant; 689 u8 valid_rx_ant;
663 u16 max_rxq_size; 690 u16 max_rxq_size;
664 u16 max_rxq_log; 691 u16 max_rxq_log;
665 u32 rx_buf_size; 692 u32 rx_page_order;
666 u32 rx_wrt_ptr_reg; 693 u32 rx_wrt_ptr_reg;
667 u32 max_pkt_size;
668 u8 max_stations; 694 u8 max_stations;
669 u8 bcast_sta_id; 695 u8 bcast_sta_id;
670 u8 ht40_channel; 696 u8 ht40_channel;
@@ -711,7 +737,11 @@ static inline int iwl_queue_used(const struct iwl_queue *q, int i)
711 737
712static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge) 738static inline u8 get_cmd_index(struct iwl_queue *q, u32 index, int is_huge)
713{ 739{
714 /* This is for scan command, the big buffer at end of command array */ 740 /*
741 * This is for init calibration result and scan command which
742 * required buffer > TFD_MAX_PAYLOAD_SIZE,
743 * the big buffer at end of command array
744 */
715 if (is_huge) 745 if (is_huge)
716 return q->n_window; /* must be power of 2 */ 746 return q->n_window; /* must be power of 2 */
717 747
@@ -726,9 +756,6 @@ struct iwl_dma_ptr {
726 size_t size; 756 size_t size;
727}; 757};
728 758
729#define IWL_CHANNEL_WIDTH_20MHZ 0
730#define IWL_CHANNEL_WIDTH_40MHZ 1
731
732#define IWL_OPERATION_MODE_AUTO 0 759#define IWL_OPERATION_MODE_AUTO 0
733#define IWL_OPERATION_MODE_HT_ONLY 1 760#define IWL_OPERATION_MODE_HT_ONLY 1
734#define IWL_OPERATION_MODE_MIXED 2 761#define IWL_OPERATION_MODE_MIXED 2
@@ -741,7 +768,8 @@ struct iwl_dma_ptr {
741 768
742/* Sensitivity and chain noise calibration */ 769/* Sensitivity and chain noise calibration */
743#define INITIALIZATION_VALUE 0xFFFF 770#define INITIALIZATION_VALUE 0xFFFF
744#define CAL_NUM_OF_BEACONS 20 771#define IWL4965_CAL_NUM_BEACONS 20
772#define IWL_CAL_NUM_BEACONS 16
745#define MAXIMUM_ALLOWED_PATHLOSS 15 773#define MAXIMUM_ALLOWED_PATHLOSS 15
746 774
747#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3 775#define CHAIN_NOISE_MAX_DELTA_GAIN_CODE 3
@@ -845,6 +873,10 @@ struct iwl_sensitivity_data {
845 s32 nrg_auto_corr_silence_diff; 873 s32 nrg_auto_corr_silence_diff;
846 u32 num_in_cck_no_fa; 874 u32 num_in_cck_no_fa;
847 u32 nrg_th_ofdm; 875 u32 nrg_th_ofdm;
876
877 u16 barker_corr_th_min;
878 u16 barker_corr_th_min_mrc;
879 u16 nrg_th_cca;
848}; 880};
849 881
850/* Chain noise (differential Rx gain) calib data */ 882/* Chain noise (differential Rx gain) calib data */
@@ -961,8 +993,6 @@ struct traffic_stats {
961}; 993};
962#endif 994#endif
963 995
964#define IWL_MAX_NUM_QUEUES 20 /* FIXME: do dynamic allocation */
965
966struct iwl_priv { 996struct iwl_priv {
967 997
968 /* ieee device used by generic ieee processing code */ 998 /* ieee device used by generic ieee processing code */
@@ -976,7 +1006,7 @@ struct iwl_priv {
976 int frames_count; 1006 int frames_count;
977 1007
978 enum ieee80211_band band; 1008 enum ieee80211_band band;
979 int alloc_rxb_skb; 1009 int alloc_rxb_page;
980 1010
981 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv, 1011 void (*rx_handlers[REPLY_MAX])(struct iwl_priv *priv,
982 struct iwl_rx_mem_buffer *rxb); 1012 struct iwl_rx_mem_buffer *rxb);
@@ -1063,14 +1093,11 @@ struct iwl_priv {
1063 struct iwl_init_alive_resp card_alive_init; 1093 struct iwl_init_alive_resp card_alive_init;
1064 struct iwl_alive_resp card_alive; 1094 struct iwl_alive_resp card_alive;
1065 1095
1066#ifdef CONFIG_IWLWIFI_LEDS
1067 unsigned long last_blink_time; 1096 unsigned long last_blink_time;
1068 u8 last_blink_rate; 1097 u8 last_blink_rate;
1069 u8 allow_blinking; 1098 u8 allow_blinking;
1070 u64 led_tpt; 1099 u64 led_tpt;
1071 struct iwl_led led[IWL_LED_TRG_MAX]; 1100
1072 unsigned int rxtxpackets;
1073#endif
1074 u16 active_rate; 1101 u16 active_rate;
1075 u16 active_rate_basic; 1102 u16 active_rate_basic;
1076 1103
@@ -1080,11 +1107,10 @@ struct iwl_priv {
1080 struct iwl_chain_noise_data chain_noise_data; 1107 struct iwl_chain_noise_data chain_noise_data;
1081 __le16 sensitivity_tbl[HD_TABLE_SIZE]; 1108 __le16 sensitivity_tbl[HD_TABLE_SIZE];
1082 1109
1083 struct iwl_ht_info current_ht_config; 1110 struct iwl_ht_config current_ht_config;
1084 u8 last_phy_res[100]; 1111 u8 last_phy_res[100];
1085 1112
1086 /* Rate scaling data */ 1113 /* Rate scaling data */
1087 s8 data_retry_limit;
1088 u8 retry_rate; 1114 u8 retry_rate;
1089 1115
1090 wait_queue_head_t wait_command_queue; 1116 wait_queue_head_t wait_command_queue;
@@ -1093,7 +1119,7 @@ struct iwl_priv {
1093 1119
1094 /* Rx and Tx DMA processing queues */ 1120 /* Rx and Tx DMA processing queues */
1095 struct iwl_rx_queue rxq; 1121 struct iwl_rx_queue rxq;
1096 struct iwl_tx_queue txq[IWL_MAX_NUM_QUEUES]; 1122 struct iwl_tx_queue *txq;
1097 unsigned long txq_ctx_active_msk; 1123 unsigned long txq_ctx_active_msk;
1098 struct iwl_dma_ptr kw; /* keep warm address */ 1124 struct iwl_dma_ptr kw; /* keep warm address */
1099 struct iwl_dma_ptr scd_bc_tbls; 1125 struct iwl_dma_ptr scd_bc_tbls;
@@ -1116,7 +1142,9 @@ struct iwl_priv {
1116 struct iwl_tt_mgmt thermal_throttle; 1142 struct iwl_tt_mgmt thermal_throttle;
1117 1143
1118 struct iwl_notif_statistics statistics; 1144 struct iwl_notif_statistics statistics;
1119 unsigned long last_statistics_time; 1145#ifdef CONFIG_IWLWIFI_DEBUG
1146 struct iwl_notif_statistics accum_statistics;
1147#endif
1120 1148
1121 /* context information */ 1149 /* context information */
1122 u16 rates_mask; 1150 u16 rates_mask;
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.c b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
new file mode 100644
index 000000000000..4ef5acaa556d
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.c
@@ -0,0 +1,13 @@
1#include <linux/module.h>
2
3/* sparse doesn't like tracepoint macros */
4#ifndef __CHECKER__
5#define CREATE_TRACE_POINTS
6#include "iwl-devtrace.h"
7
8EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ioread32);
9EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_iowrite32);
10EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_rx);
11EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_event);
12EXPORT_TRACEPOINT_SYMBOL(iwlwifi_dev_ucode_error);
13#endif
diff --git a/drivers/net/wireless/iwlwifi/iwl-devtrace.h b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
new file mode 100644
index 000000000000..8c7159208da1
--- /dev/null
+++ b/drivers/net/wireless/iwlwifi/iwl-devtrace.h
@@ -0,0 +1,178 @@
1#if !defined(__IWLWIFI_DEVICE_TRACE) || defined(TRACE_HEADER_MULTI_READ)
2#define __IWLWIFI_DEVICE_TRACE
3
4#include <linux/tracepoint.h>
5#include "iwl-dev.h"
6
7#if !defined(CONFIG_IWLWIFI_DEVICE_TRACING) || defined(__CHECKER__)
8#undef TRACE_EVENT
9#define TRACE_EVENT(name, proto, ...) \
10static inline void trace_ ## name(proto) {}
11#endif
12
13#define PRIV_ENTRY __field(struct iwl_priv *, priv)
14#define PRIV_ASSIGN __entry->priv = priv
15
16#undef TRACE_SYSTEM
17#define TRACE_SYSTEM iwlwifi
18
19TRACE_EVENT(iwlwifi_dev_ioread32,
20 TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
21 TP_ARGS(priv, offs, val),
22 TP_STRUCT__entry(
23 PRIV_ENTRY
24 __field(u32, offs)
25 __field(u32, val)
26 ),
27 TP_fast_assign(
28 PRIV_ASSIGN;
29 __entry->offs = offs;
30 __entry->val = val;
31 ),
32 TP_printk("[%p] read io[%#x] = %#x", __entry->priv, __entry->offs, __entry->val)
33);
34
35TRACE_EVENT(iwlwifi_dev_iowrite32,
36 TP_PROTO(struct iwl_priv *priv, u32 offs, u32 val),
37 TP_ARGS(priv, offs, val),
38 TP_STRUCT__entry(
39 PRIV_ENTRY
40 __field(u32, offs)
41 __field(u32, val)
42 ),
43 TP_fast_assign(
44 PRIV_ASSIGN;
45 __entry->offs = offs;
46 __entry->val = val;
47 ),
48 TP_printk("[%p] write io[%#x] = %#x)", __entry->priv, __entry->offs, __entry->val)
49);
50
51TRACE_EVENT(iwlwifi_dev_hcmd,
52 TP_PROTO(struct iwl_priv *priv, void *hcmd, size_t len, u32 flags),
53 TP_ARGS(priv, hcmd, len, flags),
54 TP_STRUCT__entry(
55 PRIV_ENTRY
56 __dynamic_array(u8, hcmd, len)
57 __field(u32, flags)
58 ),
59 TP_fast_assign(
60 PRIV_ASSIGN;
61 memcpy(__get_dynamic_array(hcmd), hcmd, len);
62 __entry->flags = flags;
63 ),
64 TP_printk("[%p] hcmd %#.2x (%ssync)",
65 __entry->priv, ((u8 *)__get_dynamic_array(hcmd))[0],
66 __entry->flags & CMD_ASYNC ? "a" : "")
67);
68
69TRACE_EVENT(iwlwifi_dev_rx,
70 TP_PROTO(struct iwl_priv *priv, void *rxbuf, size_t len),
71 TP_ARGS(priv, rxbuf, len),
72 TP_STRUCT__entry(
73 PRIV_ENTRY
74 __dynamic_array(u8, rxbuf, len)
75 ),
76 TP_fast_assign(
77 PRIV_ASSIGN;
78 memcpy(__get_dynamic_array(rxbuf), rxbuf, len);
79 ),
80 TP_printk("[%p] RX cmd %#.2x",
81 __entry->priv, ((u8 *)__get_dynamic_array(rxbuf))[4])
82);
83
84TRACE_EVENT(iwlwifi_dev_tx,
85 TP_PROTO(struct iwl_priv *priv, void *tfd, size_t tfdlen,
86 void *buf0, size_t buf0_len,
87 void *buf1, size_t buf1_len),
88 TP_ARGS(priv, tfd, tfdlen, buf0, buf0_len, buf1, buf1_len),
89 TP_STRUCT__entry(
90 PRIV_ENTRY
91
92 __field(size_t, framelen)
93 __dynamic_array(u8, tfd, tfdlen)
94
95 /*
96 * Do not insert between or below these items,
97 * we want to keep the frame together (except
98 * for the possible padding).
99 */
100 __dynamic_array(u8, buf0, buf0_len)
101 __dynamic_array(u8, buf1, buf1_len)
102 ),
103 TP_fast_assign(
104 PRIV_ASSIGN;
105 __entry->framelen = buf0_len + buf1_len;
106 memcpy(__get_dynamic_array(tfd), tfd, tfdlen);
107 memcpy(__get_dynamic_array(buf0), buf0, buf0_len);
108 memcpy(__get_dynamic_array(buf1), buf1, buf0_len);
109 ),
110 TP_printk("[%p] TX %.2x (%zu bytes)",
111 __entry->priv,
112 ((u8 *)__get_dynamic_array(buf0))[0],
113 __entry->framelen)
114);
115
116TRACE_EVENT(iwlwifi_dev_ucode_error,
117 TP_PROTO(struct iwl_priv *priv, u32 desc, u32 time,
118 u32 data1, u32 data2, u32 line, u32 blink1,
119 u32 blink2, u32 ilink1, u32 ilink2),
120 TP_ARGS(priv, desc, time, data1, data2, line,
121 blink1, blink2, ilink1, ilink2),
122 TP_STRUCT__entry(
123 PRIV_ENTRY
124 __field(u32, desc)
125 __field(u32, time)
126 __field(u32, data1)
127 __field(u32, data2)
128 __field(u32, line)
129 __field(u32, blink1)
130 __field(u32, blink2)
131 __field(u32, ilink1)
132 __field(u32, ilink2)
133 ),
134 TP_fast_assign(
135 PRIV_ASSIGN;
136 __entry->desc = desc;
137 __entry->time = time;
138 __entry->data1 = data1;
139 __entry->data2 = data2;
140 __entry->line = line;
141 __entry->blink1 = blink1;
142 __entry->blink2 = blink2;
143 __entry->ilink1 = ilink1;
144 __entry->ilink2 = ilink2;
145 ),
146 TP_printk("[%p] #%02d %010u data 0x%08X 0x%08X line %u, "
147 "blink 0x%05X 0x%05X ilink 0x%05X 0x%05X",
148 __entry->priv, __entry->desc, __entry->time, __entry->data1,
149 __entry->data2, __entry->line, __entry->blink1,
150 __entry->blink2, __entry->ilink1, __entry->ilink2)
151);
152
153TRACE_EVENT(iwlwifi_dev_ucode_event,
154 TP_PROTO(struct iwl_priv *priv, u32 time, u32 data, u32 ev),
155 TP_ARGS(priv, time, data, ev),
156 TP_STRUCT__entry(
157 PRIV_ENTRY
158
159 __field(u32, time)
160 __field(u32, data)
161 __field(u32, ev)
162 ),
163 TP_fast_assign(
164 PRIV_ASSIGN;
165 __entry->time = time;
166 __entry->data = data;
167 __entry->ev = ev;
168 ),
169 TP_printk("[%p] EVT_LOGT:%010u:0x%08x:%04u",
170 __entry->priv, __entry->time, __entry->data, __entry->ev)
171);
172#endif /* __IWLWIFI_DEVICE_TRACE */
173
174#undef TRACE_INCLUDE_PATH
175#define TRACE_INCLUDE_PATH .
176#undef TRACE_INCLUDE_FILE
177#define TRACE_INCLUDE_FILE iwl-devtrace
178#include <trace/define_trace.h>
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.c b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
index e14c9952a935..8a0709e81a9f 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.c
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.c
@@ -215,12 +215,35 @@ static const struct iwl_txpwr_section enhinfo[] = {
215 215
216int iwlcore_eeprom_verify_signature(struct iwl_priv *priv) 216int iwlcore_eeprom_verify_signature(struct iwl_priv *priv)
217{ 217{
218 u32 gp = iwl_read32(priv, CSR_EEPROM_GP); 218 u32 gp = iwl_read32(priv, CSR_EEPROM_GP) & CSR_EEPROM_GP_VALID_MSK;
219 if ((gp & CSR_EEPROM_GP_VALID_MSK) == CSR_EEPROM_GP_BAD_SIGNATURE) { 219 int ret = 0;
220 IWL_ERR(priv, "EEPROM not found, EEPROM_GP=0x%08x\n", gp); 220
221 return -ENOENT; 221 IWL_DEBUG_INFO(priv, "EEPROM signature=0x%08x\n", gp);
222 switch (gp) {
223 case CSR_EEPROM_GP_BAD_SIG_EEP_GOOD_SIG_OTP:
224 if (priv->nvm_device_type != NVM_DEVICE_TYPE_OTP) {
225 IWL_ERR(priv, "EEPROM with bad signature: 0x%08x\n",
226 gp);
227 ret = -ENOENT;
228 }
229 break;
230 case CSR_EEPROM_GP_GOOD_SIG_EEP_LESS_THAN_4K:
231 case CSR_EEPROM_GP_GOOD_SIG_EEP_MORE_THAN_4K:
232 if (priv->nvm_device_type != NVM_DEVICE_TYPE_EEPROM) {
233 IWL_ERR(priv, "OTP with bad signature: 0x%08x\n", gp);
234 ret = -ENOENT;
235 }
236 break;
237 case CSR_EEPROM_GP_BAD_SIGNATURE_BOTH_EEP_AND_OTP:
238 default:
239 IWL_ERR(priv, "bad EEPROM/OTP signature, type=%s, "
240 "EEPROM_GP=0x%08x\n",
241 (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP)
242 ? "OTP" : "EEPROM", gp);
243 ret = -ENOENT;
244 break;
222 } 245 }
223 return 0; 246 return ret;
224} 247}
225EXPORT_SYMBOL(iwlcore_eeprom_verify_signature); 248EXPORT_SYMBOL(iwlcore_eeprom_verify_signature);
226 249
@@ -283,7 +306,8 @@ int iwlcore_eeprom_acquire_semaphore(struct iwl_priv *priv)
283 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM); 306 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM);
284 307
285 /* See if we got it */ 308 /* See if we got it */
286 ret = iwl_poll_direct_bit(priv, CSR_HW_IF_CONFIG_REG, 309 ret = iwl_poll_bit(priv, CSR_HW_IF_CONFIG_REG,
310 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
287 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM, 311 CSR_HW_IF_CONFIG_REG_BIT_EEPROM_OWN_SEM,
288 EEPROM_SEM_TIMEOUT); 312 EEPROM_SEM_TIMEOUT);
289 if (ret >= 0) { 313 if (ret >= 0) {
@@ -322,7 +346,8 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
322 CSR_GP_CNTRL_REG_FLAG_INIT_DONE); 346 CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
323 347
324 /* wait for clock to be ready */ 348 /* wait for clock to be ready */
325 ret = iwl_poll_direct_bit(priv, CSR_GP_CNTRL, 349 ret = iwl_poll_bit(priv, CSR_GP_CNTRL,
350 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
326 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY, 351 CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY,
327 25000); 352 25000);
328 if (ret < 0) 353 if (ret < 0)
@@ -333,6 +358,14 @@ static int iwl_init_otp_access(struct iwl_priv *priv)
333 udelay(5); 358 udelay(5);
334 iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG, 359 iwl_clear_bits_prph(priv, APMG_PS_CTRL_REG,
335 APMG_PS_CTRL_VAL_RESET_REQ); 360 APMG_PS_CTRL_VAL_RESET_REQ);
361
362 /*
363 * CSR auto clock gate disable bit -
364 * this is only applicable for HW with OTP shadow RAM
365 */
366 if (priv->cfg->shadow_ram_support)
367 iwl_set_bit(priv, CSR_DBG_LINK_PWR_MGMT_REG,
368 CSR_RESET_LINK_PWR_MGMT_DISABLED);
336 } 369 }
337 return ret; 370 return ret;
338} 371}
@@ -345,7 +378,8 @@ static int iwl_read_otp_word(struct iwl_priv *priv, u16 addr, u16 *eeprom_data)
345 378
346 _iwl_write32(priv, CSR_EEPROM_REG, 379 _iwl_write32(priv, CSR_EEPROM_REG,
347 CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); 380 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
348 ret = iwl_poll_direct_bit(priv, CSR_EEPROM_REG, 381 ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
382 CSR_EEPROM_REG_READ_VALID_MSK,
349 CSR_EEPROM_REG_READ_VALID_MSK, 383 CSR_EEPROM_REG_READ_VALID_MSK,
350 IWL_EEPROM_ACCESS_TIMEOUT); 384 IWL_EEPROM_ACCESS_TIMEOUT);
351 if (ret < 0) { 385 if (ret < 0) {
@@ -499,6 +533,10 @@ int iwl_eeprom_init(struct iwl_priv *priv)
499 goto err; 533 goto err;
500 } 534 }
501 if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) { 535 if (priv->nvm_device_type == NVM_DEVICE_TYPE_OTP) {
536
537 /* OTP reads require powered-up chip */
538 priv->cfg->ops->lib->apm_ops.init(priv);
539
502 ret = iwl_init_otp_access(priv); 540 ret = iwl_init_otp_access(priv);
503 if (ret) { 541 if (ret) {
504 IWL_ERR(priv, "Failed to initialize OTP access.\n"); 542 IWL_ERR(priv, "Failed to initialize OTP access.\n");
@@ -529,6 +567,13 @@ int iwl_eeprom_init(struct iwl_priv *priv)
529 e[cache_addr / 2] = eeprom_data; 567 e[cache_addr / 2] = eeprom_data;
530 cache_addr += sizeof(u16); 568 cache_addr += sizeof(u16);
531 } 569 }
570
571 /*
572 * Now that OTP reads are complete, reset chip to save
573 * power until we load uCode during "up".
574 */
575 priv->cfg->ops->lib->apm_ops.stop(priv);
576
532 } else { 577 } else {
533 /* eeprom is an array of 16bit values */ 578 /* eeprom is an array of 16bit values */
534 for (addr = 0; addr < sz; addr += sizeof(u16)) { 579 for (addr = 0; addr < sz; addr += sizeof(u16)) {
@@ -537,7 +582,8 @@ int iwl_eeprom_init(struct iwl_priv *priv)
537 _iwl_write32(priv, CSR_EEPROM_REG, 582 _iwl_write32(priv, CSR_EEPROM_REG,
538 CSR_EEPROM_REG_MSK_ADDR & (addr << 1)); 583 CSR_EEPROM_REG_MSK_ADDR & (addr << 1));
539 584
540 ret = iwl_poll_direct_bit(priv, CSR_EEPROM_REG, 585 ret = iwl_poll_bit(priv, CSR_EEPROM_REG,
586 CSR_EEPROM_REG_READ_VALID_MSK,
541 CSR_EEPROM_REG_READ_VALID_MSK, 587 CSR_EEPROM_REG_READ_VALID_MSK,
542 IWL_EEPROM_ACCESS_TIMEOUT); 588 IWL_EEPROM_ACCESS_TIMEOUT);
543 if (ret < 0) { 589 if (ret < 0) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-eeprom.h b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
index 80b9e45d9b9c..5ba5a4e9e49a 100644
--- a/drivers/net/wireless/iwlwifi/iwl-eeprom.h
+++ b/drivers/net/wireless/iwlwifi/iwl-eeprom.h
@@ -63,6 +63,8 @@
63#ifndef __iwl_eeprom_h__ 63#ifndef __iwl_eeprom_h__
64#define __iwl_eeprom_h__ 64#define __iwl_eeprom_h__
65 65
66#include <net/mac80211.h>
67
66struct iwl_priv; 68struct iwl_priv;
67 69
68/* 70/*
@@ -256,6 +258,15 @@ struct iwl_eeprom_enhanced_txpwr {
256#define EEPROM_5050_TX_POWER_VERSION (4) 258#define EEPROM_5050_TX_POWER_VERSION (4)
257#define EEPROM_5050_EEPROM_VERSION (0x21E) 259#define EEPROM_5050_EEPROM_VERSION (0x21E)
258 260
261/* 1000 Specific */
262#define EEPROM_1000_EEPROM_VERSION (0x15C)
263
264/* 6x00 Specific */
265#define EEPROM_6000_EEPROM_VERSION (0x434)
266
267/* 6x50 Specific */
268#define EEPROM_6050_EEPROM_VERSION (0x532)
269
259/* OTP */ 270/* OTP */
260/* lower blocks contain EEPROM image and calibration data */ 271/* lower blocks contain EEPROM image and calibration data */
261#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */ 272#define OTP_LOW_IMAGE_SIZE (2 * 512 * sizeof(u16)) /* 2 KB */
@@ -370,12 +381,10 @@ struct iwl_eeprom_calib_info {
370#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */ 381#define EEPROM_BOARD_PBA_NUMBER (2*0x3B+1) /* 9 bytes */
371#define EEPROM_VERSION (2*0x44) /* 2 bytes */ 382#define EEPROM_VERSION (2*0x44) /* 2 bytes */
372#define EEPROM_SKU_CAP (2*0x45) /* 1 bytes */ 383#define EEPROM_SKU_CAP (2*0x45) /* 1 bytes */
373#define EEPROM_LEDS_MODE (2*0x45+1) /* 1 bytes */
374#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */ 384#define EEPROM_OEM_MODE (2*0x46) /* 2 bytes */
375#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */ 385#define EEPROM_WOWLAN_MODE (2*0x47) /* 2 bytes */
376#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */ 386#define EEPROM_RADIO_CONFIG (2*0x48) /* 2 bytes */
377#define EEPROM_3945_M_VERSION (2*0x4A) /* 1 bytes */ 387#define EEPROM_3945_M_VERSION (2*0x4A) /* 1 bytes */
378#define EEPROM_ANTENNA_SWITCH_TYPE (2*0x4A+1) /* 1 bytes */
379 388
380/* The following masks are to be applied on EEPROM_RADIO_CONFIG */ 389/* The following masks are to be applied on EEPROM_RADIO_CONFIG */
381#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */ 390#define EEPROM_RF_CFG_TYPE_MSK(x) (x & 0x3) /* bits 0-1 */
@@ -387,7 +396,12 @@ struct iwl_eeprom_calib_info {
387 396
388#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0 397#define EEPROM_3945_RF_CFG_TYPE_MAX 0x0
389#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1 398#define EEPROM_4965_RF_CFG_TYPE_MAX 0x1
390#define EEPROM_5000_RF_CFG_TYPE_MAX 0x3 399
400/* Radio Config for 5000 and up */
401#define EEPROM_RF_CONFIG_TYPE_R3x3 0x0
402#define EEPROM_RF_CONFIG_TYPE_R2x2 0x1
403#define EEPROM_RF_CONFIG_TYPE_R1x2 0x2
404#define EEPROM_RF_CONFIG_TYPE_MAX 0x3
391 405
392/* 406/*
393 * Per-channel regulatory data. 407 * Per-channel regulatory data.
diff --git a/drivers/net/wireless/iwlwifi/iwl-hcmd.c b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
index 532c8d6cd8da..905645d15a9b 100644
--- a/drivers/net/wireless/iwlwifi/iwl-hcmd.c
+++ b/drivers/net/wireless/iwlwifi/iwl-hcmd.c
@@ -55,6 +55,8 @@ const char *get_cmd_string(u8 cmd)
55 IWL_CMD(REPLY_LEDS_CMD); 55 IWL_CMD(REPLY_LEDS_CMD);
56 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD); 56 IWL_CMD(REPLY_TX_LINK_QUALITY_CMD);
57 IWL_CMD(COEX_PRIORITY_TABLE_CMD); 57 IWL_CMD(COEX_PRIORITY_TABLE_CMD);
58 IWL_CMD(COEX_MEDIUM_NOTIFICATION);
59 IWL_CMD(COEX_EVENT_CMD);
58 IWL_CMD(RADAR_NOTIFICATION); 60 IWL_CMD(RADAR_NOTIFICATION);
59 IWL_CMD(REPLY_QUIET_CMD); 61 IWL_CMD(REPLY_QUIET_CMD);
60 IWL_CMD(REPLY_CHANNEL_SWITCH); 62 IWL_CMD(REPLY_CHANNEL_SWITCH);
@@ -92,6 +94,8 @@ const char *get_cmd_string(u8 cmd)
92 IWL_CMD(CALIBRATION_RES_NOTIFICATION); 94 IWL_CMD(CALIBRATION_RES_NOTIFICATION);
93 IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION); 95 IWL_CMD(CALIBRATION_COMPLETE_NOTIFICATION);
94 IWL_CMD(REPLY_TX_POWER_DBM_CMD); 96 IWL_CMD(REPLY_TX_POWER_DBM_CMD);
97 IWL_CMD(TEMPERATURE_NOTIFICATION);
98 IWL_CMD(TX_ANT_CONFIGURATION_CMD);
95 default: 99 default:
96 return "UNKNOWN"; 100 return "UNKNOWN";
97 101
@@ -103,17 +107,8 @@ EXPORT_SYMBOL(get_cmd_string);
103 107
104static void iwl_generic_cmd_callback(struct iwl_priv *priv, 108static void iwl_generic_cmd_callback(struct iwl_priv *priv,
105 struct iwl_device_cmd *cmd, 109 struct iwl_device_cmd *cmd,
106 struct sk_buff *skb) 110 struct iwl_rx_packet *pkt)
107{ 111{
108 struct iwl_rx_packet *pkt = NULL;
109
110 if (!skb) {
111 IWL_ERR(priv, "Error: Response NULL in %s.\n",
112 get_cmd_string(cmd->hdr.cmd));
113 return;
114 }
115
116 pkt = (struct iwl_rx_packet *)skb->data;
117 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) { 112 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
118 IWL_ERR(priv, "Bad return from %s (0x%08X)\n", 113 IWL_ERR(priv, "Bad return from %s (0x%08X)\n",
119 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags); 114 get_cmd_string(cmd->hdr.cmd), pkt->hdr.flags);
@@ -215,7 +210,7 @@ int iwl_send_cmd_sync(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
215 ret = -EIO; 210 ret = -EIO;
216 goto fail; 211 goto fail;
217 } 212 }
218 if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_skb) { 213 if ((cmd->flags & CMD_WANT_SKB) && !cmd->reply_page) {
219 IWL_ERR(priv, "Error: Response NULL in '%s'\n", 214 IWL_ERR(priv, "Error: Response NULL in '%s'\n",
220 get_cmd_string(cmd->id)); 215 get_cmd_string(cmd->id));
221 ret = -EIO; 216 ret = -EIO;
@@ -237,9 +232,9 @@ cancel:
237 ~CMD_WANT_SKB; 232 ~CMD_WANT_SKB;
238 } 233 }
239fail: 234fail:
240 if (cmd->reply_skb) { 235 if (cmd->reply_page) {
241 dev_kfree_skb_any(cmd->reply_skb); 236 free_pages(cmd->reply_page, priv->hw_params.rx_page_order);
242 cmd->reply_skb = NULL; 237 cmd->reply_page = 0;
243 } 238 }
244out: 239out:
245 clear_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status); 240 clear_bit(STATUS_HCMD_SYNC_ACTIVE, &priv->status);
@@ -272,7 +267,7 @@ int iwl_send_cmd_pdu_async(struct iwl_priv *priv,
272 u8 id, u16 len, const void *data, 267 u8 id, u16 len, const void *data,
273 void (*callback)(struct iwl_priv *priv, 268 void (*callback)(struct iwl_priv *priv,
274 struct iwl_device_cmd *cmd, 269 struct iwl_device_cmd *cmd,
275 struct sk_buff *skb)) 270 struct iwl_rx_packet *pkt))
276{ 271{
277 struct iwl_host_cmd cmd = { 272 struct iwl_host_cmd cmd = {
278 .id = id, 273 .id = id,
diff --git a/drivers/net/wireless/iwlwifi/iwl-io.h b/drivers/net/wireless/iwlwifi/iwl-io.h
index d30cb0275d19..0a078b082833 100644
--- a/drivers/net/wireless/iwlwifi/iwl-io.h
+++ b/drivers/net/wireless/iwlwifi/iwl-io.h
@@ -32,6 +32,7 @@
32#include <linux/io.h> 32#include <linux/io.h>
33 33
34#include "iwl-debug.h" 34#include "iwl-debug.h"
35#include "iwl-devtrace.h"
35 36
36/* 37/*
37 * IO, register, and NIC memory access functions 38 * IO, register, and NIC memory access functions
@@ -61,7 +62,12 @@
61 * 62 *
62 */ 63 */
63 64
64#define _iwl_write32(priv, ofs, val) iowrite32((val), (priv)->hw_base + (ofs)) 65static inline void _iwl_write32(struct iwl_priv *priv, u32 ofs, u32 val)
66{
67 trace_iwlwifi_dev_iowrite32(priv, ofs, val);
68 iowrite32(val, priv->hw_base + ofs);
69}
70
65#ifdef CONFIG_IWLWIFI_DEBUG 71#ifdef CONFIG_IWLWIFI_DEBUG
66static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *priv, 72static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *priv,
67 u32 ofs, u32 val) 73 u32 ofs, u32 val)
@@ -75,7 +81,13 @@ static inline void __iwl_write32(const char *f, u32 l, struct iwl_priv *priv,
75#define iwl_write32(priv, ofs, val) _iwl_write32(priv, ofs, val) 81#define iwl_write32(priv, ofs, val) _iwl_write32(priv, ofs, val)
76#endif 82#endif
77 83
78#define _iwl_read32(priv, ofs) ioread32((priv)->hw_base + (ofs)) 84static inline u32 _iwl_read32(struct iwl_priv *priv, u32 ofs)
85{
86 u32 val = ioread32(priv->hw_base + ofs);
87 trace_iwlwifi_dev_ioread32(priv, ofs, val);
88 return val;
89}
90
79#ifdef CONFIG_IWLWIFI_DEBUG 91#ifdef CONFIG_IWLWIFI_DEBUG
80static inline u32 __iwl_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs) 92static inline u32 __iwl_read32(char *f, u32 l, struct iwl_priv *priv, u32 ofs)
81{ 93{
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.c b/drivers/net/wireless/iwlwifi/iwl-led.c
index f420c99e7240..478c90511ebf 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.c
+++ b/drivers/net/wireless/iwlwifi/iwl-led.c
@@ -42,15 +42,11 @@
42#include "iwl-core.h" 42#include "iwl-core.h"
43#include "iwl-io.h" 43#include "iwl-io.h"
44 44
45#ifdef CONFIG_IWLWIFI_DEBUG 45/* default: IWL_LED_BLINK(0) using blinking index table */
46static const char *led_type_str[] = { 46static int led_mode;
47 __stringify(IWL_LED_TRG_TX), 47module_param(led_mode, int, S_IRUGO);
48 __stringify(IWL_LED_TRG_RX), 48MODULE_PARM_DESC(led_mode, "led mode: 0=blinking, 1=On(RF On)/Off(RF Off), "
49 __stringify(IWL_LED_TRG_ASSOC), 49 "(default 0)\n");
50 __stringify(IWL_LED_TRG_RADIO),
51 NULL
52};
53#endif /* CONFIG_IWLWIFI_DEBUG */
54 50
55 51
56static const struct { 52static const struct {
@@ -65,11 +61,11 @@ static const struct {
65 {70, 65, 65}, 61 {70, 65, 65},
66 {50, 75, 75}, 62 {50, 75, 75},
67 {20, 85, 85}, 63 {20, 85, 85},
68 {15, 95, 95 }, 64 {10, 95, 95},
69 {10, 110, 110}, 65 {5, 110, 110},
70 {5, 130, 130}, 66 {1, 130, 130},
71 {0, 167, 167}, 67 {0, 167, 167},
72/* SOLID_ON */ 68 /* SOLID_ON */
73 {-1, IWL_LED_SOLID, 0} 69 {-1, IWL_LED_SOLID, 0}
74}; 70};
75 71
@@ -78,191 +74,74 @@ static const struct {
78#define IWL_MAX_BLINK_TBL (ARRAY_SIZE(blink_tbl) - 1) /* exclude SOLID_ON */ 74#define IWL_MAX_BLINK_TBL (ARRAY_SIZE(blink_tbl) - 1) /* exclude SOLID_ON */
79#define IWL_SOLID_BLINK_IDX (ARRAY_SIZE(blink_tbl) - 1) 75#define IWL_SOLID_BLINK_IDX (ARRAY_SIZE(blink_tbl) - 1)
80 76
81/* [0-256] -> [0..8] FIXME: we need [0..10] */ 77/*
82static inline int iwl_brightness_to_idx(enum led_brightness brightness) 78 * Adjust led blink rate to compensate on a MAC Clock difference on every HW
83{ 79 * Led blink rate analysis showed an average deviation of 0% on 3945,
84 return fls(0x000000FF & (u32)brightness); 80 * 5% on 4965 HW and 20% on 5000 series and up.
85} 81 * Need to compensate on the led on/off time per HW according to the deviation
86 82 * to achieve the desired led frequency
87/* Send led command */ 83 * The calculation is: (100-averageDeviation)/100 * blinkTime
88static int iwl_send_led_cmd(struct iwl_priv *priv, struct iwl_led_cmd *led_cmd) 84 * For code efficiency the calculation will be:
85 * compensation = (100 - averageDeviation) * 64 / 100
86 * NewBlinkTime = (compensation * BlinkTime) / 64
87 */
88static inline u8 iwl_blink_compensation(struct iwl_priv *priv,
89 u8 time, u16 compensation)
89{ 90{
90 struct iwl_host_cmd cmd = { 91 if (!compensation) {
91 .id = REPLY_LEDS_CMD, 92 IWL_ERR(priv, "undefined blink compensation: "
92 .len = sizeof(struct iwl_led_cmd), 93 "use pre-defined blinking time\n");
93 .data = led_cmd, 94 return time;
94 .flags = CMD_ASYNC, 95 }
95 .callback = NULL,
96 };
97 u32 reg;
98
99 reg = iwl_read32(priv, CSR_LED_REG);
100 if (reg != (reg & CSR_LED_BSM_CTRL_MSK))
101 iwl_write32(priv, CSR_LED_REG, reg & CSR_LED_BSM_CTRL_MSK);
102 96
103 return iwl_send_cmd(priv, &cmd); 97 return (u8)((time * compensation) >> 6);
104} 98}
105 99
106/* Set led pattern command */ 100/* Set led pattern command */
107static int iwl_led_pattern(struct iwl_priv *priv, int led_id, 101static int iwl_led_pattern(struct iwl_priv *priv, unsigned int idx)
108 unsigned int idx)
109{ 102{
110 struct iwl_led_cmd led_cmd = { 103 struct iwl_led_cmd led_cmd = {
111 .id = led_id, 104 .id = IWL_LED_LINK,
112 .interval = IWL_DEF_LED_INTRVL 105 .interval = IWL_DEF_LED_INTRVL
113 }; 106 };
114 107
115 BUG_ON(idx > IWL_MAX_BLINK_TBL); 108 BUG_ON(idx > IWL_MAX_BLINK_TBL);
116 109
117 led_cmd.on = blink_tbl[idx].on_time; 110 IWL_DEBUG_LED(priv, "Led blink time compensation= %u\n",
118 led_cmd.off = blink_tbl[idx].off_time; 111 priv->cfg->led_compensation);
119 112 led_cmd.on =
120 return iwl_send_led_cmd(priv, &led_cmd); 113 iwl_blink_compensation(priv, blink_tbl[idx].on_time,
121} 114 priv->cfg->led_compensation);
122 115 led_cmd.off =
123/* Set led register off */ 116 iwl_blink_compensation(priv, blink_tbl[idx].off_time,
124static int iwl_led_on_reg(struct iwl_priv *priv, int led_id) 117 priv->cfg->led_compensation);
125{
126 IWL_DEBUG_LED(priv, "led on %d\n", led_id);
127 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_ON);
128 return 0;
129}
130 118
131#if 0 119 return priv->cfg->ops->led->cmd(priv, &led_cmd);
132/* Set led on command */
133static int iwl_led_on(struct iwl_priv *priv, int led_id)
134{
135 struct iwl_led_cmd led_cmd = {
136 .id = led_id,
137 .on = IWL_LED_SOLID,
138 .off = 0,
139 .interval = IWL_DEF_LED_INTRVL
140 };
141 return iwl_send_led_cmd(priv, &led_cmd);
142} 120}
143 121
144/* Set led off command */ 122int iwl_led_start(struct iwl_priv *priv)
145int iwl_led_off(struct iwl_priv *priv, int led_id)
146{ 123{
147 struct iwl_led_cmd led_cmd = { 124 return priv->cfg->ops->led->on(priv);
148 .id = led_id,
149 .on = 0,
150 .off = 0,
151 .interval = IWL_DEF_LED_INTRVL
152 };
153 IWL_DEBUG_LED(priv, "led off %d\n", led_id);
154 return iwl_send_led_cmd(priv, &led_cmd);
155} 125}
156#endif 126EXPORT_SYMBOL(iwl_led_start);
157
158 127
159/* Set led register off */ 128int iwl_led_associate(struct iwl_priv *priv)
160static int iwl_led_off_reg(struct iwl_priv *priv, int led_id)
161{
162 IWL_DEBUG_LED(priv, "LED Reg off\n");
163 iwl_write32(priv, CSR_LED_REG, CSR_LED_REG_TRUN_OFF);
164 return 0;
165}
166
167/*
168 * Set led register in case of disassociation according to rfkill state
169 */
170static int iwl_led_associate(struct iwl_priv *priv, int led_id)
171{ 129{
172 IWL_DEBUG_LED(priv, "Associated\n"); 130 IWL_DEBUG_LED(priv, "Associated\n");
173 priv->allow_blinking = 1; 131 if (led_mode == IWL_LED_BLINK)
174 return iwl_led_on_reg(priv, led_id); 132 priv->allow_blinking = 1;
175} 133 priv->last_blink_time = jiffies;
176static int iwl_led_disassociate(struct iwl_priv *priv, int led_id)
177{
178 priv->allow_blinking = 0;
179
180 return 0;
181}
182
183/*
184 * brightness call back function for Tx/Rx LED
185 */
186static int iwl_led_associated(struct iwl_priv *priv, int led_id)
187{
188 if (test_bit(STATUS_EXIT_PENDING, &priv->status) ||
189 !test_bit(STATUS_READY, &priv->status))
190 return 0;
191
192 134
193 /* start counting Tx/Rx bytes */
194 if (!priv->last_blink_time && priv->allow_blinking)
195 priv->last_blink_time = jiffies;
196 return 0; 135 return 0;
197} 136}
198 137
199/* 138int iwl_led_disassociate(struct iwl_priv *priv)
200 * brightness call back for association and radio
201 */
202static void iwl_led_brightness_set(struct led_classdev *led_cdev,
203 enum led_brightness brightness)
204{ 139{
205 struct iwl_led *led = container_of(led_cdev, struct iwl_led, led_dev); 140 priv->allow_blinking = 0;
206 struct iwl_priv *priv = led->priv;
207
208 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
209 return;
210
211
212 IWL_DEBUG_LED(priv, "Led type = %s brightness = %d\n",
213 led_type_str[led->type], brightness);
214 switch (brightness) {
215 case LED_FULL:
216 if (led->led_on)
217 led->led_on(priv, IWL_LED_LINK);
218 break;
219 case LED_OFF:
220 if (led->led_off)
221 led->led_off(priv, IWL_LED_LINK);
222 break;
223 default:
224 if (led->led_pattern) {
225 int idx = iwl_brightness_to_idx(brightness);
226 led->led_pattern(priv, IWL_LED_LINK, idx);
227 }
228 break;
229 }
230}
231
232
233
234/*
235 * Register led class with the system
236 */
237static int iwl_leds_register_led(struct iwl_priv *priv, struct iwl_led *led,
238 enum led_type type, u8 set_led,
239 char *trigger)
240{
241 struct device *device = wiphy_dev(priv->hw->wiphy);
242 int ret;
243
244 led->led_dev.name = led->name;
245 led->led_dev.brightness_set = iwl_led_brightness_set;
246 led->led_dev.default_trigger = trigger;
247
248 led->priv = priv;
249 led->type = type;
250
251 ret = led_classdev_register(device, &led->led_dev);
252 if (ret) {
253 IWL_ERR(priv, "Error: failed to register led handler.\n");
254 return ret;
255 }
256
257 led->registered = 1;
258
259 if (set_led && led->led_on)
260 led->led_on(priv, IWL_LED_LINK);
261 141
262 return 0; 142 return 0;
263} 143}
264 144
265
266/* 145/*
267 * calculate blink rate according to last second Tx/Rx activities 146 * calculate blink rate according to last second Tx/Rx activities
268 */ 147 */
@@ -288,7 +167,7 @@ static int iwl_get_blink_rate(struct iwl_priv *priv)
288 i = IWL_MAX_BLINK_TBL; 167 i = IWL_MAX_BLINK_TBL;
289 else 168 else
290 for (i = 0; i < IWL_MAX_BLINK_TBL; i++) 169 for (i = 0; i < IWL_MAX_BLINK_TBL; i++)
291 if (tpt > (blink_tbl[i].tpt * IWL_1MB_RATE)) 170 if (tpt > (blink_tbl[i].tpt * IWL_1MB_RATE))
292 break; 171 break;
293 172
294 IWL_DEBUG_LED(priv, "LED BLINK IDX=%d\n", i); 173 IWL_DEBUG_LED(priv, "LED BLINK IDX=%d\n", i);
@@ -317,8 +196,7 @@ void iwl_leds_background(struct iwl_priv *priv)
317 priv->last_blink_time = 0; 196 priv->last_blink_time = 0;
318 if (priv->last_blink_rate != IWL_SOLID_BLINK_IDX) { 197 if (priv->last_blink_rate != IWL_SOLID_BLINK_IDX) {
319 priv->last_blink_rate = IWL_SOLID_BLINK_IDX; 198 priv->last_blink_rate = IWL_SOLID_BLINK_IDX;
320 iwl_led_pattern(priv, IWL_LED_LINK, 199 iwl_led_pattern(priv, IWL_SOLID_BLINK_IDX);
321 IWL_SOLID_BLINK_IDX);
322 } 200 }
323 return; 201 return;
324 } 202 }
@@ -331,111 +209,18 @@ void iwl_leds_background(struct iwl_priv *priv)
331 209
332 /* call only if blink rate change */ 210 /* call only if blink rate change */
333 if (blink_idx != priv->last_blink_rate) 211 if (blink_idx != priv->last_blink_rate)
334 iwl_led_pattern(priv, IWL_LED_LINK, blink_idx); 212 iwl_led_pattern(priv, blink_idx);
335 213
336 priv->last_blink_time = jiffies; 214 priv->last_blink_time = jiffies;
337 priv->last_blink_rate = blink_idx; 215 priv->last_blink_rate = blink_idx;
338} 216}
217EXPORT_SYMBOL(iwl_leds_background);
339 218
340/* Register all led handler */ 219void iwl_leds_init(struct iwl_priv *priv)
341int iwl_leds_register(struct iwl_priv *priv)
342{ 220{
343 char *trigger;
344 int ret;
345
346 priv->last_blink_rate = 0; 221 priv->last_blink_rate = 0;
347 priv->led_tpt = 0; 222 priv->led_tpt = 0;
348 priv->last_blink_time = 0; 223 priv->last_blink_time = 0;
349 priv->allow_blinking = 0; 224 priv->allow_blinking = 0;
350
351 trigger = ieee80211_get_radio_led_name(priv->hw);
352 snprintf(priv->led[IWL_LED_TRG_RADIO].name,
353 sizeof(priv->led[IWL_LED_TRG_RADIO].name), "iwl-%s::radio",
354 wiphy_name(priv->hw->wiphy));
355
356 priv->led[IWL_LED_TRG_RADIO].led_on = iwl_led_on_reg;
357 priv->led[IWL_LED_TRG_RADIO].led_off = iwl_led_off_reg;
358 priv->led[IWL_LED_TRG_RADIO].led_pattern = NULL;
359
360 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_RADIO],
361 IWL_LED_TRG_RADIO, 1, trigger);
362 if (ret)
363 goto exit_fail;
364
365 trigger = ieee80211_get_assoc_led_name(priv->hw);
366 snprintf(priv->led[IWL_LED_TRG_ASSOC].name,
367 sizeof(priv->led[IWL_LED_TRG_ASSOC].name), "iwl-%s::assoc",
368 wiphy_name(priv->hw->wiphy));
369
370 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_ASSOC],
371 IWL_LED_TRG_ASSOC, 0, trigger);
372
373 /* for assoc always turn led on */
374 priv->led[IWL_LED_TRG_ASSOC].led_on = iwl_led_associate;
375 priv->led[IWL_LED_TRG_ASSOC].led_off = iwl_led_disassociate;
376 priv->led[IWL_LED_TRG_ASSOC].led_pattern = NULL;
377
378 if (ret)
379 goto exit_fail;
380
381 trigger = ieee80211_get_rx_led_name(priv->hw);
382 snprintf(priv->led[IWL_LED_TRG_RX].name,
383 sizeof(priv->led[IWL_LED_TRG_RX].name), "iwl-%s::RX",
384 wiphy_name(priv->hw->wiphy));
385
386 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_RX],
387 IWL_LED_TRG_RX, 0, trigger);
388
389 priv->led[IWL_LED_TRG_RX].led_on = iwl_led_associated;
390 priv->led[IWL_LED_TRG_RX].led_off = iwl_led_associated;
391 priv->led[IWL_LED_TRG_RX].led_pattern = iwl_led_pattern;
392
393 if (ret)
394 goto exit_fail;
395
396 trigger = ieee80211_get_tx_led_name(priv->hw);
397 snprintf(priv->led[IWL_LED_TRG_TX].name,
398 sizeof(priv->led[IWL_LED_TRG_TX].name), "iwl-%s::TX",
399 wiphy_name(priv->hw->wiphy));
400
401 ret = iwl_leds_register_led(priv, &priv->led[IWL_LED_TRG_TX],
402 IWL_LED_TRG_TX, 0, trigger);
403
404 priv->led[IWL_LED_TRG_TX].led_on = iwl_led_associated;
405 priv->led[IWL_LED_TRG_TX].led_off = iwl_led_associated;
406 priv->led[IWL_LED_TRG_TX].led_pattern = iwl_led_pattern;
407
408 if (ret)
409 goto exit_fail;
410
411 return 0;
412
413exit_fail:
414 iwl_leds_unregister(priv);
415 return ret;
416} 225}
417EXPORT_SYMBOL(iwl_leds_register); 226EXPORT_SYMBOL(iwl_leds_init);
418
419/* unregister led class */
420static void iwl_leds_unregister_led(struct iwl_led *led, u8 set_led)
421{
422 if (!led->registered)
423 return;
424
425 led_classdev_unregister(&led->led_dev);
426
427 if (set_led)
428 led->led_dev.brightness_set(&led->led_dev, LED_OFF);
429 led->registered = 0;
430}
431
432/* Unregister all led handlers */
433void iwl_leds_unregister(struct iwl_priv *priv)
434{
435 iwl_leds_unregister_led(&priv->led[IWL_LED_TRG_ASSOC], 0);
436 iwl_leds_unregister_led(&priv->led[IWL_LED_TRG_RX], 0);
437 iwl_leds_unregister_led(&priv->led[IWL_LED_TRG_TX], 0);
438 iwl_leds_unregister_led(&priv->led[IWL_LED_TRG_RADIO], 1);
439}
440EXPORT_SYMBOL(iwl_leds_unregister);
441
diff --git a/drivers/net/wireless/iwlwifi/iwl-led.h b/drivers/net/wireless/iwlwifi/iwl-led.h
index ef9b174c37ff..f47f053f02ea 100644
--- a/drivers/net/wireless/iwlwifi/iwl-led.h
+++ b/drivers/net/wireless/iwlwifi/iwl-led.h
@@ -30,9 +30,6 @@
30 30
31struct iwl_priv; 31struct iwl_priv;
32 32
33#ifdef CONFIG_IWLWIFI_LEDS
34#include <linux/leds.h>
35
36#define IWL_LED_SOLID 11 33#define IWL_LED_SOLID 11
37#define IWL_LED_NAME_LEN 31 34#define IWL_LED_NAME_LEN 31
38#define IWL_DEF_LED_INTRVL cpu_to_le32(1000) 35#define IWL_DEF_LED_INTRVL cpu_to_le32(1000)
@@ -47,38 +44,23 @@ enum led_type {
47 IWL_LED_TRG_RADIO, 44 IWL_LED_TRG_RADIO,
48 IWL_LED_TRG_MAX, 45 IWL_LED_TRG_MAX,
49}; 46};
50#endif
51
52#ifdef CONFIG_IWLWIFI_LEDS
53
54struct iwl_led {
55 struct iwl_priv *priv;
56 struct led_classdev led_dev;
57 char name[32];
58 47
59 int (*led_on) (struct iwl_priv *priv, int led_id); 48/*
60 int (*led_off) (struct iwl_priv *priv, int led_id); 49 * LED mode
61 int (*led_pattern) (struct iwl_priv *priv, int led_id, unsigned int idx); 50 * IWL_LED_BLINK: adjust led blink rate based on blink table
62 51 * IWL_LED_RF_STATE: turn LED on/off based on RF state
63 enum led_type type; 52 * LED ON = RF ON
64 unsigned int registered; 53 * LED OFF = RF OFF
54 */
55enum iwl_led_mode {
56 IWL_LED_BLINK,
57 IWL_LED_RF_STATE,
65}; 58};
66 59
67int iwl_leds_register(struct iwl_priv *priv); 60void iwl_leds_init(struct iwl_priv *priv);
68void iwl_leds_unregister(struct iwl_priv *priv);
69void iwl_leds_background(struct iwl_priv *priv); 61void iwl_leds_background(struct iwl_priv *priv);
62int iwl_led_start(struct iwl_priv *priv);
63int iwl_led_associate(struct iwl_priv *priv);
64int iwl_led_disassociate(struct iwl_priv *priv);
70 65
71#else
72static inline int iwl_leds_register(struct iwl_priv *priv)
73{
74 return 0;
75}
76static inline void iwl_leds_unregister(struct iwl_priv *priv)
77{
78}
79static inline void iwl_leds_background(struct iwl_priv *priv)
80{
81}
82
83#endif /* CONFIG_IWLWIFI_LEDS */
84#endif /* __iwl_leds_h__ */ 66#endif /* __iwl_leds_h__ */
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.c b/drivers/net/wireless/iwlwifi/iwl-power.c
index 60be976afff8..9bce2c1625e3 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.c
+++ b/drivers/net/wireless/iwlwifi/iwl-power.c
@@ -66,7 +66,7 @@ MODULE_PARM_DESC(no_sleep_autoadjust,
66 66
67struct iwl_power_vec_entry { 67struct iwl_power_vec_entry {
68 struct iwl_powertable_cmd cmd; 68 struct iwl_powertable_cmd cmd;
69 u8 no_dtim; 69 u8 no_dtim; /* number of skip dtim */
70}; 70};
71 71
72#define IWL_DTIM_RANGE_0_MAX 2 72#define IWL_DTIM_RANGE_0_MAX 2
@@ -83,8 +83,9 @@ struct iwl_power_vec_entry {
83 cpu_to_le32(X4)} 83 cpu_to_le32(X4)}
84/* default power management (not Tx power) table values */ 84/* default power management (not Tx power) table values */
85/* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */ 85/* for DTIM period 0 through IWL_DTIM_RANGE_0_MAX */
86/* DTIM 0 - 2 */
86static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = { 87static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = {
87 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0}, 88 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 1, 2, 2, 0xFF)}, 0},
88 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0}, 89 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 2, 2, 0xFF)}, 0},
89 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0}, 90 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 2, 2, 2, 0xFF)}, 0},
90 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1}, 91 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 2, 4, 4, 0xFF)}, 1},
@@ -93,15 +94,17 @@ static const struct iwl_power_vec_entry range_0[IWL_POWER_NUM] = {
93 94
94 95
95/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */ 96/* for DTIM period IWL_DTIM_RANGE_0_MAX + 1 through IWL_DTIM_RANGE_1_MAX */
97/* DTIM 3 - 10 */
96static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = { 98static const struct iwl_power_vec_entry range_1[IWL_POWER_NUM] = {
97 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0}, 99 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 4)}, 0},
98 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0}, 100 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(1, 2, 3, 4, 7)}, 0},
99 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0}, 101 {{SLP, SLP_TOUT(50), SLP_TOUT(100), SLP_VEC(2, 4, 6, 7, 9)}, 0},
100 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1}, 102 {{SLP, SLP_TOUT(50), SLP_TOUT(25), SLP_VEC(2, 4, 6, 9, 10)}, 1},
101 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 7, 10, 10)}, 2} 103 {{SLP, SLP_TOUT(25), SLP_TOUT(25), SLP_VEC(2, 4, 6, 10, 10)}, 2}
102}; 104};
103 105
104/* for DTIM period > IWL_DTIM_RANGE_1_MAX */ 106/* for DTIM period > IWL_DTIM_RANGE_1_MAX */
107/* DTIM 11 - */
105static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = { 108static const struct iwl_power_vec_entry range_2[IWL_POWER_NUM] = {
106 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0}, 109 {{SLP, SLP_TOUT(200), SLP_TOUT(500), SLP_VEC(1, 2, 3, 4, 0xFF)}, 0},
107 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0}, 110 {{SLP, SLP_TOUT(200), SLP_TOUT(300), SLP_VEC(2, 4, 6, 7, 0xFF)}, 0},
@@ -115,13 +118,15 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
115 enum iwl_power_level lvl, int period) 118 enum iwl_power_level lvl, int period)
116{ 119{
117 const struct iwl_power_vec_entry *table; 120 const struct iwl_power_vec_entry *table;
118 int max_sleep, i; 121 int max_sleep[IWL_POWER_VEC_SIZE] = { 0 };
119 bool skip; 122 int i;
123 u8 skip;
124 u32 slp_itrvl;
120 125
121 table = range_2; 126 table = range_2;
122 if (period < IWL_DTIM_RANGE_1_MAX) 127 if (period <= IWL_DTIM_RANGE_1_MAX)
123 table = range_1; 128 table = range_1;
124 if (period < IWL_DTIM_RANGE_0_MAX) 129 if (period <= IWL_DTIM_RANGE_0_MAX)
125 table = range_0; 130 table = range_0;
126 131
127 BUG_ON(lvl < 0 || lvl >= IWL_POWER_NUM); 132 BUG_ON(lvl < 0 || lvl >= IWL_POWER_NUM);
@@ -129,34 +134,60 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
129 *cmd = table[lvl].cmd; 134 *cmd = table[lvl].cmd;
130 135
131 if (period == 0) { 136 if (period == 0) {
132 skip = false; 137 skip = 0;
133 period = 1; 138 period = 1;
139 for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
140 max_sleep[i] = 1;
141
134 } else { 142 } else {
135 skip = !!table[lvl].no_dtim; 143 skip = table[lvl].no_dtim;
144 for (i = 0; i < IWL_POWER_VEC_SIZE; i++)
145 max_sleep[i] = le32_to_cpu(cmd->sleep_interval[i]);
146 max_sleep[IWL_POWER_VEC_SIZE - 1] = skip + 1;
136 } 147 }
137 148
138 if (skip) { 149 slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
139 __le32 slp_itrvl = cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]; 150 /* figure out the listen interval based on dtim period and skip */
140 max_sleep = le32_to_cpu(slp_itrvl); 151 if (slp_itrvl == 0xFF)
141 if (max_sleep == 0xFF) 152 cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
142 max_sleep = period * (skip + 1); 153 cpu_to_le32(period * (skip + 1));
143 else if (max_sleep > period) 154
144 max_sleep = (le32_to_cpu(slp_itrvl) / period) * period; 155 slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
156 if (slp_itrvl > period)
157 cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
158 cpu_to_le32((slp_itrvl / period) * period);
159
160 if (skip)
145 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK; 161 cmd->flags |= IWL_POWER_SLEEP_OVER_DTIM_MSK;
146 } else { 162 else
147 max_sleep = period;
148 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK; 163 cmd->flags &= ~IWL_POWER_SLEEP_OVER_DTIM_MSK;
149 }
150 164
151 for (i = 0; i < IWL_POWER_VEC_SIZE; i++) 165 slp_itrvl = le32_to_cpu(cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1]);
152 if (le32_to_cpu(cmd->sleep_interval[i]) > max_sleep) 166 if (slp_itrvl > IWL_CONN_MAX_LISTEN_INTERVAL)
153 cmd->sleep_interval[i] = cpu_to_le32(max_sleep); 167 cmd->sleep_interval[IWL_POWER_VEC_SIZE - 1] =
168 cpu_to_le32(IWL_CONN_MAX_LISTEN_INTERVAL);
169
170 /* enforce max sleep interval */
171 for (i = IWL_POWER_VEC_SIZE - 1; i >= 0 ; i--) {
172 if (le32_to_cpu(cmd->sleep_interval[i]) >
173 (max_sleep[i] * period))
174 cmd->sleep_interval[i] =
175 cpu_to_le32(max_sleep[i] * period);
176 if (i != (IWL_POWER_VEC_SIZE - 1)) {
177 if (le32_to_cpu(cmd->sleep_interval[i]) >
178 le32_to_cpu(cmd->sleep_interval[i+1]))
179 cmd->sleep_interval[i] =
180 cmd->sleep_interval[i+1];
181 }
182 }
154 183
155 if (priv->power_data.pci_pm) 184 if (priv->power_data.pci_pm)
156 cmd->flags |= IWL_POWER_PCI_PM_MSK; 185 cmd->flags |= IWL_POWER_PCI_PM_MSK;
157 else 186 else
158 cmd->flags &= ~IWL_POWER_PCI_PM_MSK; 187 cmd->flags &= ~IWL_POWER_PCI_PM_MSK;
159 188
189 IWL_DEBUG_POWER(priv, "numSkipDtim = %u, dtimPeriod = %d\n",
190 skip, period);
160 IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1); 191 IWL_DEBUG_POWER(priv, "Sleep command for index %d\n", lvl + 1);
161} 192}
162 193
@@ -165,26 +196,26 @@ static void iwl_static_sleep_cmd(struct iwl_priv *priv,
165 *============================================================================= 196 *=============================================================================
166 * Condition Nxt State Condition Nxt State Condition Nxt State 197 * Condition Nxt State Condition Nxt State Condition Nxt State
167 *----------------------------------------------------------------------------- 198 *-----------------------------------------------------------------------------
168 * IWL_TI_0 T >= 115 CT_KILL 115>T>=105 TI_1 N/A N/A 199 * IWL_TI_0 T >= 114 CT_KILL 114>T>=105 TI_1 N/A N/A
169 * IWL_TI_1 T >= 115 CT_KILL 115>T>=110 TI_2 T<=95 TI_0 200 * IWL_TI_1 T >= 114 CT_KILL 114>T>=110 TI_2 T<=95 TI_0
170 * IWL_TI_2 T >= 115 CT_KILL T<=100 TI_1 201 * IWL_TI_2 T >= 114 CT_KILL T<=100 TI_1
171 * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0 202 * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0
172 *============================================================================= 203 *=============================================================================
173 */ 204 */
174static const struct iwl_tt_trans tt_range_0[IWL_TI_STATE_MAX - 1] = { 205static const struct iwl_tt_trans tt_range_0[IWL_TI_STATE_MAX - 1] = {
175 {IWL_TI_0, IWL_ABSOLUTE_ZERO, 104}, 206 {IWL_TI_0, IWL_ABSOLUTE_ZERO, 104},
176 {IWL_TI_1, 105, CT_KILL_THRESHOLD}, 207 {IWL_TI_1, 105, CT_KILL_THRESHOLD - 1},
177 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD + 1, IWL_ABSOLUTE_MAX} 208 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
178}; 209};
179static const struct iwl_tt_trans tt_range_1[IWL_TI_STATE_MAX - 1] = { 210static const struct iwl_tt_trans tt_range_1[IWL_TI_STATE_MAX - 1] = {
180 {IWL_TI_0, IWL_ABSOLUTE_ZERO, 95}, 211 {IWL_TI_0, IWL_ABSOLUTE_ZERO, 95},
181 {IWL_TI_2, 110, CT_KILL_THRESHOLD}, 212 {IWL_TI_2, 110, CT_KILL_THRESHOLD - 1},
182 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD + 1, IWL_ABSOLUTE_MAX} 213 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
183}; 214};
184static const struct iwl_tt_trans tt_range_2[IWL_TI_STATE_MAX - 1] = { 215static const struct iwl_tt_trans tt_range_2[IWL_TI_STATE_MAX - 1] = {
185 {IWL_TI_1, IWL_ABSOLUTE_ZERO, 100}, 216 {IWL_TI_1, IWL_ABSOLUTE_ZERO, 100},
186 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD + 1, IWL_ABSOLUTE_MAX}, 217 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX},
187 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD + 1, IWL_ABSOLUTE_MAX} 218 {IWL_TI_CT_KILL, CT_KILL_THRESHOLD, IWL_ABSOLUTE_MAX}
188}; 219};
189static const struct iwl_tt_trans tt_range_3[IWL_TI_STATE_MAX - 1] = { 220static const struct iwl_tt_trans tt_range_3[IWL_TI_STATE_MAX - 1] = {
190 {IWL_TI_0, IWL_ABSOLUTE_ZERO, CT_KILL_EXIT_THRESHOLD}, 221 {IWL_TI_0, IWL_ABSOLUTE_ZERO, CT_KILL_EXIT_THRESHOLD},
@@ -294,6 +325,9 @@ int iwl_power_update_mode(struct iwl_priv *priv, bool force)
294 325
295 if (priv->cfg->broken_powersave) 326 if (priv->cfg->broken_powersave)
296 iwl_power_sleep_cam_cmd(priv, &cmd); 327 iwl_power_sleep_cam_cmd(priv, &cmd);
328 else if (priv->cfg->supports_idle &&
329 priv->hw->conf.flags & IEEE80211_CONF_IDLE)
330 iwl_static_sleep_cmd(priv, &cmd, IWL_POWER_INDEX_5, 20);
297 else if (tt->state >= IWL_TI_1) 331 else if (tt->state >= IWL_TI_1)
298 iwl_static_sleep_cmd(priv, &cmd, tt->tt_power_mode, dtimper); 332 iwl_static_sleep_cmd(priv, &cmd, tt->tt_power_mode, dtimper);
299 else if (!enabled) 333 else if (!enabled)
@@ -348,6 +382,23 @@ bool iwl_ht_enabled(struct iwl_priv *priv)
348} 382}
349EXPORT_SYMBOL(iwl_ht_enabled); 383EXPORT_SYMBOL(iwl_ht_enabled);
350 384
385bool iwl_within_ct_kill_margin(struct iwl_priv *priv)
386{
387 s32 temp = priv->temperature; /* degrees CELSIUS except 4965 */
388 bool within_margin = false;
389
390 if ((priv->hw_rev & CSR_HW_REV_TYPE_MSK) == CSR_HW_REV_TYPE_4965)
391 temp = KELVIN_TO_CELSIUS(priv->temperature);
392
393 if (!priv->thermal_throttle.advanced_tt)
394 within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >=
395 CT_KILL_THRESHOLD_LEGACY) ? true : false;
396 else
397 within_margin = ((temp + IWL_TT_CT_KILL_MARGIN) >=
398 CT_KILL_THRESHOLD) ? true : false;
399 return within_margin;
400}
401
351enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv) 402enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv)
352{ 403{
353 struct iwl_tt_mgmt *tt = &priv->thermal_throttle; 404 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
@@ -372,6 +423,7 @@ enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv)
372} 423}
373 424
374#define CT_KILL_EXIT_DURATION (5) /* 5 seconds duration */ 425#define CT_KILL_EXIT_DURATION (5) /* 5 seconds duration */
426#define CT_KILL_WAITING_DURATION (300) /* 300ms duration */
375 427
376/* 428/*
377 * toggle the bit to wake up uCode and check the temperature 429 * toggle the bit to wake up uCode and check the temperature
@@ -409,6 +461,7 @@ static void iwl_tt_check_exit_ct_kill(unsigned long data)
409 /* Reschedule the ct_kill timer to occur in 461 /* Reschedule the ct_kill timer to occur in
410 * CT_KILL_EXIT_DURATION seconds to ensure we get a 462 * CT_KILL_EXIT_DURATION seconds to ensure we get a
411 * thermal update */ 463 * thermal update */
464 IWL_DEBUG_POWER(priv, "schedule ct_kill exit timer\n");
412 mod_timer(&priv->thermal_throttle.ct_kill_exit_tm, jiffies + 465 mod_timer(&priv->thermal_throttle.ct_kill_exit_tm, jiffies +
413 CT_KILL_EXIT_DURATION * HZ); 466 CT_KILL_EXIT_DURATION * HZ);
414 } 467 }
@@ -432,6 +485,33 @@ static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
432 } 485 }
433} 486}
434 487
488static void iwl_tt_ready_for_ct_kill(unsigned long data)
489{
490 struct iwl_priv *priv = (struct iwl_priv *)data;
491 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
492
493 if (test_bit(STATUS_EXIT_PENDING, &priv->status))
494 return;
495
496 /* temperature timer expired, ready to go into CT_KILL state */
497 if (tt->state != IWL_TI_CT_KILL) {
498 IWL_DEBUG_POWER(priv, "entering CT_KILL state when temperature timer expired\n");
499 tt->state = IWL_TI_CT_KILL;
500 set_bit(STATUS_CT_KILL, &priv->status);
501 iwl_perform_ct_kill_task(priv, true);
502 }
503}
504
505static void iwl_prepare_ct_kill_task(struct iwl_priv *priv)
506{
507 IWL_DEBUG_POWER(priv, "Prepare to enter IWL_TI_CT_KILL\n");
508 /* make request to retrieve statistics information */
509 iwl_send_statistics_request(priv, 0);
510 /* Reschedule the ct_kill wait timer */
511 mod_timer(&priv->thermal_throttle.ct_kill_waiting_tm,
512 jiffies + msecs_to_jiffies(CT_KILL_WAITING_DURATION));
513}
514
435#define IWL_MINIMAL_POWER_THRESHOLD (CT_KILL_THRESHOLD_LEGACY) 515#define IWL_MINIMAL_POWER_THRESHOLD (CT_KILL_THRESHOLD_LEGACY)
436#define IWL_REDUCED_PERFORMANCE_THRESHOLD_2 (100) 516#define IWL_REDUCED_PERFORMANCE_THRESHOLD_2 (100)
437#define IWL_REDUCED_PERFORMANCE_THRESHOLD_1 (90) 517#define IWL_REDUCED_PERFORMANCE_THRESHOLD_1 (90)
@@ -445,7 +525,7 @@ static void iwl_perform_ct_kill_task(struct iwl_priv *priv,
445 * Throttle early enough to lower the power consumption before 525 * Throttle early enough to lower the power consumption before
446 * drastic steps are needed 526 * drastic steps are needed
447 */ 527 */
448static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp) 528static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
449{ 529{
450 struct iwl_tt_mgmt *tt = &priv->thermal_throttle; 530 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
451 enum iwl_tt_state old_state; 531 enum iwl_tt_state old_state;
@@ -474,6 +554,8 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp)
474#ifdef CONFIG_IWLWIFI_DEBUG 554#ifdef CONFIG_IWLWIFI_DEBUG
475 tt->tt_previous_temp = temp; 555 tt->tt_previous_temp = temp;
476#endif 556#endif
557 /* stop ct_kill_waiting_tm timer */
558 del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
477 if (tt->state != old_state) { 559 if (tt->state != old_state) {
478 switch (tt->state) { 560 switch (tt->state) {
479 case IWL_TI_0: 561 case IWL_TI_0:
@@ -494,17 +576,28 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp)
494 break; 576 break;
495 } 577 }
496 mutex_lock(&priv->mutex); 578 mutex_lock(&priv->mutex);
497 if (iwl_power_update_mode(priv, true)) { 579 if (old_state == IWL_TI_CT_KILL)
580 clear_bit(STATUS_CT_KILL, &priv->status);
581 if (tt->state != IWL_TI_CT_KILL &&
582 iwl_power_update_mode(priv, true)) {
498 /* TT state not updated 583 /* TT state not updated
499 * try again during next temperature read 584 * try again during next temperature read
500 */ 585 */
586 if (old_state == IWL_TI_CT_KILL)
587 set_bit(STATUS_CT_KILL, &priv->status);
501 tt->state = old_state; 588 tt->state = old_state;
502 IWL_ERR(priv, "Cannot update power mode, " 589 IWL_ERR(priv, "Cannot update power mode, "
503 "TT state not updated\n"); 590 "TT state not updated\n");
504 } else { 591 } else {
505 if (tt->state == IWL_TI_CT_KILL) 592 if (tt->state == IWL_TI_CT_KILL) {
506 iwl_perform_ct_kill_task(priv, true); 593 if (force) {
507 else if (old_state == IWL_TI_CT_KILL && 594 set_bit(STATUS_CT_KILL, &priv->status);
595 iwl_perform_ct_kill_task(priv, true);
596 } else {
597 iwl_prepare_ct_kill_task(priv);
598 tt->state = old_state;
599 }
600 } else if (old_state == IWL_TI_CT_KILL &&
508 tt->state != IWL_TI_CT_KILL) 601 tt->state != IWL_TI_CT_KILL)
509 iwl_perform_ct_kill_task(priv, false); 602 iwl_perform_ct_kill_task(priv, false);
510 IWL_DEBUG_POWER(priv, "Temperature state changed %u\n", 603 IWL_DEBUG_POWER(priv, "Temperature state changed %u\n",
@@ -531,13 +624,13 @@ static void iwl_legacy_tt_handler(struct iwl_priv *priv, s32 temp)
531 *============================================================================= 624 *=============================================================================
532 * Condition Nxt State Condition Nxt State Condition Nxt State 625 * Condition Nxt State Condition Nxt State Condition Nxt State
533 *----------------------------------------------------------------------------- 626 *-----------------------------------------------------------------------------
534 * IWL_TI_0 T >= 115 CT_KILL 115>T>=105 TI_1 N/A N/A 627 * IWL_TI_0 T >= 114 CT_KILL 114>T>=105 TI_1 N/A N/A
535 * IWL_TI_1 T >= 115 CT_KILL 115>T>=110 TI_2 T<=95 TI_0 628 * IWL_TI_1 T >= 114 CT_KILL 114>T>=110 TI_2 T<=95 TI_0
536 * IWL_TI_2 T >= 115 CT_KILL T<=100 TI_1 629 * IWL_TI_2 T >= 114 CT_KILL T<=100 TI_1
537 * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0 630 * IWL_CT_KILL N/A N/A N/A N/A T<=95 TI_0
538 *============================================================================= 631 *=============================================================================
539 */ 632 */
540static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp) 633static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp, bool force)
541{ 634{
542 struct iwl_tt_mgmt *tt = &priv->thermal_throttle; 635 struct iwl_tt_mgmt *tt = &priv->thermal_throttle;
543 int i; 636 int i;
@@ -582,6 +675,8 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp)
582 break; 675 break;
583 } 676 }
584 } 677 }
678 /* stop ct_kill_waiting_tm timer */
679 del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
585 if (changed) { 680 if (changed) {
586 struct iwl_rxon_cmd *rxon = &priv->staging_rxon; 681 struct iwl_rxon_cmd *rxon = &priv->staging_rxon;
587 682
@@ -613,12 +708,17 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp)
613 iwl_set_rxon_ht(priv, &priv->current_ht_config); 708 iwl_set_rxon_ht(priv, &priv->current_ht_config);
614 } 709 }
615 mutex_lock(&priv->mutex); 710 mutex_lock(&priv->mutex);
616 if (iwl_power_update_mode(priv, true)) { 711 if (old_state == IWL_TI_CT_KILL)
712 clear_bit(STATUS_CT_KILL, &priv->status);
713 if (tt->state != IWL_TI_CT_KILL &&
714 iwl_power_update_mode(priv, true)) {
617 /* TT state not updated 715 /* TT state not updated
618 * try again during next temperature read 716 * try again during next temperature read
619 */ 717 */
620 IWL_ERR(priv, "Cannot update power mode, " 718 IWL_ERR(priv, "Cannot update power mode, "
621 "TT state not updated\n"); 719 "TT state not updated\n");
720 if (old_state == IWL_TI_CT_KILL)
721 set_bit(STATUS_CT_KILL, &priv->status);
622 tt->state = old_state; 722 tt->state = old_state;
623 } else { 723 } else {
624 IWL_DEBUG_POWER(priv, 724 IWL_DEBUG_POWER(priv,
@@ -626,9 +726,15 @@ static void iwl_advance_tt_handler(struct iwl_priv *priv, s32 temp)
626 tt->state); 726 tt->state);
627 if (old_state != IWL_TI_CT_KILL && 727 if (old_state != IWL_TI_CT_KILL &&
628 tt->state == IWL_TI_CT_KILL) { 728 tt->state == IWL_TI_CT_KILL) {
629 IWL_DEBUG_POWER(priv, "Enter IWL_TI_CT_KILL\n"); 729 if (force) {
630 iwl_perform_ct_kill_task(priv, true); 730 IWL_DEBUG_POWER(priv,
631 731 "Enter IWL_TI_CT_KILL\n");
732 set_bit(STATUS_CT_KILL, &priv->status);
733 iwl_perform_ct_kill_task(priv, true);
734 } else {
735 iwl_prepare_ct_kill_task(priv);
736 tt->state = old_state;
737 }
632 } else if (old_state == IWL_TI_CT_KILL && 738 } else if (old_state == IWL_TI_CT_KILL &&
633 tt->state != IWL_TI_CT_KILL) { 739 tt->state != IWL_TI_CT_KILL) {
634 IWL_DEBUG_POWER(priv, "Exit IWL_TI_CT_KILL\n"); 740 IWL_DEBUG_POWER(priv, "Exit IWL_TI_CT_KILL\n");
@@ -665,10 +771,11 @@ static void iwl_bg_ct_enter(struct work_struct *work)
665 "- ucode going to sleep!\n"); 771 "- ucode going to sleep!\n");
666 if (!priv->thermal_throttle.advanced_tt) 772 if (!priv->thermal_throttle.advanced_tt)
667 iwl_legacy_tt_handler(priv, 773 iwl_legacy_tt_handler(priv,
668 IWL_MINIMAL_POWER_THRESHOLD); 774 IWL_MINIMAL_POWER_THRESHOLD,
775 true);
669 else 776 else
670 iwl_advance_tt_handler(priv, 777 iwl_advance_tt_handler(priv,
671 CT_KILL_THRESHOLD + 1); 778 CT_KILL_THRESHOLD + 1, true);
672 } 779 }
673} 780}
674 781
@@ -695,11 +802,18 @@ static void iwl_bg_ct_exit(struct work_struct *work)
695 IWL_ERR(priv, 802 IWL_ERR(priv,
696 "Device temperature below critical" 803 "Device temperature below critical"
697 "- ucode awake!\n"); 804 "- ucode awake!\n");
805 /*
806 * exit from CT_KILL state
807 * reset the current temperature reading
808 */
809 priv->temperature = 0;
698 if (!priv->thermal_throttle.advanced_tt) 810 if (!priv->thermal_throttle.advanced_tt)
699 iwl_legacy_tt_handler(priv, 811 iwl_legacy_tt_handler(priv,
700 IWL_REDUCED_PERFORMANCE_THRESHOLD_2); 812 IWL_REDUCED_PERFORMANCE_THRESHOLD_2,
813 true);
701 else 814 else
702 iwl_advance_tt_handler(priv, CT_KILL_EXIT_THRESHOLD); 815 iwl_advance_tt_handler(priv, CT_KILL_EXIT_THRESHOLD,
816 true);
703 } 817 }
704} 818}
705 819
@@ -735,9 +849,9 @@ static void iwl_bg_tt_work(struct work_struct *work)
735 temp = KELVIN_TO_CELSIUS(priv->temperature); 849 temp = KELVIN_TO_CELSIUS(priv->temperature);
736 850
737 if (!priv->thermal_throttle.advanced_tt) 851 if (!priv->thermal_throttle.advanced_tt)
738 iwl_legacy_tt_handler(priv, temp); 852 iwl_legacy_tt_handler(priv, temp, false);
739 else 853 else
740 iwl_advance_tt_handler(priv, temp); 854 iwl_advance_tt_handler(priv, temp, false);
741} 855}
742 856
743void iwl_tt_handler(struct iwl_priv *priv) 857void iwl_tt_handler(struct iwl_priv *priv)
@@ -768,16 +882,18 @@ void iwl_tt_initialize(struct iwl_priv *priv)
768 tt->state = IWL_TI_0; 882 tt->state = IWL_TI_0;
769 init_timer(&priv->thermal_throttle.ct_kill_exit_tm); 883 init_timer(&priv->thermal_throttle.ct_kill_exit_tm);
770 priv->thermal_throttle.ct_kill_exit_tm.data = (unsigned long)priv; 884 priv->thermal_throttle.ct_kill_exit_tm.data = (unsigned long)priv;
771 priv->thermal_throttle.ct_kill_exit_tm.function = iwl_tt_check_exit_ct_kill; 885 priv->thermal_throttle.ct_kill_exit_tm.function =
772 886 iwl_tt_check_exit_ct_kill;
887 init_timer(&priv->thermal_throttle.ct_kill_waiting_tm);
888 priv->thermal_throttle.ct_kill_waiting_tm.data = (unsigned long)priv;
889 priv->thermal_throttle.ct_kill_waiting_tm.function =
890 iwl_tt_ready_for_ct_kill;
773 /* setup deferred ct kill work */ 891 /* setup deferred ct kill work */
774 INIT_WORK(&priv->tt_work, iwl_bg_tt_work); 892 INIT_WORK(&priv->tt_work, iwl_bg_tt_work);
775 INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter); 893 INIT_WORK(&priv->ct_enter, iwl_bg_ct_enter);
776 INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit); 894 INIT_WORK(&priv->ct_exit, iwl_bg_ct_exit);
777 895
778 switch (priv->hw_rev & CSR_HW_REV_TYPE_MSK) { 896 if (priv->cfg->adv_thermal_throttle) {
779 case CSR_HW_REV_TYPE_6x00:
780 case CSR_HW_REV_TYPE_6x50:
781 IWL_DEBUG_POWER(priv, "Advanced Thermal Throttling\n"); 897 IWL_DEBUG_POWER(priv, "Advanced Thermal Throttling\n");
782 tt->restriction = kzalloc(sizeof(struct iwl_tt_restriction) * 898 tt->restriction = kzalloc(sizeof(struct iwl_tt_restriction) *
783 IWL_TI_STATE_MAX, GFP_KERNEL); 899 IWL_TI_STATE_MAX, GFP_KERNEL);
@@ -810,11 +926,9 @@ void iwl_tt_initialize(struct iwl_priv *priv)
810 &restriction_range[0], size); 926 &restriction_range[0], size);
811 priv->thermal_throttle.advanced_tt = true; 927 priv->thermal_throttle.advanced_tt = true;
812 } 928 }
813 break; 929 } else {
814 default:
815 IWL_DEBUG_POWER(priv, "Legacy Thermal Throttling\n"); 930 IWL_DEBUG_POWER(priv, "Legacy Thermal Throttling\n");
816 priv->thermal_throttle.advanced_tt = false; 931 priv->thermal_throttle.advanced_tt = false;
817 break;
818 } 932 }
819} 933}
820EXPORT_SYMBOL(iwl_tt_initialize); 934EXPORT_SYMBOL(iwl_tt_initialize);
@@ -826,6 +940,8 @@ void iwl_tt_exit(struct iwl_priv *priv)
826 940
827 /* stop ct_kill_exit_tm timer if activated */ 941 /* stop ct_kill_exit_tm timer if activated */
828 del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm); 942 del_timer_sync(&priv->thermal_throttle.ct_kill_exit_tm);
943 /* stop ct_kill_waiting_tm timer if activated */
944 del_timer_sync(&priv->thermal_throttle.ct_kill_waiting_tm);
829 cancel_work_sync(&priv->tt_work); 945 cancel_work_sync(&priv->tt_work);
830 cancel_work_sync(&priv->ct_enter); 946 cancel_work_sync(&priv->ct_enter);
831 cancel_work_sync(&priv->ct_exit); 947 cancel_work_sync(&priv->ct_exit);
diff --git a/drivers/net/wireless/iwlwifi/iwl-power.h b/drivers/net/wireless/iwlwifi/iwl-power.h
index df6f6a49712b..310c32e8f698 100644
--- a/drivers/net/wireless/iwlwifi/iwl-power.h
+++ b/drivers/net/wireless/iwlwifi/iwl-power.h
@@ -33,6 +33,7 @@
33#define IWL_ABSOLUTE_ZERO 0 33#define IWL_ABSOLUTE_ZERO 0
34#define IWL_ABSOLUTE_MAX 0xFFFFFFFF 34#define IWL_ABSOLUTE_MAX 0xFFFFFFFF
35#define IWL_TT_INCREASE_MARGIN 5 35#define IWL_TT_INCREASE_MARGIN 5
36#define IWL_TT_CT_KILL_MARGIN 3
36 37
37enum iwl_antenna_ok { 38enum iwl_antenna_ok {
38 IWL_ANT_OK_NONE, 39 IWL_ANT_OK_NONE,
@@ -110,6 +111,7 @@ struct iwl_tt_mgmt {
110 struct iwl_tt_restriction *restriction; 111 struct iwl_tt_restriction *restriction;
111 struct iwl_tt_trans *transaction; 112 struct iwl_tt_trans *transaction;
112 struct timer_list ct_kill_exit_tm; 113 struct timer_list ct_kill_exit_tm;
114 struct timer_list ct_kill_waiting_tm;
113}; 115};
114 116
115enum iwl_power_level { 117enum iwl_power_level {
@@ -129,6 +131,7 @@ struct iwl_power_mgr {
129 131
130int iwl_power_update_mode(struct iwl_priv *priv, bool force); 132int iwl_power_update_mode(struct iwl_priv *priv, bool force);
131bool iwl_ht_enabled(struct iwl_priv *priv); 133bool iwl_ht_enabled(struct iwl_priv *priv);
134bool iwl_within_ct_kill_margin(struct iwl_priv *priv);
132enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv); 135enum iwl_antenna_ok iwl_tx_ant_restriction(struct iwl_priv *priv);
133enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv); 136enum iwl_antenna_ok iwl_rx_ant_restriction(struct iwl_priv *priv);
134void iwl_tt_enter_ct_kill(struct iwl_priv *priv); 137void iwl_tt_enter_ct_kill(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-rx.c b/drivers/net/wireless/iwlwifi/iwl-rx.c
index 493626bcd3ec..e5339c9ad13e 100644
--- a/drivers/net/wireless/iwlwifi/iwl-rx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-rx.c
@@ -200,7 +200,7 @@ int iwl_rx_queue_restock(struct iwl_priv *priv)
200 list_del(element); 200 list_del(element);
201 201
202 /* Point to Rx buffer via next RBD in circular buffer */ 202 /* Point to Rx buffer via next RBD in circular buffer */
203 rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->aligned_dma_addr); 203 rxq->bd[rxq->write] = iwl_dma_addr2rbd_ptr(priv, rxb->page_dma);
204 rxq->queue[rxq->write] = rxb; 204 rxq->queue[rxq->write] = rxb;
205 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 205 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
206 rxq->free_count--; 206 rxq->free_count--;
@@ -239,8 +239,9 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
239 struct iwl_rx_queue *rxq = &priv->rxq; 239 struct iwl_rx_queue *rxq = &priv->rxq;
240 struct list_head *element; 240 struct list_head *element;
241 struct iwl_rx_mem_buffer *rxb; 241 struct iwl_rx_mem_buffer *rxb;
242 struct sk_buff *skb; 242 struct page *page;
243 unsigned long flags; 243 unsigned long flags;
244 gfp_t gfp_mask = priority;
244 245
245 while (1) { 246 while (1) {
246 spin_lock_irqsave(&rxq->lock, flags); 247 spin_lock_irqsave(&rxq->lock, flags);
@@ -251,30 +252,35 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
251 spin_unlock_irqrestore(&rxq->lock, flags); 252 spin_unlock_irqrestore(&rxq->lock, flags);
252 253
253 if (rxq->free_count > RX_LOW_WATERMARK) 254 if (rxq->free_count > RX_LOW_WATERMARK)
254 priority |= __GFP_NOWARN; 255 gfp_mask |= __GFP_NOWARN;
255 /* Alloc a new receive buffer */ 256
256 skb = alloc_skb(priv->hw_params.rx_buf_size + 256, 257 if (priv->hw_params.rx_page_order > 0)
257 priority); 258 gfp_mask |= __GFP_COMP;
258 259
259 if (!skb) { 260 /* Alloc a new receive buffer */
261 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
262 if (!page) {
260 if (net_ratelimit()) 263 if (net_ratelimit())
261 IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n"); 264 IWL_DEBUG_INFO(priv, "alloc_pages failed, "
265 "order: %d\n",
266 priv->hw_params.rx_page_order);
267
262 if ((rxq->free_count <= RX_LOW_WATERMARK) && 268 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
263 net_ratelimit()) 269 net_ratelimit())
264 IWL_CRIT(priv, "Failed to allocate SKB buffer with %s. Only %u free buffers remaining.\n", 270 IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
265 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL", 271 priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
266 rxq->free_count); 272 rxq->free_count);
267 /* We don't reschedule replenish work here -- we will 273 /* We don't reschedule replenish work here -- we will
268 * call the restock method and if it still needs 274 * call the restock method and if it still needs
269 * more buffers it will schedule replenish */ 275 * more buffers it will schedule replenish */
270 break; 276 return;
271 } 277 }
272 278
273 spin_lock_irqsave(&rxq->lock, flags); 279 spin_lock_irqsave(&rxq->lock, flags);
274 280
275 if (list_empty(&rxq->rx_used)) { 281 if (list_empty(&rxq->rx_used)) {
276 spin_unlock_irqrestore(&rxq->lock, flags); 282 spin_unlock_irqrestore(&rxq->lock, flags);
277 dev_kfree_skb_any(skb); 283 __free_pages(page, priv->hw_params.rx_page_order);
278 return; 284 return;
279 } 285 }
280 element = rxq->rx_used.next; 286 element = rxq->rx_used.next;
@@ -283,24 +289,21 @@ void iwl_rx_allocate(struct iwl_priv *priv, gfp_t priority)
283 289
284 spin_unlock_irqrestore(&rxq->lock, flags); 290 spin_unlock_irqrestore(&rxq->lock, flags);
285 291
286 rxb->skb = skb; 292 rxb->page = page;
287 /* Get physical address of RB/SKB */ 293 /* Get physical address of the RB */
288 rxb->real_dma_addr = pci_map_single( 294 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
289 priv->pci_dev, 295 PAGE_SIZE << priv->hw_params.rx_page_order,
290 rxb->skb->data, 296 PCI_DMA_FROMDEVICE);
291 priv->hw_params.rx_buf_size + 256,
292 PCI_DMA_FROMDEVICE);
293 /* dma address must be no more than 36 bits */ 297 /* dma address must be no more than 36 bits */
294 BUG_ON(rxb->real_dma_addr & ~DMA_BIT_MASK(36)); 298 BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
295 /* and also 256 byte aligned! */ 299 /* and also 256 byte aligned! */
296 rxb->aligned_dma_addr = ALIGN(rxb->real_dma_addr, 256); 300 BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
297 skb_reserve(rxb->skb, rxb->aligned_dma_addr - rxb->real_dma_addr);
298 301
299 spin_lock_irqsave(&rxq->lock, flags); 302 spin_lock_irqsave(&rxq->lock, flags);
300 303
301 list_add_tail(&rxb->list, &rxq->rx_free); 304 list_add_tail(&rxb->list, &rxq->rx_free);
302 rxq->free_count++; 305 rxq->free_count++;
303 priv->alloc_rxb_skb++; 306 priv->alloc_rxb_page++;
304 307
305 spin_unlock_irqrestore(&rxq->lock, flags); 308 spin_unlock_irqrestore(&rxq->lock, flags);
306 } 309 }
@@ -336,12 +339,14 @@ void iwl_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
336{ 339{
337 int i; 340 int i;
338 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { 341 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
339 if (rxq->pool[i].skb != NULL) { 342 if (rxq->pool[i].page != NULL) {
340 pci_unmap_single(priv->pci_dev, 343 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
341 rxq->pool[i].real_dma_addr, 344 PAGE_SIZE << priv->hw_params.rx_page_order,
342 priv->hw_params.rx_buf_size + 256, 345 PCI_DMA_FROMDEVICE);
343 PCI_DMA_FROMDEVICE); 346 __free_pages(rxq->pool[i].page,
344 dev_kfree_skb(rxq->pool[i].skb); 347 priv->hw_params.rx_page_order);
348 rxq->pool[i].page = NULL;
349 priv->alloc_rxb_page--;
345 } 350 }
346 } 351 }
347 352
@@ -405,14 +410,14 @@ void iwl_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
405 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { 410 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
406 /* In the reset function, these buffers may have been allocated 411 /* In the reset function, these buffers may have been allocated
407 * to an SKB, so we need to unmap and free potential storage */ 412 * to an SKB, so we need to unmap and free potential storage */
408 if (rxq->pool[i].skb != NULL) { 413 if (rxq->pool[i].page != NULL) {
409 pci_unmap_single(priv->pci_dev, 414 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
410 rxq->pool[i].real_dma_addr, 415 PAGE_SIZE << priv->hw_params.rx_page_order,
411 priv->hw_params.rx_buf_size + 256, 416 PCI_DMA_FROMDEVICE);
412 PCI_DMA_FROMDEVICE); 417 priv->alloc_rxb_page--;
413 priv->alloc_rxb_skb--; 418 __free_pages(rxq->pool[i].page,
414 dev_kfree_skb(rxq->pool[i].skb); 419 priv->hw_params.rx_page_order);
415 rxq->pool[i].skb = NULL; 420 rxq->pool[i].page = NULL;
416 } 421 }
417 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 422 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
418 } 423 }
@@ -491,7 +496,7 @@ void iwl_rx_missed_beacon_notif(struct iwl_priv *priv,
491 struct iwl_rx_mem_buffer *rxb) 496 struct iwl_rx_mem_buffer *rxb)
492 497
493{ 498{
494 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 499 struct iwl_rx_packet *pkt = rxb_addr(rxb);
495 struct iwl_missed_beacon_notif *missed_beacon; 500 struct iwl_missed_beacon_notif *missed_beacon;
496 501
497 missed_beacon = &pkt->u.missed_beacon; 502 missed_beacon = &pkt->u.missed_beacon;
@@ -548,13 +553,51 @@ static void iwl_rx_calc_noise(struct iwl_priv *priv)
548 priv->last_rx_noise); 553 priv->last_rx_noise);
549} 554}
550 555
556#ifdef CONFIG_IWLWIFI_DEBUG
557/*
558 * based on the assumption of all statistics counter are in DWORD
559 * FIXME: This function is for debugging, do not deal with
560 * the case of counters roll-over.
561 */
562static void iwl_accumulative_statistics(struct iwl_priv *priv,
563 __le32 *stats)
564{
565 int i;
566 __le32 *prev_stats;
567 u32 *accum_stats;
568
569 prev_stats = (__le32 *)&priv->statistics;
570 accum_stats = (u32 *)&priv->accum_statistics;
571
572 for (i = sizeof(__le32); i < sizeof(struct iwl_notif_statistics);
573 i += sizeof(__le32), stats++, prev_stats++, accum_stats++)
574 if (le32_to_cpu(*stats) > le32_to_cpu(*prev_stats))
575 *accum_stats += (le32_to_cpu(*stats) -
576 le32_to_cpu(*prev_stats));
577
578 /* reset accumulative statistics for "no-counter" type statistics */
579 priv->accum_statistics.general.temperature =
580 priv->statistics.general.temperature;
581 priv->accum_statistics.general.temperature_m =
582 priv->statistics.general.temperature_m;
583 priv->accum_statistics.general.ttl_timestamp =
584 priv->statistics.general.ttl_timestamp;
585 priv->accum_statistics.tx.tx_power.ant_a =
586 priv->statistics.tx.tx_power.ant_a;
587 priv->accum_statistics.tx.tx_power.ant_b =
588 priv->statistics.tx.tx_power.ant_b;
589 priv->accum_statistics.tx.tx_power.ant_c =
590 priv->statistics.tx.tx_power.ant_c;
591}
592#endif
593
551#define REG_RECALIB_PERIOD (60) 594#define REG_RECALIB_PERIOD (60)
552 595
553void iwl_rx_statistics(struct iwl_priv *priv, 596void iwl_rx_statistics(struct iwl_priv *priv,
554 struct iwl_rx_mem_buffer *rxb) 597 struct iwl_rx_mem_buffer *rxb)
555{ 598{
556 int change; 599 int change;
557 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 600 struct iwl_rx_packet *pkt = rxb_addr(rxb);
558 601
559 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n", 602 IWL_DEBUG_RX(priv, "Statistics notification received (%d vs %d).\n",
560 (int)sizeof(priv->statistics), 603 (int)sizeof(priv->statistics),
@@ -566,6 +609,9 @@ void iwl_rx_statistics(struct iwl_priv *priv,
566 STATISTICS_REPLY_FLG_HT40_MODE_MSK) != 609 STATISTICS_REPLY_FLG_HT40_MODE_MSK) !=
567 (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK))); 610 (pkt->u.stats.flag & STATISTICS_REPLY_FLG_HT40_MODE_MSK)));
568 611
612#ifdef CONFIG_IWLWIFI_DEBUG
613 iwl_accumulative_statistics(priv, (__le32 *)&pkt->u.stats);
614#endif
569 memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics)); 615 memcpy(&priv->statistics, &pkt->u.stats, sizeof(priv->statistics));
570 616
571 set_bit(STATUS_STATISTICS, &priv->status); 617 set_bit(STATUS_STATISTICS, &priv->status);
@@ -582,9 +628,6 @@ void iwl_rx_statistics(struct iwl_priv *priv,
582 iwl_rx_calc_noise(priv); 628 iwl_rx_calc_noise(priv);
583 queue_work(priv->workqueue, &priv->run_time_calib_work); 629 queue_work(priv->workqueue, &priv->run_time_calib_work);
584 } 630 }
585
586 iwl_leds_background(priv);
587
588 if (priv->cfg->ops->lib->temp_ops.temperature && change) 631 if (priv->cfg->ops->lib->temp_ops.temperature && change)
589 priv->cfg->ops->lib->temp_ops.temperature(priv); 632 priv->cfg->ops->lib->temp_ops.temperature(priv);
590} 633}
@@ -878,6 +921,10 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
878 struct iwl_rx_mem_buffer *rxb, 921 struct iwl_rx_mem_buffer *rxb,
879 struct ieee80211_rx_status *stats) 922 struct ieee80211_rx_status *stats)
880{ 923{
924 struct sk_buff *skb;
925 int ret = 0;
926 __le16 fc = hdr->frame_control;
927
881 /* We only process data packets if the interface is open */ 928 /* We only process data packets if the interface is open */
882 if (unlikely(!priv->is_open)) { 929 if (unlikely(!priv->is_open)) {
883 IWL_DEBUG_DROP_LIMIT(priv, 930 IWL_DEBUG_DROP_LIMIT(priv,
@@ -890,15 +937,43 @@ static void iwl_pass_packet_to_mac80211(struct iwl_priv *priv,
890 iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats)) 937 iwl_set_decrypted_flag(priv, hdr, ampdu_status, stats))
891 return; 938 return;
892 939
893 /* Resize SKB from mac header to end of packet */ 940 skb = alloc_skb(IWL_LINK_HDR_MAX, GFP_ATOMIC);
894 skb_reserve(rxb->skb, (void *)hdr - (void *)rxb->skb->data); 941 if (!skb) {
895 skb_put(rxb->skb, len); 942 IWL_ERR(priv, "alloc_skb failed\n");
943 return;
944 }
945
946 skb_add_rx_frag(skb, 0, rxb->page, (void *)hdr - rxb_addr(rxb), len);
947
948 /* mac80211 currently doesn't support paged SKB. Convert it to
949 * linear SKB for management frame and data frame requires
950 * software decryption or software defragementation. */
951 if (ieee80211_is_mgmt(fc) ||
952 ieee80211_has_protected(fc) ||
953 ieee80211_has_morefrags(fc) ||
954 le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG)
955 ret = skb_linearize(skb);
956 else
957 ret = __pskb_pull_tail(skb, min_t(u16, IWL_LINK_HDR_MAX, len)) ?
958 0 : -ENOMEM;
959
960 if (ret) {
961 kfree_skb(skb);
962 goto out;
963 }
964
965 /*
966 * XXX: We cannot touch the page and its virtual memory (hdr) after
967 * here. It might have already been freed by the above skb change.
968 */
969
970 iwl_update_stats(priv, false, fc, len);
971 memcpy(IEEE80211_SKB_RXCB(skb), stats, sizeof(*stats));
896 972
897 iwl_update_stats(priv, false, hdr->frame_control, len); 973 ieee80211_rx(priv->hw, skb);
898 memcpy(IEEE80211_SKB_RXCB(rxb->skb), stats, sizeof(*stats)); 974 out:
899 ieee80211_rx_irqsafe(priv->hw, rxb->skb); 975 priv->alloc_rxb_page--;
900 priv->alloc_rxb_skb--; 976 rxb->page = NULL;
901 rxb->skb = NULL;
902} 977}
903 978
904/* This is necessary only for a number of statistics, see the caller. */ 979/* This is necessary only for a number of statistics, see the caller. */
@@ -926,7 +1001,7 @@ void iwl_rx_reply_rx(struct iwl_priv *priv,
926{ 1001{
927 struct ieee80211_hdr *header; 1002 struct ieee80211_hdr *header;
928 struct ieee80211_rx_status rx_status; 1003 struct ieee80211_rx_status rx_status;
929 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1004 struct iwl_rx_packet *pkt = rxb_addr(rxb);
930 struct iwl_rx_phy_res *phy_res; 1005 struct iwl_rx_phy_res *phy_res;
931 __le32 rx_pkt_status; 1006 __le32 rx_pkt_status;
932 struct iwl4965_rx_mpdu_res_start *amsdu; 1007 struct iwl4965_rx_mpdu_res_start *amsdu;
@@ -1087,7 +1162,7 @@ EXPORT_SYMBOL(iwl_rx_reply_rx);
1087void iwl_rx_reply_rx_phy(struct iwl_priv *priv, 1162void iwl_rx_reply_rx_phy(struct iwl_priv *priv,
1088 struct iwl_rx_mem_buffer *rxb) 1163 struct iwl_rx_mem_buffer *rxb)
1089{ 1164{
1090 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1165 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1091 priv->last_phy_res[0] = 1; 1166 priv->last_phy_res[0] = 1;
1092 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]), 1167 memcpy(&priv->last_phy_res[1], &(pkt->u.raw[0]),
1093 sizeof(struct iwl_rx_phy_res)); 1168 sizeof(struct iwl_rx_phy_res));
diff --git a/drivers/net/wireless/iwlwifi/iwl-scan.c b/drivers/net/wireless/iwlwifi/iwl-scan.c
index 4f3a108fa990..1eb0d0bf1fe4 100644
--- a/drivers/net/wireless/iwlwifi/iwl-scan.c
+++ b/drivers/net/wireless/iwlwifi/iwl-scan.c
@@ -27,7 +27,6 @@
27 *****************************************************************************/ 27 *****************************************************************************/
28#include <linux/types.h> 28#include <linux/types.h>
29#include <linux/etherdevice.h> 29#include <linux/etherdevice.h>
30#include <net/lib80211.h>
31#include <net/mac80211.h> 30#include <net/mac80211.h>
32 31
33#include "iwl-eeprom.h" 32#include "iwl-eeprom.h"
@@ -112,7 +111,7 @@ EXPORT_SYMBOL(iwl_scan_cancel_timeout);
112static int iwl_send_scan_abort(struct iwl_priv *priv) 111static int iwl_send_scan_abort(struct iwl_priv *priv)
113{ 112{
114 int ret = 0; 113 int ret = 0;
115 struct iwl_rx_packet *res; 114 struct iwl_rx_packet *pkt;
116 struct iwl_host_cmd cmd = { 115 struct iwl_host_cmd cmd = {
117 .id = REPLY_SCAN_ABORT_CMD, 116 .id = REPLY_SCAN_ABORT_CMD,
118 .flags = CMD_WANT_SKB, 117 .flags = CMD_WANT_SKB,
@@ -132,21 +131,21 @@ static int iwl_send_scan_abort(struct iwl_priv *priv)
132 return ret; 131 return ret;
133 } 132 }
134 133
135 res = (struct iwl_rx_packet *)cmd.reply_skb->data; 134 pkt = (struct iwl_rx_packet *)cmd.reply_page;
136 if (res->u.status != CAN_ABORT_STATUS) { 135 if (pkt->u.status != CAN_ABORT_STATUS) {
137 /* The scan abort will return 1 for success or 136 /* The scan abort will return 1 for success or
138 * 2 for "failure". A failure condition can be 137 * 2 for "failure". A failure condition can be
139 * due to simply not being in an active scan which 138 * due to simply not being in an active scan which
140 * can occur if we send the scan abort before we 139 * can occur if we send the scan abort before we
141 * the microcode has notified us that a scan is 140 * the microcode has notified us that a scan is
142 * completed. */ 141 * completed. */
143 IWL_DEBUG_INFO(priv, "SCAN_ABORT returned %d.\n", res->u.status); 142 IWL_DEBUG_INFO(priv, "SCAN_ABORT returned %d.\n", pkt->u.status);
144 clear_bit(STATUS_SCAN_ABORTING, &priv->status); 143 clear_bit(STATUS_SCAN_ABORTING, &priv->status);
145 clear_bit(STATUS_SCAN_HW, &priv->status); 144 clear_bit(STATUS_SCAN_HW, &priv->status);
146 } 145 }
147 146
148 priv->alloc_rxb_skb--; 147 priv->alloc_rxb_page--;
149 dev_kfree_skb_any(cmd.reply_skb); 148 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
150 149
151 return ret; 150 return ret;
152} 151}
@@ -156,7 +155,7 @@ static void iwl_rx_reply_scan(struct iwl_priv *priv,
156 struct iwl_rx_mem_buffer *rxb) 155 struct iwl_rx_mem_buffer *rxb)
157{ 156{
158#ifdef CONFIG_IWLWIFI_DEBUG 157#ifdef CONFIG_IWLWIFI_DEBUG
159 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 158 struct iwl_rx_packet *pkt = rxb_addr(rxb);
160 struct iwl_scanreq_notification *notif = 159 struct iwl_scanreq_notification *notif =
161 (struct iwl_scanreq_notification *)pkt->u.raw; 160 (struct iwl_scanreq_notification *)pkt->u.raw;
162 161
@@ -168,7 +167,7 @@ static void iwl_rx_reply_scan(struct iwl_priv *priv,
168static void iwl_rx_scan_start_notif(struct iwl_priv *priv, 167static void iwl_rx_scan_start_notif(struct iwl_priv *priv,
169 struct iwl_rx_mem_buffer *rxb) 168 struct iwl_rx_mem_buffer *rxb)
170{ 169{
171 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 170 struct iwl_rx_packet *pkt = rxb_addr(rxb);
172 struct iwl_scanstart_notification *notif = 171 struct iwl_scanstart_notification *notif =
173 (struct iwl_scanstart_notification *)pkt->u.raw; 172 (struct iwl_scanstart_notification *)pkt->u.raw;
174 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low); 173 priv->scan_start_tsf = le32_to_cpu(notif->tsf_low);
@@ -187,7 +186,7 @@ static void iwl_rx_scan_results_notif(struct iwl_priv *priv,
187 struct iwl_rx_mem_buffer *rxb) 186 struct iwl_rx_mem_buffer *rxb)
188{ 187{
189#ifdef CONFIG_IWLWIFI_DEBUG 188#ifdef CONFIG_IWLWIFI_DEBUG
190 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 189 struct iwl_rx_packet *pkt = rxb_addr(rxb);
191 struct iwl_scanresults_notification *notif = 190 struct iwl_scanresults_notification *notif =
192 (struct iwl_scanresults_notification *)pkt->u.raw; 191 (struct iwl_scanresults_notification *)pkt->u.raw;
193 192
@@ -214,7 +213,7 @@ static void iwl_rx_scan_complete_notif(struct iwl_priv *priv,
214 struct iwl_rx_mem_buffer *rxb) 213 struct iwl_rx_mem_buffer *rxb)
215{ 214{
216#ifdef CONFIG_IWLWIFI_DEBUG 215#ifdef CONFIG_IWLWIFI_DEBUG
217 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 216 struct iwl_rx_packet *pkt = rxb_addr(rxb);
218 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw; 217 struct iwl_scancomplete_notification *scan_notif = (void *)pkt->u.raw;
219 218
220 IWL_DEBUG_SCAN(priv, "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n", 219 IWL_DEBUG_SCAN(priv, "Scan complete: %d channels (TSF 0x%08X:%08X) - %d\n",
@@ -402,6 +401,7 @@ void iwl_init_scan_params(struct iwl_priv *priv)
402 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ]) 401 if (!priv->scan_tx_ant[IEEE80211_BAND_2GHZ])
403 priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx; 402 priv->scan_tx_ant[IEEE80211_BAND_2GHZ] = ant_idx;
404} 403}
404EXPORT_SYMBOL(iwl_init_scan_params);
405 405
406static int iwl_scan_initiate(struct iwl_priv *priv) 406static int iwl_scan_initiate(struct iwl_priv *priv)
407{ 407{
diff --git a/drivers/net/wireless/iwlwifi/iwl-spectrum.c b/drivers/net/wireless/iwlwifi/iwl-spectrum.c
index 022bcf115731..1ea5cd345fe8 100644
--- a/drivers/net/wireless/iwlwifi/iwl-spectrum.c
+++ b/drivers/net/wireless/iwlwifi/iwl-spectrum.c
@@ -177,7 +177,7 @@ static int iwl_get_measurement(struct iwl_priv *priv,
177static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv, 177static void iwl_rx_spectrum_measure_notif(struct iwl_priv *priv,
178 struct iwl_rx_mem_buffer *rxb) 178 struct iwl_rx_mem_buffer *rxb)
179{ 179{
180 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 180 struct iwl_rx_packet *pkt = rxb_addr(rxb);
181 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif); 181 struct iwl_spectrum_notification *report = &(pkt->u.spectrum_notif);
182 182
183 if (!report->state) { 183 if (!report->state) {
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.c b/drivers/net/wireless/iwlwifi/iwl-sta.c
index c6633fec8216..eba36f737388 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.c
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.c
@@ -99,32 +99,25 @@ static void iwl_sta_ucode_activate(struct iwl_priv *priv, u8 sta_id)
99 99
100static void iwl_add_sta_callback(struct iwl_priv *priv, 100static void iwl_add_sta_callback(struct iwl_priv *priv,
101 struct iwl_device_cmd *cmd, 101 struct iwl_device_cmd *cmd,
102 struct sk_buff *skb) 102 struct iwl_rx_packet *pkt)
103{ 103{
104 struct iwl_rx_packet *res = NULL;
105 struct iwl_addsta_cmd *addsta = 104 struct iwl_addsta_cmd *addsta =
106 (struct iwl_addsta_cmd *)cmd->cmd.payload; 105 (struct iwl_addsta_cmd *)cmd->cmd.payload;
107 u8 sta_id = addsta->sta.sta_id; 106 u8 sta_id = addsta->sta.sta_id;
108 107
109 if (!skb) { 108 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
110 IWL_ERR(priv, "Error: Response NULL in REPLY_ADD_STA.\n");
111 return;
112 }
113
114 res = (struct iwl_rx_packet *)skb->data;
115 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
116 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n", 109 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
117 res->hdr.flags); 110 pkt->hdr.flags);
118 return; 111 return;
119 } 112 }
120 113
121 switch (res->u.add_sta.status) { 114 switch (pkt->u.add_sta.status) {
122 case ADD_STA_SUCCESS_MSK: 115 case ADD_STA_SUCCESS_MSK:
123 iwl_sta_ucode_activate(priv, sta_id); 116 iwl_sta_ucode_activate(priv, sta_id);
124 /* fall through */ 117 /* fall through */
125 default: 118 default:
126 IWL_DEBUG_HC(priv, "Received REPLY_ADD_STA:(0x%08X)\n", 119 IWL_DEBUG_HC(priv, "Received REPLY_ADD_STA:(0x%08X)\n",
127 res->u.add_sta.status); 120 pkt->u.add_sta.status);
128 break; 121 break;
129 } 122 }
130} 123}
@@ -132,7 +125,7 @@ static void iwl_add_sta_callback(struct iwl_priv *priv,
132int iwl_send_add_sta(struct iwl_priv *priv, 125int iwl_send_add_sta(struct iwl_priv *priv,
133 struct iwl_addsta_cmd *sta, u8 flags) 126 struct iwl_addsta_cmd *sta, u8 flags)
134{ 127{
135 struct iwl_rx_packet *res = NULL; 128 struct iwl_rx_packet *pkt = NULL;
136 int ret = 0; 129 int ret = 0;
137 u8 data[sizeof(*sta)]; 130 u8 data[sizeof(*sta)];
138 struct iwl_host_cmd cmd = { 131 struct iwl_host_cmd cmd = {
@@ -152,15 +145,15 @@ int iwl_send_add_sta(struct iwl_priv *priv,
152 if (ret || (flags & CMD_ASYNC)) 145 if (ret || (flags & CMD_ASYNC))
153 return ret; 146 return ret;
154 147
155 res = (struct iwl_rx_packet *)cmd.reply_skb->data; 148 pkt = (struct iwl_rx_packet *)cmd.reply_page;
156 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 149 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
157 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n", 150 IWL_ERR(priv, "Bad return from REPLY_ADD_STA (0x%08X)\n",
158 res->hdr.flags); 151 pkt->hdr.flags);
159 ret = -EIO; 152 ret = -EIO;
160 } 153 }
161 154
162 if (ret == 0) { 155 if (ret == 0) {
163 switch (res->u.add_sta.status) { 156 switch (pkt->u.add_sta.status) {
164 case ADD_STA_SUCCESS_MSK: 157 case ADD_STA_SUCCESS_MSK:
165 iwl_sta_ucode_activate(priv, sta->sta.sta_id); 158 iwl_sta_ucode_activate(priv, sta->sta.sta_id);
166 IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n"); 159 IWL_DEBUG_INFO(priv, "REPLY_ADD_STA PASSED\n");
@@ -172,8 +165,8 @@ int iwl_send_add_sta(struct iwl_priv *priv,
172 } 165 }
173 } 166 }
174 167
175 priv->alloc_rxb_skb--; 168 priv->alloc_rxb_page--;
176 dev_kfree_skb_any(cmd.reply_skb); 169 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
177 170
178 return ret; 171 return ret;
179} 172}
@@ -189,6 +182,11 @@ static void iwl_set_ht_add_station(struct iwl_priv *priv, u8 index,
189 goto done; 182 goto done;
190 183
191 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2; 184 mimo_ps_mode = (sta_ht_inf->cap & IEEE80211_HT_CAP_SM_PS) >> 2;
185 IWL_DEBUG_ASSOC(priv, "spatial multiplexing power save mode: %s\n",
186 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_STATIC) ?
187 "static" :
188 (mimo_ps_mode == WLAN_HT_CAP_SM_PS_DYNAMIC) ?
189 "dynamic" : "disabled");
192 190
193 sta_flags = priv->stations[index].sta.station_flags; 191 sta_flags = priv->stations[index].sta.station_flags;
194 192
@@ -324,26 +322,19 @@ static void iwl_sta_ucode_deactivate(struct iwl_priv *priv, const char *addr)
324 322
325static void iwl_remove_sta_callback(struct iwl_priv *priv, 323static void iwl_remove_sta_callback(struct iwl_priv *priv,
326 struct iwl_device_cmd *cmd, 324 struct iwl_device_cmd *cmd,
327 struct sk_buff *skb) 325 struct iwl_rx_packet *pkt)
328{ 326{
329 struct iwl_rx_packet *res = NULL;
330 struct iwl_rem_sta_cmd *rm_sta = 327 struct iwl_rem_sta_cmd *rm_sta =
331 (struct iwl_rem_sta_cmd *)cmd->cmd.payload; 328 (struct iwl_rem_sta_cmd *)cmd->cmd.payload;
332 const char *addr = rm_sta->addr; 329 const char *addr = rm_sta->addr;
333 330
334 if (!skb) { 331 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
335 IWL_ERR(priv, "Error: Response NULL in REPLY_REMOVE_STA.\n");
336 return;
337 }
338
339 res = (struct iwl_rx_packet *)skb->data;
340 if (res->hdr.flags & IWL_CMD_FAILED_MSK) {
341 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n", 332 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
342 res->hdr.flags); 333 pkt->hdr.flags);
343 return; 334 return;
344 } 335 }
345 336
346 switch (res->u.rem_sta.status) { 337 switch (pkt->u.rem_sta.status) {
347 case REM_STA_SUCCESS_MSK: 338 case REM_STA_SUCCESS_MSK:
348 iwl_sta_ucode_deactivate(priv, addr); 339 iwl_sta_ucode_deactivate(priv, addr);
349 break; 340 break;
@@ -356,7 +347,7 @@ static void iwl_remove_sta_callback(struct iwl_priv *priv,
356static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr, 347static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
357 u8 flags) 348 u8 flags)
358{ 349{
359 struct iwl_rx_packet *res = NULL; 350 struct iwl_rx_packet *pkt;
360 int ret; 351 int ret;
361 352
362 struct iwl_rem_sta_cmd rm_sta_cmd; 353 struct iwl_rem_sta_cmd rm_sta_cmd;
@@ -381,15 +372,15 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
381 if (ret || (flags & CMD_ASYNC)) 372 if (ret || (flags & CMD_ASYNC))
382 return ret; 373 return ret;
383 374
384 res = (struct iwl_rx_packet *)cmd.reply_skb->data; 375 pkt = (struct iwl_rx_packet *)cmd.reply_page;
385 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 376 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
386 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n", 377 IWL_ERR(priv, "Bad return from REPLY_REMOVE_STA (0x%08X)\n",
387 res->hdr.flags); 378 pkt->hdr.flags);
388 ret = -EIO; 379 ret = -EIO;
389 } 380 }
390 381
391 if (!ret) { 382 if (!ret) {
392 switch (res->u.rem_sta.status) { 383 switch (pkt->u.rem_sta.status) {
393 case REM_STA_SUCCESS_MSK: 384 case REM_STA_SUCCESS_MSK:
394 iwl_sta_ucode_deactivate(priv, addr); 385 iwl_sta_ucode_deactivate(priv, addr);
395 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n"); 386 IWL_DEBUG_ASSOC(priv, "REPLY_REMOVE_STA PASSED\n");
@@ -401,8 +392,8 @@ static int iwl_send_remove_station(struct iwl_priv *priv, const u8 *addr,
401 } 392 }
402 } 393 }
403 394
404 priv->alloc_rxb_skb--; 395 priv->alloc_rxb_page--;
405 dev_kfree_skb_any(cmd.reply_skb); 396 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
406 397
407 return ret; 398 return ret;
408} 399}
@@ -1026,7 +1017,7 @@ int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
1026 */ 1017 */
1027 if (priv->current_ht_config.is_ht) { 1018 if (priv->current_ht_config.is_ht) {
1028 rcu_read_lock(); 1019 rcu_read_lock();
1029 sta = ieee80211_find_sta(priv->hw, addr); 1020 sta = ieee80211_find_sta(priv->vif, addr);
1030 if (sta) { 1021 if (sta) {
1031 memcpy(&ht_config, &sta->ht_cap, sizeof(ht_config)); 1022 memcpy(&ht_config, &sta->ht_cap, sizeof(ht_config));
1032 cur_ht_config = &ht_config; 1023 cur_ht_config = &ht_config;
@@ -1044,6 +1035,68 @@ int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap)
1044EXPORT_SYMBOL(iwl_rxon_add_station); 1035EXPORT_SYMBOL(iwl_rxon_add_station);
1045 1036
1046/** 1037/**
1038 * iwl_sta_init_bcast_lq - Initialize a bcast station's hardware rate table
1039 *
1040 * NOTE: Run REPLY_ADD_STA command to set up station table entry, before
1041 * calling this function (which runs REPLY_TX_LINK_QUALITY_CMD,
1042 * which requires station table entry to exist).
1043 */
1044static void iwl_sta_init_bcast_lq(struct iwl_priv *priv)
1045{
1046 int i, r;
1047 struct iwl_link_quality_cmd link_cmd = {
1048 .reserved1 = 0,
1049 };
1050 u32 rate_flags;
1051
1052 /* Set up the rate scaling to start at selected rate, fall back
1053 * all the way down to 1M in IEEE order, and then spin on 1M */
1054 if (priv->band == IEEE80211_BAND_5GHZ)
1055 r = IWL_RATE_6M_INDEX;
1056 else
1057 r = IWL_RATE_1M_INDEX;
1058
1059 for (i = 0; i < LINK_QUAL_MAX_RETRY_NUM; i++) {
1060 rate_flags = 0;
1061 if (r >= IWL_FIRST_CCK_RATE && r <= IWL_LAST_CCK_RATE)
1062 rate_flags |= RATE_MCS_CCK_MSK;
1063
1064 rate_flags |= first_antenna(priv->hw_params.valid_tx_ant) <<
1065 RATE_MCS_ANT_POS;
1066
1067 link_cmd.rs_table[i].rate_n_flags =
1068 iwl_hw_set_rate_n_flags(iwl_rates[r].plcp, rate_flags);
1069 r = iwl_get_prev_ieee_rate(r);
1070 }
1071
1072 link_cmd.general_params.single_stream_ant_msk =
1073 first_antenna(priv->hw_params.valid_tx_ant);
1074 link_cmd.general_params.dual_stream_ant_msk = 3;
1075 link_cmd.agg_params.agg_dis_start_th = LINK_QUAL_AGG_DISABLE_START_DEF;
1076 link_cmd.agg_params.agg_time_limit =
1077 cpu_to_le16(LINK_QUAL_AGG_TIME_LIMIT_DEF);
1078
1079 /* Update the rate scaling for control frame Tx to AP */
1080 link_cmd.sta_id = priv->hw_params.bcast_sta_id;
1081
1082 iwl_send_cmd_pdu_async(priv, REPLY_TX_LINK_QUALITY_CMD,
1083 sizeof(link_cmd), &link_cmd, NULL);
1084}
1085
1086
1087/**
1088 * iwl_add_bcast_station - add broadcast station into station table.
1089 */
1090void iwl_add_bcast_station(struct iwl_priv *priv)
1091{
1092 iwl_add_station(priv, iwl_bcast_addr, false, CMD_SYNC, NULL);
1093
1094 /* Set up default rate scaling table in device's station table */
1095 iwl_sta_init_bcast_lq(priv);
1096}
1097EXPORT_SYMBOL(iwl_add_bcast_station);
1098
1099/**
1047 * iwl_get_sta_id - Find station's index within station table 1100 * iwl_get_sta_id - Find station's index within station table
1048 * 1101 *
1049 * If new IBSS station, create new entry in station table 1102 * If new IBSS station, create new entry in station table
diff --git a/drivers/net/wireless/iwlwifi/iwl-sta.h b/drivers/net/wireless/iwlwifi/iwl-sta.h
index 6deebade6361..1c382de80d49 100644
--- a/drivers/net/wireless/iwlwifi/iwl-sta.h
+++ b/drivers/net/wireless/iwlwifi/iwl-sta.h
@@ -52,6 +52,7 @@ void iwl_update_tkip_key(struct iwl_priv *priv,
52 const u8 *addr, u32 iv32, u16 *phase1key); 52 const u8 *addr, u32 iv32, u16 *phase1key);
53 53
54int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap); 54int iwl_rxon_add_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
55void iwl_add_bcast_station(struct iwl_priv *priv);
55int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap); 56int iwl_remove_station(struct iwl_priv *priv, const u8 *addr, bool is_ap);
56void iwl_clear_stations_table(struct iwl_priv *priv); 57void iwl_clear_stations_table(struct iwl_priv *priv);
57int iwl_get_free_ucode_key_index(struct iwl_priv *priv); 58int iwl_get_free_ucode_key_index(struct iwl_priv *priv);
diff --git a/drivers/net/wireless/iwlwifi/iwl-tx.c b/drivers/net/wireless/iwlwifi/iwl-tx.c
index c18907544701..05e75109d842 100644
--- a/drivers/net/wireless/iwlwifi/iwl-tx.c
+++ b/drivers/net/wireless/iwlwifi/iwl-tx.c
@@ -131,7 +131,7 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
131 struct iwl_tx_queue *txq = &priv->txq[txq_id]; 131 struct iwl_tx_queue *txq = &priv->txq[txq_id];
132 struct iwl_queue *q = &txq->q; 132 struct iwl_queue *q = &txq->q;
133 struct pci_dev *dev = priv->pci_dev; 133 struct pci_dev *dev = priv->pci_dev;
134 int i, len; 134 int i;
135 135
136 if (q->n_bd == 0) 136 if (q->n_bd == 0)
137 return; 137 return;
@@ -141,8 +141,6 @@ void iwl_tx_queue_free(struct iwl_priv *priv, int txq_id)
141 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) 141 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd))
142 priv->cfg->ops->lib->txq_free_tfd(priv, txq); 142 priv->cfg->ops->lib->txq_free_tfd(priv, txq);
143 143
144 len = sizeof(struct iwl_device_cmd) * q->n_window;
145
146 /* De-alloc array of command/tx buffers */ 144 /* De-alloc array of command/tx buffers */
147 for (i = 0; i < TFD_TX_CMD_SLOTS; i++) 145 for (i = 0; i < TFD_TX_CMD_SLOTS; i++)
148 kfree(txq->cmd[i]); 146 kfree(txq->cmd[i]);
@@ -180,14 +178,11 @@ void iwl_cmd_queue_free(struct iwl_priv *priv)
180 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM]; 178 struct iwl_tx_queue *txq = &priv->txq[IWL_CMD_QUEUE_NUM];
181 struct iwl_queue *q = &txq->q; 179 struct iwl_queue *q = &txq->q;
182 struct pci_dev *dev = priv->pci_dev; 180 struct pci_dev *dev = priv->pci_dev;
183 int i, len; 181 int i;
184 182
185 if (q->n_bd == 0) 183 if (q->n_bd == 0)
186 return; 184 return;
187 185
188 len = sizeof(struct iwl_device_cmd) * q->n_window;
189 len += IWL_MAX_SCAN_SIZE;
190
191 /* De-alloc array of command/tx buffers */ 186 /* De-alloc array of command/tx buffers */
192 for (i = 0; i <= TFD_CMD_SLOTS; i++) 187 for (i = 0; i <= TFD_CMD_SLOTS; i++)
193 kfree(txq->cmd[i]); 188 kfree(txq->cmd[i]);
@@ -405,15 +400,19 @@ void iwl_hw_txq_ctx_free(struct iwl_priv *priv)
405 int txq_id; 400 int txq_id;
406 401
407 /* Tx queues */ 402 /* Tx queues */
408 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num; txq_id++) 403 if (priv->txq)
409 if (txq_id == IWL_CMD_QUEUE_NUM) 404 for (txq_id = 0; txq_id < priv->hw_params.max_txq_num;
410 iwl_cmd_queue_free(priv); 405 txq_id++)
411 else 406 if (txq_id == IWL_CMD_QUEUE_NUM)
412 iwl_tx_queue_free(priv, txq_id); 407 iwl_cmd_queue_free(priv);
413 408 else
409 iwl_tx_queue_free(priv, txq_id);
414 iwl_free_dma_ptr(priv, &priv->kw); 410 iwl_free_dma_ptr(priv, &priv->kw);
415 411
416 iwl_free_dma_ptr(priv, &priv->scd_bc_tbls); 412 iwl_free_dma_ptr(priv, &priv->scd_bc_tbls);
413
414 /* free tx queue structure */
415 iwl_free_txq_mem(priv);
417} 416}
418EXPORT_SYMBOL(iwl_hw_txq_ctx_free); 417EXPORT_SYMBOL(iwl_hw_txq_ctx_free);
419 418
@@ -445,6 +444,12 @@ int iwl_txq_ctx_reset(struct iwl_priv *priv)
445 IWL_ERR(priv, "Keep Warm allocation failed\n"); 444 IWL_ERR(priv, "Keep Warm allocation failed\n");
446 goto error_kw; 445 goto error_kw;
447 } 446 }
447
448 /* allocate tx queue structure */
449 ret = iwl_alloc_txq_mem(priv);
450 if (ret)
451 goto error;
452
448 spin_lock_irqsave(&priv->lock, flags); 453 spin_lock_irqsave(&priv->lock, flags);
449 454
450 /* Turn off all Tx DMA fifos */ 455 /* Turn off all Tx DMA fifos */
@@ -581,9 +586,7 @@ static void iwl_tx_cmd_build_rate(struct iwl_priv *priv,
581 u8 rate_plcp; 586 u8 rate_plcp;
582 587
583 /* Set retry limit on DATA packets and Probe Responses*/ 588 /* Set retry limit on DATA packets and Probe Responses*/
584 if (priv->data_retry_limit != -1) 589 if (ieee80211_is_probe_resp(fc))
585 data_retry_limit = priv->data_retry_limit;
586 else if (ieee80211_is_probe_resp(fc))
587 data_retry_limit = 3; 590 data_retry_limit = 3;
588 else 591 else
589 data_retry_limit = IWL_DEFAULT_TX_RETRY; 592 data_retry_limit = IWL_DEFAULT_TX_RETRY;
@@ -709,7 +712,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
709 dma_addr_t phys_addr; 712 dma_addr_t phys_addr;
710 dma_addr_t txcmd_phys; 713 dma_addr_t txcmd_phys;
711 dma_addr_t scratch_phys; 714 dma_addr_t scratch_phys;
712 u16 len, len_org; 715 u16 len, len_org, firstlen, secondlen;
713 u16 seq_number = 0; 716 u16 seq_number = 0;
714 __le16 fc; 717 __le16 fc;
715 u8 hdr_len; 718 u8 hdr_len;
@@ -842,7 +845,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
842 sizeof(struct iwl_cmd_header) + hdr_len; 845 sizeof(struct iwl_cmd_header) + hdr_len;
843 846
844 len_org = len; 847 len_org = len;
845 len = (len + 3) & ~3; 848 firstlen = len = (len + 3) & ~3;
846 849
847 if (len_org != len) 850 if (len_org != len)
848 len_org = 1; 851 len_org = 1;
@@ -876,7 +879,7 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
876 879
877 /* Set up TFD's 2nd entry to point directly to remainder of skb, 880 /* Set up TFD's 2nd entry to point directly to remainder of skb,
878 * if any (802.11 null frames have no payload). */ 881 * if any (802.11 null frames have no payload). */
879 len = skb->len - hdr_len; 882 secondlen = len = skb->len - hdr_len;
880 if (len) { 883 if (len) {
881 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len, 884 phys_addr = pci_map_single(priv->pci_dev, skb->data + hdr_len,
882 len, PCI_DMA_TODEVICE); 885 len, PCI_DMA_TODEVICE);
@@ -910,6 +913,12 @@ int iwl_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
910 pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys, 913 pci_dma_sync_single_for_device(priv->pci_dev, txcmd_phys,
911 len, PCI_DMA_BIDIRECTIONAL); 914 len, PCI_DMA_BIDIRECTIONAL);
912 915
916 trace_iwlwifi_dev_tx(priv,
917 &((struct iwl_tfd *)txq->tfds)[txq->q.write_ptr],
918 sizeof(struct iwl_tfd),
919 &out_cmd->hdr, firstlen,
920 skb->data + hdr_len, secondlen);
921
913 /* Tell device the write index *just past* this latest filled TFD */ 922 /* Tell device the write index *just past* this latest filled TFD */
914 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd); 923 q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
915 ret = iwl_txq_update_write_ptr(priv, txq); 924 ret = iwl_txq_update_write_ptr(priv, txq);
@@ -969,13 +978,20 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
969 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) && 978 BUG_ON((fix_size > TFD_MAX_PAYLOAD_SIZE) &&
970 !(cmd->flags & CMD_SIZE_HUGE)); 979 !(cmd->flags & CMD_SIZE_HUGE));
971 980
972 if (iwl_is_rfkill(priv)) { 981 if (iwl_is_rfkill(priv) || iwl_is_ctkill(priv)) {
973 IWL_DEBUG_INFO(priv, "Not sending command - RF KILL\n"); 982 IWL_WARN(priv, "Not sending command - %s KILL\n",
983 iwl_is_rfkill(priv) ? "RF" : "CT");
974 return -EIO; 984 return -EIO;
975 } 985 }
976 986
977 if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { 987 if (iwl_queue_space(q) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
978 IWL_ERR(priv, "No space for Tx\n"); 988 IWL_ERR(priv, "No space for Tx\n");
989 if (iwl_within_ct_kill_margin(priv))
990 iwl_tt_enter_ct_kill(priv);
991 else {
992 IWL_ERR(priv, "Restarting adapter due to queue full\n");
993 queue_work(priv->workqueue, &priv->restart);
994 }
979 return -ENOSPC; 995 return -ENOSPC;
980 } 996 }
981 997
@@ -1038,6 +1054,8 @@ int iwl_enqueue_hcmd(struct iwl_priv *priv, struct iwl_host_cmd *cmd)
1038 pci_unmap_addr_set(out_meta, mapping, phys_addr); 1054 pci_unmap_addr_set(out_meta, mapping, phys_addr);
1039 pci_unmap_len_set(out_meta, len, fix_size); 1055 pci_unmap_len_set(out_meta, len, fix_size);
1040 1056
1057 trace_iwlwifi_dev_hcmd(priv, &out_cmd->hdr, fix_size, cmd->flags);
1058
1041 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq, 1059 priv->cfg->ops->lib->txq_attach_buf_to_tfd(priv, txq,
1042 phys_addr, fix_size, 1, 1060 phys_addr, fix_size, 1,
1043 U32_PAD(cmd->len)); 1061 U32_PAD(cmd->len));
@@ -1104,11 +1122,6 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
1104 return; 1122 return;
1105 } 1123 }
1106 1124
1107 pci_unmap_single(priv->pci_dev,
1108 pci_unmap_addr(&txq->meta[cmd_idx], mapping),
1109 pci_unmap_len(&txq->meta[cmd_idx], len),
1110 PCI_DMA_BIDIRECTIONAL);
1111
1112 for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx; 1125 for (idx = iwl_queue_inc_wrap(idx, q->n_bd); q->read_ptr != idx;
1113 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) { 1126 q->read_ptr = iwl_queue_inc_wrap(q->read_ptr, q->n_bd)) {
1114 1127
@@ -1131,7 +1144,7 @@ static void iwl_hcmd_queue_reclaim(struct iwl_priv *priv, int txq_id,
1131 */ 1144 */
1132void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb) 1145void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1133{ 1146{
1134 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1147 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1135 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 1148 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1136 int txq_id = SEQ_TO_QUEUE(sequence); 1149 int txq_id = SEQ_TO_QUEUE(sequence);
1137 int index = SEQ_TO_INDEX(sequence); 1150 int index = SEQ_TO_INDEX(sequence);
@@ -1156,12 +1169,17 @@ void iwl_tx_cmd_complete(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
1156 cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index]; 1169 cmd = priv->txq[IWL_CMD_QUEUE_NUM].cmd[cmd_index];
1157 meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index]; 1170 meta = &priv->txq[IWL_CMD_QUEUE_NUM].meta[cmd_index];
1158 1171
1172 pci_unmap_single(priv->pci_dev,
1173 pci_unmap_addr(meta, mapping),
1174 pci_unmap_len(meta, len),
1175 PCI_DMA_BIDIRECTIONAL);
1176
1159 /* Input error checking is done when commands are added to queue. */ 1177 /* Input error checking is done when commands are added to queue. */
1160 if (meta->flags & CMD_WANT_SKB) { 1178 if (meta->flags & CMD_WANT_SKB) {
1161 meta->source->reply_skb = rxb->skb; 1179 meta->source->reply_page = (unsigned long)rxb_addr(rxb);
1162 rxb->skb = NULL; 1180 rxb->page = NULL;
1163 } else if (meta->callback) 1181 } else if (meta->callback)
1164 meta->callback(priv, cmd, rxb->skb); 1182 meta->callback(priv, cmd, pkt);
1165 1183
1166 iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index); 1184 iwl_hcmd_queue_reclaim(priv, txq_id, index, cmd_index);
1167 1185
@@ -1400,7 +1418,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1400 1418
1401 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]); 1419 info = IEEE80211_SKB_CB(priv->txq[scd_flow].txb[agg->start_idx].skb[0]);
1402 memset(&info->status, 0, sizeof(info->status)); 1420 memset(&info->status, 0, sizeof(info->status));
1403 info->flags = IEEE80211_TX_STAT_ACK; 1421 info->flags |= IEEE80211_TX_STAT_ACK;
1404 info->flags |= IEEE80211_TX_STAT_AMPDU; 1422 info->flags |= IEEE80211_TX_STAT_AMPDU;
1405 info->status.ampdu_ack_map = successes; 1423 info->status.ampdu_ack_map = successes;
1406 info->status.ampdu_ack_len = agg->frame_count; 1424 info->status.ampdu_ack_len = agg->frame_count;
@@ -1420,7 +1438,7 @@ static int iwl_tx_status_reply_compressed_ba(struct iwl_priv *priv,
1420void iwl_rx_reply_compressed_ba(struct iwl_priv *priv, 1438void iwl_rx_reply_compressed_ba(struct iwl_priv *priv,
1421 struct iwl_rx_mem_buffer *rxb) 1439 struct iwl_rx_mem_buffer *rxb)
1422{ 1440{
1423 struct iwl_rx_packet *pkt = (struct iwl_rx_packet *)rxb->skb->data; 1441 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1424 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba; 1442 struct iwl_compressed_ba_resp *ba_resp = &pkt->u.compressed_ba;
1425 struct iwl_tx_queue *txq = NULL; 1443 struct iwl_tx_queue *txq = NULL;
1426 struct iwl_ht_agg *agg; 1444 struct iwl_ht_agg *agg;
diff --git a/drivers/net/wireless/iwlwifi/iwl3945-base.c b/drivers/net/wireless/iwlwifi/iwl3945-base.c
index 837a193221cf..23b31e6dcacd 100644
--- a/drivers/net/wireless/iwlwifi/iwl3945-base.c
+++ b/drivers/net/wireless/iwlwifi/iwl3945-base.c
@@ -41,7 +41,6 @@
41#include <linux/if_arp.h> 41#include <linux/if_arp.h>
42 42
43#include <net/ieee80211_radiotap.h> 43#include <net/ieee80211_radiotap.h>
44#include <net/lib80211.h>
45#include <net/mac80211.h> 44#include <net/mac80211.h>
46 45
47#include <asm/div64.h> 46#include <asm/div64.h>
@@ -89,7 +88,6 @@ MODULE_LICENSE("GPL");
89 88
90 /* module parameters */ 89 /* module parameters */
91struct iwl_mod_params iwl3945_mod_params = { 90struct iwl_mod_params iwl3945_mod_params = {
92 .num_of_queues = IWL39_NUM_QUEUES, /* Not used */
93 .sw_crypto = 1, 91 .sw_crypto = 1,
94 .restart_fw = 1, 92 .restart_fw = 1,
95 /* the rest are 0 by default */ 93 /* the rest are 0 by default */
@@ -367,13 +365,13 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
367 struct sk_buff *skb_frag, 365 struct sk_buff *skb_frag,
368 int sta_id) 366 int sta_id)
369{ 367{
370 struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload; 368 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
371 struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo; 369 struct iwl_hw_key *keyinfo = &priv->stations[sta_id].keyinfo;
372 370
373 switch (keyinfo->alg) { 371 switch (keyinfo->alg) {
374 case ALG_CCMP: 372 case ALG_CCMP:
375 tx->sec_ctl = TX_CMD_SEC_CCM; 373 tx_cmd->sec_ctl = TX_CMD_SEC_CCM;
376 memcpy(tx->key, keyinfo->key, keyinfo->keylen); 374 memcpy(tx_cmd->key, keyinfo->key, keyinfo->keylen);
377 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n"); 375 IWL_DEBUG_TX(priv, "tx_cmd with AES hwcrypto\n");
378 break; 376 break;
379 377
@@ -381,13 +379,13 @@ static void iwl3945_build_tx_cmd_hwcrypto(struct iwl_priv *priv,
381 break; 379 break;
382 380
383 case ALG_WEP: 381 case ALG_WEP:
384 tx->sec_ctl = TX_CMD_SEC_WEP | 382 tx_cmd->sec_ctl = TX_CMD_SEC_WEP |
385 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT; 383 (info->control.hw_key->hw_key_idx & TX_CMD_SEC_MSK) << TX_CMD_SEC_SHIFT;
386 384
387 if (keyinfo->keylen == 13) 385 if (keyinfo->keylen == 13)
388 tx->sec_ctl |= TX_CMD_SEC_KEY128; 386 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
389 387
390 memcpy(&tx->key[3], keyinfo->key, keyinfo->keylen); 388 memcpy(&tx_cmd->key[3], keyinfo->key, keyinfo->keylen);
391 389
392 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption " 390 IWL_DEBUG_TX(priv, "Configuring packet for WEP encryption "
393 "with key %d\n", info->control.hw_key->hw_key_idx); 391 "with key %d\n", info->control.hw_key->hw_key_idx);
@@ -407,12 +405,11 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
407 struct ieee80211_tx_info *info, 405 struct ieee80211_tx_info *info,
408 struct ieee80211_hdr *hdr, u8 std_id) 406 struct ieee80211_hdr *hdr, u8 std_id)
409{ 407{
410 struct iwl3945_tx_cmd *tx = (struct iwl3945_tx_cmd *)cmd->cmd.payload; 408 struct iwl3945_tx_cmd *tx_cmd = (struct iwl3945_tx_cmd *)cmd->cmd.payload;
411 __le32 tx_flags = tx->tx_flags; 409 __le32 tx_flags = tx_cmd->tx_flags;
412 __le16 fc = hdr->frame_control; 410 __le16 fc = hdr->frame_control;
413 u8 rc_flags = info->control.rates[0].flags;
414 411
415 tx->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE; 412 tx_cmd->stop_time.life_time = TX_CMD_LIFE_TIME_INFINITE;
416 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) { 413 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
417 tx_flags |= TX_CMD_FLG_ACK_MSK; 414 tx_flags |= TX_CMD_FLG_ACK_MSK;
418 if (ieee80211_is_mgmt(fc)) 415 if (ieee80211_is_mgmt(fc))
@@ -425,25 +422,19 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
425 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 422 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
426 } 423 }
427 424
428 tx->sta_id = std_id; 425 tx_cmd->sta_id = std_id;
429 if (ieee80211_has_morefrags(fc)) 426 if (ieee80211_has_morefrags(fc))
430 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK; 427 tx_flags |= TX_CMD_FLG_MORE_FRAG_MSK;
431 428
432 if (ieee80211_is_data_qos(fc)) { 429 if (ieee80211_is_data_qos(fc)) {
433 u8 *qc = ieee80211_get_qos_ctl(hdr); 430 u8 *qc = ieee80211_get_qos_ctl(hdr);
434 tx->tid_tspec = qc[0] & 0xf; 431 tx_cmd->tid_tspec = qc[0] & 0xf;
435 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK; 432 tx_flags &= ~TX_CMD_FLG_SEQ_CTL_MSK;
436 } else { 433 } else {
437 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK; 434 tx_flags |= TX_CMD_FLG_SEQ_CTL_MSK;
438 } 435 }
439 436
440 if (rc_flags & IEEE80211_TX_RC_USE_RTS_CTS) { 437 priv->cfg->ops->utils->rts_tx_cmd_flag(info, &tx_flags);
441 tx_flags |= TX_CMD_FLG_RTS_MSK;
442 tx_flags &= ~TX_CMD_FLG_CTS_MSK;
443 } else if (rc_flags & IEEE80211_TX_RC_USE_CTS_PROTECT) {
444 tx_flags &= ~TX_CMD_FLG_RTS_MSK;
445 tx_flags |= TX_CMD_FLG_CTS_MSK;
446 }
447 438
448 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK)) 439 if ((tx_flags & TX_CMD_FLG_RTS_MSK) || (tx_flags & TX_CMD_FLG_CTS_MSK))
449 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK; 440 tx_flags |= TX_CMD_FLG_FULL_TXOP_PROT_MSK;
@@ -451,19 +442,16 @@ static void iwl3945_build_tx_cmd_basic(struct iwl_priv *priv,
451 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK); 442 tx_flags &= ~(TX_CMD_FLG_ANT_SEL_MSK);
452 if (ieee80211_is_mgmt(fc)) { 443 if (ieee80211_is_mgmt(fc)) {
453 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc)) 444 if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
454 tx->timeout.pm_frame_timeout = cpu_to_le16(3); 445 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(3);
455 else 446 else
456 tx->timeout.pm_frame_timeout = cpu_to_le16(2); 447 tx_cmd->timeout.pm_frame_timeout = cpu_to_le16(2);
457 } else { 448 } else {
458 tx->timeout.pm_frame_timeout = 0; 449 tx_cmd->timeout.pm_frame_timeout = 0;
459#ifdef CONFIG_IWLWIFI_LEDS
460 priv->rxtxpackets += le16_to_cpu(cmd->cmd.tx.len);
461#endif
462 } 450 }
463 451
464 tx->driver_txop = 0; 452 tx_cmd->driver_txop = 0;
465 tx->tx_flags = tx_flags; 453 tx_cmd->tx_flags = tx_flags;
466 tx->next_frame_len = 0; 454 tx_cmd->next_frame_len = 0;
467} 455}
468 456
469/* 457/*
@@ -473,7 +461,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
473{ 461{
474 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 462 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
475 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 463 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
476 struct iwl3945_tx_cmd *tx; 464 struct iwl3945_tx_cmd *tx_cmd;
477 struct iwl_tx_queue *txq = NULL; 465 struct iwl_tx_queue *txq = NULL;
478 struct iwl_queue *q = NULL; 466 struct iwl_queue *q = NULL;
479 struct iwl_device_cmd *out_cmd; 467 struct iwl_device_cmd *out_cmd;
@@ -572,9 +560,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
572 /* Init first empty entry in queue's array of Tx/cmd buffers */ 560 /* Init first empty entry in queue's array of Tx/cmd buffers */
573 out_cmd = txq->cmd[idx]; 561 out_cmd = txq->cmd[idx];
574 out_meta = &txq->meta[idx]; 562 out_meta = &txq->meta[idx];
575 tx = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload; 563 tx_cmd = (struct iwl3945_tx_cmd *)out_cmd->cmd.payload;
576 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr)); 564 memset(&out_cmd->hdr, 0, sizeof(out_cmd->hdr));
577 memset(tx, 0, sizeof(*tx)); 565 memset(tx_cmd, 0, sizeof(*tx_cmd));
578 566
579 /* 567 /*
580 * Set up the Tx-command (not MAC!) header. 568 * Set up the Tx-command (not MAC!) header.
@@ -587,7 +575,7 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
587 INDEX_TO_SEQ(q->write_ptr))); 575 INDEX_TO_SEQ(q->write_ptr)));
588 576
589 /* Copy MAC header from skb into command buffer */ 577 /* Copy MAC header from skb into command buffer */
590 memcpy(tx->hdr, hdr, hdr_len); 578 memcpy(tx_cmd->hdr, hdr, hdr_len);
591 579
592 580
593 if (info->control.hw_key) 581 if (info->control.hw_key)
@@ -601,12 +589,12 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
601 589
602 /* Total # bytes to be transmitted */ 590 /* Total # bytes to be transmitted */
603 len = (u16)skb->len; 591 len = (u16)skb->len;
604 tx->len = cpu_to_le16(len); 592 tx_cmd->len = cpu_to_le16(len);
605 593
606 iwl_dbg_log_tx_data_frame(priv, len, hdr); 594 iwl_dbg_log_tx_data_frame(priv, len, hdr);
607 iwl_update_stats(priv, true, fc, len); 595 iwl_update_stats(priv, true, fc, len);
608 tx->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK; 596 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_A_MSK;
609 tx->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK; 597 tx_cmd->tx_flags &= ~TX_CMD_FLG_ANT_B_MSK;
610 598
611 if (!ieee80211_has_morefrags(hdr->frame_control)) { 599 if (!ieee80211_has_morefrags(hdr->frame_control)) {
612 txq->need_update = 1; 600 txq->need_update = 1;
@@ -619,9 +607,9 @@ static int iwl3945_tx_skb(struct iwl_priv *priv, struct sk_buff *skb)
619 607
620 IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n", 608 IWL_DEBUG_TX(priv, "sequence nr = 0X%x \n",
621 le16_to_cpu(out_cmd->hdr.sequence)); 609 le16_to_cpu(out_cmd->hdr.sequence));
622 IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx->tx_flags)); 610 IWL_DEBUG_TX(priv, "tx_flags = 0X%x \n", le32_to_cpu(tx_cmd->tx_flags));
623 iwl_print_hex_dump(priv, IWL_DL_TX, tx, sizeof(*tx)); 611 iwl_print_hex_dump(priv, IWL_DL_TX, tx_cmd, sizeof(*tx_cmd));
624 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx->hdr, 612 iwl_print_hex_dump(priv, IWL_DL_TX, (u8 *)tx_cmd->hdr,
625 ieee80211_hdrlen(fc)); 613 ieee80211_hdrlen(fc));
626 614
627 /* 615 /*
@@ -757,7 +745,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
757 u8 type) 745 u8 type)
758{ 746{
759 struct iwl_spectrum_cmd spectrum; 747 struct iwl_spectrum_cmd spectrum;
760 struct iwl_rx_packet *res; 748 struct iwl_rx_packet *pkt;
761 struct iwl_host_cmd cmd = { 749 struct iwl_host_cmd cmd = {
762 .id = REPLY_SPECTRUM_MEASUREMENT_CMD, 750 .id = REPLY_SPECTRUM_MEASUREMENT_CMD,
763 .data = (void *)&spectrum, 751 .data = (void *)&spectrum,
@@ -802,18 +790,18 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
802 if (rc) 790 if (rc)
803 return rc; 791 return rc;
804 792
805 res = (struct iwl_rx_packet *)cmd.reply_skb->data; 793 pkt = (struct iwl_rx_packet *)cmd.reply_page;
806 if (res->hdr.flags & IWL_CMD_FAILED_MSK) { 794 if (pkt->hdr.flags & IWL_CMD_FAILED_MSK) {
807 IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n"); 795 IWL_ERR(priv, "Bad return from REPLY_RX_ON_ASSOC command\n");
808 rc = -EIO; 796 rc = -EIO;
809 } 797 }
810 798
811 spectrum_resp_status = le16_to_cpu(res->u.spectrum.status); 799 spectrum_resp_status = le16_to_cpu(pkt->u.spectrum.status);
812 switch (spectrum_resp_status) { 800 switch (spectrum_resp_status) {
813 case 0: /* Command will be handled */ 801 case 0: /* Command will be handled */
814 if (res->u.spectrum.id != 0xff) { 802 if (pkt->u.spectrum.id != 0xff) {
815 IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n", 803 IWL_DEBUG_INFO(priv, "Replaced existing measurement: %d\n",
816 res->u.spectrum.id); 804 pkt->u.spectrum.id);
817 priv->measurement_status &= ~MEASUREMENT_READY; 805 priv->measurement_status &= ~MEASUREMENT_READY;
818 } 806 }
819 priv->measurement_status |= MEASUREMENT_ACTIVE; 807 priv->measurement_status |= MEASUREMENT_ACTIVE;
@@ -825,7 +813,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
825 break; 813 break;
826 } 814 }
827 815
828 dev_kfree_skb_any(cmd.reply_skb); 816 free_pages(cmd.reply_page, priv->hw_params.rx_page_order);
829 817
830 return rc; 818 return rc;
831} 819}
@@ -834,7 +822,7 @@ static int iwl3945_get_measurement(struct iwl_priv *priv,
834static void iwl3945_rx_reply_alive(struct iwl_priv *priv, 822static void iwl3945_rx_reply_alive(struct iwl_priv *priv,
835 struct iwl_rx_mem_buffer *rxb) 823 struct iwl_rx_mem_buffer *rxb)
836{ 824{
837 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 825 struct iwl_rx_packet *pkt = rxb_addr(rxb);
838 struct iwl_alive_resp *palive; 826 struct iwl_alive_resp *palive;
839 struct delayed_work *pwork; 827 struct delayed_work *pwork;
840 828
@@ -871,7 +859,7 @@ static void iwl3945_rx_reply_add_sta(struct iwl_priv *priv,
871 struct iwl_rx_mem_buffer *rxb) 859 struct iwl_rx_mem_buffer *rxb)
872{ 860{
873#ifdef CONFIG_IWLWIFI_DEBUG 861#ifdef CONFIG_IWLWIFI_DEBUG
874 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 862 struct iwl_rx_packet *pkt = rxb_addr(rxb);
875#endif 863#endif
876 864
877 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status); 865 IWL_DEBUG_RX(priv, "Received REPLY_ADD_STA: 0x%02X\n", pkt->u.status);
@@ -907,7 +895,7 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
907 struct iwl_rx_mem_buffer *rxb) 895 struct iwl_rx_mem_buffer *rxb)
908{ 896{
909#ifdef CONFIG_IWLWIFI_DEBUG 897#ifdef CONFIG_IWLWIFI_DEBUG
910 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 898 struct iwl_rx_packet *pkt = rxb_addr(rxb);
911 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status); 899 struct iwl3945_beacon_notif *beacon = &(pkt->u.beacon_status);
912 u8 rate = beacon->beacon_notify_hdr.rate; 900 u8 rate = beacon->beacon_notify_hdr.rate;
913 901
@@ -930,7 +918,7 @@ static void iwl3945_rx_beacon_notif(struct iwl_priv *priv,
930static void iwl3945_rx_card_state_notif(struct iwl_priv *priv, 918static void iwl3945_rx_card_state_notif(struct iwl_priv *priv,
931 struct iwl_rx_mem_buffer *rxb) 919 struct iwl_rx_mem_buffer *rxb)
932{ 920{
933 struct iwl_rx_packet *pkt = (void *)rxb->skb->data; 921 struct iwl_rx_packet *pkt = rxb_addr(rxb);
934 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags); 922 u32 flags = le32_to_cpu(pkt->u.card_state_notif.flags);
935 unsigned long status = priv->status; 923 unsigned long status = priv->status;
936 924
@@ -1094,7 +1082,7 @@ static int iwl3945_rx_queue_restock(struct iwl_priv *priv)
1094 list_del(element); 1082 list_del(element);
1095 1083
1096 /* Point to Rx buffer via next RBD in circular buffer */ 1084 /* Point to Rx buffer via next RBD in circular buffer */
1097 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->real_dma_addr); 1085 rxq->bd[rxq->write] = iwl3945_dma_addr2rbd_ptr(priv, rxb->page_dma);
1098 rxq->queue[rxq->write] = rxb; 1086 rxq->queue[rxq->write] = rxb;
1099 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK; 1087 rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
1100 rxq->free_count--; 1088 rxq->free_count--;
@@ -1134,8 +1122,9 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1134 struct iwl_rx_queue *rxq = &priv->rxq; 1122 struct iwl_rx_queue *rxq = &priv->rxq;
1135 struct list_head *element; 1123 struct list_head *element;
1136 struct iwl_rx_mem_buffer *rxb; 1124 struct iwl_rx_mem_buffer *rxb;
1137 struct sk_buff *skb; 1125 struct page *page;
1138 unsigned long flags; 1126 unsigned long flags;
1127 gfp_t gfp_mask = priority;
1139 1128
1140 while (1) { 1129 while (1) {
1141 spin_lock_irqsave(&rxq->lock, flags); 1130 spin_lock_irqsave(&rxq->lock, flags);
@@ -1147,10 +1136,14 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1147 spin_unlock_irqrestore(&rxq->lock, flags); 1136 spin_unlock_irqrestore(&rxq->lock, flags);
1148 1137
1149 if (rxq->free_count > RX_LOW_WATERMARK) 1138 if (rxq->free_count > RX_LOW_WATERMARK)
1150 priority |= __GFP_NOWARN; 1139 gfp_mask |= __GFP_NOWARN;
1140
1141 if (priv->hw_params.rx_page_order > 0)
1142 gfp_mask |= __GFP_COMP;
1143
1151 /* Alloc a new receive buffer */ 1144 /* Alloc a new receive buffer */
1152 skb = alloc_skb(priv->hw_params.rx_buf_size, priority); 1145 page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
1153 if (!skb) { 1146 if (!page) {
1154 if (net_ratelimit()) 1147 if (net_ratelimit())
1155 IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n"); 1148 IWL_DEBUG_INFO(priv, "Failed to allocate SKB buffer.\n");
1156 if ((rxq->free_count <= RX_LOW_WATERMARK) && 1149 if ((rxq->free_count <= RX_LOW_WATERMARK) &&
@@ -1167,7 +1160,7 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1167 spin_lock_irqsave(&rxq->lock, flags); 1160 spin_lock_irqsave(&rxq->lock, flags);
1168 if (list_empty(&rxq->rx_used)) { 1161 if (list_empty(&rxq->rx_used)) {
1169 spin_unlock_irqrestore(&rxq->lock, flags); 1162 spin_unlock_irqrestore(&rxq->lock, flags);
1170 dev_kfree_skb_any(skb); 1163 __free_pages(page, priv->hw_params.rx_page_order);
1171 return; 1164 return;
1172 } 1165 }
1173 element = rxq->rx_used.next; 1166 element = rxq->rx_used.next;
@@ -1175,26 +1168,18 @@ static void iwl3945_rx_allocate(struct iwl_priv *priv, gfp_t priority)
1175 list_del(element); 1168 list_del(element);
1176 spin_unlock_irqrestore(&rxq->lock, flags); 1169 spin_unlock_irqrestore(&rxq->lock, flags);
1177 1170
1178 rxb->skb = skb; 1171 rxb->page = page;
1179
1180 /* If radiotap head is required, reserve some headroom here.
1181 * The physical head count is a variable rx_stats->phy_count.
1182 * We reserve 4 bytes here. Plus these extra bytes, the
1183 * headroom of the physical head should be enough for the
1184 * radiotap head that iwl3945 supported. See iwl3945_rt.
1185 */
1186 skb_reserve(rxb->skb, 4);
1187
1188 /* Get physical address of RB/SKB */ 1172 /* Get physical address of RB/SKB */
1189 rxb->real_dma_addr = pci_map_single(priv->pci_dev, 1173 rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
1190 rxb->skb->data, 1174 PAGE_SIZE << priv->hw_params.rx_page_order,
1191 priv->hw_params.rx_buf_size, 1175 PCI_DMA_FROMDEVICE);
1192 PCI_DMA_FROMDEVICE);
1193 1176
1194 spin_lock_irqsave(&rxq->lock, flags); 1177 spin_lock_irqsave(&rxq->lock, flags);
1178
1195 list_add_tail(&rxb->list, &rxq->rx_free); 1179 list_add_tail(&rxb->list, &rxq->rx_free);
1196 priv->alloc_rxb_skb++;
1197 rxq->free_count++; 1180 rxq->free_count++;
1181 priv->alloc_rxb_page++;
1182
1198 spin_unlock_irqrestore(&rxq->lock, flags); 1183 spin_unlock_irqrestore(&rxq->lock, flags);
1199 } 1184 }
1200} 1185}
@@ -1210,14 +1195,14 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1210 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) { 1195 for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
1211 /* In the reset function, these buffers may have been allocated 1196 /* In the reset function, these buffers may have been allocated
1212 * to an SKB, so we need to unmap and free potential storage */ 1197 * to an SKB, so we need to unmap and free potential storage */
1213 if (rxq->pool[i].skb != NULL) { 1198 if (rxq->pool[i].page != NULL) {
1214 pci_unmap_single(priv->pci_dev, 1199 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1215 rxq->pool[i].real_dma_addr, 1200 PAGE_SIZE << priv->hw_params.rx_page_order,
1216 priv->hw_params.rx_buf_size, 1201 PCI_DMA_FROMDEVICE);
1217 PCI_DMA_FROMDEVICE); 1202 priv->alloc_rxb_page--;
1218 priv->alloc_rxb_skb--; 1203 __free_pages(rxq->pool[i].page,
1219 dev_kfree_skb(rxq->pool[i].skb); 1204 priv->hw_params.rx_page_order);
1220 rxq->pool[i].skb = NULL; 1205 rxq->pool[i].page = NULL;
1221 } 1206 }
1222 list_add_tail(&rxq->pool[i].list, &rxq->rx_used); 1207 list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
1223 } 1208 }
@@ -1225,8 +1210,8 @@ void iwl3945_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
1225 /* Set us so that we have processed and used all buffers, but have 1210 /* Set us so that we have processed and used all buffers, but have
1226 * not restocked the Rx queue with fresh buffers */ 1211 * not restocked the Rx queue with fresh buffers */
1227 rxq->read = rxq->write = 0; 1212 rxq->read = rxq->write = 0;
1228 rxq->free_count = 0;
1229 rxq->write_actual = 0; 1213 rxq->write_actual = 0;
1214 rxq->free_count = 0;
1230 spin_unlock_irqrestore(&rxq->lock, flags); 1215 spin_unlock_irqrestore(&rxq->lock, flags);
1231} 1216}
1232 1217
@@ -1259,12 +1244,14 @@ static void iwl3945_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rx
1259{ 1244{
1260 int i; 1245 int i;
1261 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) { 1246 for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
1262 if (rxq->pool[i].skb != NULL) { 1247 if (rxq->pool[i].page != NULL) {
1263 pci_unmap_single(priv->pci_dev, 1248 pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
1264 rxq->pool[i].real_dma_addr, 1249 PAGE_SIZE << priv->hw_params.rx_page_order,
1265 priv->hw_params.rx_buf_size, 1250 PCI_DMA_FROMDEVICE);
1266 PCI_DMA_FROMDEVICE); 1251 __free_pages(rxq->pool[i].page,
1267 dev_kfree_skb(rxq->pool[i].skb); 1252 priv->hw_params.rx_page_order);
1253 rxq->pool[i].page = NULL;
1254 priv->alloc_rxb_page--;
1268 } 1255 }
1269 } 1256 }
1270 1257
@@ -1380,7 +1367,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1380 i = rxq->read; 1367 i = rxq->read;
1381 1368
1382 /* calculate total frames need to be restock after handling RX */ 1369 /* calculate total frames need to be restock after handling RX */
1383 total_empty = r - priv->rxq.write_actual; 1370 total_empty = r - rxq->write_actual;
1384 if (total_empty < 0) 1371 if (total_empty < 0)
1385 total_empty += RX_QUEUE_SIZE; 1372 total_empty += RX_QUEUE_SIZE;
1386 1373
@@ -1400,10 +1387,13 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1400 1387
1401 rxq->queue[i] = NULL; 1388 rxq->queue[i] = NULL;
1402 1389
1403 pci_unmap_single(priv->pci_dev, rxb->real_dma_addr, 1390 pci_unmap_page(priv->pci_dev, rxb->page_dma,
1404 priv->hw_params.rx_buf_size, 1391 PAGE_SIZE << priv->hw_params.rx_page_order,
1405 PCI_DMA_FROMDEVICE); 1392 PCI_DMA_FROMDEVICE);
1406 pkt = (struct iwl_rx_packet *)rxb->skb->data; 1393 pkt = rxb_addr(rxb);
1394
1395 trace_iwlwifi_dev_rx(priv, pkt,
1396 le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_FRAME_SIZE_MSK);
1407 1397
1408 /* Reclaim a command buffer only if this packet is a response 1398 /* Reclaim a command buffer only if this packet is a response
1409 * to a (driver-originated) command. 1399 * to a (driver-originated) command.
@@ -1421,44 +1411,55 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1421 if (priv->rx_handlers[pkt->hdr.cmd]) { 1411 if (priv->rx_handlers[pkt->hdr.cmd]) {
1422 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i, 1412 IWL_DEBUG_RX(priv, "r = %d, i = %d, %s, 0x%02x\n", r, i,
1423 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd); 1413 get_cmd_string(pkt->hdr.cmd), pkt->hdr.cmd);
1424 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
1425 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++; 1414 priv->isr_stats.rx_handlers[pkt->hdr.cmd]++;
1415 priv->rx_handlers[pkt->hdr.cmd] (priv, rxb);
1426 } else { 1416 } else {
1427 /* No handling needed */ 1417 /* No handling needed */
1428 IWL_DEBUG_RX(priv, "r %d i %d No handler needed for %s, 0x%02x\n", 1418 IWL_DEBUG_RX(priv,
1419 "r %d i %d No handler needed for %s, 0x%02x\n",
1429 r, i, get_cmd_string(pkt->hdr.cmd), 1420 r, i, get_cmd_string(pkt->hdr.cmd),
1430 pkt->hdr.cmd); 1421 pkt->hdr.cmd);
1431 } 1422 }
1432 1423
1424 /*
1425 * XXX: After here, we should always check rxb->page
1426 * against NULL before touching it or its virtual
1427 * memory (pkt). Because some rx_handler might have
1428 * already taken or freed the pages.
1429 */
1430
1433 if (reclaim) { 1431 if (reclaim) {
1434 /* Invoke any callbacks, transfer the skb to caller, and 1432 /* Invoke any callbacks, transfer the buffer to caller,
1435 * fire off the (possibly) blocking iwl_send_cmd() 1433 * and fire off the (possibly) blocking iwl_send_cmd()
1436 * as we reclaim the driver command queue */ 1434 * as we reclaim the driver command queue */
1437 if (rxb && rxb->skb) 1435 if (rxb->page)
1438 iwl_tx_cmd_complete(priv, rxb); 1436 iwl_tx_cmd_complete(priv, rxb);
1439 else 1437 else
1440 IWL_WARN(priv, "Claim null rxb?\n"); 1438 IWL_WARN(priv, "Claim null rxb?\n");
1441 } 1439 }
1442 1440
1443 /* For now we just don't re-use anything. We can tweak this 1441 /* Reuse the page if possible. For notification packets and
1444 * later to try and re-use notification packets and SKBs that 1442 * SKBs that fail to Rx correctly, add them back into the
1445 * fail to Rx correctly */ 1443 * rx_free list for reuse later. */
1446 if (rxb->skb != NULL) {
1447 priv->alloc_rxb_skb--;
1448 dev_kfree_skb_any(rxb->skb);
1449 rxb->skb = NULL;
1450 }
1451
1452 spin_lock_irqsave(&rxq->lock, flags); 1444 spin_lock_irqsave(&rxq->lock, flags);
1453 list_add_tail(&rxb->list, &priv->rxq.rx_used); 1445 if (rxb->page != NULL) {
1446 rxb->page_dma = pci_map_page(priv->pci_dev, rxb->page,
1447 0, PAGE_SIZE << priv->hw_params.rx_page_order,
1448 PCI_DMA_FROMDEVICE);
1449 list_add_tail(&rxb->list, &rxq->rx_free);
1450 rxq->free_count++;
1451 } else
1452 list_add_tail(&rxb->list, &rxq->rx_used);
1453
1454 spin_unlock_irqrestore(&rxq->lock, flags); 1454 spin_unlock_irqrestore(&rxq->lock, flags);
1455
1455 i = (i + 1) & RX_QUEUE_MASK; 1456 i = (i + 1) & RX_QUEUE_MASK;
1456 /* If there are a lot of unused frames, 1457 /* If there are a lot of unused frames,
1457 * restock the Rx queue so ucode won't assert. */ 1458 * restock the Rx queue so ucode won't assert. */
1458 if (fill_rx) { 1459 if (fill_rx) {
1459 count++; 1460 count++;
1460 if (count >= 8) { 1461 if (count >= 8) {
1461 priv->rxq.read = i; 1462 rxq->read = i;
1462 iwl3945_rx_replenish_now(priv); 1463 iwl3945_rx_replenish_now(priv);
1463 count = 0; 1464 count = 0;
1464 } 1465 }
@@ -1466,7 +1467,7 @@ static void iwl3945_rx_handle(struct iwl_priv *priv)
1466 } 1467 }
1467 1468
1468 /* Backtrack one entry */ 1469 /* Backtrack one entry */
1469 priv->rxq.read = i; 1470 rxq->read = i;
1470 if (fill_rx) 1471 if (fill_rx)
1471 iwl3945_rx_replenish_now(priv); 1472 iwl3945_rx_replenish_now(priv);
1472 else 1473 else
@@ -1550,8 +1551,9 @@ void iwl3945_dump_nic_error_log(struct iwl_priv *priv)
1550 "%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n", 1551 "%-13s (#%d) %010u 0x%05X 0x%05X 0x%05X 0x%05X %u\n\n",
1551 desc_lookup(desc), desc, time, blink1, blink2, 1552 desc_lookup(desc), desc, time, blink1, blink2,
1552 ilink1, ilink2, data1); 1553 ilink1, ilink2, data1);
1554 trace_iwlwifi_dev_ucode_error(priv, desc, time, data1, 0,
1555 0, blink1, blink2, ilink1, ilink2);
1553 } 1556 }
1554
1555} 1557}
1556 1558
1557#define EVENT_START_OFFSET (6 * sizeof(u32)) 1559#define EVENT_START_OFFSET (6 * sizeof(u32))
@@ -1591,10 +1593,12 @@ static void iwl3945_print_event_log(struct iwl_priv *priv, u32 start_idx,
1591 if (mode == 0) { 1593 if (mode == 0) {
1592 /* data, ev */ 1594 /* data, ev */
1593 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev); 1595 IWL_ERR(priv, "0x%08x\t%04u\n", time, ev);
1596 trace_iwlwifi_dev_ucode_event(priv, 0, time, ev);
1594 } else { 1597 } else {
1595 data = iwl_read_targ_mem(priv, ptr); 1598 data = iwl_read_targ_mem(priv, ptr);
1596 ptr += sizeof(u32); 1599 ptr += sizeof(u32);
1597 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", time, data, ev); 1600 IWL_ERR(priv, "%010u\t0x%08x\t%04u\n", time, data, ev);
1601 trace_iwlwifi_dev_ucode_event(priv, time, data, ev);
1598 } 1602 }
1599 } 1603 }
1600} 1604}
@@ -1684,6 +1688,8 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1684 } 1688 }
1685#endif 1689#endif
1686 1690
1691 spin_unlock_irqrestore(&priv->lock, flags);
1692
1687 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not 1693 /* Since CSR_INT and CSR_FH_INT_STATUS reads and clears are not
1688 * atomic, make sure that inta covers all the interrupts that 1694 * atomic, make sure that inta covers all the interrupts that
1689 * we've discovered, even if FH interrupt came in just after 1695 * we've discovered, even if FH interrupt came in just after
@@ -1705,8 +1711,6 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1705 1711
1706 handled |= CSR_INT_BIT_HW_ERR; 1712 handled |= CSR_INT_BIT_HW_ERR;
1707 1713
1708 spin_unlock_irqrestore(&priv->lock, flags);
1709
1710 return; 1714 return;
1711 } 1715 }
1712 1716
@@ -1798,7 +1802,6 @@ static void iwl3945_irq_tasklet(struct iwl_priv *priv)
1798 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags); 1802 "flags 0x%08lx\n", inta, inta_mask, inta_fh, flags);
1799 } 1803 }
1800#endif 1804#endif
1801 spin_unlock_irqrestore(&priv->lock, flags);
1802} 1805}
1803 1806
1804static int iwl3945_get_channels_for_scan(struct iwl_priv *priv, 1807static int iwl3945_get_channels_for_scan(struct iwl_priv *priv,
@@ -2157,6 +2160,14 @@ static int iwl3945_read_ucode(struct iwl_priv *priv)
2157 IWL_UCODE_API(priv->ucode_ver), 2160 IWL_UCODE_API(priv->ucode_ver),
2158 IWL_UCODE_SERIAL(priv->ucode_ver)); 2161 IWL_UCODE_SERIAL(priv->ucode_ver));
2159 2162
2163 snprintf(priv->hw->wiphy->fw_version,
2164 sizeof(priv->hw->wiphy->fw_version),
2165 "%u.%u.%u.%u",
2166 IWL_UCODE_MAJOR(priv->ucode_ver),
2167 IWL_UCODE_MINOR(priv->ucode_ver),
2168 IWL_UCODE_API(priv->ucode_ver),
2169 IWL_UCODE_SERIAL(priv->ucode_ver));
2170
2160 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n", 2171 IWL_DEBUG_INFO(priv, "f/w package hdr ucode version raw = 0x%x\n",
2161 priv->ucode_ver); 2172 priv->ucode_ver);
2162 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n", 2173 IWL_DEBUG_INFO(priv, "f/w package hdr runtime inst size = %u\n",
@@ -2478,7 +2489,7 @@ static void iwl3945_alive_start(struct iwl_priv *priv)
2478 2489
2479 iwl3945_reg_txpower_periodic(priv); 2490 iwl3945_reg_txpower_periodic(priv);
2480 2491
2481 iwl3945_led_register(priv); 2492 iwl_leds_init(priv);
2482 2493
2483 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n"); 2494 IWL_DEBUG_INFO(priv, "ALIVE processing complete.\n");
2484 set_bit(STATUS_READY, &priv->status); 2495 set_bit(STATUS_READY, &priv->status);
@@ -2516,7 +2527,6 @@ static void __iwl3945_down(struct iwl_priv *priv)
2516 if (!exit_pending) 2527 if (!exit_pending)
2517 set_bit(STATUS_EXIT_PENDING, &priv->status); 2528 set_bit(STATUS_EXIT_PENDING, &priv->status);
2518 2529
2519 iwl3945_led_unregister(priv);
2520 iwl_clear_stations_table(priv); 2530 iwl_clear_stations_table(priv);
2521 2531
2522 /* Unblock any waiting calls */ 2532 /* Unblock any waiting calls */
@@ -2562,11 +2572,6 @@ static void __iwl3945_down(struct iwl_priv *priv)
2562 test_bit(STATUS_EXIT_PENDING, &priv->status) << 2572 test_bit(STATUS_EXIT_PENDING, &priv->status) <<
2563 STATUS_EXIT_PENDING; 2573 STATUS_EXIT_PENDING;
2564 2574
2565 priv->cfg->ops->lib->apm_ops.reset(priv);
2566 spin_lock_irqsave(&priv->lock, flags);
2567 iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2568 spin_unlock_irqrestore(&priv->lock, flags);
2569
2570 iwl3945_hw_txq_ctx_stop(priv); 2575 iwl3945_hw_txq_ctx_stop(priv);
2571 iwl3945_hw_rxq_stop(priv); 2576 iwl3945_hw_rxq_stop(priv);
2572 2577
@@ -2575,10 +2580,8 @@ static void __iwl3945_down(struct iwl_priv *priv)
2575 2580
2576 udelay(5); 2581 udelay(5);
2577 2582
2578 if (exit_pending) 2583 /* Stop the device, and put it in low power state */
2579 priv->cfg->ops->lib->apm_ops.stop(priv); 2584 priv->cfg->ops->lib->apm_ops.stop(priv);
2580 else
2581 priv->cfg->ops->lib->apm_ops.reset(priv);
2582 2585
2583 exit: 2586 exit:
2584 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp)); 2587 memset(&priv->card_alive, 0, sizeof(struct iwl_alive_resp));
@@ -2723,19 +2726,34 @@ static void iwl3945_bg_alive_start(struct work_struct *data)
2723 mutex_unlock(&priv->mutex); 2726 mutex_unlock(&priv->mutex);
2724} 2727}
2725 2728
2729/*
2730 * 3945 cannot interrupt driver when hardware rf kill switch toggles;
2731 * driver must poll CSR_GP_CNTRL_REG register for change. This register
2732 * *is* readable even when device has been SW_RESET into low power mode
2733 * (e.g. during RF KILL).
2734 */
2726static void iwl3945_rfkill_poll(struct work_struct *data) 2735static void iwl3945_rfkill_poll(struct work_struct *data)
2727{ 2736{
2728 struct iwl_priv *priv = 2737 struct iwl_priv *priv =
2729 container_of(data, struct iwl_priv, rfkill_poll.work); 2738 container_of(data, struct iwl_priv, rfkill_poll.work);
2739 bool old_rfkill = test_bit(STATUS_RF_KILL_HW, &priv->status);
2740 bool new_rfkill = !(iwl_read32(priv, CSR_GP_CNTRL)
2741 & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
2730 2742
2731 if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW) 2743 if (new_rfkill != old_rfkill) {
2732 clear_bit(STATUS_RF_KILL_HW, &priv->status); 2744 if (new_rfkill)
2733 else 2745 set_bit(STATUS_RF_KILL_HW, &priv->status);
2734 set_bit(STATUS_RF_KILL_HW, &priv->status); 2746 else
2747 clear_bit(STATUS_RF_KILL_HW, &priv->status);
2735 2748
2736 wiphy_rfkill_set_hw_state(priv->hw->wiphy, 2749 wiphy_rfkill_set_hw_state(priv->hw->wiphy, new_rfkill);
2737 test_bit(STATUS_RF_KILL_HW, &priv->status));
2738 2750
2751 IWL_DEBUG_RF_KILL(priv, "RF_KILL bit toggled to %s.\n",
2752 new_rfkill ? "disable radio" : "enable radio");
2753 }
2754
2755 /* Keep this running, even if radio now enabled. This will be
2756 * cancelled in mac_start() if system decides to start again */
2739 queue_delayed_work(priv->workqueue, &priv->rfkill_poll, 2757 queue_delayed_work(priv->workqueue, &priv->rfkill_poll,
2740 round_jiffies_relative(2 * HZ)); 2758 round_jiffies_relative(2 * HZ));
2741 2759
@@ -3151,6 +3169,8 @@ static int iwl3945_mac_start(struct ieee80211_hw *hw)
3151 * no need to poll the killswitch state anymore */ 3169 * no need to poll the killswitch state anymore */
3152 cancel_delayed_work(&priv->rfkill_poll); 3170 cancel_delayed_work(&priv->rfkill_poll);
3153 3171
3172 iwl_led_start(priv);
3173
3154 priv->is_open = 1; 3174 priv->is_open = 1;
3155 IWL_DEBUG_MAC80211(priv, "leave\n"); 3175 IWL_DEBUG_MAC80211(priv, "leave\n");
3156 return 0; 3176 return 0;
@@ -3794,7 +3814,6 @@ static int iwl3945_init_drv(struct iwl_priv *priv)
3794 /* Clear the driver's (not device's) station table */ 3814 /* Clear the driver's (not device's) station table */
3795 iwl_clear_stations_table(priv); 3815 iwl_clear_stations_table(priv);
3796 3816
3797 priv->data_retry_limit = -1;
3798 priv->ieee_channels = NULL; 3817 priv->ieee_channels = NULL;
3799 priv->ieee_rates = NULL; 3818 priv->ieee_rates = NULL;
3800 priv->band = IEEE80211_BAND_2GHZ; 3819 priv->band = IEEE80211_BAND_2GHZ;
@@ -3981,13 +4000,6 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
3981 */ 4000 */
3982 spin_lock_init(&priv->reg_lock); 4001 spin_lock_init(&priv->reg_lock);
3983 4002
3984 /* amp init */
3985 err = priv->cfg->ops->lib->apm_ops.init(priv);
3986 if (err < 0) {
3987 IWL_DEBUG_INFO(priv, "Failed to init the card\n");
3988 goto out_iounmap;
3989 }
3990
3991 /*********************** 4003 /***********************
3992 * 4. Read EEPROM 4004 * 4. Read EEPROM
3993 * ********************/ 4005 * ********************/
@@ -4053,6 +4065,7 @@ static int iwl3945_pci_probe(struct pci_dev *pdev, const struct pci_device_id *e
4053 &priv->bands[IEEE80211_BAND_2GHZ].channels[5]); 4065 &priv->bands[IEEE80211_BAND_2GHZ].channels[5]);
4054 iwl3945_setup_deferred_work(priv); 4066 iwl3945_setup_deferred_work(priv);
4055 iwl3945_setup_rx_handlers(priv); 4067 iwl3945_setup_rx_handlers(priv);
4068 iwl_power_initialize(priv);
4056 4069
4057 /********************************* 4070 /*********************************
4058 * 8. Setup and Register mac80211 4071 * 8. Setup and Register mac80211
@@ -4123,6 +4136,15 @@ static void __devexit iwl3945_pci_remove(struct pci_dev *pdev)
4123 iwl3945_down(priv); 4136 iwl3945_down(priv);
4124 } 4137 }
4125 4138
4139 /*
4140 * Make sure device is reset to low power before unloading driver.
4141 * This may be redundant with iwl_down(), but there are paths to
4142 * run iwl_down() without calling apm_ops.stop(), and there are
4143 * paths to avoid running iwl_down() at all before leaving driver.
4144 * This (inexpensive) call *makes sure* device is reset.
4145 */
4146 priv->cfg->ops->lib->apm_ops.stop(priv);
4147
4126 /* make sure we flush any pending irq or 4148 /* make sure we flush any pending irq or
4127 * tasklet for the driver 4149 * tasklet for the driver
4128 */ 4150 */
@@ -4225,18 +4247,19 @@ static void __exit iwl3945_exit(void)
4225 4247
4226MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX)); 4248MODULE_FIRMWARE(IWL3945_MODULE_FIRMWARE(IWL3945_UCODE_API_MAX));
4227 4249
4228module_param_named(antenna, iwl3945_mod_params.antenna, int, 0444); 4250module_param_named(antenna, iwl3945_mod_params.antenna, int, S_IRUGO);
4229MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])"); 4251MODULE_PARM_DESC(antenna, "select antenna (1=Main, 2=Aux, default 0 [both])");
4230module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, 0444); 4252module_param_named(swcrypto, iwl3945_mod_params.sw_crypto, int, S_IRUGO);
4231MODULE_PARM_DESC(swcrypto, 4253MODULE_PARM_DESC(swcrypto,
4232 "using software crypto (default 1 [software])\n"); 4254 "using software crypto (default 1 [software])\n");
4233#ifdef CONFIG_IWLWIFI_DEBUG 4255#ifdef CONFIG_IWLWIFI_DEBUG
4234module_param_named(debug, iwl_debug_level, uint, 0644); 4256module_param_named(debug, iwl_debug_level, uint, S_IRUGO | S_IWUSR);
4235MODULE_PARM_DESC(debug, "debug output mask"); 4257MODULE_PARM_DESC(debug, "debug output mask");
4236#endif 4258#endif
4237module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan, int, 0444); 4259module_param_named(disable_hw_scan, iwl3945_mod_params.disable_hw_scan,
4260 int, S_IRUGO);
4238MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)"); 4261MODULE_PARM_DESC(disable_hw_scan, "disable hardware scanning (default 0)");
4239module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, 0444); 4262module_param_named(fw_restart3945, iwl3945_mod_params.restart_fw, int, S_IRUGO);
4240MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error"); 4263MODULE_PARM_DESC(fw_restart3945, "restart firmware in case of error");
4241 4264
4242module_exit(iwl3945_exit); 4265module_exit(iwl3945_exit);
diff --git a/drivers/net/wireless/iwmc3200wifi/Kconfig b/drivers/net/wireless/iwmc3200wifi/Kconfig
index c25a04371ca8..b9d34a766964 100644
--- a/drivers/net/wireless/iwmc3200wifi/Kconfig
+++ b/drivers/net/wireless/iwmc3200wifi/Kconfig
@@ -1,8 +1,9 @@
1config IWM 1config IWM
2 tristate "Intel Wireless Multicomm 3200 WiFi driver" 2 tristate "Intel Wireless Multicomm 3200 WiFi driver"
3 depends on MMC && WLAN_80211 && EXPERIMENTAL 3 depends on MMC && EXPERIMENTAL
4 depends on CFG80211 4 depends on CFG80211
5 select FW_LOADER 5 select FW_LOADER
6 select IWMC3200TOP
6 help 7 help
7 The Intel Wireless Multicomm 3200 hardware is a combo 8 The Intel Wireless Multicomm 3200 hardware is a combo
8 card with GPS, Bluetooth, WiMax and 802.11 radios. It 9 card with GPS, Bluetooth, WiMax and 802.11 radios. It
diff --git a/drivers/net/wireless/iwmc3200wifi/cfg80211.c b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
index a56a2b0ac99a..af72cc746f15 100644
--- a/drivers/net/wireless/iwmc3200wifi/cfg80211.c
+++ b/drivers/net/wireless/iwmc3200wifi/cfg80211.c
@@ -404,39 +404,21 @@ static int iwm_cfg80211_join_ibss(struct wiphy *wiphy, struct net_device *dev,
404{ 404{
405 struct iwm_priv *iwm = wiphy_to_iwm(wiphy); 405 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
406 struct ieee80211_channel *chan = params->channel; 406 struct ieee80211_channel *chan = params->channel;
407 struct cfg80211_bss *bss;
408 407
409 if (!test_bit(IWM_STATUS_READY, &iwm->status)) 408 if (!test_bit(IWM_STATUS_READY, &iwm->status))
410 return -EIO; 409 return -EIO;
411 410
412 /* UMAC doesn't support creating IBSS network with specified bssid. 411 /* UMAC doesn't support creating or joining an IBSS network
413 * This should be removed after we have join only mode supported. */ 412 * with specified bssid. */
414 if (params->bssid) 413 if (params->bssid)
415 return -EOPNOTSUPP; 414 return -EOPNOTSUPP;
416 415
417 bss = cfg80211_get_ibss(iwm_to_wiphy(iwm), NULL,
418 params->ssid, params->ssid_len);
419 if (!bss) {
420 iwm_scan_one_ssid(iwm, params->ssid, params->ssid_len);
421 schedule_timeout_interruptible(2 * HZ);
422 bss = cfg80211_get_ibss(iwm_to_wiphy(iwm), NULL,
423 params->ssid, params->ssid_len);
424 }
425 /* IBSS join only mode is not supported by UMAC ATM */
426 if (bss) {
427 cfg80211_put_bss(bss);
428 return -EOPNOTSUPP;
429 }
430
431 iwm->channel = ieee80211_frequency_to_channel(chan->center_freq); 416 iwm->channel = ieee80211_frequency_to_channel(chan->center_freq);
432 iwm->umac_profile->ibss.band = chan->band; 417 iwm->umac_profile->ibss.band = chan->band;
433 iwm->umac_profile->ibss.channel = iwm->channel; 418 iwm->umac_profile->ibss.channel = iwm->channel;
434 iwm->umac_profile->ssid.ssid_len = params->ssid_len; 419 iwm->umac_profile->ssid.ssid_len = params->ssid_len;
435 memcpy(iwm->umac_profile->ssid.ssid, params->ssid, params->ssid_len); 420 memcpy(iwm->umac_profile->ssid.ssid, params->ssid, params->ssid_len);
436 421
437 if (params->bssid)
438 memcpy(&iwm->umac_profile->bssid[0], params->bssid, ETH_ALEN);
439
440 return iwm_send_mlme_profile(iwm); 422 return iwm_send_mlme_profile(iwm);
441} 423}
442 424
@@ -489,12 +471,12 @@ static int iwm_set_wpa_version(struct iwm_priv *iwm, u32 wpa_version)
489 return 0; 471 return 0;
490 } 472 }
491 473
474 if (wpa_version & NL80211_WPA_VERSION_1)
475 iwm->umac_profile->sec.flags = UMAC_SEC_FLG_WPA_ON_MSK;
476
492 if (wpa_version & NL80211_WPA_VERSION_2) 477 if (wpa_version & NL80211_WPA_VERSION_2)
493 iwm->umac_profile->sec.flags = UMAC_SEC_FLG_RSNA_ON_MSK; 478 iwm->umac_profile->sec.flags = UMAC_SEC_FLG_RSNA_ON_MSK;
494 479
495 if (wpa_version & NL80211_WPA_VERSION_1)
496 iwm->umac_profile->sec.flags |= UMAC_SEC_FLG_WPA_ON_MSK;
497
498 return 0; 480 return 0;
499} 481}
500 482
@@ -645,6 +627,13 @@ static int iwm_cfg80211_connect(struct wiphy *wiphy, struct net_device *dev,
645 iwm->default_key = sme->key_idx; 627 iwm->default_key = sme->key_idx;
646 } 628 }
647 629
630 /* WPA and open AUTH type from wpa_s means WPS (a.k.a. WSC) */
631 if ((iwm->umac_profile->sec.flags &
632 (UMAC_SEC_FLG_WPA_ON_MSK | UMAC_SEC_FLG_RSNA_ON_MSK)) &&
633 iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_OPEN) {
634 iwm->umac_profile->sec.flags = UMAC_SEC_FLG_WSC_ON_MSK;
635 }
636
648 ret = iwm_send_mlme_profile(iwm); 637 ret = iwm_send_mlme_profile(iwm);
649 638
650 if (iwm->umac_profile->sec.auth_type != UMAC_AUTH_TYPE_LEGACY_PSK || 639 if (iwm->umac_profile->sec.auth_type != UMAC_AUTH_TYPE_LEGACY_PSK ||
@@ -681,9 +670,19 @@ static int iwm_cfg80211_disconnect(struct wiphy *wiphy, struct net_device *dev,
681static int iwm_cfg80211_set_txpower(struct wiphy *wiphy, 670static int iwm_cfg80211_set_txpower(struct wiphy *wiphy,
682 enum tx_power_setting type, int dbm) 671 enum tx_power_setting type, int dbm)
683{ 672{
673 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
674 int ret;
675
684 switch (type) { 676 switch (type) {
685 case TX_POWER_AUTOMATIC: 677 case TX_POWER_AUTOMATIC:
686 return 0; 678 return 0;
679 case TX_POWER_FIXED:
680 ret = iwm_umac_set_config_fix(iwm, UMAC_PARAM_TBL_CFG_FIX,
681 CFG_TX_PWR_LIMIT_USR, dbm * 2);
682 if (ret < 0)
683 return ret;
684
685 return iwm_tx_power_trigger(iwm);
687 default: 686 default:
688 return -EOPNOTSUPP; 687 return -EOPNOTSUPP;
689 } 688 }
@@ -695,7 +694,7 @@ static int iwm_cfg80211_get_txpower(struct wiphy *wiphy, int *dbm)
695{ 694{
696 struct iwm_priv *iwm = wiphy_to_iwm(wiphy); 695 struct iwm_priv *iwm = wiphy_to_iwm(wiphy);
697 696
698 *dbm = iwm->txpower; 697 *dbm = iwm->txpower >> 1;
699 698
700 return 0; 699 return 0;
701} 700}
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.c b/drivers/net/wireless/iwmc3200wifi/commands.c
index 23b52fa2605f..cad511afd907 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.c
+++ b/drivers/net/wireless/iwmc3200wifi/commands.c
@@ -76,6 +76,11 @@ int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size,
76 int ret; 76 int ret;
77 u8 oid = hdr->oid; 77 u8 oid = hdr->oid;
78 78
79 if (!test_bit(IWM_STATUS_READY, &iwm->status)) {
80 IWM_ERR(iwm, "Interface is not ready yet");
81 return -EAGAIN;
82 }
83
79 umac_cmd.id = UMAC_CMD_OPCODE_WIFI_IF_WRAPPER; 84 umac_cmd.id = UMAC_CMD_OPCODE_WIFI_IF_WRAPPER;
80 umac_cmd.resp = resp; 85 umac_cmd.resp = resp;
81 86
@@ -274,6 +279,17 @@ int iwm_send_calib_results(struct iwm_priv *iwm)
274 return ret; 279 return ret;
275} 280}
276 281
282int iwm_send_ct_kill_cfg(struct iwm_priv *iwm, u8 entry, u8 exit)
283{
284 struct iwm_ct_kill_cfg_cmd cmd;
285
286 cmd.entry_threshold = entry;
287 cmd.exit_threshold = exit;
288
289 return iwm_send_lmac_ptrough_cmd(iwm, REPLY_CT_KILL_CONFIG_CMD, &cmd,
290 sizeof(struct iwm_ct_kill_cfg_cmd), 0);
291}
292
277int iwm_send_umac_reset(struct iwm_priv *iwm, __le32 reset_flags, bool resp) 293int iwm_send_umac_reset(struct iwm_priv *iwm, __le32 reset_flags, bool resp)
278{ 294{
279 struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT; 295 struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
@@ -777,11 +793,24 @@ int iwm_invalidate_mlme_profile(struct iwm_priv *iwm)
777 return ret; 793 return ret;
778 794
779 ret = wait_event_interruptible_timeout(iwm->mlme_queue, 795 ret = wait_event_interruptible_timeout(iwm->mlme_queue,
780 (iwm->umac_profile_active == 0), 2 * HZ); 796 (iwm->umac_profile_active == 0), 5 * HZ);
781 797
782 return ret ? 0 : -EBUSY; 798 return ret ? 0 : -EBUSY;
783} 799}
784 800
801int iwm_tx_power_trigger(struct iwm_priv *iwm)
802{
803 struct iwm_umac_pwr_trigger pwr_trigger;
804
805 pwr_trigger.hdr.oid = UMAC_WIFI_IF_CMD_TX_PWR_TRIGGER;
806 pwr_trigger.hdr.buf_size =
807 cpu_to_le16(sizeof(struct iwm_umac_pwr_trigger) -
808 sizeof(struct iwm_umac_wifi_if));
809
810
811 return iwm_send_wifi_if_cmd(iwm, &pwr_trigger, sizeof(pwr_trigger), 1);
812}
813
785int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags) 814int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags)
786{ 815{
787 struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT; 816 struct iwm_udma_wifi_cmd udma_cmd = UDMA_UMAC_INIT;
diff --git a/drivers/net/wireless/iwmc3200wifi/commands.h b/drivers/net/wireless/iwmc3200wifi/commands.h
index e24d5b633997..b36be2b23a3c 100644
--- a/drivers/net/wireless/iwmc3200wifi/commands.h
+++ b/drivers/net/wireless/iwmc3200wifi/commands.h
@@ -102,7 +102,6 @@ enum {
102 CFG_SCAN_NUM_PASSIVE_CHAN_PER_PARTIAL_SCAN, 102 CFG_SCAN_NUM_PASSIVE_CHAN_PER_PARTIAL_SCAN,
103 CFG_TLC_SUPPORTED_TX_HT_RATES, 103 CFG_TLC_SUPPORTED_TX_HT_RATES,
104 CFG_TLC_SUPPORTED_TX_RATES, 104 CFG_TLC_SUPPORTED_TX_RATES,
105 CFG_TLC_VALID_ANTENNA,
106 CFG_TLC_SPATIAL_STREAM_SUPPORTED, 105 CFG_TLC_SPATIAL_STREAM_SUPPORTED,
107 CFG_TLC_RETRY_PER_RATE, 106 CFG_TLC_RETRY_PER_RATE,
108 CFG_TLC_RETRY_PER_HT_RATE, 107 CFG_TLC_RETRY_PER_HT_RATE,
@@ -136,6 +135,10 @@ enum {
136 CFG_TLC_RENEW_ADDBA_DELAY, 135 CFG_TLC_RENEW_ADDBA_DELAY,
137 CFG_TLC_NUM_OF_MULTISEC_TO_COUN_LOAD, 136 CFG_TLC_NUM_OF_MULTISEC_TO_COUN_LOAD,
138 CFG_TLC_IS_STABLE_IN_HT, 137 CFG_TLC_IS_STABLE_IN_HT,
138 CFG_TLC_SR_SIC_1ST_FAIL,
139 CFG_TLC_SR_SIC_1ST_PASS,
140 CFG_TLC_SR_SIC_TOTAL_FAIL,
141 CFG_TLC_SR_SIC_TOTAL_PASS,
139 CFG_RLC_CHAIN_CTRL, 142 CFG_RLC_CHAIN_CTRL,
140 CFG_TRK_TABLE_OP_MODE, 143 CFG_TRK_TABLE_OP_MODE,
141 CFG_TRK_TABLE_RSSI_THRESHOLD, 144 CFG_TRK_TABLE_RSSI_THRESHOLD,
@@ -147,6 +150,58 @@ enum {
147 CFG_MLME_DBG_NOTIF_BLOCK, 150 CFG_MLME_DBG_NOTIF_BLOCK,
148 CFG_BT_OFF_BECONS_INTERVALS, 151 CFG_BT_OFF_BECONS_INTERVALS,
149 CFG_BT_FRAG_DURATION, 152 CFG_BT_FRAG_DURATION,
153 CFG_ACTIVE_CHAINS,
154 CFG_CALIB_CTRL,
155 CFG_CAPABILITY_SUPPORTED_HT_RATES,
156 CFG_HT_MAC_PARAM_INFO,
157 CFG_MIMO_PS_MODE,
158 CFG_HT_DEFAULT_CAPABILIES_INFO,
159 CFG_LED_SC_RESOLUTION_FACTOR,
160 CFG_PTAM_ENERGY_CCK_DET_DEFAULT,
161 CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_DEFAULT,
162 CFG_PTAM_CORR40_4_TH_ADD_MIN_DEFAULT,
163 CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_DEFAULT,
164 CFG_PTAM_CORR32_4_TH_ADD_MIN_DEFAULT,
165 CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_DEFAULT,
166 CFG_PTAM_CORR32_1_TH_ADD_MIN_DEFAULT,
167 CFG_PTAM_ENERGY_CCK_DET_MIN_VAL,
168 CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_MIN_VAL,
169 CFG_PTAM_CORR40_4_TH_ADD_MIN_MIN_VAL,
170 CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_MIN_VAL,
171 CFG_PTAM_CORR32_4_TH_ADD_MIN_MIN_VAL,
172 CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_MIN_VAL,
173 CFG_PTAM_CORR32_1_TH_ADD_MIN_MIN_VAL,
174 CFG_PTAM_ENERGY_CCK_DET_MAX_VAL,
175 CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_MAX_VAL,
176 CFG_PTAM_CORR40_4_TH_ADD_MIN_MAX_VAL,
177 CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_MAX_VAL,
178 CFG_PTAM_CORR32_4_TH_ADD_MIN_MAX_VAL,
179 CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_MAX_VAL,
180 CFG_PTAM_CORR32_1_TH_ADD_MIN_MAX_VAL,
181 CFG_PTAM_ENERGY_CCK_DET_STEP_VAL,
182 CFG_PTAM_CORR40_4_TH_ADD_MIN_MRC_STEP_VAL,
183 CFG_PTAM_CORR40_4_TH_ADD_MIN_STEP_VAL,
184 CFG_PTAM_CORR32_4_TH_ADD_MIN_MRC_STEP_VAL,
185 CFG_PTAM_CORR32_4_TH_ADD_MIN_STEP_VAL,
186 CFG_PTAM_CORR32_1_TH_ADD_MIN_MRC_STEP_VAL,
187 CFG_PTAM_CORR32_1_TH_ADD_MIN_STEP_VAL,
188 CFG_PTAM_LINK_SENS_FA_OFDM_MAX,
189 CFG_PTAM_LINK_SENS_FA_OFDM_MIN,
190 CFG_PTAM_LINK_SENS_FA_CCK_MAX,
191 CFG_PTAM_LINK_SENS_FA_CCK_MIN,
192 CFG_PTAM_LINK_SENS_NRG_DIFF,
193 CFG_PTAM_LINK_SENS_NRG_MARGIN,
194 CFG_PTAM_LINK_SENS_MAX_NUMBER_OF_TIMES_IN_CCK_NO_FA,
195 CFG_PTAM_LINK_SENS_AUTO_CORR_MAX_TH_CCK,
196 CFG_AGG_MGG_TID_LOAD_ADDBA_THRESHOLD,
197 CFG_AGG_MGG_TID_LOAD_DELBA_THRESHOLD,
198 CFG_AGG_MGG_ADDBA_BUF_SIZE,
199 CFG_AGG_MGG_ADDBA_INACTIVE_TIMEOUT,
200 CFG_AGG_MGG_ADDBA_DEBUG_FLAGS,
201 CFG_SCAN_PERIODIC_RSSI_HIGH_THRESHOLD,
202 CFG_SCAN_PERIODIC_COEF_RSSI_HIGH,
203 CFG_11D_ENABLED,
204 CFG_11H_FEATURE_FLAGS,
150 205
151 /* <-- LAST --> */ 206 /* <-- LAST --> */
152 CFG_TBL_FIX_LAST 207 CFG_TBL_FIX_LAST
@@ -155,7 +210,8 @@ enum {
155/* variable size table */ 210/* variable size table */
156enum { 211enum {
157 CFG_NET_ADDR = 0, 212 CFG_NET_ADDR = 0,
158 CFG_PROFILE, 213 CFG_LED_PATTERN_TABLE,
214
159 /* <-- LAST --> */ 215 /* <-- LAST --> */
160 CFG_TBL_VAR_LAST 216 CFG_TBL_VAR_LAST
161}; 217};
@@ -288,6 +344,9 @@ struct iwm_umac_cmd_scan_request {
288/* iwm_umac_security.flag is WSC mode on -- bits [2:2] */ 344/* iwm_umac_security.flag is WSC mode on -- bits [2:2] */
289#define UMAC_SEC_FLG_WSC_ON_POS 2 345#define UMAC_SEC_FLG_WSC_ON_POS 2
290#define UMAC_SEC_FLG_WSC_ON_SEED 1 346#define UMAC_SEC_FLG_WSC_ON_SEED 1
347#define UMAC_SEC_FLG_WSC_ON_MSK (UMAC_SEC_FLG_WSC_ON_SEED << \
348 UMAC_SEC_FLG_WSC_ON_POS)
349
291 350
292/* Legacy profile can use only WEP40 and WEP104 for encryption and 351/* Legacy profile can use only WEP40 and WEP104 for encryption and
293 * OPEN or PSK for authentication */ 352 * OPEN or PSK for authentication */
@@ -382,6 +441,11 @@ struct iwm_umac_tx_key_id {
382 u8 reserved[3]; 441 u8 reserved[3];
383} __attribute__ ((packed)); 442} __attribute__ ((packed));
384 443
444struct iwm_umac_pwr_trigger {
445 struct iwm_umac_wifi_if hdr;
446 __le32 reseved;
447} __attribute__ ((packed));
448
385struct iwm_umac_cmd_stats_req { 449struct iwm_umac_cmd_stats_req {
386 __le32 flags; 450 __le32 flags;
387} __attribute__ ((packed)); 451} __attribute__ ((packed));
@@ -393,6 +457,7 @@ int iwm_send_init_calib_cfg(struct iwm_priv *iwm, u8 calib_requested);
393int iwm_send_periodic_calib_cfg(struct iwm_priv *iwm, u8 calib_requested); 457int iwm_send_periodic_calib_cfg(struct iwm_priv *iwm, u8 calib_requested);
394int iwm_send_calib_results(struct iwm_priv *iwm); 458int iwm_send_calib_results(struct iwm_priv *iwm);
395int iwm_store_rxiq_calib_result(struct iwm_priv *iwm); 459int iwm_store_rxiq_calib_result(struct iwm_priv *iwm);
460int iwm_send_ct_kill_cfg(struct iwm_priv *iwm, u8 entry, u8 exit);
396 461
397/* UMAC commands */ 462/* UMAC commands */
398int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size, 463int iwm_send_wifi_if_cmd(struct iwm_priv *iwm, void *payload, u16 payload_size,
@@ -407,6 +472,7 @@ int iwm_invalidate_mlme_profile(struct iwm_priv *iwm);
407int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id); 472int iwm_send_packet(struct iwm_priv *iwm, struct sk_buff *skb, int pool_id);
408int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx); 473int iwm_set_tx_key(struct iwm_priv *iwm, u8 key_idx);
409int iwm_set_key(struct iwm_priv *iwm, bool remove, struct iwm_key *key); 474int iwm_set_key(struct iwm_priv *iwm, bool remove, struct iwm_key *key);
475int iwm_tx_power_trigger(struct iwm_priv *iwm);
410int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags); 476int iwm_send_umac_stats_req(struct iwm_priv *iwm, u32 flags);
411int iwm_send_umac_channel_list(struct iwm_priv *iwm); 477int iwm_send_umac_channel_list(struct iwm_priv *iwm);
412int iwm_scan_ssids(struct iwm_priv *iwm, struct cfg80211_ssid *ssids, 478int iwm_scan_ssids(struct iwm_priv *iwm, struct cfg80211_ssid *ssids,
diff --git a/drivers/net/wireless/iwmc3200wifi/fw.c b/drivers/net/wireless/iwmc3200wifi/fw.c
index 6b0bcad758ca..49067092d336 100644
--- a/drivers/net/wireless/iwmc3200wifi/fw.c
+++ b/drivers/net/wireless/iwmc3200wifi/fw.c
@@ -217,6 +217,13 @@ static int iwm_load_img(struct iwm_priv *iwm, const char *img_name)
217 IWM_BUILD_YEAR(build_date), IWM_BUILD_MONTH(build_date), 217 IWM_BUILD_YEAR(build_date), IWM_BUILD_MONTH(build_date),
218 IWM_BUILD_DAY(build_date)); 218 IWM_BUILD_DAY(build_date));
219 219
220 if (!strcmp(img_name, iwm->bus_ops->umac_name))
221 sprintf(iwm->umac_version, "%02X.%02X",
222 ver->major, ver->minor);
223
224 if (!strcmp(img_name, iwm->bus_ops->lmac_name))
225 sprintf(iwm->lmac_version, "%02X.%02X",
226 ver->major, ver->minor);
220 227
221 err_release_fw: 228 err_release_fw:
222 release_firmware(fw); 229 release_firmware(fw);
@@ -398,6 +405,8 @@ int iwm_load_fw(struct iwm_priv *iwm)
398 iwm_send_prio_table(iwm); 405 iwm_send_prio_table(iwm);
399 iwm_send_calib_results(iwm); 406 iwm_send_calib_results(iwm);
400 iwm_send_periodic_calib_cfg(iwm, periodic_calib_map); 407 iwm_send_periodic_calib_cfg(iwm, periodic_calib_map);
408 iwm_send_ct_kill_cfg(iwm, iwm->conf.ct_kill_entry,
409 iwm->conf.ct_kill_exit);
401 410
402 return 0; 411 return 0;
403 412
diff --git a/drivers/net/wireless/iwmc3200wifi/iwm.h b/drivers/net/wireless/iwmc3200wifi/iwm.h
index 1b02a4e2a1ac..a9bf6bc97bea 100644
--- a/drivers/net/wireless/iwmc3200wifi/iwm.h
+++ b/drivers/net/wireless/iwmc3200wifi/iwm.h
@@ -65,6 +65,8 @@ struct iwm_conf {
65 u32 sdio_ior_timeout; 65 u32 sdio_ior_timeout;
66 unsigned long calib_map; 66 unsigned long calib_map;
67 unsigned long expected_calib_map; 67 unsigned long expected_calib_map;
68 u8 ct_kill_entry;
69 u8 ct_kill_exit;
68 bool reset_on_fatal_err; 70 bool reset_on_fatal_err;
69 bool auto_connect; 71 bool auto_connect;
70 bool wimax_not_present; 72 bool wimax_not_present;
@@ -276,12 +278,14 @@ struct iwm_priv {
276 struct iw_statistics wstats; 278 struct iw_statistics wstats;
277 struct delayed_work stats_request; 279 struct delayed_work stats_request;
278 struct delayed_work disconnect; 280 struct delayed_work disconnect;
281 struct delayed_work ct_kill_delay;
279 282
280 struct iwm_debugfs dbg; 283 struct iwm_debugfs dbg;
281 284
282 u8 *eeprom; 285 u8 *eeprom;
283 struct timer_list watchdog; 286 struct timer_list watchdog;
284 struct work_struct reset_worker; 287 struct work_struct reset_worker;
288 struct work_struct auth_retry_worker;
285 struct mutex mutex; 289 struct mutex mutex;
286 290
287 u8 *req_ie; 291 u8 *req_ie;
@@ -290,6 +294,8 @@ struct iwm_priv {
290 int resp_ie_len; 294 int resp_ie_len;
291 295
292 struct iwm_fw_error_hdr *last_fw_err; 296 struct iwm_fw_error_hdr *last_fw_err;
297 char umac_version[8];
298 char lmac_version[8];
293 299
294 char private[0] __attribute__((__aligned__(NETDEV_ALIGN))); 300 char private[0] __attribute__((__aligned__(NETDEV_ALIGN)));
295}; 301};
diff --git a/drivers/net/wireless/iwmc3200wifi/lmac.h b/drivers/net/wireless/iwmc3200wifi/lmac.h
index 6c1a14c4480f..a3a79b5e2898 100644
--- a/drivers/net/wireless/iwmc3200wifi/lmac.h
+++ b/drivers/net/wireless/iwmc3200wifi/lmac.h
@@ -187,6 +187,14 @@ struct iwm_coex_prio_table_cmd {
187 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK | \ 187 COEX_EVT_FLAG_MEDIUM_ACTV_NTFY_MSK | \
188 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK) 188 COEX_EVT_FLAG_DELAY_MEDIUM_FREE_NTFY_MSK)
189 189
190/* CT kill config command */
191struct iwm_ct_kill_cfg_cmd {
192 u32 exit_threshold;
193 u32 reserved;
194 u32 entry_threshold;
195} __attribute__ ((packed));
196
197
190/* LMAC OP CODES */ 198/* LMAC OP CODES */
191#define REPLY_PAD 0x0 199#define REPLY_PAD 0x0
192#define REPLY_ALIVE 0x1 200#define REPLY_ALIVE 0x1
diff --git a/drivers/net/wireless/iwmc3200wifi/main.c b/drivers/net/wireless/iwmc3200wifi/main.c
index d668e4756324..f93e9139b0f2 100644
--- a/drivers/net/wireless/iwmc3200wifi/main.c
+++ b/drivers/net/wireless/iwmc3200wifi/main.c
@@ -63,6 +63,8 @@ static struct iwm_conf def_iwm_conf = {
63 BIT(PHY_CALIBRATE_TX_IQ_CMD) | 63 BIT(PHY_CALIBRATE_TX_IQ_CMD) |
64 BIT(PHY_CALIBRATE_RX_IQ_CMD) | 64 BIT(PHY_CALIBRATE_RX_IQ_CMD) |
65 BIT(SHILOH_PHY_CALIBRATE_BASE_BAND_CMD), 65 BIT(SHILOH_PHY_CALIBRATE_BASE_BAND_CMD),
66 .ct_kill_entry = 110,
67 .ct_kill_exit = 110,
66 .reset_on_fatal_err = 1, 68 .reset_on_fatal_err = 1,
67 .auto_connect = 1, 69 .auto_connect = 1,
68 .wimax_not_present = 0, 70 .wimax_not_present = 0,
@@ -133,6 +135,17 @@ static void iwm_disconnect_work(struct work_struct *work)
133 cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0, GFP_KERNEL); 135 cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0, GFP_KERNEL);
134} 136}
135 137
138static void iwm_ct_kill_work(struct work_struct *work)
139{
140 struct iwm_priv *iwm =
141 container_of(work, struct iwm_priv, ct_kill_delay.work);
142 struct wiphy *wiphy = iwm_to_wiphy(iwm);
143
144 IWM_INFO(iwm, "CT kill delay timeout\n");
145
146 wiphy_rfkill_set_hw_state(wiphy, false);
147}
148
136static int __iwm_up(struct iwm_priv *iwm); 149static int __iwm_up(struct iwm_priv *iwm);
137static int __iwm_down(struct iwm_priv *iwm); 150static int __iwm_down(struct iwm_priv *iwm);
138 151
@@ -194,6 +207,33 @@ static void iwm_reset_worker(struct work_struct *work)
194 mutex_unlock(&iwm->mutex); 207 mutex_unlock(&iwm->mutex);
195} 208}
196 209
210static void iwm_auth_retry_worker(struct work_struct *work)
211{
212 struct iwm_priv *iwm;
213 int i, ret;
214
215 iwm = container_of(work, struct iwm_priv, auth_retry_worker);
216 if (iwm->umac_profile_active) {
217 ret = iwm_invalidate_mlme_profile(iwm);
218 if (ret < 0)
219 return;
220 }
221
222 iwm->umac_profile->sec.auth_type = UMAC_AUTH_TYPE_LEGACY_PSK;
223
224 ret = iwm_send_mlme_profile(iwm);
225 if (ret < 0)
226 return;
227
228 for (i = 0; i < IWM_NUM_KEYS; i++)
229 if (iwm->keys[i].key_len)
230 iwm_set_key(iwm, 0, &iwm->keys[i]);
231
232 iwm_set_tx_key(iwm, iwm->default_key);
233}
234
235
236
197static void iwm_watchdog(unsigned long data) 237static void iwm_watchdog(unsigned long data)
198{ 238{
199 struct iwm_priv *iwm = (struct iwm_priv *)data; 239 struct iwm_priv *iwm = (struct iwm_priv *)data;
@@ -225,7 +265,9 @@ int iwm_priv_init(struct iwm_priv *iwm)
225 iwm->scan_id = 1; 265 iwm->scan_id = 1;
226 INIT_DELAYED_WORK(&iwm->stats_request, iwm_statistics_request); 266 INIT_DELAYED_WORK(&iwm->stats_request, iwm_statistics_request);
227 INIT_DELAYED_WORK(&iwm->disconnect, iwm_disconnect_work); 267 INIT_DELAYED_WORK(&iwm->disconnect, iwm_disconnect_work);
268 INIT_DELAYED_WORK(&iwm->ct_kill_delay, iwm_ct_kill_work);
228 INIT_WORK(&iwm->reset_worker, iwm_reset_worker); 269 INIT_WORK(&iwm->reset_worker, iwm_reset_worker);
270 INIT_WORK(&iwm->auth_retry_worker, iwm_auth_retry_worker);
229 INIT_LIST_HEAD(&iwm->bss_list); 271 INIT_LIST_HEAD(&iwm->bss_list);
230 272
231 skb_queue_head_init(&iwm->rx_list); 273 skb_queue_head_init(&iwm->rx_list);
@@ -586,6 +628,7 @@ static int __iwm_up(struct iwm_priv *iwm)
586{ 628{
587 int ret; 629 int ret;
588 struct iwm_notif *notif_reboot, *notif_ack = NULL; 630 struct iwm_notif *notif_reboot, *notif_ack = NULL;
631 struct wiphy *wiphy = iwm_to_wiphy(iwm);
589 632
590 ret = iwm_bus_enable(iwm); 633 ret = iwm_bus_enable(iwm);
591 if (ret) { 634 if (ret) {
@@ -637,6 +680,8 @@ static int __iwm_up(struct iwm_priv *iwm)
637 IWM_ERR(iwm, "MAC reading failed\n"); 680 IWM_ERR(iwm, "MAC reading failed\n");
638 goto err_disable; 681 goto err_disable;
639 } 682 }
683 memcpy(iwm_to_ndev(iwm)->perm_addr, iwm_to_ndev(iwm)->dev_addr,
684 ETH_ALEN);
640 685
641 /* We can load the FWs */ 686 /* We can load the FWs */
642 ret = iwm_load_fw(iwm); 687 ret = iwm_load_fw(iwm);
@@ -645,6 +690,9 @@ static int __iwm_up(struct iwm_priv *iwm)
645 goto err_disable; 690 goto err_disable;
646 } 691 }
647 692
693 snprintf(wiphy->fw_version, sizeof(wiphy->fw_version), "L%s_U%s",
694 iwm->lmac_version, iwm->umac_version);
695
648 /* We configure the UMAC and enable the wifi module */ 696 /* We configure the UMAC and enable the wifi module */
649 ret = iwm_send_umac_config(iwm, 697 ret = iwm_send_umac_config(iwm,
650 cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_CORE_EN) | 698 cpu_to_le32(UMAC_RST_CTRL_FLG_WIFI_CORE_EN) |
diff --git a/drivers/net/wireless/iwmc3200wifi/netdev.c b/drivers/net/wireless/iwmc3200wifi/netdev.c
index 35ec006c2d2c..4f8dbdd7b917 100644
--- a/drivers/net/wireless/iwmc3200wifi/netdev.c
+++ b/drivers/net/wireless/iwmc3200wifi/netdev.c
@@ -152,6 +152,7 @@ void iwm_if_free(struct iwm_priv *iwm)
152 if (!iwm_to_ndev(iwm)) 152 if (!iwm_to_ndev(iwm))
153 return; 153 return;
154 154
155 cancel_delayed_work_sync(&iwm->ct_kill_delay);
155 free_netdev(iwm_to_ndev(iwm)); 156 free_netdev(iwm_to_ndev(iwm));
156 iwm_priv_deinit(iwm); 157 iwm_priv_deinit(iwm);
157 kfree(iwm->umac_profile); 158 kfree(iwm->umac_profile);
diff --git a/drivers/net/wireless/iwmc3200wifi/rx.c b/drivers/net/wireless/iwmc3200wifi/rx.c
index 40dbcbc16593..3ad95dc0dd8d 100644
--- a/drivers/net/wireless/iwmc3200wifi/rx.c
+++ b/drivers/net/wireless/iwmc3200wifi/rx.c
@@ -422,7 +422,9 @@ static int iwm_ntf_rx_ticket(struct iwm_priv *iwm, u8 *buf,
422 if (IS_ERR(ticket_node)) 422 if (IS_ERR(ticket_node))
423 return PTR_ERR(ticket_node); 423 return PTR_ERR(ticket_node);
424 424
425 IWM_DBG_RX(iwm, DBG, "TICKET RELEASE(%d)\n", 425 IWM_DBG_RX(iwm, DBG, "TICKET %s(%d)\n",
426 ticket->action == IWM_RX_TICKET_RELEASE ?
427 "RELEASE" : "DROP",
426 ticket->id); 428 ticket->id);
427 list_add_tail(&ticket_node->node, &iwm->rx_tickets); 429 list_add_tail(&ticket_node->node, &iwm->rx_tickets);
428 430
@@ -499,6 +501,18 @@ static int iwm_mlme_assoc_start(struct iwm_priv *iwm, u8 *buf,
499 return 0; 501 return 0;
500} 502}
501 503
504static u8 iwm_is_open_wep_profile(struct iwm_priv *iwm)
505{
506 if ((iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_40 ||
507 iwm->umac_profile->sec.ucast_cipher == UMAC_CIPHER_TYPE_WEP_104) &&
508 (iwm->umac_profile->sec.ucast_cipher ==
509 iwm->umac_profile->sec.mcast_cipher) &&
510 (iwm->umac_profile->sec.auth_type == UMAC_AUTH_TYPE_OPEN))
511 return 1;
512
513 return 0;
514}
515
502static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf, 516static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
503 unsigned long buf_size, 517 unsigned long buf_size,
504 struct iwm_wifi_cmd *cmd) 518 struct iwm_wifi_cmd *cmd)
@@ -564,11 +578,17 @@ static int iwm_mlme_assoc_complete(struct iwm_priv *iwm, u8 *buf,
564 goto ibss; 578 goto ibss;
565 579
566 if (!test_bit(IWM_STATUS_RESETTING, &iwm->status)) 580 if (!test_bit(IWM_STATUS_RESETTING, &iwm->status))
567 cfg80211_connect_result(iwm_to_ndev(iwm), 581 if (!iwm_is_open_wep_profile(iwm)) {
568 complete->bssid, 582 cfg80211_connect_result(iwm_to_ndev(iwm),
569 NULL, 0, NULL, 0, 583 complete->bssid,
570 WLAN_STATUS_UNSPECIFIED_FAILURE, 584 NULL, 0, NULL, 0,
571 GFP_KERNEL); 585 WLAN_STATUS_UNSPECIFIED_FAILURE,
586 GFP_KERNEL);
587 } else {
588 /* Let's try shared WEP auth */
589 IWM_ERR(iwm, "Trying WEP shared auth\n");
590 schedule_work(&iwm->auth_retry_worker);
591 }
572 else 592 else
573 cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0, 593 cfg80211_disconnected(iwm_to_ndev(iwm), 0, NULL, 0,
574 GFP_KERNEL); 594 GFP_KERNEL);
@@ -712,6 +732,19 @@ static int iwm_mlme_update_sta_table(struct iwm_priv *iwm, u8 *buf,
712 return 0; 732 return 0;
713} 733}
714 734
735static int iwm_mlme_medium_lost(struct iwm_priv *iwm, u8 *buf,
736 unsigned long buf_size,
737 struct iwm_wifi_cmd *cmd)
738{
739 struct wiphy *wiphy = iwm_to_wiphy(iwm);
740
741 IWM_DBG_NTF(iwm, DBG, "WiFi/WiMax coexistence radio is OFF\n");
742
743 wiphy_rfkill_set_hw_state(wiphy, true);
744
745 return 0;
746}
747
715static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf, 748static int iwm_mlme_update_bss_table(struct iwm_priv *iwm, u8 *buf,
716 unsigned long buf_size, 749 unsigned long buf_size,
717 struct iwm_wifi_cmd *cmd) 750 struct iwm_wifi_cmd *cmd)
@@ -898,6 +931,8 @@ static int iwm_ntf_mlme(struct iwm_priv *iwm, u8 *buf,
898 case WIFI_IF_NTFY_EXTENDED_IE_REQUIRED: 931 case WIFI_IF_NTFY_EXTENDED_IE_REQUIRED:
899 IWM_DBG_MLME(iwm, DBG, "Extended IE required\n"); 932 IWM_DBG_MLME(iwm, DBG, "Extended IE required\n");
900 break; 933 break;
934 case WIFI_IF_NTFY_RADIO_PREEMPTION:
935 return iwm_mlme_medium_lost(iwm, buf, buf_size, cmd);
901 case WIFI_IF_NTFY_BSS_TRK_TABLE_CHANGED: 936 case WIFI_IF_NTFY_BSS_TRK_TABLE_CHANGED:
902 return iwm_mlme_update_bss_table(iwm, buf, buf_size, cmd); 937 return iwm_mlme_update_bss_table(iwm, buf, buf_size, cmd);
903 case WIFI_IF_NTFY_BSS_TRK_ENTRIES_REMOVED: 938 case WIFI_IF_NTFY_BSS_TRK_ENTRIES_REMOVED:
@@ -1055,8 +1090,14 @@ static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf,
1055 unsigned long buf_size, 1090 unsigned long buf_size,
1056 struct iwm_wifi_cmd *cmd) 1091 struct iwm_wifi_cmd *cmd)
1057{ 1092{
1058 struct iwm_umac_wifi_if *hdr = 1093 struct iwm_umac_wifi_if *hdr;
1059 (struct iwm_umac_wifi_if *)cmd->buf.payload; 1094
1095 if (cmd == NULL) {
1096 IWM_ERR(iwm, "Couldn't find expected wifi command\n");
1097 return -EINVAL;
1098 }
1099
1100 hdr = (struct iwm_umac_wifi_if *)cmd->buf.payload;
1060 1101
1061 IWM_DBG_NTF(iwm, DBG, "WIFI_IF_WRAPPER cmd is delivered to UMAC: " 1102 IWM_DBG_NTF(iwm, DBG, "WIFI_IF_WRAPPER cmd is delivered to UMAC: "
1062 "oid is 0x%x\n", hdr->oid); 1103 "oid is 0x%x\n", hdr->oid);
@@ -1078,6 +1119,7 @@ static int iwm_ntf_wifi_if_wrapper(struct iwm_priv *iwm, u8 *buf,
1078 return 0; 1119 return 0;
1079} 1120}
1080 1121
1122#define CT_KILL_DELAY (30 * HZ)
1081static int iwm_ntf_card_state(struct iwm_priv *iwm, u8 *buf, 1123static int iwm_ntf_card_state(struct iwm_priv *iwm, u8 *buf,
1082 unsigned long buf_size, struct iwm_wifi_cmd *cmd) 1124 unsigned long buf_size, struct iwm_wifi_cmd *cmd)
1083{ 1125{
@@ -1090,7 +1132,20 @@ static int iwm_ntf_card_state(struct iwm_priv *iwm, u8 *buf,
1090 flags & IWM_CARD_STATE_HW_DISABLED ? "ON" : "OFF", 1132 flags & IWM_CARD_STATE_HW_DISABLED ? "ON" : "OFF",
1091 flags & IWM_CARD_STATE_CTKILL_DISABLED ? "ON" : "OFF"); 1133 flags & IWM_CARD_STATE_CTKILL_DISABLED ? "ON" : "OFF");
1092 1134
1093 wiphy_rfkill_set_hw_state(wiphy, flags & IWM_CARD_STATE_HW_DISABLED); 1135 if (flags & IWM_CARD_STATE_CTKILL_DISABLED) {
1136 /*
1137 * We got a CTKILL event: We bring the interface down in
1138 * oder to cool the device down, and try to bring it up
1139 * 30 seconds later. If it's still too hot, we'll go through
1140 * this code path again.
1141 */
1142 cancel_delayed_work_sync(&iwm->ct_kill_delay);
1143 schedule_delayed_work(&iwm->ct_kill_delay, CT_KILL_DELAY);
1144 }
1145
1146 wiphy_rfkill_set_hw_state(wiphy, flags &
1147 (IWM_CARD_STATE_HW_DISABLED |
1148 IWM_CARD_STATE_CTKILL_DISABLED));
1094 1149
1095 return 0; 1150 return 0;
1096} 1151}
@@ -1281,6 +1336,14 @@ int iwm_rx_handle(struct iwm_priv *iwm, u8 *buf, unsigned long buf_size)
1281 1336
1282 switch (le32_to_cpu(hdr->cmd)) { 1337 switch (le32_to_cpu(hdr->cmd)) {
1283 case UMAC_REBOOT_BARKER: 1338 case UMAC_REBOOT_BARKER:
1339 if (test_bit(IWM_STATUS_READY, &iwm->status)) {
1340 IWM_ERR(iwm, "Unexpected BARKER\n");
1341
1342 schedule_work(&iwm->reset_worker);
1343
1344 return 0;
1345 }
1346
1284 return iwm_notif_send(iwm, NULL, IWM_BARKER_REBOOT_NOTIFICATION, 1347 return iwm_notif_send(iwm, NULL, IWM_BARKER_REBOOT_NOTIFICATION,
1285 IWM_SRC_UDMA, buf, buf_size); 1348 IWM_SRC_UDMA, buf, buf_size);
1286 case UMAC_ACK_BARKER: 1349 case UMAC_ACK_BARKER:
@@ -1443,7 +1506,8 @@ static void iwm_rx_process_packet(struct iwm_priv *iwm,
1443 } 1506 }
1444 break; 1507 break;
1445 case IWM_RX_TICKET_DROP: 1508 case IWM_RX_TICKET_DROP:
1446 IWM_DBG_RX(iwm, DBG, "DROP packet\n"); 1509 IWM_DBG_RX(iwm, DBG, "DROP packet: 0x%x\n",
1510 le16_to_cpu(ticket_node->ticket->flags));
1447 kfree_skb(packet->skb); 1511 kfree_skb(packet->skb);
1448 break; 1512 break;
1449 default: 1513 default:
diff --git a/drivers/net/wireless/iwmc3200wifi/sdio.c b/drivers/net/wireless/iwmc3200wifi/sdio.c
index 8b1de84003ca..cf86294f719b 100644
--- a/drivers/net/wireless/iwmc3200wifi/sdio.c
+++ b/drivers/net/wireless/iwmc3200wifi/sdio.c
@@ -224,8 +224,6 @@ static int if_sdio_disable(struct iwm_priv *iwm)
224 struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm); 224 struct iwm_sdio_priv *hw = iwm_to_if_sdio(iwm);
225 int ret; 225 int ret;
226 226
227 iwm_reset(iwm);
228
229 sdio_claim_host(hw->func); 227 sdio_claim_host(hw->func);
230 sdio_writeb(hw->func, 0, IWM_SDIO_INTR_ENABLE_ADDR, &ret); 228 sdio_writeb(hw->func, 0, IWM_SDIO_INTR_ENABLE_ADDR, &ret);
231 if (ret < 0) 229 if (ret < 0)
@@ -237,6 +235,8 @@ static int if_sdio_disable(struct iwm_priv *iwm)
237 235
238 iwm_sdio_rx_free(hw); 236 iwm_sdio_rx_free(hw);
239 237
238 iwm_reset(iwm);
239
240 IWM_DBG_SDIO(iwm, INFO, "IWM SDIO disable\n"); 240 IWM_DBG_SDIO(iwm, INFO, "IWM SDIO disable\n");
241 241
242 return 0; 242 return 0;
@@ -493,8 +493,10 @@ static void iwm_sdio_remove(struct sdio_func *func)
493} 493}
494 494
495static const struct sdio_device_id iwm_sdio_ids[] = { 495static const struct sdio_device_id iwm_sdio_ids[] = {
496 { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 496 /* Global/AGN SKU */
497 SDIO_DEVICE_ID_INTEL_IWMC3200WIFI) }, 497 { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1403) },
498 /* BGN SKU */
499 { SDIO_DEVICE(SDIO_VENDOR_ID_INTEL, 0x1408) },
498 { /* end: all zeroes */ }, 500 { /* end: all zeroes */ },
499}; 501};
500MODULE_DEVICE_TABLE(sdio, iwm_sdio_ids); 502MODULE_DEVICE_TABLE(sdio, iwm_sdio_ids);
diff --git a/drivers/net/wireless/iwmc3200wifi/umac.h b/drivers/net/wireless/iwmc3200wifi/umac.h
index c5a14ae3160a..be903543bb47 100644
--- a/drivers/net/wireless/iwmc3200wifi/umac.h
+++ b/drivers/net/wireless/iwmc3200wifi/umac.h
@@ -687,6 +687,9 @@ struct iwm_umac_notif_rx_ticket {
687/* Tx/Rx rates window (number of max of last update window per second) */ 687/* Tx/Rx rates window (number of max of last update window per second) */
688#define UMAC_NTF_RATE_SAMPLE_NR 4 688#define UMAC_NTF_RATE_SAMPLE_NR 4
689 689
690/* Max numbers of bits required to go through all antennae in bitmasks */
691#define UMAC_PHY_NUM_CHAINS 3
692
690#define IWM_UMAC_MGMT_TID 8 693#define IWM_UMAC_MGMT_TID 8
691#define IWM_UMAC_TID_NR 8 694#define IWM_UMAC_TID_NR 8
692 695
@@ -697,9 +700,11 @@ struct iwm_umac_notif_stats {
697 __le16 tid_load[IWM_UMAC_TID_NR + 2]; /* 1 non-QoS + 1 dword align */ 700 __le16 tid_load[IWM_UMAC_TID_NR + 2]; /* 1 non-QoS + 1 dword align */
698 __le16 tx_rate[UMAC_NTF_RATE_SAMPLE_NR]; 701 __le16 tx_rate[UMAC_NTF_RATE_SAMPLE_NR];
699 __le16 rx_rate[UMAC_NTF_RATE_SAMPLE_NR]; 702 __le16 rx_rate[UMAC_NTF_RATE_SAMPLE_NR];
703 __le32 chain_energy[UMAC_PHY_NUM_CHAINS];
700 s32 rssi_dbm; 704 s32 rssi_dbm;
701 s32 noise_dbm; 705 s32 noise_dbm;
702 __le32 supp_rates; 706 __le32 supp_rates;
707 __le32 supp_ht_rates;
703 __le32 missed_beacons; 708 __le32 missed_beacons;
704 __le32 rx_beacons; 709 __le32 rx_beacons;
705 __le32 rx_dir_pkts; 710 __le32 rx_dir_pkts;
diff --git a/drivers/net/wireless/libertas/11d.c b/drivers/net/wireless/libertas/11d.c
deleted file mode 100644
index 5c6968101f0d..000000000000
--- a/drivers/net/wireless/libertas/11d.c
+++ /dev/null
@@ -1,696 +0,0 @@
1/**
2 * This file contains functions for 802.11D.
3 */
4#include <linux/ctype.h>
5#include <linux/kernel.h>
6#include <linux/wireless.h>
7
8#include "host.h"
9#include "decl.h"
10#include "11d.h"
11#include "dev.h"
12#include "wext.h"
13
14#define TX_PWR_DEFAULT 10
15
16static struct region_code_mapping region_code_mapping[] = {
17 {"US ", 0x10}, /* US FCC */
18 {"CA ", 0x10}, /* IC Canada */
19 {"SG ", 0x10}, /* Singapore */
20 {"EU ", 0x30}, /* ETSI */
21 {"AU ", 0x30}, /* Australia */
22 {"KR ", 0x30}, /* Republic Of Korea */
23 {"ES ", 0x31}, /* Spain */
24 {"FR ", 0x32}, /* France */
25 {"JP ", 0x40}, /* Japan */
26};
27
28/* Following 2 structure defines the supported channels */
29static struct chan_freq_power channel_freq_power_UN_BG[] = {
30 {1, 2412, TX_PWR_DEFAULT},
31 {2, 2417, TX_PWR_DEFAULT},
32 {3, 2422, TX_PWR_DEFAULT},
33 {4, 2427, TX_PWR_DEFAULT},
34 {5, 2432, TX_PWR_DEFAULT},
35 {6, 2437, TX_PWR_DEFAULT},
36 {7, 2442, TX_PWR_DEFAULT},
37 {8, 2447, TX_PWR_DEFAULT},
38 {9, 2452, TX_PWR_DEFAULT},
39 {10, 2457, TX_PWR_DEFAULT},
40 {11, 2462, TX_PWR_DEFAULT},
41 {12, 2467, TX_PWR_DEFAULT},
42 {13, 2472, TX_PWR_DEFAULT},
43 {14, 2484, TX_PWR_DEFAULT}
44};
45
46static u8 lbs_region_2_code(u8 *region)
47{
48 u8 i;
49
50 for (i = 0; i < COUNTRY_CODE_LEN && region[i]; i++)
51 region[i] = toupper(region[i]);
52
53 for (i = 0; i < ARRAY_SIZE(region_code_mapping); i++) {
54 if (!memcmp(region, region_code_mapping[i].region,
55 COUNTRY_CODE_LEN))
56 return (region_code_mapping[i].code);
57 }
58
59 /* default is US */
60 return (region_code_mapping[0].code);
61}
62
63static u8 *lbs_code_2_region(u8 code)
64{
65 u8 i;
66
67 for (i = 0; i < ARRAY_SIZE(region_code_mapping); i++) {
68 if (region_code_mapping[i].code == code)
69 return (region_code_mapping[i].region);
70 }
71 /* default is US */
72 return (region_code_mapping[0].region);
73}
74
75/**
76 * @brief This function finds the nrchan-th chan after the firstchan
77 * @param band band
78 * @param firstchan first channel number
79 * @param nrchan number of channels
80 * @return the nrchan-th chan number
81*/
82static u8 lbs_get_chan_11d(u8 firstchan, u8 nrchan, u8 *chan)
83/*find the nrchan-th chan after the firstchan*/
84{
85 u8 i;
86 struct chan_freq_power *cfp;
87 u8 cfp_no;
88
89 cfp = channel_freq_power_UN_BG;
90 cfp_no = ARRAY_SIZE(channel_freq_power_UN_BG);
91
92 for (i = 0; i < cfp_no; i++) {
93 if ((cfp + i)->channel == firstchan) {
94 lbs_deb_11d("firstchan found\n");
95 break;
96 }
97 }
98
99 if (i < cfp_no) {
100 /*if beyond the boundary */
101 if (i + nrchan < cfp_no) {
102 *chan = (cfp + i + nrchan)->channel;
103 return 1;
104 }
105 }
106
107 return 0;
108}
109
110/**
111 * @brief This function Checks if chan txpwr is learned from AP/IBSS
112 * @param chan chan number
113 * @param parsed_region_chan pointer to parsed_region_chan_11d
114 * @return TRUE; FALSE
115*/
116static u8 lbs_channel_known_11d(u8 chan,
117 struct parsed_region_chan_11d * parsed_region_chan)
118{
119 struct chan_power_11d *chanpwr = parsed_region_chan->chanpwr;
120 u8 nr_chan = parsed_region_chan->nr_chan;
121 u8 i = 0;
122
123 lbs_deb_hex(LBS_DEB_11D, "parsed_region_chan", (char *)chanpwr,
124 sizeof(struct chan_power_11d) * nr_chan);
125
126 for (i = 0; i < nr_chan; i++) {
127 if (chan == chanpwr[i].chan) {
128 lbs_deb_11d("found chan %d\n", chan);
129 return 1;
130 }
131 }
132
133 lbs_deb_11d("chan %d not found\n", chan);
134 return 0;
135}
136
137u32 lbs_chan_2_freq(u8 chan)
138{
139 struct chan_freq_power *cf;
140 u16 i;
141 u32 freq = 0;
142
143 cf = channel_freq_power_UN_BG;
144
145 for (i = 0; i < ARRAY_SIZE(channel_freq_power_UN_BG); i++) {
146 if (chan == cf[i].channel)
147 freq = cf[i].freq;
148 }
149
150 return freq;
151}
152
153static int generate_domain_info_11d(struct parsed_region_chan_11d
154 *parsed_region_chan,
155 struct lbs_802_11d_domain_reg *domaininfo)
156{
157 u8 nr_subband = 0;
158
159 u8 nr_chan = parsed_region_chan->nr_chan;
160 u8 nr_parsedchan = 0;
161
162 u8 firstchan = 0, nextchan = 0, maxpwr = 0;
163
164 u8 i, flag = 0;
165
166 memcpy(domaininfo->countrycode, parsed_region_chan->countrycode,
167 COUNTRY_CODE_LEN);
168
169 lbs_deb_11d("nrchan %d\n", nr_chan);
170 lbs_deb_hex(LBS_DEB_11D, "parsed_region_chan", (char *)parsed_region_chan,
171 sizeof(struct parsed_region_chan_11d));
172
173 for (i = 0; i < nr_chan; i++) {
174 if (!flag) {
175 flag = 1;
176 nextchan = firstchan =
177 parsed_region_chan->chanpwr[i].chan;
178 maxpwr = parsed_region_chan->chanpwr[i].pwr;
179 nr_parsedchan = 1;
180 continue;
181 }
182
183 if (parsed_region_chan->chanpwr[i].chan == nextchan + 1 &&
184 parsed_region_chan->chanpwr[i].pwr == maxpwr) {
185 nextchan++;
186 nr_parsedchan++;
187 } else {
188 domaininfo->subband[nr_subband].firstchan = firstchan;
189 domaininfo->subband[nr_subband].nrchan =
190 nr_parsedchan;
191 domaininfo->subband[nr_subband].maxtxpwr = maxpwr;
192 nr_subband++;
193 nextchan = firstchan =
194 parsed_region_chan->chanpwr[i].chan;
195 maxpwr = parsed_region_chan->chanpwr[i].pwr;
196 }
197 }
198
199 if (flag) {
200 domaininfo->subband[nr_subband].firstchan = firstchan;
201 domaininfo->subband[nr_subband].nrchan = nr_parsedchan;
202 domaininfo->subband[nr_subband].maxtxpwr = maxpwr;
203 nr_subband++;
204 }
205 domaininfo->nr_subband = nr_subband;
206
207 lbs_deb_11d("nr_subband=%x\n", domaininfo->nr_subband);
208 lbs_deb_hex(LBS_DEB_11D, "domaininfo", (char *)domaininfo,
209 COUNTRY_CODE_LEN + 1 +
210 sizeof(struct ieee_subbandset) * nr_subband);
211 return 0;
212}
213
214/**
215 * @brief This function generates parsed_region_chan from Domain Info learned from AP/IBSS
216 * @param region_chan pointer to struct region_channel
217 * @param *parsed_region_chan pointer to parsed_region_chan_11d
218 * @return N/A
219*/
220static void lbs_generate_parsed_region_chan_11d(struct region_channel *region_chan,
221 struct parsed_region_chan_11d *
222 parsed_region_chan)
223{
224 u8 i;
225 struct chan_freq_power *cfp;
226
227 if (region_chan == NULL) {
228 lbs_deb_11d("region_chan is NULL\n");
229 return;
230 }
231
232 cfp = region_chan->CFP;
233 if (cfp == NULL) {
234 lbs_deb_11d("cfp is NULL \n");
235 return;
236 }
237
238 parsed_region_chan->band = region_chan->band;
239 parsed_region_chan->region = region_chan->region;
240 memcpy(parsed_region_chan->countrycode,
241 lbs_code_2_region(region_chan->region), COUNTRY_CODE_LEN);
242
243 lbs_deb_11d("region 0x%x, band %d\n", parsed_region_chan->region,
244 parsed_region_chan->band);
245
246 for (i = 0; i < region_chan->nrcfp; i++, cfp++) {
247 parsed_region_chan->chanpwr[i].chan = cfp->channel;
248 parsed_region_chan->chanpwr[i].pwr = cfp->maxtxpower;
249 lbs_deb_11d("chan %d, pwr %d\n",
250 parsed_region_chan->chanpwr[i].chan,
251 parsed_region_chan->chanpwr[i].pwr);
252 }
253 parsed_region_chan->nr_chan = region_chan->nrcfp;
254
255 lbs_deb_11d("nrchan %d\n", parsed_region_chan->nr_chan);
256
257 return;
258}
259
260/**
261 * @brief generate parsed_region_chan from Domain Info learned from AP/IBSS
262 * @param region region ID
263 * @param band band
264 * @param chan chan
265 * @return TRUE;FALSE
266*/
267static u8 lbs_region_chan_supported_11d(u8 region, u8 chan)
268{
269 struct chan_freq_power *cfp;
270 int cfp_no;
271 u8 idx;
272 int ret = 0;
273
274 lbs_deb_enter(LBS_DEB_11D);
275
276 cfp = lbs_get_region_cfp_table(region, &cfp_no);
277 if (cfp == NULL)
278 return 0;
279
280 for (idx = 0; idx < cfp_no; idx++) {
281 if (chan == (cfp + idx)->channel) {
282 /* If Mrvl Chip Supported? */
283 if ((cfp + idx)->unsupported) {
284 ret = 0;
285 } else {
286 ret = 1;
287 }
288 goto done;
289 }
290 }
291
292 /*chan is not in the region table */
293
294done:
295 lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
296 return ret;
297}
298
299/**
300 * @brief This function checks if chan txpwr is learned from AP/IBSS
301 * @param chan chan number
302 * @param parsed_region_chan pointer to parsed_region_chan_11d
303 * @return 0
304*/
305static int parse_domain_info_11d(struct ieee_ie_country_info_full_set *countryinfo,
306 u8 band,
307 struct parsed_region_chan_11d *parsed_region_chan)
308{
309 u8 nr_subband, nrchan;
310 u8 lastchan, firstchan;
311 u8 region;
312 u8 curchan = 0;
313
314 u8 idx = 0; /*chan index in parsed_region_chan */
315
316 u8 j, i;
317
318 lbs_deb_enter(LBS_DEB_11D);
319
320 /*validation Rules:
321 1. valid region Code
322 2. First Chan increment
323 3. channel range no overlap
324 4. channel is valid?
325 5. channel is supported by region?
326 6. Others
327 */
328
329 lbs_deb_hex(LBS_DEB_11D, "countryinfo", (u8 *) countryinfo, 30);
330
331 if ((*(countryinfo->countrycode)) == 0
332 || (countryinfo->header.len <= COUNTRY_CODE_LEN)) {
333 /* No region Info or Wrong region info: treat as No 11D info */
334 goto done;
335 }
336
337 /*Step1: check region_code */
338 parsed_region_chan->region = region =
339 lbs_region_2_code(countryinfo->countrycode);
340
341 lbs_deb_11d("regioncode=%x\n", (u8) parsed_region_chan->region);
342 lbs_deb_hex(LBS_DEB_11D, "countrycode", (char *)countryinfo->countrycode,
343 COUNTRY_CODE_LEN);
344
345 parsed_region_chan->band = band;
346
347 memcpy(parsed_region_chan->countrycode, countryinfo->countrycode,
348 COUNTRY_CODE_LEN);
349
350 nr_subband = (countryinfo->header.len - COUNTRY_CODE_LEN) /
351 sizeof(struct ieee_subbandset);
352
353 for (j = 0, lastchan = 0; j < nr_subband; j++) {
354
355 if (countryinfo->subband[j].firstchan <= lastchan) {
356 /*Step2&3. Check First Chan Num increment and no overlap */
357 lbs_deb_11d("chan %d>%d, overlap\n",
358 countryinfo->subband[j].firstchan, lastchan);
359 continue;
360 }
361
362 firstchan = countryinfo->subband[j].firstchan;
363 nrchan = countryinfo->subband[j].nrchan;
364
365 for (i = 0; idx < MAX_NO_OF_CHAN && i < nrchan; i++) {
366 /*step4: channel is supported? */
367
368 if (!lbs_get_chan_11d(firstchan, i, &curchan)) {
369 /* Chan is not found in UN table */
370 lbs_deb_11d("chan is not supported: %d \n", i);
371 break;
372 }
373
374 lastchan = curchan;
375
376 if (lbs_region_chan_supported_11d(region, curchan)) {
377 /*step5: Check if curchan is supported by mrvl in region */
378 parsed_region_chan->chanpwr[idx].chan = curchan;
379 parsed_region_chan->chanpwr[idx].pwr =
380 countryinfo->subband[j].maxtxpwr;
381 idx++;
382 } else {
383 /*not supported and ignore the chan */
384 lbs_deb_11d(
385 "i %d, chan %d unsupported in region %x, band %d\n",
386 i, curchan, region, band);
387 }
388 }
389
390 /*Step6: Add other checking if any */
391
392 }
393
394 parsed_region_chan->nr_chan = idx;
395
396 lbs_deb_11d("nrchan=%x\n", parsed_region_chan->nr_chan);
397 lbs_deb_hex(LBS_DEB_11D, "parsed_region_chan", (u8 *) parsed_region_chan,
398 2 + COUNTRY_CODE_LEN + sizeof(struct parsed_region_chan_11d) * idx);
399
400done:
401 lbs_deb_enter(LBS_DEB_11D);
402 return 0;
403}
404
405/**
406 * @brief This function calculates the scan type for channels
407 * @param chan chan number
408 * @param parsed_region_chan pointer to parsed_region_chan_11d
409 * @return PASSIVE if chan is unknown; ACTIVE if chan is known
410*/
411u8 lbs_get_scan_type_11d(u8 chan,
412 struct parsed_region_chan_11d * parsed_region_chan)
413{
414 u8 scan_type = CMD_SCAN_TYPE_PASSIVE;
415
416 lbs_deb_enter(LBS_DEB_11D);
417
418 if (lbs_channel_known_11d(chan, parsed_region_chan)) {
419 lbs_deb_11d("found, do active scan\n");
420 scan_type = CMD_SCAN_TYPE_ACTIVE;
421 } else {
422 lbs_deb_11d("not found, do passive scan\n");
423 }
424
425 lbs_deb_leave_args(LBS_DEB_11D, "ret scan_type %d", scan_type);
426 return scan_type;
427
428}
429
430void lbs_init_11d(struct lbs_private *priv)
431{
432 priv->enable11d = 0;
433 memset(&(priv->parsed_region_chan), 0,
434 sizeof(struct parsed_region_chan_11d));
435 return;
436}
437
438/**
439 * @brief This function sets DOMAIN INFO to FW
440 * @param priv pointer to struct lbs_private
441 * @return 0; -1
442*/
443static int set_domain_info_11d(struct lbs_private *priv)
444{
445 int ret;
446
447 if (!priv->enable11d) {
448 lbs_deb_11d("dnld domain Info with 11d disabled\n");
449 return 0;
450 }
451
452 ret = lbs_prepare_and_send_command(priv, CMD_802_11D_DOMAIN_INFO,
453 CMD_ACT_SET,
454 CMD_OPTION_WAITFORRSP, 0, NULL);
455 if (ret)
456 lbs_deb_11d("fail to dnld domain info\n");
457
458 return ret;
459}
460
461/**
462 * @brief This function setups scan channels
463 * @param priv pointer to struct lbs_private
464 * @param band band
465 * @return 0
466*/
467int lbs_set_universaltable(struct lbs_private *priv, u8 band)
468{
469 u16 size = sizeof(struct chan_freq_power);
470 u16 i = 0;
471
472 memset(priv->universal_channel, 0,
473 sizeof(priv->universal_channel));
474
475 priv->universal_channel[i].nrcfp =
476 sizeof(channel_freq_power_UN_BG) / size;
477 lbs_deb_11d("BG-band nrcfp %d\n",
478 priv->universal_channel[i].nrcfp);
479
480 priv->universal_channel[i].CFP = channel_freq_power_UN_BG;
481 priv->universal_channel[i].valid = 1;
482 priv->universal_channel[i].region = UNIVERSAL_REGION_CODE;
483 priv->universal_channel[i].band = band;
484 i++;
485
486 return 0;
487}
488
489/**
490 * @brief This function implements command CMD_802_11D_DOMAIN_INFO
491 * @param priv pointer to struct lbs_private
492 * @param cmd pointer to cmd buffer
493 * @param cmdno cmd ID
494 * @param cmdOption cmd action
495 * @return 0
496*/
497int lbs_cmd_802_11d_domain_info(struct lbs_private *priv,
498 struct cmd_ds_command *cmd, u16 cmdno,
499 u16 cmdoption)
500{
501 struct cmd_ds_802_11d_domain_info *pdomaininfo =
502 &cmd->params.domaininfo;
503 struct mrvl_ie_domain_param_set *domain = &pdomaininfo->domain;
504 u8 nr_subband = priv->domainreg.nr_subband;
505
506 lbs_deb_enter(LBS_DEB_11D);
507
508 lbs_deb_11d("nr_subband=%x\n", nr_subband);
509
510 cmd->command = cpu_to_le16(cmdno);
511 pdomaininfo->action = cpu_to_le16(cmdoption);
512 if (cmdoption == CMD_ACT_GET) {
513 cmd->size =
514 cpu_to_le16(sizeof(pdomaininfo->action) + S_DS_GEN);
515 lbs_deb_hex(LBS_DEB_11D, "802_11D_DOMAIN_INFO", (u8 *) cmd,
516 le16_to_cpu(cmd->size));
517 goto done;
518 }
519
520 domain->header.type = cpu_to_le16(TLV_TYPE_DOMAIN);
521 memcpy(domain->countrycode, priv->domainreg.countrycode,
522 sizeof(domain->countrycode));
523
524 domain->header.len =
525 cpu_to_le16(nr_subband * sizeof(struct ieee_subbandset) +
526 sizeof(domain->countrycode));
527
528 if (nr_subband) {
529 memcpy(domain->subband, priv->domainreg.subband,
530 nr_subband * sizeof(struct ieee_subbandset));
531
532 cmd->size = cpu_to_le16(sizeof(pdomaininfo->action) +
533 le16_to_cpu(domain->header.len) +
534 sizeof(struct mrvl_ie_header) +
535 S_DS_GEN);
536 } else {
537 cmd->size =
538 cpu_to_le16(sizeof(pdomaininfo->action) + S_DS_GEN);
539 }
540
541 lbs_deb_hex(LBS_DEB_11D, "802_11D_DOMAIN_INFO", (u8 *) cmd, le16_to_cpu(cmd->size));
542
543done:
544 lbs_deb_enter(LBS_DEB_11D);
545 return 0;
546}
547
548/**
549 * @brief This function parses countryinfo from AP and download country info to FW
550 * @param priv pointer to struct lbs_private
551 * @param resp pointer to command response buffer
552 * @return 0; -1
553 */
554int lbs_ret_802_11d_domain_info(struct cmd_ds_command *resp)
555{
556 struct cmd_ds_802_11d_domain_info *domaininfo = &resp->params.domaininforesp;
557 struct mrvl_ie_domain_param_set *domain = &domaininfo->domain;
558 u16 action = le16_to_cpu(domaininfo->action);
559 s16 ret = 0;
560 u8 nr_subband = 0;
561
562 lbs_deb_enter(LBS_DEB_11D);
563
564 lbs_deb_hex(LBS_DEB_11D, "domain info resp", (u8 *) resp,
565 (int)le16_to_cpu(resp->size));
566
567 nr_subband = (le16_to_cpu(domain->header.len) - COUNTRY_CODE_LEN) /
568 sizeof(struct ieee_subbandset);
569
570 lbs_deb_11d("domain info resp: nr_subband %d\n", nr_subband);
571
572 if (nr_subband > MRVDRV_MAX_SUBBAND_802_11D) {
573 lbs_deb_11d("Invalid Numrer of Subband returned!!\n");
574 return -1;
575 }
576
577 switch (action) {
578 case CMD_ACT_SET: /*Proc Set action */
579 break;
580
581 case CMD_ACT_GET:
582 break;
583 default:
584 lbs_deb_11d("Invalid action:%d\n", domaininfo->action);
585 ret = -1;
586 break;
587 }
588
589 lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
590 return ret;
591}
592
593/**
594 * @brief This function parses countryinfo from AP and download country info to FW
595 * @param priv pointer to struct lbs_private
596 * @return 0; -1
597 */
598int lbs_parse_dnld_countryinfo_11d(struct lbs_private *priv,
599 struct bss_descriptor * bss)
600{
601 int ret;
602
603 lbs_deb_enter(LBS_DEB_11D);
604 if (priv->enable11d) {
605 memset(&priv->parsed_region_chan, 0,
606 sizeof(struct parsed_region_chan_11d));
607 ret = parse_domain_info_11d(&bss->countryinfo, 0,
608 &priv->parsed_region_chan);
609
610 if (ret == -1) {
611 lbs_deb_11d("error parsing domain_info from AP\n");
612 goto done;
613 }
614
615 memset(&priv->domainreg, 0,
616 sizeof(struct lbs_802_11d_domain_reg));
617 generate_domain_info_11d(&priv->parsed_region_chan,
618 &priv->domainreg);
619
620 ret = set_domain_info_11d(priv);
621
622 if (ret) {
623 lbs_deb_11d("error setting domain info\n");
624 goto done;
625 }
626 }
627 ret = 0;
628
629done:
630 lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
631 return ret;
632}
633
634/**
635 * @brief This function generates 11D info from user specified regioncode and download to FW
636 * @param priv pointer to struct lbs_private
637 * @return 0; -1
638 */
639int lbs_create_dnld_countryinfo_11d(struct lbs_private *priv)
640{
641 int ret;
642 struct region_channel *region_chan;
643 u8 j;
644
645 lbs_deb_enter(LBS_DEB_11D);
646 lbs_deb_11d("curbssparams.band %d\n", priv->curbssparams.band);
647
648 if (priv->enable11d) {
649 /* update parsed_region_chan_11; dnld domaininf to FW */
650
651 for (j = 0; j < ARRAY_SIZE(priv->region_channel); j++) {
652 region_chan = &priv->region_channel[j];
653
654 lbs_deb_11d("%d region_chan->band %d\n", j,
655 region_chan->band);
656
657 if (!region_chan || !region_chan->valid
658 || !region_chan->CFP)
659 continue;
660 if (region_chan->band != priv->curbssparams.band)
661 continue;
662 break;
663 }
664
665 if (j >= ARRAY_SIZE(priv->region_channel)) {
666 lbs_deb_11d("region_chan not found, band %d\n",
667 priv->curbssparams.band);
668 ret = -1;
669 goto done;
670 }
671
672 memset(&priv->parsed_region_chan, 0,
673 sizeof(struct parsed_region_chan_11d));
674 lbs_generate_parsed_region_chan_11d(region_chan,
675 &priv->
676 parsed_region_chan);
677
678 memset(&priv->domainreg, 0,
679 sizeof(struct lbs_802_11d_domain_reg));
680 generate_domain_info_11d(&priv->parsed_region_chan,
681 &priv->domainreg);
682
683 ret = set_domain_info_11d(priv);
684
685 if (ret) {
686 lbs_deb_11d("error setting domain info\n");
687 goto done;
688 }
689
690 }
691 ret = 0;
692
693done:
694 lbs_deb_leave_args(LBS_DEB_11D, "ret %d", ret);
695 return ret;
696}
diff --git a/drivers/net/wireless/libertas/11d.h b/drivers/net/wireless/libertas/11d.h
deleted file mode 100644
index fb75d3e321a0..000000000000
--- a/drivers/net/wireless/libertas/11d.h
+++ /dev/null
@@ -1,105 +0,0 @@
1/**
2 * This header file contains data structures and
3 * function declarations of 802.11d
4 */
5#ifndef _LBS_11D_
6#define _LBS_11D_
7
8#include "types.h"
9#include "defs.h"
10
11#define UNIVERSAL_REGION_CODE 0xff
12
13/** (Beaconsize(256)-5(IEId,len,contrystr(3))/3(FirstChan,NoOfChan,MaxPwr)
14 */
15#define MRVDRV_MAX_SUBBAND_802_11D 83
16
17#define COUNTRY_CODE_LEN 3
18#define MAX_NO_OF_CHAN 40
19
20struct cmd_ds_command;
21
22/** Data structure for Country IE*/
23struct ieee_subbandset {
24 u8 firstchan;
25 u8 nrchan;
26 u8 maxtxpwr;
27} __attribute__ ((packed));
28
29struct ieee_ie_country_info_set {
30 struct ieee_ie_header header;
31
32 u8 countrycode[COUNTRY_CODE_LEN];
33 struct ieee_subbandset subband[1];
34};
35
36struct ieee_ie_country_info_full_set {
37 struct ieee_ie_header header;
38
39 u8 countrycode[COUNTRY_CODE_LEN];
40 struct ieee_subbandset subband[MRVDRV_MAX_SUBBAND_802_11D];
41} __attribute__ ((packed));
42
43struct mrvl_ie_domain_param_set {
44 struct mrvl_ie_header header;
45
46 u8 countrycode[COUNTRY_CODE_LEN];
47 struct ieee_subbandset subband[1];
48} __attribute__ ((packed));
49
50struct cmd_ds_802_11d_domain_info {
51 __le16 action;
52 struct mrvl_ie_domain_param_set domain;
53} __attribute__ ((packed));
54
55/** domain regulatory information */
56struct lbs_802_11d_domain_reg {
57 /** country Code*/
58 u8 countrycode[COUNTRY_CODE_LEN];
59 /** No. of subband*/
60 u8 nr_subband;
61 struct ieee_subbandset subband[MRVDRV_MAX_SUBBAND_802_11D];
62};
63
64struct chan_power_11d {
65 u8 chan;
66 u8 pwr;
67} __attribute__ ((packed));
68
69struct parsed_region_chan_11d {
70 u8 band;
71 u8 region;
72 s8 countrycode[COUNTRY_CODE_LEN];
73 struct chan_power_11d chanpwr[MAX_NO_OF_CHAN];
74 u8 nr_chan;
75} __attribute__ ((packed));
76
77struct region_code_mapping {
78 u8 region[COUNTRY_CODE_LEN];
79 u8 code;
80};
81
82struct lbs_private;
83
84u8 lbs_get_scan_type_11d(u8 chan,
85 struct parsed_region_chan_11d *parsed_region_chan);
86
87u32 lbs_chan_2_freq(u8 chan);
88
89void lbs_init_11d(struct lbs_private *priv);
90
91int lbs_set_universaltable(struct lbs_private *priv, u8 band);
92
93int lbs_cmd_802_11d_domain_info(struct lbs_private *priv,
94 struct cmd_ds_command *cmd, u16 cmdno,
95 u16 cmdOption);
96
97int lbs_ret_802_11d_domain_info(struct cmd_ds_command *resp);
98
99struct bss_descriptor;
100int lbs_parse_dnld_countryinfo_11d(struct lbs_private *priv,
101 struct bss_descriptor * bss);
102
103int lbs_create_dnld_countryinfo_11d(struct lbs_private *priv);
104
105#endif
diff --git a/drivers/net/wireless/libertas/Kconfig b/drivers/net/wireless/libertas/Kconfig
new file mode 100644
index 000000000000..30aa9d48d67e
--- /dev/null
+++ b/drivers/net/wireless/libertas/Kconfig
@@ -0,0 +1,39 @@
1config LIBERTAS
2 tristate "Marvell 8xxx Libertas WLAN driver support"
3 depends on CFG80211
4 select WIRELESS_EXT
5 select WEXT_SPY
6 select LIB80211
7 select FW_LOADER
8 ---help---
9 A library for Marvell Libertas 8xxx devices.
10
11config LIBERTAS_USB
12 tristate "Marvell Libertas 8388 USB 802.11b/g cards"
13 depends on LIBERTAS && USB
14 ---help---
15 A driver for Marvell Libertas 8388 USB devices.
16
17config LIBERTAS_CS
18 tristate "Marvell Libertas 8385 CompactFlash 802.11b/g cards"
19 depends on LIBERTAS && PCMCIA
20 ---help---
21 A driver for Marvell Libertas 8385 CompactFlash devices.
22
23config LIBERTAS_SDIO
24 tristate "Marvell Libertas 8385/8686/8688 SDIO 802.11b/g cards"
25 depends on LIBERTAS && MMC
26 ---help---
27 A driver for Marvell Libertas 8385/8686/8688 SDIO devices.
28
29config LIBERTAS_SPI
30 tristate "Marvell Libertas 8686 SPI 802.11b/g cards"
31 depends on LIBERTAS && SPI
32 ---help---
33 A driver for Marvell Libertas 8686 SPI devices.
34
35config LIBERTAS_DEBUG
36 bool "Enable full debugging output in the Libertas module."
37 depends on LIBERTAS
38 ---help---
39 Debugging support.
diff --git a/drivers/net/wireless/libertas/Makefile b/drivers/net/wireless/libertas/Makefile
index 0b6918584503..fa37039e0eae 100644
--- a/drivers/net/wireless/libertas/Makefile
+++ b/drivers/net/wireless/libertas/Makefile
@@ -1,5 +1,15 @@
1libertas-objs := main.o wext.o rx.o tx.o cmd.o cmdresp.o scan.o 11d.o \ 1libertas-y += assoc.o
2 debugfs.o persistcfg.o ethtool.o assoc.o 2libertas-y += cfg.o
3libertas-y += cmd.o
4libertas-y += cmdresp.o
5libertas-y += debugfs.o
6libertas-y += ethtool.o
7libertas-y += main.o
8libertas-y += persistcfg.o
9libertas-y += rx.o
10libertas-y += scan.o
11libertas-y += tx.o
12libertas-y += wext.o
3 13
4usb8xxx-objs += if_usb.o 14usb8xxx-objs += if_usb.o
5libertas_cs-objs += if_cs.o 15libertas_cs-objs += if_cs.o
diff --git a/drivers/net/wireless/libertas/README b/drivers/net/wireless/libertas/README
index ab6a2d518af0..2726c044430f 100644
--- a/drivers/net/wireless/libertas/README
+++ b/drivers/net/wireless/libertas/README
@@ -1,5 +1,5 @@
1================================================================================ 1================================================================================
2 README for USB8388 2 README for Libertas
3 3
4 (c) Copyright © 2003-2006, Marvell International Ltd. 4 (c) Copyright © 2003-2006, Marvell International Ltd.
5 All Rights Reserved 5 All Rights Reserved
@@ -226,4 +226,28 @@ setuserscan
226 All entries in the scan table (not just the new scan data when keep=1) 226 All entries in the scan table (not just the new scan data when keep=1)
227 will be displayed upon completion by use of the getscantable ioctl. 227 will be displayed upon completion by use of the getscantable ioctl.
228 228
229========================
230IWCONFIG COMMANDS
231========================
232power period
233
234 This command is used to configure the station in deep sleep mode /
235 auto deep sleep mode.
236
237 The timer is implemented to monitor the activities (command, event,
238 etc.). When an activity is detected station will exit from deep
239 sleep mode automatically and restart the timer. At timer expiry
240 (no activity for defined time period) the deep sleep mode is entered
241 automatically.
242
243 Note: this command is for SDIO interface only.
244
245 Usage:
246 To enable deep sleep mode do:
247 iwconfig wlan0 power period 0
248 To enable auto deep sleep mode with idle time period 5 seconds do:
249 iwconfig wlan0 power period 5
250 To disable deep sleep/auto deep sleep mode do:
251 iwconfig wlan0 power period -1
252
229============================================================================== 253==============================================================================
diff --git a/drivers/net/wireless/libertas/assoc.c b/drivers/net/wireless/libertas/assoc.c
index dd8732611ba9..751067369ba8 100644
--- a/drivers/net/wireless/libertas/assoc.c
+++ b/drivers/net/wireless/libertas/assoc.c
@@ -23,6 +23,13 @@ static const u8 bssid_off[ETH_ALEN] __attribute__ ((aligned (2))) =
23 */ 23 */
24#define CAPINFO_MASK (~(0xda00)) 24#define CAPINFO_MASK (~(0xda00))
25 25
/**
 * 802.11b/g supported bitrates (in 500Kb/s units)
 *
 * 0x02..0x16 are the 802.11b rates 1/2/5.5/11 Mb/s, 0x0c..0x6c the
 * 802.11g rates 6/9/12/18/24/36/48/54 Mb/s; the two trailing zeros pad
 * the array out to MAX_RATES and act as a terminator.
 */
u8 lbs_bg_rates[MAX_RATES] =
    { 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c,
0x00, 0x00 };
32
26 33
27/** 34/**
28 * @brief This function finds common rates between rates and card rates. 35 * @brief This function finds common rates between rates and card rates.
@@ -147,6 +154,397 @@ static int lbs_set_authentication(struct lbs_private *priv, u8 bssid[6], u8 auth
147} 154}
148 155
149 156
157int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
158 struct assoc_request *assoc)
159{
160 struct cmd_ds_802_11_set_wep cmd;
161 int ret = 0;
162
163 lbs_deb_enter(LBS_DEB_CMD);
164
165 memset(&cmd, 0, sizeof(cmd));
166 cmd.hdr.command = cpu_to_le16(CMD_802_11_SET_WEP);
167 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
168
169 cmd.action = cpu_to_le16(cmd_action);
170
171 if (cmd_action == CMD_ACT_ADD) {
172 int i;
173
174 /* default tx key index */
175 cmd.keyindex = cpu_to_le16(assoc->wep_tx_keyidx &
176 CMD_WEP_KEY_INDEX_MASK);
177
178 /* Copy key types and material to host command structure */
179 for (i = 0; i < 4; i++) {
180 struct enc_key *pkey = &assoc->wep_keys[i];
181
182 switch (pkey->len) {
183 case KEY_LEN_WEP_40:
184 cmd.keytype[i] = CMD_TYPE_WEP_40_BIT;
185 memmove(cmd.keymaterial[i], pkey->key, pkey->len);
186 lbs_deb_cmd("SET_WEP: add key %d (40 bit)\n", i);
187 break;
188 case KEY_LEN_WEP_104:
189 cmd.keytype[i] = CMD_TYPE_WEP_104_BIT;
190 memmove(cmd.keymaterial[i], pkey->key, pkey->len);
191 lbs_deb_cmd("SET_WEP: add key %d (104 bit)\n", i);
192 break;
193 case 0:
194 break;
195 default:
196 lbs_deb_cmd("SET_WEP: invalid key %d, length %d\n",
197 i, pkey->len);
198 ret = -1;
199 goto done;
200 break;
201 }
202 }
203 } else if (cmd_action == CMD_ACT_REMOVE) {
204 /* ACT_REMOVE clears _all_ WEP keys */
205
206 /* default tx key index */
207 cmd.keyindex = cpu_to_le16(priv->wep_tx_keyidx &
208 CMD_WEP_KEY_INDEX_MASK);
209 lbs_deb_cmd("SET_WEP: remove key %d\n", priv->wep_tx_keyidx);
210 }
211
212 ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd);
213done:
214 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
215 return ret;
216}
217
/**
 * @brief Enable/disable RSN (WPA) in the firmware, or query the setting.
 *
 * @param priv		A pointer to struct lbs_private
 * @param cmd_action	CMD_ACT_GET to query, anything else to set
 * @param enable	For SET: in, nonzero enables RSN, zero disables it.
 *			For GET: out, receives the firmware's current state.
 *
 * @return 0 on success, nonzero on command failure
 */
int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
			      uint16_t *enable)
{
	struct cmd_ds_802_11_enable_rsn cmd;
	int ret;

	lbs_deb_enter(LBS_DEB_CMD);

	/* NOTE(review): cmd is not zeroed here; hdr.command is presumably
	 * filled in by the lbs_cmd_with_response() machinery — confirm */
	cmd.hdr.size = cpu_to_le16(sizeof(cmd));
	cmd.action = cpu_to_le16(cmd_action);

	if (cmd_action == CMD_ACT_GET)
		cmd.enable = 0;
	else {
		if (*enable)
			cmd.enable = cpu_to_le16(CMD_ENABLE_RSN);
		else
			cmd.enable = cpu_to_le16(CMD_DISABLE_RSN);
		lbs_deb_cmd("ENABLE_RSN: %d\n", *enable);
	}

	ret = lbs_cmd_with_response(priv, CMD_802_11_ENABLE_RSN, &cmd);
	/* on a successful GET, hand the firmware's answer back */
	if (!ret && cmd_action == CMD_ACT_GET)
		*enable = le16_to_cpu(cmd.enable);

	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
	return ret;
}
246
247static void set_one_wpa_key(struct MrvlIEtype_keyParamSet *keyparam,
248 struct enc_key *key)
249{
250 lbs_deb_enter(LBS_DEB_CMD);
251
252 if (key->flags & KEY_INFO_WPA_ENABLED)
253 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_ENABLED);
254 if (key->flags & KEY_INFO_WPA_UNICAST)
255 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_UNICAST);
256 if (key->flags & KEY_INFO_WPA_MCAST)
257 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_MCAST);
258
259 keyparam->type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
260 keyparam->keytypeid = cpu_to_le16(key->type);
261 keyparam->keylen = cpu_to_le16(key->len);
262 memcpy(keyparam->key, key->key, key->len);
263
264 /* Length field doesn't include the {type,length} header */
265 keyparam->length = cpu_to_le16(sizeof(*keyparam) - 4);
266 lbs_deb_leave(LBS_DEB_CMD);
267}
268
269int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
270 struct assoc_request *assoc)
271{
272 struct cmd_ds_802_11_key_material cmd;
273 int ret = 0;
274 int index = 0;
275
276 lbs_deb_enter(LBS_DEB_CMD);
277
278 cmd.action = cpu_to_le16(cmd_action);
279 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
280
281 if (cmd_action == CMD_ACT_GET) {
282 cmd.hdr.size = cpu_to_le16(sizeof(struct cmd_header) + 2);
283 } else {
284 memset(cmd.keyParamSet, 0, sizeof(cmd.keyParamSet));
285
286 if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc->flags)) {
287 set_one_wpa_key(&cmd.keyParamSet[index],
288 &assoc->wpa_unicast_key);
289 index++;
290 }
291
292 if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc->flags)) {
293 set_one_wpa_key(&cmd.keyParamSet[index],
294 &assoc->wpa_mcast_key);
295 index++;
296 }
297
298 /* The common header and as many keys as we included */
299 cmd.hdr.size = cpu_to_le16(offsetof(typeof(cmd),
300 keyParamSet[index]));
301 }
302 ret = lbs_cmd_with_response(priv, CMD_802_11_KEY_MATERIAL, &cmd);
303 /* Copy the returned key to driver private data */
304 if (!ret && cmd_action == CMD_ACT_GET) {
305 void *buf_ptr = cmd.keyParamSet;
306 void *resp_end = &(&cmd)[1];
307
308 while (buf_ptr < resp_end) {
309 struct MrvlIEtype_keyParamSet *keyparam = buf_ptr;
310 struct enc_key *key;
311 uint16_t param_set_len = le16_to_cpu(keyparam->length);
312 uint16_t key_len = le16_to_cpu(keyparam->keylen);
313 uint16_t key_flags = le16_to_cpu(keyparam->keyinfo);
314 uint16_t key_type = le16_to_cpu(keyparam->keytypeid);
315 void *end;
316
317 end = (void *)keyparam + sizeof(keyparam->type)
318 + sizeof(keyparam->length) + param_set_len;
319
320 /* Make sure we don't access past the end of the IEs */
321 if (end > resp_end)
322 break;
323
324 if (key_flags & KEY_INFO_WPA_UNICAST)
325 key = &priv->wpa_unicast_key;
326 else if (key_flags & KEY_INFO_WPA_MCAST)
327 key = &priv->wpa_mcast_key;
328 else
329 break;
330
331 /* Copy returned key into driver */
332 memset(key, 0, sizeof(struct enc_key));
333 if (key_len > sizeof(key->key))
334 break;
335 key->type = key_type;
336 key->flags = key_flags;
337 key->len = key_len;
338 memcpy(key->key, keyparam->key, key->len);
339
340 buf_ptr = end + 1;
341 }
342 }
343
344 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
345 return ret;
346}
347
348static __le16 lbs_rate_to_fw_bitmap(int rate, int lower_rates_ok)
349{
350/* Bit Rate
351* 15:13 Reserved
352* 12 54 Mbps
353* 11 48 Mbps
354* 10 36 Mbps
355* 9 24 Mbps
356* 8 18 Mbps
357* 7 12 Mbps
358* 6 9 Mbps
359* 5 6 Mbps
360* 4 Reserved
361* 3 11 Mbps
362* 2 5.5 Mbps
363* 1 2 Mbps
364* 0 1 Mbps
365**/
366
367 uint16_t ratemask;
368 int i = lbs_data_rate_to_fw_index(rate);
369 if (lower_rates_ok)
370 ratemask = (0x1fef >> (12 - i));
371 else
372 ratemask = (1 << i);
373 return cpu_to_le16(ratemask);
374}
375
/**
 * @brief Set or query the firmware's rate adaptation rate set.
 *
 * @param priv		A pointer to struct lbs_private; cur_rate and
 *			enablehwauto are read, and for GET ratebitmap and
 *			enablehwauto are updated from the response
 * @param cmd_action	CMD_ACT_GET or CMD_ACT_SET
 *
 * @return 0 on success, -EINVAL for an inconsistent configuration,
 *	   nonzero on command failure
 */
int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
				      uint16_t cmd_action)
{
	struct cmd_ds_802_11_rate_adapt_rateset cmd;
	int ret;

	lbs_deb_enter(LBS_DEB_CMD);

	/* no fixed rate and no hardware auto-rate is meaningless
	 * NOTE(review): this early return skips the lbs_deb_leave trace */
	if (!priv->cur_rate && !priv->enablehwauto)
		return -EINVAL;

	cmd.hdr.size = cpu_to_le16(sizeof(cmd));

	cmd.action = cpu_to_le16(cmd_action);
	cmd.enablehwauto = cpu_to_le16(priv->enablehwauto);
	cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto);
	ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd);
	/* on a successful GET, cache the firmware's answer */
	if (!ret && cmd_action == CMD_ACT_GET) {
		priv->ratebitmap = le16_to_cpu(cmd.bitmap);
		priv->enablehwauto = le16_to_cpu(cmd.enablehwauto);
	}

	lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
	return ret;
}
401
402/**
403 * @brief Set the data rate
404 *
405 * @param priv A pointer to struct lbs_private structure
406 * @param rate The desired data rate, or 0 to clear a locked rate
407 *
408 * @return 0 on success, error on failure
409 */
410int lbs_set_data_rate(struct lbs_private *priv, u8 rate)
411{
412 struct cmd_ds_802_11_data_rate cmd;
413 int ret = 0;
414
415 lbs_deb_enter(LBS_DEB_CMD);
416
417 memset(&cmd, 0, sizeof(cmd));
418 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
419
420 if (rate > 0) {
421 cmd.action = cpu_to_le16(CMD_ACT_SET_TX_FIX_RATE);
422 cmd.rates[0] = lbs_data_rate_to_fw_index(rate);
423 if (cmd.rates[0] == 0) {
424 lbs_deb_cmd("DATA_RATE: invalid requested rate of"
425 " 0x%02X\n", rate);
426 ret = 0;
427 goto out;
428 }
429 lbs_deb_cmd("DATA_RATE: set fixed 0x%02X\n", cmd.rates[0]);
430 } else {
431 cmd.action = cpu_to_le16(CMD_ACT_SET_TX_AUTO);
432 lbs_deb_cmd("DATA_RATE: setting auto\n");
433 }
434
435 ret = lbs_cmd_with_response(priv, CMD_802_11_DATA_RATE, &cmd);
436 if (ret)
437 goto out;
438
439 lbs_deb_hex(LBS_DEB_CMD, "DATA_RATE_RESP", (u8 *) &cmd, sizeof(cmd));
440
441 /* FIXME: get actual rates FW can do if this command actually returns
442 * all data rates supported.
443 */
444 priv->cur_rate = lbs_fw_index_to_data_rate(cmd.rates[0]);
445 lbs_deb_cmd("DATA_RATE: current rate is 0x%02x\n", priv->cur_rate);
446
447out:
448 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
449 return ret;
450}
451
452
/**
 * @brief Build a CMD_802_11_RSSI request and reset the cached beacon
 *        signal statistics so stale values are not reported while the
 *        command is in flight.
 *
 * @param priv	A pointer to struct lbs_private
 * @param cmd	The command buffer to fill in
 *
 * @return always 0
 */
int lbs_cmd_802_11_rssi(struct lbs_private *priv,
			struct cmd_ds_command *cmd)
{

	lbs_deb_enter(LBS_DEB_CMD);
	cmd->command = cpu_to_le16(CMD_802_11_RSSI);
	cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_rssi) +
				sizeof(struct cmd_header));
	/* N: averaging factor (DEFAULT_BCN_AVG_FACTOR) for the firmware */
	cmd->params.rssi.N = cpu_to_le16(DEFAULT_BCN_AVG_FACTOR);

	/* reset Beacon SNR/NF/RSSI values */
	priv->SNR[TYPE_BEACON][TYPE_NOAVG] = 0;
	priv->SNR[TYPE_BEACON][TYPE_AVG] = 0;
	priv->NF[TYPE_BEACON][TYPE_NOAVG] = 0;
	priv->NF[TYPE_BEACON][TYPE_AVG] = 0;
	priv->RSSI[TYPE_BEACON][TYPE_NOAVG] = 0;
	priv->RSSI[TYPE_BEACON][TYPE_AVG] = 0;

	lbs_deb_leave(LBS_DEB_CMD);
	return 0;
}
474
/**
 * @brief Handle the CMD_802_11_RSSI response: cache the reported SNR and
 *        noise floor and derive RSSI values from them.
 *
 * @param priv	A pointer to struct lbs_private
 * @param resp	The firmware response containing cmd_ds_802_11_rssi_rsp
 *
 * @return always 0
 */
int lbs_ret_802_11_rssi(struct lbs_private *priv,
			struct cmd_ds_command *resp)
{
	struct cmd_ds_802_11_rssi_rsp *rssirsp = &resp->params.rssirsp;

	lbs_deb_enter(LBS_DEB_CMD);

	/* store the non average value */
	priv->SNR[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->SNR);
	priv->NF[TYPE_BEACON][TYPE_NOAVG] =
		get_unaligned_le16(&rssirsp->noisefloor);

	priv->SNR[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgSNR);
	priv->NF[TYPE_BEACON][TYPE_AVG] =
		get_unaligned_le16(&rssirsp->avgnoisefloor);

	/* RSSI is derived from SNR and noise floor via CAL_RSSI() */
	priv->RSSI[TYPE_BEACON][TYPE_NOAVG] =
		CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_NOAVG],
			 priv->NF[TYPE_BEACON][TYPE_NOAVG]);

	/* averaged values are divided by AVG_SCALE first — they appear to
	 * be stored pre-scaled by the firmware; confirm against spec */
	priv->RSSI[TYPE_BEACON][TYPE_AVG] =
		CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_AVG] / AVG_SCALE,
			 priv->NF[TYPE_BEACON][TYPE_AVG] / AVG_SCALE);

	lbs_deb_cmd("RSSI: beacon %d, avg %d\n",
		    priv->RSSI[TYPE_BEACON][TYPE_NOAVG],
		    priv->RSSI[TYPE_BEACON][TYPE_AVG]);

	lbs_deb_leave(LBS_DEB_CMD);
	return 0;
}
506
507
508int lbs_cmd_bcn_ctrl(struct lbs_private *priv,
509 struct cmd_ds_command *cmd,
510 u16 cmd_action)
511{
512 struct cmd_ds_802_11_beacon_control
513 *bcn_ctrl = &cmd->params.bcn_ctrl;
514
515 lbs_deb_enter(LBS_DEB_CMD);
516 cmd->size =
517 cpu_to_le16(sizeof(struct cmd_ds_802_11_beacon_control)
518 + sizeof(struct cmd_header));
519 cmd->command = cpu_to_le16(CMD_802_11_BEACON_CTRL);
520
521 bcn_ctrl->action = cpu_to_le16(cmd_action);
522 bcn_ctrl->beacon_enable = cpu_to_le16(priv->beacon_enable);
523 bcn_ctrl->beacon_period = cpu_to_le16(priv->beacon_period);
524
525 lbs_deb_leave(LBS_DEB_CMD);
526 return 0;
527}
528
529int lbs_ret_802_11_bcn_ctrl(struct lbs_private *priv,
530 struct cmd_ds_command *resp)
531{
532 struct cmd_ds_802_11_beacon_control *bcn_ctrl =
533 &resp->params.bcn_ctrl;
534
535 lbs_deb_enter(LBS_DEB_CMD);
536
537 if (bcn_ctrl->action == CMD_ACT_GET) {
538 priv->beacon_enable = (u8) le16_to_cpu(bcn_ctrl->beacon_enable);
539 priv->beacon_period = le16_to_cpu(bcn_ctrl->beacon_period);
540 }
541
542 lbs_deb_enter(LBS_DEB_CMD);
543 return 0;
544}
545
546
547
150static int lbs_assoc_post(struct lbs_private *priv, 548static int lbs_assoc_post(struct lbs_private *priv,
151 struct cmd_ds_802_11_associate_response *resp) 549 struct cmd_ds_802_11_associate_response *resp)
152{ 550{
@@ -226,7 +624,7 @@ static int lbs_assoc_post(struct lbs_private *priv,
226 priv->connect_status = LBS_CONNECTED; 624 priv->connect_status = LBS_CONNECTED;
227 625
228 /* Update current SSID and BSSID */ 626 /* Update current SSID and BSSID */
229 memcpy(&priv->curbssparams.ssid, &bss->ssid, IW_ESSID_MAX_SIZE); 627 memcpy(&priv->curbssparams.ssid, &bss->ssid, IEEE80211_MAX_SSID_LEN);
230 priv->curbssparams.ssid_len = bss->ssid_len; 628 priv->curbssparams.ssid_len = bss->ssid_len;
231 memcpy(priv->curbssparams.bssid, bss->bssid, ETH_ALEN); 629 memcpy(priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
232 630
@@ -369,12 +767,7 @@ static int lbs_associate(struct lbs_private *priv,
369 (u16)(pos - (u8 *) &cmd.iebuf)); 767 (u16)(pos - (u8 *) &cmd.iebuf));
370 768
371 /* update curbssparams */ 769 /* update curbssparams */
372 priv->curbssparams.channel = bss->phy.ds.channel; 770 priv->channel = bss->phy.ds.channel;
373
374 if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
375 ret = -1;
376 goto done;
377 }
378 771
379 ret = lbs_cmd_with_response(priv, command, &cmd); 772 ret = lbs_cmd_with_response(priv, command, &cmd);
380 if (ret == 0) { 773 if (ret == 0) {
@@ -472,7 +865,7 @@ static int lbs_adhoc_post(struct lbs_private *priv,
472 memcpy(&priv->curbssparams.bssid, bss->bssid, ETH_ALEN); 865 memcpy(&priv->curbssparams.bssid, bss->bssid, ETH_ALEN);
473 866
474 /* Set the new SSID to current SSID */ 867 /* Set the new SSID to current SSID */
475 memcpy(&priv->curbssparams.ssid, &bss->ssid, IW_ESSID_MAX_SIZE); 868 memcpy(&priv->curbssparams.ssid, &bss->ssid, IEEE80211_MAX_SSID_LEN);
476 priv->curbssparams.ssid_len = bss->ssid_len; 869 priv->curbssparams.ssid_len = bss->ssid_len;
477 870
478 netif_carrier_on(priv->dev); 871 netif_carrier_on(priv->dev);
@@ -487,7 +880,7 @@ static int lbs_adhoc_post(struct lbs_private *priv,
487 lbs_deb_join("ADHOC_RESP: Joined/started '%s', BSSID %pM, channel %d\n", 880 lbs_deb_join("ADHOC_RESP: Joined/started '%s', BSSID %pM, channel %d\n",
488 print_ssid(ssid, bss->ssid, bss->ssid_len), 881 print_ssid(ssid, bss->ssid, bss->ssid_len),
489 priv->curbssparams.bssid, 882 priv->curbssparams.bssid,
490 priv->curbssparams.channel); 883 priv->channel);
491 884
492done: 885done:
493 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret); 886 lbs_deb_leave_args(LBS_DEB_JOIN, "ret %d", ret);
@@ -560,7 +953,7 @@ static int lbs_adhoc_join(struct lbs_private *priv,
560 lbs_deb_join("AdhocJoin: band = %c\n", assoc_req->band); 953 lbs_deb_join("AdhocJoin: band = %c\n", assoc_req->band);
561 954
562 priv->adhoccreate = 0; 955 priv->adhoccreate = 0;
563 priv->curbssparams.channel = bss->channel; 956 priv->channel = bss->channel;
564 957
565 /* Build the join command */ 958 /* Build the join command */
566 memset(&cmd, 0, sizeof(cmd)); 959 memset(&cmd, 0, sizeof(cmd));
@@ -633,11 +1026,6 @@ static int lbs_adhoc_join(struct lbs_private *priv,
633 } 1026 }
634 } 1027 }
635 1028
636 if (lbs_parse_dnld_countryinfo_11d(priv, bss)) {
637 ret = -1;
638 goto out;
639 }
640
641 ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd); 1029 ret = lbs_cmd_with_response(priv, CMD_802_11_AD_HOC_JOIN, &cmd);
642 if (ret == 0) { 1030 if (ret == 0) {
643 ret = lbs_adhoc_post(priv, 1031 ret = lbs_adhoc_post(priv,
@@ -737,12 +1125,6 @@ static int lbs_adhoc_start(struct lbs_private *priv,
737 lbs_deb_join("ADHOC_START: rates=%02x %02x %02x %02x\n", 1125 lbs_deb_join("ADHOC_START: rates=%02x %02x %02x %02x\n",
738 cmd.rates[0], cmd.rates[1], cmd.rates[2], cmd.rates[3]); 1126 cmd.rates[0], cmd.rates[1], cmd.rates[2], cmd.rates[3]);
739 1127
740 if (lbs_create_dnld_countryinfo_11d(priv)) {
741 lbs_deb_join("ADHOC_START: dnld_countryinfo_11d failed\n");
742 ret = -1;
743 goto out;
744 }
745
746 lbs_deb_join("ADHOC_START: Starting Ad-Hoc BSS on channel %d, band %d\n", 1128 lbs_deb_join("ADHOC_START: Starting Ad-Hoc BSS on channel %d, band %d\n",
747 assoc_req->channel, assoc_req->band); 1129 assoc_req->channel, assoc_req->band);
748 1130
@@ -1099,7 +1481,7 @@ static int assoc_helper_essid(struct lbs_private *priv,
1099 /* else send START command */ 1481 /* else send START command */
1100 lbs_deb_assoc("SSID not found, creating adhoc network\n"); 1482 lbs_deb_assoc("SSID not found, creating adhoc network\n");
1101 memcpy(&assoc_req->bss.ssid, &assoc_req->ssid, 1483 memcpy(&assoc_req->bss.ssid, &assoc_req->ssid,
1102 IW_ESSID_MAX_SIZE); 1484 IEEE80211_MAX_SSID_LEN);
1103 assoc_req->bss.ssid_len = assoc_req->ssid_len; 1485 assoc_req->bss.ssid_len = assoc_req->ssid_len;
1104 lbs_adhoc_start(priv, assoc_req); 1486 lbs_adhoc_start(priv, assoc_req);
1105 } 1487 }
@@ -1185,7 +1567,8 @@ static int assoc_helper_mode(struct lbs_private *priv,
1185 } 1567 }
1186 1568
1187 priv->mode = assoc_req->mode; 1569 priv->mode = assoc_req->mode;
1188 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE, assoc_req->mode); 1570 ret = lbs_set_snmp_mib(priv, SNMP_MIB_OID_BSS_TYPE,
1571 assoc_req->mode == IW_MODE_ADHOC ? 2 : 1);
1189 1572
1190done: 1573done:
1191 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); 1574 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
@@ -1205,7 +1588,7 @@ static int assoc_helper_channel(struct lbs_private *priv,
1205 goto done; 1588 goto done;
1206 } 1589 }
1207 1590
1208 if (assoc_req->channel == priv->curbssparams.channel) 1591 if (assoc_req->channel == priv->channel)
1209 goto done; 1592 goto done;
1210 1593
1211 if (priv->mesh_dev) { 1594 if (priv->mesh_dev) {
@@ -1217,7 +1600,7 @@ static int assoc_helper_channel(struct lbs_private *priv,
1217 } 1600 }
1218 1601
1219 lbs_deb_assoc("ASSOC: channel: %d -> %d\n", 1602 lbs_deb_assoc("ASSOC: channel: %d -> %d\n",
1220 priv->curbssparams.channel, assoc_req->channel); 1603 priv->channel, assoc_req->channel);
1221 1604
1222 ret = lbs_set_channel(priv, assoc_req->channel); 1605 ret = lbs_set_channel(priv, assoc_req->channel);
1223 if (ret < 0) 1606 if (ret < 0)
@@ -1232,7 +1615,7 @@ static int assoc_helper_channel(struct lbs_private *priv,
1232 goto done; 1615 goto done;
1233 } 1616 }
1234 1617
1235 if (assoc_req->channel != priv->curbssparams.channel) { 1618 if (assoc_req->channel != priv->channel) {
1236 lbs_deb_assoc("ASSOC: channel: failed to update channel to %d\n", 1619 lbs_deb_assoc("ASSOC: channel: failed to update channel to %d\n",
1237 assoc_req->channel); 1620 assoc_req->channel);
1238 goto restore_mesh; 1621 goto restore_mesh;
@@ -1253,7 +1636,7 @@ static int assoc_helper_channel(struct lbs_private *priv,
1253 restore_mesh: 1636 restore_mesh:
1254 if (priv->mesh_dev) 1637 if (priv->mesh_dev)
1255 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 1638 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
1256 priv->curbssparams.channel); 1639 priv->channel);
1257 1640
1258 done: 1641 done:
1259 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); 1642 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
@@ -1475,7 +1858,7 @@ static int should_stop_adhoc(struct lbs_private *priv,
1475 } 1858 }
1476 1859
1477 if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) { 1860 if (test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) {
1478 if (assoc_req->channel != priv->curbssparams.channel) 1861 if (assoc_req->channel != priv->channel)
1479 return 1; 1862 return 1;
1480 } 1863 }
1481 1864
@@ -1557,7 +1940,7 @@ static int lbs_find_best_network_ssid(struct lbs_private *priv,
1557 1940
1558 found = lbs_find_best_ssid_in_list(priv, preferred_mode); 1941 found = lbs_find_best_ssid_in_list(priv, preferred_mode);
1559 if (found && (found->ssid_len > 0)) { 1942 if (found && (found->ssid_len > 0)) {
1560 memcpy(out_ssid, &found->ssid, IW_ESSID_MAX_SIZE); 1943 memcpy(out_ssid, &found->ssid, IEEE80211_MAX_SSID_LEN);
1561 *out_ssid_len = found->ssid_len; 1944 *out_ssid_len = found->ssid_len;
1562 *out_mode = found->mode; 1945 *out_mode = found->mode;
1563 ret = 0; 1946 ret = 0;
@@ -1775,12 +2158,12 @@ struct assoc_request *lbs_get_association_request(struct lbs_private *priv)
1775 assoc_req = priv->pending_assoc_req; 2158 assoc_req = priv->pending_assoc_req;
1776 if (!test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) { 2159 if (!test_bit(ASSOC_FLAG_SSID, &assoc_req->flags)) {
1777 memcpy(&assoc_req->ssid, &priv->curbssparams.ssid, 2160 memcpy(&assoc_req->ssid, &priv->curbssparams.ssid,
1778 IW_ESSID_MAX_SIZE); 2161 IEEE80211_MAX_SSID_LEN);
1779 assoc_req->ssid_len = priv->curbssparams.ssid_len; 2162 assoc_req->ssid_len = priv->curbssparams.ssid_len;
1780 } 2163 }
1781 2164
1782 if (!test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags)) 2165 if (!test_bit(ASSOC_FLAG_CHANNEL, &assoc_req->flags))
1783 assoc_req->channel = priv->curbssparams.channel; 2166 assoc_req->channel = priv->channel;
1784 2167
1785 if (!test_bit(ASSOC_FLAG_BAND, &assoc_req->flags)) 2168 if (!test_bit(ASSOC_FLAG_BAND, &assoc_req->flags))
1786 assoc_req->band = priv->curbssparams.band; 2169 assoc_req->band = priv->curbssparams.band;
diff --git a/drivers/net/wireless/libertas/assoc.h b/drivers/net/wireless/libertas/assoc.h
index 6e765e9f91a3..40621b789fc5 100644
--- a/drivers/net/wireless/libertas/assoc.h
+++ b/drivers/net/wireless/libertas/assoc.h
@@ -3,7 +3,126 @@
3#ifndef _LBS_ASSOC_H_ 3#ifndef _LBS_ASSOC_H_
4#define _LBS_ASSOC_H_ 4#define _LBS_ASSOC_H_
5 5
6#include "dev.h" 6
7#include "defs.h"
8#include "host.h"
9
10
11struct lbs_private;
12
13/*
14 * In theory, the IE is limited to the IE length, 255,
15 * but in practice 64 bytes are enough.
16 */
17#define MAX_WPA_IE_LEN 64
18
19
20
21struct lbs_802_11_security {
22 u8 WPAenabled;
23 u8 WPA2enabled;
24 u8 wep_enabled;
25 u8 auth_mode;
26 u32 key_mgmt;
27};
28
29/** Current Basic Service Set State Structure */
30struct current_bss_params {
31 /** bssid */
32 u8 bssid[ETH_ALEN];
33 /** ssid */
34 u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
35 u8 ssid_len;
36
37 /** band */
38 u8 band;
39 /** channel is directly in priv->channel */
40 /** zero-terminated array of supported data rates */
41 u8 rates[MAX_RATES + 1];
42};
43
44/**
45 * @brief Structure used to store information for each beacon/probe response
46 */
47struct bss_descriptor {
48 u8 bssid[ETH_ALEN];
49
50 u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
51 u8 ssid_len;
52
53 u16 capability;
54 u32 rssi;
55 u32 channel;
56 u16 beaconperiod;
57 __le16 atimwindow;
58
59 /* IW_MODE_AUTO, IW_MODE_ADHOC, IW_MODE_INFRA */
60 u8 mode;
61
62 /* zero-terminated array of supported data rates */
63 u8 rates[MAX_RATES + 1];
64
65 unsigned long last_scanned;
66
67 union ieee_phy_param_set phy;
68 union ieee_ss_param_set ss;
69
70 u8 wpa_ie[MAX_WPA_IE_LEN];
71 size_t wpa_ie_len;
72 u8 rsn_ie[MAX_WPA_IE_LEN];
73 size_t rsn_ie_len;
74
75 u8 mesh;
76
77 struct list_head list;
78};
79
80/** Association request
81 *
82 * Encapsulates all the options that describe a specific assocation request
83 * or configuration of the wireless card's radio, mode, and security settings.
84 */
85struct assoc_request {
86#define ASSOC_FLAG_SSID 1
87#define ASSOC_FLAG_CHANNEL 2
88#define ASSOC_FLAG_BAND 3
89#define ASSOC_FLAG_MODE 4
90#define ASSOC_FLAG_BSSID 5
91#define ASSOC_FLAG_WEP_KEYS 6
92#define ASSOC_FLAG_WEP_TX_KEYIDX 7
93#define ASSOC_FLAG_WPA_MCAST_KEY 8
94#define ASSOC_FLAG_WPA_UCAST_KEY 9
95#define ASSOC_FLAG_SECINFO 10
96#define ASSOC_FLAG_WPA_IE 11
97 unsigned long flags;
98
99 u8 ssid[IEEE80211_MAX_SSID_LEN + 1];
100 u8 ssid_len;
101 u8 channel;
102 u8 band;
103 u8 mode;
104 u8 bssid[ETH_ALEN] __attribute__ ((aligned (2)));
105
106 /** WEP keys */
107 struct enc_key wep_keys[4];
108 u16 wep_tx_keyidx;
109
110 /** WPA keys */
111 struct enc_key wpa_mcast_key;
112 struct enc_key wpa_unicast_key;
113
114 struct lbs_802_11_security secinfo;
115
116 /** WPA Information Elements*/
117 u8 wpa_ie[MAX_WPA_IE_LEN];
118 u8 wpa_ie_len;
119
120 /* BSS to associate with for infrastructure of Ad-Hoc join */
121 struct bss_descriptor bss;
122};
123
124
125extern u8 lbs_bg_rates[MAX_RATES];
7 126
8void lbs_association_worker(struct work_struct *work); 127void lbs_association_worker(struct work_struct *work);
9struct assoc_request *lbs_get_association_request(struct lbs_private *priv); 128struct assoc_request *lbs_get_association_request(struct lbs_private *priv);
@@ -13,4 +132,24 @@ int lbs_adhoc_stop(struct lbs_private *priv);
13int lbs_cmd_80211_deauthenticate(struct lbs_private *priv, 132int lbs_cmd_80211_deauthenticate(struct lbs_private *priv,
14 u8 bssid[ETH_ALEN], u16 reason); 133 u8 bssid[ETH_ALEN], u16 reason);
15 134
135int lbs_cmd_802_11_rssi(struct lbs_private *priv,
136 struct cmd_ds_command *cmd);
137int lbs_ret_802_11_rssi(struct lbs_private *priv,
138 struct cmd_ds_command *resp);
139
140int lbs_cmd_bcn_ctrl(struct lbs_private *priv,
141 struct cmd_ds_command *cmd,
142 u16 cmd_action);
143int lbs_ret_802_11_bcn_ctrl(struct lbs_private *priv,
144 struct cmd_ds_command *resp);
145
146int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
147 struct assoc_request *assoc);
148
149int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
150 uint16_t *enable);
151
152int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
153 struct assoc_request *assoc);
154
16#endif /* _LBS_ASSOC_H */ 155#endif /* _LBS_ASSOC_H */
diff --git a/drivers/net/wireless/libertas/cfg.c b/drivers/net/wireless/libertas/cfg.c
new file mode 100644
index 000000000000..4396dccd12ac
--- /dev/null
+++ b/drivers/net/wireless/libertas/cfg.c
@@ -0,0 +1,198 @@
1/*
2 * Implement cfg80211 ("iw") support.
3 *
4 * Copyright (C) 2009 M&N Solutions GmbH, 61191 Rosbach, Germany
5 * Holger Schurig <hs4233@mail.mn-solutions.de>
6 *
7 */
8
9#include <net/cfg80211.h>
10
11#include "cfg.h"
12#include "cmd.h"
13
14
/* Declare one 2.4 GHz channel entry for cfg80211 */
#define CHAN2G(_channel, _freq, _flags) { \
	.band = IEEE80211_BAND_2GHZ, \
	.center_freq = (_freq), \
	.hw_value = (_channel), \
	.flags = (_flags), \
	.max_antenna_gain = 0, \
	.max_power = 30, \
}

/* All 14 possible 2.4 GHz channels; regulatory restrictions are not
 * applied here (see the regioncode TODO in lbs_cfg_register()) */
static struct ieee80211_channel lbs_2ghz_channels[] = {
	CHAN2G(1, 2412, 0),
	CHAN2G(2, 2417, 0),
	CHAN2G(3, 2422, 0),
	CHAN2G(4, 2427, 0),
	CHAN2G(5, 2432, 0),
	CHAN2G(6, 2437, 0),
	CHAN2G(7, 2442, 0),
	CHAN2G(8, 2447, 0),
	CHAN2G(9, 2452, 0),
	CHAN2G(10, 2457, 0),
	CHAN2G(11, 2462, 0),
	CHAN2G(12, 2467, 0),
	CHAN2G(13, 2472, 0),
	CHAN2G(14, 2484, 0),
};
40
/* Declare one bitrate entry for cfg80211 */
#define RATETAB_ENT(_rate, _rateid, _flags) { \
	.bitrate = (_rate), \
	.hw_value = (_rateid), \
	.flags = (_flags), \
}


/* 802.11b/g bitrates in units of 100 kb/s (cfg80211 convention);
 * hw_value looks like a one-bit-per-rate position — confirm against the
 * firmware rate bitmap in lbs_rate_to_fw_bitmap() */
static struct ieee80211_rate lbs_rates[] = {
	RATETAB_ENT(10, 0x1, 0),
	RATETAB_ENT(20, 0x2, 0),
	RATETAB_ENT(55, 0x4, 0),
	RATETAB_ENT(110, 0x8, 0),
	RATETAB_ENT(60, 0x10, 0),
	RATETAB_ENT(90, 0x20, 0),
	RATETAB_ENT(120, 0x40, 0),
	RATETAB_ENT(180, 0x80, 0),
	RATETAB_ENT(240, 0x100, 0),
	RATETAB_ENT(360, 0x200, 0),
	RATETAB_ENT(480, 0x400, 0),
	RATETAB_ENT(540, 0x800, 0),
};

/* The only band this hardware supports: 2.4 GHz b/g */
static struct ieee80211_supported_band lbs_band_2ghz = {
	.channels = lbs_2ghz_channels,
	.n_channels = ARRAY_SIZE(lbs_2ghz_channels),
	.bitrates = lbs_rates,
	.n_bitrates = ARRAY_SIZE(lbs_rates),
};


/* Ciphers advertised to cfg80211 (see lbs_cfg_register()) */
static const u32 cipher_suites[] = {
	WLAN_CIPHER_SUITE_WEP40,
	WLAN_CIPHER_SUITE_WEP104,
	WLAN_CIPHER_SUITE_TKIP,
	WLAN_CIPHER_SUITE_CCMP,
};
77
78
79
/*
 * cfg80211 .set_channel handler.  Only plain (no-HT) channel changes are
 * supported; anything else is rejected with -ENOTSUPP.
 */
static int lbs_cfg_set_channel(struct wiphy *wiphy,
			       struct ieee80211_channel *chan,
			       enum nl80211_channel_type channel_type)
{
	struct lbs_private *priv = wiphy_priv(wiphy);
	int ret = -ENOTSUPP;

	lbs_deb_enter_args(LBS_DEB_CFG80211, "freq %d, type %d", chan->center_freq, channel_type);

	if (channel_type != NL80211_CHAN_NO_HT)
		goto out;

	/* hw_value holds the channel number (see CHAN2G above) */
	ret = lbs_set_channel(priv, chan->hw_value);

 out:
	lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
	return ret;
}
98
99
100
101
/* cfg80211 operations implemented so far; more are added as the
 * cfg80211 port of this driver progresses */
static struct cfg80211_ops lbs_cfg80211_ops = {
	.set_channel = lbs_cfg_set_channel,
};
105
106
/*
 * At this time lbs_private *priv doesn't even exist, so we just allocate
 * memory and don't initialize the wiphy further. This is postponed until we
 * can talk to the firmware and happens at registration time in
 * lbs_cfg_register().
 */
113struct wireless_dev *lbs_cfg_alloc(struct device *dev)
114{
115 int ret = 0;
116 struct wireless_dev *wdev;
117
118 lbs_deb_enter(LBS_DEB_CFG80211);
119
120 wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL);
121 if (!wdev) {
122 dev_err(dev, "cannot allocate wireless device\n");
123 return ERR_PTR(-ENOMEM);
124 }
125
126 wdev->wiphy = wiphy_new(&lbs_cfg80211_ops, sizeof(struct lbs_private));
127 if (!wdev->wiphy) {
128 dev_err(dev, "cannot allocate wiphy\n");
129 ret = -ENOMEM;
130 goto err_wiphy_new;
131 }
132
133 lbs_deb_leave(LBS_DEB_CFG80211);
134 return wdev;
135
136 err_wiphy_new:
137 kfree(wdev);
138 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
139 return ERR_PTR(ret);
140}
141
142
/*
 * This function gets called after lbs_setup_firmware() has determined the
 * firmware capabilities, so we can set up the wiphy according to our
 * hardware/firmware.
 */
148int lbs_cfg_register(struct lbs_private *priv)
149{
150 struct wireless_dev *wdev = priv->wdev;
151 int ret;
152
153 lbs_deb_enter(LBS_DEB_CFG80211);
154
155 wdev->wiphy->max_scan_ssids = 1;
156 wdev->wiphy->signal_type = CFG80211_SIGNAL_TYPE_MBM;
157
158 /* TODO: BIT(NL80211_IFTYPE_ADHOC); */
159 wdev->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
160
161 /* TODO: honor priv->regioncode */
162 wdev->wiphy->bands[IEEE80211_BAND_2GHZ] = &lbs_band_2ghz;
163
164 /*
165 * We could check priv->fwcapinfo && FW_CAPINFO_WPA, but I have
166 * never seen a firmware without WPA
167 */
168 wdev->wiphy->cipher_suites = cipher_suites;
169 wdev->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
170
171 ret = wiphy_register(wdev->wiphy);
172 if (ret < 0)
173 lbs_pr_err("cannot register wiphy device\n");
174
175 ret = register_netdev(priv->dev);
176 if (ret)
177 lbs_pr_err("cannot register network device\n");
178
179 lbs_deb_leave_args(LBS_DEB_CFG80211, "ret %d", ret);
180 return ret;
181}
182
183
184void lbs_cfg_free(struct lbs_private *priv)
185{
186 struct wireless_dev *wdev = priv->wdev;
187
188 lbs_deb_enter(LBS_DEB_CFG80211);
189
190 if (!wdev)
191 return;
192
193 if (wdev->wiphy) {
194 wiphy_unregister(wdev->wiphy);
195 wiphy_free(wdev->wiphy);
196 }
197 kfree(wdev);
198}
diff --git a/drivers/net/wireless/libertas/cfg.h b/drivers/net/wireless/libertas/cfg.h
new file mode 100644
index 000000000000..e09a193a34d6
--- /dev/null
+++ b/drivers/net/wireless/libertas/cfg.h
@@ -0,0 +1,16 @@
1#ifndef __LBS_CFG80211_H__
2#define __LBS_CFG80211_H__
3
4#include "dev.h"
5
6struct wireless_dev *lbs_cfg_alloc(struct device *dev);
7int lbs_cfg_register(struct lbs_private *priv);
8void lbs_cfg_free(struct lbs_private *priv);
9
10int lbs_send_specific_ssid_scan(struct lbs_private *priv, u8 *ssid,
11 u8 ssid_len);
12int lbs_scan_networks(struct lbs_private *priv, int full_scan);
13void lbs_cfg_scan_worker(struct work_struct *work);
14
15
16#endif
diff --git a/drivers/net/wireless/libertas/cmd.c b/drivers/net/wireless/libertas/cmd.c
index 685098148e10..1065ce29cd08 100644
--- a/drivers/net/wireless/libertas/cmd.c
+++ b/drivers/net/wireless/libertas/cmd.c
@@ -3,20 +3,21 @@
3 * It prepares command and sends it to firmware when it is ready. 3 * It prepares command and sends it to firmware when it is ready.
4 */ 4 */
5 5
6#include <net/iw_handler.h>
7#include <net/lib80211.h> 6#include <net/lib80211.h>
8#include <linux/kfifo.h> 7#include <linux/kfifo.h>
8#include <linux/sched.h>
9
9#include "host.h" 10#include "host.h"
10#include "hostcmd.h"
11#include "decl.h" 11#include "decl.h"
12#include "defs.h" 12#include "defs.h"
13#include "dev.h" 13#include "dev.h"
14#include "assoc.h" 14#include "assoc.h"
15#include "wext.h" 15#include "wext.h"
16#include "scan.h"
16#include "cmd.h" 17#include "cmd.h"
17 18
18static struct cmd_ctrl_node *lbs_get_cmd_ctrl_node(struct lbs_private *priv);
19 19
20static struct cmd_ctrl_node *lbs_get_cmd_ctrl_node(struct lbs_private *priv);
20 21
21/** 22/**
22 * @brief Simple callback that copies response back into command 23 * @brief Simple callback that copies response back into command
@@ -76,6 +77,30 @@ static u8 is_command_allowed_in_ps(u16 cmd)
76} 77}
77 78
78/** 79/**
80 * @brief This function checks if the command is allowed.
81 *
82 * @param priv A pointer to lbs_private structure
83 * @return allowed or not allowed.
84 */
85
86static int lbs_is_cmd_allowed(struct lbs_private *priv)
87{
88 int ret = 1;
89
90 lbs_deb_enter(LBS_DEB_CMD);
91
92 if (!priv->is_auto_deep_sleep_enabled) {
93 if (priv->is_deep_sleep) {
94 lbs_deb_cmd("command not allowed in deep sleep\n");
95 ret = 0;
96 }
97 }
98
99 lbs_deb_leave(LBS_DEB_CMD);
100 return ret;
101}
102
103/**
79 * @brief Updates the hardware details like MAC address and regulatory region 104 * @brief Updates the hardware details like MAC address and regulatory region
80 * 105 *
81 * @param priv A pointer to struct lbs_private structure 106 * @param priv A pointer to struct lbs_private structure
@@ -168,11 +193,6 @@ int lbs_update_hw_spec(struct lbs_private *priv)
168 goto out; 193 goto out;
169 } 194 }
170 195
171 if (lbs_set_universaltable(priv, 0)) {
172 ret = -1;
173 goto out;
174 }
175
176out: 196out:
177 lbs_deb_leave(LBS_DEB_CMD); 197 lbs_deb_leave(LBS_DEB_CMD);
178 return ret; 198 return ret;
@@ -221,7 +241,7 @@ static int lbs_cmd_802_11_ps_mode(struct cmd_ds_command *cmd,
221 241
222 cmd->command = cpu_to_le16(CMD_802_11_PS_MODE); 242 cmd->command = cpu_to_le16(CMD_802_11_PS_MODE);
223 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_ps_mode) + 243 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_ps_mode) +
224 S_DS_GEN); 244 sizeof(struct cmd_header));
225 psm->action = cpu_to_le16(cmd_action); 245 psm->action = cpu_to_le16(cmd_action);
226 psm->multipledtim = 0; 246 psm->multipledtim = 0;
227 switch (cmd_action) { 247 switch (cmd_action) {
@@ -250,33 +270,6 @@ static int lbs_cmd_802_11_ps_mode(struct cmd_ds_command *cmd,
250 return 0; 270 return 0;
251} 271}
252 272
253int lbs_cmd_802_11_inactivity_timeout(struct lbs_private *priv,
254 uint16_t cmd_action, uint16_t *timeout)
255{
256 struct cmd_ds_802_11_inactivity_timeout cmd;
257 int ret;
258
259 lbs_deb_enter(LBS_DEB_CMD);
260
261 cmd.hdr.command = cpu_to_le16(CMD_802_11_INACTIVITY_TIMEOUT);
262 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
263
264 cmd.action = cpu_to_le16(cmd_action);
265
266 if (cmd_action == CMD_ACT_SET)
267 cmd.timeout = cpu_to_le16(*timeout);
268 else
269 cmd.timeout = 0;
270
271 ret = lbs_cmd_with_response(priv, CMD_802_11_INACTIVITY_TIMEOUT, &cmd);
272
273 if (!ret)
274 *timeout = le16_to_cpu(cmd.timeout);
275
276 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
277 return 0;
278}
279
280int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action, 273int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action,
281 struct sleep_params *sp) 274 struct sleep_params *sp)
282{ 275{
@@ -319,190 +312,53 @@ int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action,
319 return 0; 312 return 0;
320} 313}
321 314
322int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action, 315static int lbs_wait_for_ds_awake(struct lbs_private *priv)
323 struct assoc_request *assoc)
324{ 316{
325 struct cmd_ds_802_11_set_wep cmd;
326 int ret = 0; 317 int ret = 0;
327 318
328 lbs_deb_enter(LBS_DEB_CMD); 319 lbs_deb_enter(LBS_DEB_CMD);
329 320
330 memset(&cmd, 0, sizeof(cmd)); 321 if (priv->is_deep_sleep) {
331 cmd.hdr.command = cpu_to_le16(CMD_802_11_SET_WEP); 322 if (!wait_event_interruptible_timeout(priv->ds_awake_q,
332 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 323 !priv->is_deep_sleep, (10 * HZ))) {
333 324 lbs_pr_err("ds_awake_q: timer expired\n");
334 cmd.action = cpu_to_le16(cmd_action); 325 ret = -1;
335
336 if (cmd_action == CMD_ACT_ADD) {
337 int i;
338
339 /* default tx key index */
340 cmd.keyindex = cpu_to_le16(assoc->wep_tx_keyidx &
341 CMD_WEP_KEY_INDEX_MASK);
342
343 /* Copy key types and material to host command structure */
344 for (i = 0; i < 4; i++) {
345 struct enc_key *pkey = &assoc->wep_keys[i];
346
347 switch (pkey->len) {
348 case KEY_LEN_WEP_40:
349 cmd.keytype[i] = CMD_TYPE_WEP_40_BIT;
350 memmove(cmd.keymaterial[i], pkey->key, pkey->len);
351 lbs_deb_cmd("SET_WEP: add key %d (40 bit)\n", i);
352 break;
353 case KEY_LEN_WEP_104:
354 cmd.keytype[i] = CMD_TYPE_WEP_104_BIT;
355 memmove(cmd.keymaterial[i], pkey->key, pkey->len);
356 lbs_deb_cmd("SET_WEP: add key %d (104 bit)\n", i);
357 break;
358 case 0:
359 break;
360 default:
361 lbs_deb_cmd("SET_WEP: invalid key %d, length %d\n",
362 i, pkey->len);
363 ret = -1;
364 goto done;
365 break;
366 }
367 } 326 }
368 } else if (cmd_action == CMD_ACT_REMOVE) {
369 /* ACT_REMOVE clears _all_ WEP keys */
370
371 /* default tx key index */
372 cmd.keyindex = cpu_to_le16(priv->wep_tx_keyidx &
373 CMD_WEP_KEY_INDEX_MASK);
374 lbs_deb_cmd("SET_WEP: remove key %d\n", priv->wep_tx_keyidx);
375 }
376
377 ret = lbs_cmd_with_response(priv, CMD_802_11_SET_WEP, &cmd);
378done:
379 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
380 return ret;
381}
382
383int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
384 uint16_t *enable)
385{
386 struct cmd_ds_802_11_enable_rsn cmd;
387 int ret;
388
389 lbs_deb_enter(LBS_DEB_CMD);
390
391 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
392 cmd.action = cpu_to_le16(cmd_action);
393
394 if (cmd_action == CMD_ACT_GET)
395 cmd.enable = 0;
396 else {
397 if (*enable)
398 cmd.enable = cpu_to_le16(CMD_ENABLE_RSN);
399 else
400 cmd.enable = cpu_to_le16(CMD_DISABLE_RSN);
401 lbs_deb_cmd("ENABLE_RSN: %d\n", *enable);
402 } 327 }
403 328
404 ret = lbs_cmd_with_response(priv, CMD_802_11_ENABLE_RSN, &cmd);
405 if (!ret && cmd_action == CMD_ACT_GET)
406 *enable = le16_to_cpu(cmd.enable);
407
408 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); 329 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
409 return ret; 330 return ret;
410} 331}
411 332
412static void set_one_wpa_key(struct MrvlIEtype_keyParamSet *keyparam, 333int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep)
413 struct enc_key *key)
414{
415 lbs_deb_enter(LBS_DEB_CMD);
416
417 if (key->flags & KEY_INFO_WPA_ENABLED)
418 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_ENABLED);
419 if (key->flags & KEY_INFO_WPA_UNICAST)
420 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_UNICAST);
421 if (key->flags & KEY_INFO_WPA_MCAST)
422 keyparam->keyinfo |= cpu_to_le16(KEY_INFO_WPA_MCAST);
423
424 keyparam->type = cpu_to_le16(TLV_TYPE_KEY_MATERIAL);
425 keyparam->keytypeid = cpu_to_le16(key->type);
426 keyparam->keylen = cpu_to_le16(key->len);
427 memcpy(keyparam->key, key->key, key->len);
428
429 /* Length field doesn't include the {type,length} header */
430 keyparam->length = cpu_to_le16(sizeof(*keyparam) - 4);
431 lbs_deb_leave(LBS_DEB_CMD);
432}
433
434int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
435 struct assoc_request *assoc)
436{ 334{
437 struct cmd_ds_802_11_key_material cmd; 335 int ret = 0;
438 int ret = 0;
439 int index = 0;
440 336
441 lbs_deb_enter(LBS_DEB_CMD); 337 lbs_deb_enter(LBS_DEB_CMD);
442 338
443 cmd.action = cpu_to_le16(cmd_action); 339 if (deep_sleep) {
444 cmd.hdr.size = cpu_to_le16(sizeof(cmd)); 340 if (priv->is_deep_sleep != 1) {
445 341 lbs_deb_cmd("deep sleep: sleep\n");
446 if (cmd_action == CMD_ACT_GET) { 342 BUG_ON(!priv->enter_deep_sleep);
447 cmd.hdr.size = cpu_to_le16(S_DS_GEN + 2); 343 ret = priv->enter_deep_sleep(priv);
448 } else { 344 if (!ret) {
449 memset(cmd.keyParamSet, 0, sizeof(cmd.keyParamSet)); 345 netif_stop_queue(priv->dev);
450 346 netif_carrier_off(priv->dev);
451 if (test_bit(ASSOC_FLAG_WPA_UCAST_KEY, &assoc->flags)) { 347 }
452 set_one_wpa_key(&cmd.keyParamSet[index], 348 } else {
453 &assoc->wpa_unicast_key); 349 lbs_pr_err("deep sleep: already enabled\n");
454 index++;
455 }
456
457 if (test_bit(ASSOC_FLAG_WPA_MCAST_KEY, &assoc->flags)) {
458 set_one_wpa_key(&cmd.keyParamSet[index],
459 &assoc->wpa_mcast_key);
460 index++;
461 } 350 }
462 351 } else {
463 /* The common header and as many keys as we included */ 352 if (priv->is_deep_sleep) {
464 cmd.hdr.size = cpu_to_le16(offsetof(typeof(cmd), 353 lbs_deb_cmd("deep sleep: wakeup\n");
465 keyParamSet[index])); 354 BUG_ON(!priv->exit_deep_sleep);
466 } 355 ret = priv->exit_deep_sleep(priv);
467 ret = lbs_cmd_with_response(priv, CMD_802_11_KEY_MATERIAL, &cmd); 356 if (!ret) {
468 /* Copy the returned key to driver private data */ 357 ret = lbs_wait_for_ds_awake(priv);
469 if (!ret && cmd_action == CMD_ACT_GET) { 358 if (ret)
470 void *buf_ptr = cmd.keyParamSet; 359 lbs_pr_err("deep sleep: wakeup"
471 void *resp_end = &(&cmd)[1]; 360 "failed\n");
472 361 }
473 while (buf_ptr < resp_end) {
474 struct MrvlIEtype_keyParamSet *keyparam = buf_ptr;
475 struct enc_key *key;
476 uint16_t param_set_len = le16_to_cpu(keyparam->length);
477 uint16_t key_len = le16_to_cpu(keyparam->keylen);
478 uint16_t key_flags = le16_to_cpu(keyparam->keyinfo);
479 uint16_t key_type = le16_to_cpu(keyparam->keytypeid);
480 void *end;
481
482 end = (void *)keyparam + sizeof(keyparam->type)
483 + sizeof(keyparam->length) + param_set_len;
484
485 /* Make sure we don't access past the end of the IEs */
486 if (end > resp_end)
487 break;
488
489 if (key_flags & KEY_INFO_WPA_UNICAST)
490 key = &priv->wpa_unicast_key;
491 else if (key_flags & KEY_INFO_WPA_MCAST)
492 key = &priv->wpa_mcast_key;
493 else
494 break;
495
496 /* Copy returned key into driver */
497 memset(key, 0, sizeof(struct enc_key));
498 if (key_len > sizeof(key->key))
499 break;
500 key->type = key_type;
501 key->flags = key_flags;
502 key->len = key_len;
503 memcpy(key->key, keyparam->key, key->len);
504
505 buf_ptr = end + 1;
506 } 362 }
507 } 363 }
508 364
@@ -534,7 +390,7 @@ int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val)
534 switch (oid) { 390 switch (oid) {
535 case SNMP_MIB_OID_BSS_TYPE: 391 case SNMP_MIB_OID_BSS_TYPE:
536 cmd.bufsize = cpu_to_le16(sizeof(u8)); 392 cmd.bufsize = cpu_to_le16(sizeof(u8));
537 cmd.value[0] = (val == IW_MODE_ADHOC) ? 2 : 1; 393 cmd.value[0] = val;
538 break; 394 break;
539 case SNMP_MIB_OID_11D_ENABLE: 395 case SNMP_MIB_OID_11D_ENABLE:
540 case SNMP_MIB_OID_FRAG_THRESHOLD: 396 case SNMP_MIB_OID_FRAG_THRESHOLD:
@@ -587,13 +443,7 @@ int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val)
587 443
588 switch (le16_to_cpu(cmd.bufsize)) { 444 switch (le16_to_cpu(cmd.bufsize)) {
589 case sizeof(u8): 445 case sizeof(u8):
590 if (oid == SNMP_MIB_OID_BSS_TYPE) { 446 *out_val = cmd.value[0];
591 if (cmd.value[0] == 2)
592 *out_val = IW_MODE_ADHOC;
593 else
594 *out_val = IW_MODE_INFRA;
595 } else
596 *out_val = cmd.value[0];
597 break; 447 break;
598 case sizeof(u16): 448 case sizeof(u16):
599 *out_val = le16_to_cpu(*((__le16 *)(&cmd.value))); 449 *out_val = le16_to_cpu(*((__le16 *)(&cmd.value)));
@@ -680,7 +530,7 @@ static int lbs_cmd_802_11_monitor_mode(struct cmd_ds_command *cmd,
680 cmd->command = cpu_to_le16(CMD_802_11_MONITOR_MODE); 530 cmd->command = cpu_to_le16(CMD_802_11_MONITOR_MODE);
681 cmd->size = 531 cmd->size =
682 cpu_to_le16(sizeof(struct cmd_ds_802_11_monitor_mode) + 532 cpu_to_le16(sizeof(struct cmd_ds_802_11_monitor_mode) +
683 S_DS_GEN); 533 sizeof(struct cmd_header));
684 534
685 monitor->action = cpu_to_le16(cmd_action); 535 monitor->action = cpu_to_le16(cmd_action);
686 if (cmd_action == CMD_ACT_SET) { 536 if (cmd_action == CMD_ACT_SET) {
@@ -691,111 +541,6 @@ static int lbs_cmd_802_11_monitor_mode(struct cmd_ds_command *cmd,
691 return 0; 541 return 0;
692} 542}
693 543
694static __le16 lbs_rate_to_fw_bitmap(int rate, int lower_rates_ok)
695{
696/* Bit Rate
697* 15:13 Reserved
698* 12 54 Mbps
699* 11 48 Mbps
700* 10 36 Mbps
701* 9 24 Mbps
702* 8 18 Mbps
703* 7 12 Mbps
704* 6 9 Mbps
705* 5 6 Mbps
706* 4 Reserved
707* 3 11 Mbps
708* 2 5.5 Mbps
709* 1 2 Mbps
710* 0 1 Mbps
711**/
712
713 uint16_t ratemask;
714 int i = lbs_data_rate_to_fw_index(rate);
715 if (lower_rates_ok)
716 ratemask = (0x1fef >> (12 - i));
717 else
718 ratemask = (1 << i);
719 return cpu_to_le16(ratemask);
720}
721
722int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
723 uint16_t cmd_action)
724{
725 struct cmd_ds_802_11_rate_adapt_rateset cmd;
726 int ret;
727
728 lbs_deb_enter(LBS_DEB_CMD);
729
730 if (!priv->cur_rate && !priv->enablehwauto)
731 return -EINVAL;
732
733 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
734
735 cmd.action = cpu_to_le16(cmd_action);
736 cmd.enablehwauto = cpu_to_le16(priv->enablehwauto);
737 cmd.bitmap = lbs_rate_to_fw_bitmap(priv->cur_rate, priv->enablehwauto);
738 ret = lbs_cmd_with_response(priv, CMD_802_11_RATE_ADAPT_RATESET, &cmd);
739 if (!ret && cmd_action == CMD_ACT_GET) {
740 priv->ratebitmap = le16_to_cpu(cmd.bitmap);
741 priv->enablehwauto = le16_to_cpu(cmd.enablehwauto);
742 }
743
744 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
745 return ret;
746}
747EXPORT_SYMBOL_GPL(lbs_cmd_802_11_rate_adapt_rateset);
748
749/**
750 * @brief Set the data rate
751 *
752 * @param priv A pointer to struct lbs_private structure
753 * @param rate The desired data rate, or 0 to clear a locked rate
754 *
755 * @return 0 on success, error on failure
756 */
757int lbs_set_data_rate(struct lbs_private *priv, u8 rate)
758{
759 struct cmd_ds_802_11_data_rate cmd;
760 int ret = 0;
761
762 lbs_deb_enter(LBS_DEB_CMD);
763
764 memset(&cmd, 0, sizeof(cmd));
765 cmd.hdr.size = cpu_to_le16(sizeof(cmd));
766
767 if (rate > 0) {
768 cmd.action = cpu_to_le16(CMD_ACT_SET_TX_FIX_RATE);
769 cmd.rates[0] = lbs_data_rate_to_fw_index(rate);
770 if (cmd.rates[0] == 0) {
771 lbs_deb_cmd("DATA_RATE: invalid requested rate of"
772 " 0x%02X\n", rate);
773 ret = 0;
774 goto out;
775 }
776 lbs_deb_cmd("DATA_RATE: set fixed 0x%02X\n", cmd.rates[0]);
777 } else {
778 cmd.action = cpu_to_le16(CMD_ACT_SET_TX_AUTO);
779 lbs_deb_cmd("DATA_RATE: setting auto\n");
780 }
781
782 ret = lbs_cmd_with_response(priv, CMD_802_11_DATA_RATE, &cmd);
783 if (ret)
784 goto out;
785
786 lbs_deb_hex(LBS_DEB_CMD, "DATA_RATE_RESP", (u8 *) &cmd, sizeof (cmd));
787
788 /* FIXME: get actual rates FW can do if this command actually returns
789 * all data rates supported.
790 */
791 priv->cur_rate = lbs_fw_index_to_data_rate(cmd.rates[0]);
792 lbs_deb_cmd("DATA_RATE: current rate is 0x%02x\n", priv->cur_rate);
793
794out:
795 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
796 return ret;
797}
798
799/** 544/**
800 * @brief Get the radio channel 545 * @brief Get the radio channel
801 * 546 *
@@ -803,7 +548,7 @@ out:
803 * 548 *
804 * @return The channel on success, error on failure 549 * @return The channel on success, error on failure
805 */ 550 */
806int lbs_get_channel(struct lbs_private *priv) 551static int lbs_get_channel(struct lbs_private *priv)
807{ 552{
808 struct cmd_ds_802_11_rf_channel cmd; 553 struct cmd_ds_802_11_rf_channel cmd;
809 int ret = 0; 554 int ret = 0;
@@ -835,7 +580,7 @@ int lbs_update_channel(struct lbs_private *priv)
835 580
836 ret = lbs_get_channel(priv); 581 ret = lbs_get_channel(priv);
837 if (ret > 0) { 582 if (ret > 0) {
838 priv->curbssparams.channel = ret; 583 priv->channel = ret;
839 ret = 0; 584 ret = 0;
840 } 585 }
841 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret); 586 lbs_deb_leave_args(LBS_DEB_ASSOC, "ret %d", ret);
@@ -854,7 +599,7 @@ int lbs_set_channel(struct lbs_private *priv, u8 channel)
854{ 599{
855 struct cmd_ds_802_11_rf_channel cmd; 600 struct cmd_ds_802_11_rf_channel cmd;
856#ifdef DEBUG 601#ifdef DEBUG
857 u8 old_channel = priv->curbssparams.channel; 602 u8 old_channel = priv->channel;
858#endif 603#endif
859 int ret = 0; 604 int ret = 0;
860 605
@@ -869,36 +614,15 @@ int lbs_set_channel(struct lbs_private *priv, u8 channel)
869 if (ret) 614 if (ret)
870 goto out; 615 goto out;
871 616
872 priv->curbssparams.channel = (uint8_t) le16_to_cpu(cmd.channel); 617 priv->channel = (uint8_t) le16_to_cpu(cmd.channel);
873 lbs_deb_cmd("channel switch from %d to %d\n", old_channel, 618 lbs_deb_cmd("channel switch from %d to %d\n", old_channel,
874 priv->curbssparams.channel); 619 priv->channel);
875 620
876out: 621out:
877 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret); 622 lbs_deb_leave_args(LBS_DEB_CMD, "ret %d", ret);
878 return ret; 623 return ret;
879} 624}
880 625
881static int lbs_cmd_802_11_rssi(struct lbs_private *priv,
882 struct cmd_ds_command *cmd)
883{
884
885 lbs_deb_enter(LBS_DEB_CMD);
886 cmd->command = cpu_to_le16(CMD_802_11_RSSI);
887 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_rssi) + S_DS_GEN);
888 cmd->params.rssi.N = cpu_to_le16(DEFAULT_BCN_AVG_FACTOR);
889
890 /* reset Beacon SNR/NF/RSSI values */
891 priv->SNR[TYPE_BEACON][TYPE_NOAVG] = 0;
892 priv->SNR[TYPE_BEACON][TYPE_AVG] = 0;
893 priv->NF[TYPE_BEACON][TYPE_NOAVG] = 0;
894 priv->NF[TYPE_BEACON][TYPE_AVG] = 0;
895 priv->RSSI[TYPE_BEACON][TYPE_NOAVG] = 0;
896 priv->RSSI[TYPE_BEACON][TYPE_AVG] = 0;
897
898 lbs_deb_leave(LBS_DEB_CMD);
899 return 0;
900}
901
902static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr, 626static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr,
903 u8 cmd_action, void *pdata_buf) 627 u8 cmd_action, void *pdata_buf)
904{ 628{
@@ -915,7 +639,7 @@ static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr,
915 639
916 cmdptr->size = 640 cmdptr->size =
917 cpu_to_le16(sizeof (struct cmd_ds_mac_reg_access) 641 cpu_to_le16(sizeof (struct cmd_ds_mac_reg_access)
918 + S_DS_GEN); 642 + sizeof(struct cmd_header));
919 macreg = 643 macreg =
920 (struct cmd_ds_mac_reg_access *)&cmdptr->params. 644 (struct cmd_ds_mac_reg_access *)&cmdptr->params.
921 macreg; 645 macreg;
@@ -934,7 +658,7 @@ static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr,
934 cmdptr->size = 658 cmdptr->size =
935 cpu_to_le16(sizeof 659 cpu_to_le16(sizeof
936 (struct cmd_ds_bbp_reg_access) 660 (struct cmd_ds_bbp_reg_access)
937 + S_DS_GEN); 661 + sizeof(struct cmd_header));
938 bbpreg = 662 bbpreg =
939 (struct cmd_ds_bbp_reg_access *)&cmdptr->params. 663 (struct cmd_ds_bbp_reg_access *)&cmdptr->params.
940 bbpreg; 664 bbpreg;
@@ -953,7 +677,7 @@ static int lbs_cmd_reg_access(struct cmd_ds_command *cmdptr,
953 cmdptr->size = 677 cmdptr->size =
954 cpu_to_le16(sizeof 678 cpu_to_le16(sizeof
955 (struct cmd_ds_rf_reg_access) + 679 (struct cmd_ds_rf_reg_access) +
956 S_DS_GEN); 680 sizeof(struct cmd_header));
957 rfreg = 681 rfreg =
958 (struct cmd_ds_rf_reg_access *)&cmdptr->params. 682 (struct cmd_ds_rf_reg_access *)&cmdptr->params.
959 rfreg; 683 rfreg;
@@ -980,7 +704,8 @@ static int lbs_cmd_bt_access(struct cmd_ds_command *cmd,
980 lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action); 704 lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action);
981 705
982 cmd->command = cpu_to_le16(CMD_BT_ACCESS); 706 cmd->command = cpu_to_le16(CMD_BT_ACCESS);
983 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_bt_access) + S_DS_GEN); 707 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_bt_access) +
708 sizeof(struct cmd_header));
984 cmd->result = 0; 709 cmd->result = 0;
985 bt_access->action = cpu_to_le16(cmd_action); 710 bt_access->action = cpu_to_le16(cmd_action);
986 711
@@ -1017,7 +742,8 @@ static int lbs_cmd_fwt_access(struct cmd_ds_command *cmd,
1017 lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action); 742 lbs_deb_enter_args(LBS_DEB_CMD, "action %d", cmd_action);
1018 743
1019 cmd->command = cpu_to_le16(CMD_FWT_ACCESS); 744 cmd->command = cpu_to_le16(CMD_FWT_ACCESS);
1020 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_fwt_access) + S_DS_GEN); 745 cmd->size = cpu_to_le16(sizeof(struct cmd_ds_fwt_access) +
746 sizeof(struct cmd_header));
1021 cmd->result = 0; 747 cmd->result = 0;
1022 748
1023 if (pdata_buf) 749 if (pdata_buf)
@@ -1123,7 +849,7 @@ int lbs_mesh_config(struct lbs_private *priv, uint16_t action, uint16_t chan)
1123 ie->val.mesh_id_len = priv->mesh_ssid_len; 849 ie->val.mesh_id_len = priv->mesh_ssid_len;
1124 memcpy(ie->val.mesh_id, priv->mesh_ssid, priv->mesh_ssid_len); 850 memcpy(ie->val.mesh_id, priv->mesh_ssid, priv->mesh_ssid_len);
1125 ie->len = sizeof(struct mrvl_meshie_val) - 851 ie->len = sizeof(struct mrvl_meshie_val) -
1126 IW_ESSID_MAX_SIZE + priv->mesh_ssid_len; 852 IEEE80211_MAX_SSID_LEN + priv->mesh_ssid_len;
1127 cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie_val)); 853 cmd.length = cpu_to_le16(sizeof(struct mrvl_meshie_val));
1128 break; 854 break;
1129 case CMD_ACT_MESH_CONFIG_STOP: 855 case CMD_ACT_MESH_CONFIG_STOP:
@@ -1138,27 +864,6 @@ int lbs_mesh_config(struct lbs_private *priv, uint16_t action, uint16_t chan)
1138 return __lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv); 864 return __lbs_mesh_config_send(priv, &cmd, action, priv->mesh_tlv);
1139} 865}
1140 866
1141static int lbs_cmd_bcn_ctrl(struct lbs_private * priv,
1142 struct cmd_ds_command *cmd,
1143 u16 cmd_action)
1144{
1145 struct cmd_ds_802_11_beacon_control
1146 *bcn_ctrl = &cmd->params.bcn_ctrl;
1147
1148 lbs_deb_enter(LBS_DEB_CMD);
1149 cmd->size =
1150 cpu_to_le16(sizeof(struct cmd_ds_802_11_beacon_control)
1151 + S_DS_GEN);
1152 cmd->command = cpu_to_le16(CMD_802_11_BEACON_CTRL);
1153
1154 bcn_ctrl->action = cpu_to_le16(cmd_action);
1155 bcn_ctrl->beacon_enable = cpu_to_le16(priv->beacon_enable);
1156 bcn_ctrl->beacon_period = cpu_to_le16(priv->beacon_period);
1157
1158 lbs_deb_leave(LBS_DEB_CMD);
1159 return 0;
1160}
1161
1162static void lbs_queue_cmd(struct lbs_private *priv, 867static void lbs_queue_cmd(struct lbs_private *priv,
1163 struct cmd_ctrl_node *cmdnode) 868 struct cmd_ctrl_node *cmdnode)
1164{ 869{
@@ -1242,8 +947,17 @@ static void lbs_submit_command(struct lbs_private *priv,
1242 timeo = HZ/4; 947 timeo = HZ/4;
1243 } 948 }
1244 949
1245 /* Setup the timer after transmit command */ 950 if (command == CMD_802_11_DEEP_SLEEP) {
1246 mod_timer(&priv->command_timer, jiffies + timeo); 951 if (priv->is_auto_deep_sleep_enabled) {
952 priv->wakeup_dev_required = 1;
953 priv->dnld_sent = 0;
954 }
955 priv->is_deep_sleep = 1;
956 lbs_complete_command(priv, cmdnode, 0);
957 } else {
958 /* Setup the timer after transmit command */
959 mod_timer(&priv->command_timer, jiffies + timeo);
960 }
1247 961
1248 lbs_deb_leave(LBS_DEB_HOST); 962 lbs_deb_leave(LBS_DEB_HOST);
1249} 963}
@@ -1390,6 +1104,11 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1390 goto done; 1104 goto done;
1391 } 1105 }
1392 1106
1107 if (!lbs_is_cmd_allowed(priv)) {
1108 ret = -EBUSY;
1109 goto done;
1110 }
1111
1393 cmdnode = lbs_get_cmd_ctrl_node(priv); 1112 cmdnode = lbs_get_cmd_ctrl_node(priv);
1394 1113
1395 if (cmdnode == NULL) { 1114 if (cmdnode == NULL) {
@@ -1440,7 +1159,7 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1440 1159
1441 cmdptr->command = cpu_to_le16(cmd_no); 1160 cmdptr->command = cpu_to_le16(cmd_no);
1442 cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_afc) + 1161 cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_802_11_afc) +
1443 S_DS_GEN); 1162 sizeof(struct cmd_header));
1444 1163
1445 memmove(&cmdptr->params.afc, 1164 memmove(&cmdptr->params.afc,
1446 pdata_buf, sizeof(struct cmd_ds_802_11_afc)); 1165 pdata_buf, sizeof(struct cmd_ds_802_11_afc));
@@ -1448,45 +1167,17 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1448 ret = 0; 1167 ret = 0;
1449 goto done; 1168 goto done;
1450 1169
1451 case CMD_802_11D_DOMAIN_INFO:
1452 ret = lbs_cmd_802_11d_domain_info(priv, cmdptr,
1453 cmd_no, cmd_action);
1454 break;
1455
1456 case CMD_802_11_TPC_CFG: 1170 case CMD_802_11_TPC_CFG:
1457 cmdptr->command = cpu_to_le16(CMD_802_11_TPC_CFG); 1171 cmdptr->command = cpu_to_le16(CMD_802_11_TPC_CFG);
1458 cmdptr->size = 1172 cmdptr->size =
1459 cpu_to_le16(sizeof(struct cmd_ds_802_11_tpc_cfg) + 1173 cpu_to_le16(sizeof(struct cmd_ds_802_11_tpc_cfg) +
1460 S_DS_GEN); 1174 sizeof(struct cmd_header));
1461 1175
1462 memmove(&cmdptr->params.tpccfg, 1176 memmove(&cmdptr->params.tpccfg,
1463 pdata_buf, sizeof(struct cmd_ds_802_11_tpc_cfg)); 1177 pdata_buf, sizeof(struct cmd_ds_802_11_tpc_cfg));
1464 1178
1465 ret = 0; 1179 ret = 0;
1466 break; 1180 break;
1467 case CMD_802_11_LED_GPIO_CTRL:
1468 {
1469 struct mrvl_ie_ledgpio *gpio =
1470 (struct mrvl_ie_ledgpio*)
1471 cmdptr->params.ledgpio.data;
1472
1473 memmove(&cmdptr->params.ledgpio,
1474 pdata_buf,
1475 sizeof(struct cmd_ds_802_11_led_ctrl));
1476
1477 cmdptr->command =
1478 cpu_to_le16(CMD_802_11_LED_GPIO_CTRL);
1479
1480#define ACTION_NUMLED_TLVTYPE_LEN_FIELDS_LEN 8
1481 cmdptr->size =
1482 cpu_to_le16(le16_to_cpu(gpio->header.len)
1483 + S_DS_GEN
1484 + ACTION_NUMLED_TLVTYPE_LEN_FIELDS_LEN);
1485 gpio->header.len = gpio->header.len;
1486
1487 ret = 0;
1488 break;
1489 }
1490 1181
1491 case CMD_BT_ACCESS: 1182 case CMD_BT_ACCESS:
1492 ret = lbs_cmd_bt_access(cmdptr, cmd_action, pdata_buf); 1183 ret = lbs_cmd_bt_access(cmdptr, cmd_action, pdata_buf);
@@ -1496,15 +1187,13 @@ int lbs_prepare_and_send_command(struct lbs_private *priv,
1496 ret = lbs_cmd_fwt_access(cmdptr, cmd_action, pdata_buf); 1187 ret = lbs_cmd_fwt_access(cmdptr, cmd_action, pdata_buf);
1497 break; 1188 break;
1498 1189
1499 case CMD_GET_TSF:
1500 cmdptr->command = cpu_to_le16(CMD_GET_TSF);
1501 cmdptr->size = cpu_to_le16(sizeof(struct cmd_ds_get_tsf) +
1502 S_DS_GEN);
1503 ret = 0;
1504 break;
1505 case CMD_802_11_BEACON_CTRL: 1190 case CMD_802_11_BEACON_CTRL:
1506 ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action); 1191 ret = lbs_cmd_bcn_ctrl(priv, cmdptr, cmd_action);
1507 break; 1192 break;
1193 case CMD_802_11_DEEP_SLEEP:
1194 cmdptr->command = cpu_to_le16(CMD_802_11_DEEP_SLEEP);
1195 cmdptr->size = cpu_to_le16(sizeof(struct cmd_header));
1196 break;
1508 default: 1197 default:
1509 lbs_pr_err("PREP_CMD: unknown command 0x%04x\n", cmd_no); 1198 lbs_pr_err("PREP_CMD: unknown command 0x%04x\n", cmd_no);
1510 ret = -1; 1199 ret = -1;
@@ -1822,30 +1511,6 @@ done:
1822 return ret; 1511 return ret;
1823} 1512}
1824 1513
1825void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str)
1826{
1827 union iwreq_data iwrq;
1828 u8 buf[50];
1829
1830 lbs_deb_enter(LBS_DEB_WEXT);
1831
1832 memset(&iwrq, 0, sizeof(union iwreq_data));
1833 memset(buf, 0, sizeof(buf));
1834
1835 snprintf(buf, sizeof(buf) - 1, "%s", str);
1836
1837 iwrq.data.length = strlen(buf) + 1 + IW_EV_LCP_LEN;
1838
1839 /* Send Event to upper layer */
1840 lbs_deb_wext("event indication string %s\n", (char *)buf);
1841 lbs_deb_wext("event indication length %d\n", iwrq.data.length);
1842 lbs_deb_wext("sending wireless event IWEVCUSTOM for %s\n", str);
1843
1844 wireless_send_event(priv->dev, IWEVCUSTOM, &iwrq, buf);
1845
1846 lbs_deb_leave(LBS_DEB_WEXT);
1847}
1848
1849static void lbs_send_confirmsleep(struct lbs_private *priv) 1514static void lbs_send_confirmsleep(struct lbs_private *priv)
1850{ 1515{
1851 unsigned long flags; 1516 unsigned long flags;
@@ -2023,7 +1688,7 @@ int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
2023} 1688}
2024 1689
2025 1690
2026static struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv, 1691struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
2027 uint16_t command, struct cmd_header *in_cmd, int in_cmd_size, 1692 uint16_t command, struct cmd_header *in_cmd, int in_cmd_size,
2028 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *), 1693 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
2029 unsigned long callback_arg) 1694 unsigned long callback_arg)
@@ -2038,6 +1703,11 @@ static struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
2038 goto done; 1703 goto done;
2039 } 1704 }
2040 1705
1706 if (!lbs_is_cmd_allowed(priv)) {
1707 cmdnode = ERR_PTR(-EBUSY);
1708 goto done;
1709 }
1710
2041 cmdnode = lbs_get_cmd_ctrl_node(priv); 1711 cmdnode = lbs_get_cmd_ctrl_node(priv);
2042 if (cmdnode == NULL) { 1712 if (cmdnode == NULL) {
2043 lbs_deb_host("PREP_CMD: cmdnode is NULL\n"); 1713 lbs_deb_host("PREP_CMD: cmdnode is NULL\n");
@@ -2116,5 +1786,3 @@ done:
2116 return ret; 1786 return ret;
2117} 1787}
2118EXPORT_SYMBOL_GPL(__lbs_cmd); 1788EXPORT_SYMBOL_GPL(__lbs_cmd);
2119
2120
diff --git a/drivers/net/wireless/libertas/cmd.h b/drivers/net/wireless/libertas/cmd.h
index 392e578ca095..2862748aef70 100644
--- a/drivers/net/wireless/libertas/cmd.h
+++ b/drivers/net/wireless/libertas/cmd.h
@@ -3,11 +3,30 @@
3#ifndef _LBS_CMD_H_ 3#ifndef _LBS_CMD_H_
4#define _LBS_CMD_H_ 4#define _LBS_CMD_H_
5 5
6#include "hostcmd.h" 6#include "host.h"
7#include "dev.h" 7#include "dev.h"
8 8
9
10/* Command & response transfer between host and card */
11
12struct cmd_ctrl_node {
13 struct list_head list;
14 int result;
15 /* command response */
16 int (*callback)(struct lbs_private *,
17 unsigned long,
18 struct cmd_header *);
19 unsigned long callback_arg;
20 /* command data */
21 struct cmd_header *cmdbuf;
22 /* wait queue */
23 u16 cmdwaitqwoken;
24 wait_queue_head_t cmdwait_q;
25};
26
27
9/* lbs_cmd() infers the size of the buffer to copy data back into, from 28/* lbs_cmd() infers the size of the buffer to copy data back into, from
10 the size of the target of the pointer. Since the command to be sent 29 the size of the target of the pointer. Since the command to be sent
11 may often be smaller, that size is set in cmd->size by the caller.*/ 30 may often be smaller, that size is set in cmd->size by the caller.*/
12#define lbs_cmd(priv, cmdnr, cmd, cb, cb_arg) ({ \ 31#define lbs_cmd(priv, cmdnr, cmd, cb, cb_arg) ({ \
13 uint16_t __sz = le16_to_cpu((cmd)->hdr.size); \ 32 uint16_t __sz = le16_to_cpu((cmd)->hdr.size); \
@@ -18,6 +37,11 @@
18#define lbs_cmd_with_response(priv, cmdnr, cmd) \ 37#define lbs_cmd_with_response(priv, cmdnr, cmd) \
19 lbs_cmd(priv, cmdnr, cmd, lbs_cmd_copyback, (unsigned long) (cmd)) 38 lbs_cmd(priv, cmdnr, cmd, lbs_cmd_copyback, (unsigned long) (cmd))
20 39
40int lbs_prepare_and_send_command(struct lbs_private *priv,
41 u16 cmd_no,
42 u16 cmd_action,
43 u16 wait_option, u32 cmd_oid, void *pdata_buf);
44
21void lbs_cmd_async(struct lbs_private *priv, uint16_t command, 45void lbs_cmd_async(struct lbs_private *priv, uint16_t command,
22 struct cmd_header *in_cmd, int in_cmd_size); 46 struct cmd_header *in_cmd, int in_cmd_size);
23 47
@@ -26,62 +50,93 @@ int __lbs_cmd(struct lbs_private *priv, uint16_t command,
26 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *), 50 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
27 unsigned long callback_arg); 51 unsigned long callback_arg);
28 52
29int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0, 53struct cmd_ctrl_node *__lbs_cmd_async(struct lbs_private *priv,
30 int8_t p1, int8_t p2); 54 uint16_t command, struct cmd_header *in_cmd, int in_cmd_size,
55 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *),
56 unsigned long callback_arg);
31 57
32int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1, 58int lbs_cmd_copyback(struct lbs_private *priv, unsigned long extra,
33 int8_t p2, int usesnr); 59 struct cmd_header *resp);
34 60
35int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0, 61int lbs_allocate_cmd_buffer(struct lbs_private *priv);
36 int8_t p1, int8_t p2); 62int lbs_free_cmd_buffer(struct lbs_private *priv);
37 63
38int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1, 64int lbs_execute_next_command(struct lbs_private *priv);
39 int8_t p2, int usesnr); 65void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd,
66 int result);
67int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len);
40 68
41int lbs_cmd_copyback(struct lbs_private *priv, unsigned long extra,
42 struct cmd_header *resp);
43 69
44int lbs_update_hw_spec(struct lbs_private *priv); 70/* From cmdresp.c */
45 71
46int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action, 72void lbs_mac_event_disconnected(struct lbs_private *priv);
47 struct cmd_ds_mesh_access *cmd);
48 73
49int lbs_set_data_rate(struct lbs_private *priv, u8 rate);
50 74
51int lbs_get_channel(struct lbs_private *priv); 75
76/* Events */
77
78int lbs_process_event(struct lbs_private *priv, u32 event);
79
80
81/* Actual commands */
82
83int lbs_update_hw_spec(struct lbs_private *priv);
84
52int lbs_set_channel(struct lbs_private *priv, u8 channel); 85int lbs_set_channel(struct lbs_private *priv, u8 channel);
53 86
54int lbs_mesh_config_send(struct lbs_private *priv, 87int lbs_update_channel(struct lbs_private *priv);
55 struct cmd_ds_mesh_config *cmd,
56 uint16_t action, uint16_t type);
57int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
58 88
59int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria, 89int lbs_host_sleep_cfg(struct lbs_private *priv, uint32_t criteria,
60 struct wol_config *p_wol_config); 90 struct wol_config *p_wol_config);
61int lbs_suspend(struct lbs_private *priv);
62void lbs_resume(struct lbs_private *priv);
63 91
64int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
65 uint16_t cmd_action);
66int lbs_cmd_802_11_inactivity_timeout(struct lbs_private *priv,
67 uint16_t cmd_action, uint16_t *timeout);
68int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action, 92int lbs_cmd_802_11_sleep_params(struct lbs_private *priv, uint16_t cmd_action,
69 struct sleep_params *sp); 93 struct sleep_params *sp);
70int lbs_cmd_802_11_set_wep(struct lbs_private *priv, uint16_t cmd_action,
71 struct assoc_request *assoc);
72int lbs_cmd_802_11_enable_rsn(struct lbs_private *priv, uint16_t cmd_action,
73 uint16_t *enable);
74int lbs_cmd_802_11_key_material(struct lbs_private *priv, uint16_t cmd_action,
75 struct assoc_request *assoc);
76 94
77int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel, 95void lbs_ps_sleep(struct lbs_private *priv, int wait_option);
78 s16 *maxlevel); 96
79int lbs_set_tx_power(struct lbs_private *priv, s16 dbm); 97void lbs_ps_wakeup(struct lbs_private *priv, int wait_option);
98
99void lbs_ps_confirm_sleep(struct lbs_private *priv);
80 100
81int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on); 101int lbs_set_radio(struct lbs_private *priv, u8 preamble, u8 radio_on);
82 102
103void lbs_set_mac_control(struct lbs_private *priv);
104
105int lbs_get_tx_power(struct lbs_private *priv, s16 *curlevel, s16 *minlevel,
106 s16 *maxlevel);
107
83int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val); 108int lbs_set_snmp_mib(struct lbs_private *priv, u32 oid, u16 val);
84 109
85int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val); 110int lbs_get_snmp_mib(struct lbs_private *priv, u32 oid, u16 *out_val);
86 111
112
113/* Mesh related */
114
115int lbs_mesh_access(struct lbs_private *priv, uint16_t cmd_action,
116 struct cmd_ds_mesh_access *cmd);
117
118int lbs_mesh_config_send(struct lbs_private *priv,
119 struct cmd_ds_mesh_config *cmd,
120 uint16_t action, uint16_t type);
121
122int lbs_mesh_config(struct lbs_private *priv, uint16_t enable, uint16_t chan);
123
124
125/* Commands only used in wext.c, assoc. and scan.c */
126
127int lbs_set_power_adapt_cfg(struct lbs_private *priv, int enable, int8_t p0,
128 int8_t p1, int8_t p2);
129
130int lbs_set_tpc_cfg(struct lbs_private *priv, int enable, int8_t p0, int8_t p1,
131 int8_t p2, int usesnr);
132
133int lbs_set_data_rate(struct lbs_private *priv, u8 rate);
134
135int lbs_cmd_802_11_rate_adapt_rateset(struct lbs_private *priv,
136 uint16_t cmd_action);
137
138int lbs_set_tx_power(struct lbs_private *priv, s16 dbm);
139
140int lbs_set_deep_sleep(struct lbs_private *priv, int deep_sleep);
141
87#endif /* _LBS_CMD_H */ 142#endif /* _LBS_CMD_H */
diff --git a/drivers/net/wireless/libertas/cmdresp.c b/drivers/net/wireless/libertas/cmdresp.c
index 23f684337fdd..21d57690c20a 100644
--- a/drivers/net/wireless/libertas/cmdresp.c
+++ b/drivers/net/wireless/libertas/cmdresp.c
@@ -11,6 +11,7 @@
11 11
12#include "host.h" 12#include "host.h"
13#include "decl.h" 13#include "decl.h"
14#include "cmd.h"
14#include "defs.h" 15#include "defs.h"
15#include "dev.h" 16#include "dev.h"
16#include "assoc.h" 17#include "assoc.h"
@@ -26,23 +27,17 @@
26 */ 27 */
27void lbs_mac_event_disconnected(struct lbs_private *priv) 28void lbs_mac_event_disconnected(struct lbs_private *priv)
28{ 29{
29 union iwreq_data wrqu;
30
31 if (priv->connect_status != LBS_CONNECTED) 30 if (priv->connect_status != LBS_CONNECTED)
32 return; 31 return;
33 32
34 lbs_deb_enter(LBS_DEB_ASSOC); 33 lbs_deb_enter(LBS_DEB_ASSOC);
35 34
36 memset(wrqu.ap_addr.sa_data, 0x00, ETH_ALEN);
37 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
38
39 /* 35 /*
40 * Cisco AP sends EAP failure and de-auth in less than 0.5 ms. 36 * Cisco AP sends EAP failure and de-auth in less than 0.5 ms.
41 * It causes problem in the Supplicant 37 * It causes problem in the Supplicant
42 */ 38 */
43
44 msleep_interruptible(1000); 39 msleep_interruptible(1000);
45 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); 40 lbs_send_disconnect_notification(priv);
46 41
47 /* report disconnect to upper layer */ 42 /* report disconnect to upper layer */
48 netif_stop_queue(priv->dev); 43 netif_stop_queue(priv->dev);
@@ -67,7 +62,7 @@ void lbs_mac_event_disconnected(struct lbs_private *priv)
67 * no longer valid. 62 * no longer valid.
68 */ 63 */
69 memset(&priv->curbssparams.bssid, 0, ETH_ALEN); 64 memset(&priv->curbssparams.bssid, 0, ETH_ALEN);
70 memset(&priv->curbssparams.ssid, 0, IW_ESSID_MAX_SIZE); 65 memset(&priv->curbssparams.ssid, 0, IEEE80211_MAX_SSID_LEN);
71 priv->curbssparams.ssid_len = 0; 66 priv->curbssparams.ssid_len = 0;
72 67
73 if (priv->psstate != PS_STATE_FULL_POWER) { 68 if (priv->psstate != PS_STATE_FULL_POWER) {
@@ -78,32 +73,6 @@ void lbs_mac_event_disconnected(struct lbs_private *priv)
78 lbs_deb_leave(LBS_DEB_ASSOC); 73 lbs_deb_leave(LBS_DEB_ASSOC);
79} 74}
80 75
81/**
82 * @brief This function handles MIC failure event.
83 *
84 * @param priv A pointer to struct lbs_private structure
85 * @para event the event id
86 * @return n/a
87 */
88static void handle_mic_failureevent(struct lbs_private *priv, u32 event)
89{
90 char buf[50];
91
92 lbs_deb_enter(LBS_DEB_CMD);
93 memset(buf, 0, sizeof(buf));
94
95 sprintf(buf, "%s", "MLME-MICHAELMICFAILURE.indication ");
96
97 if (event == MACREG_INT_CODE_MIC_ERR_UNICAST) {
98 strcat(buf, "unicast ");
99 } else {
100 strcat(buf, "multicast ");
101 }
102
103 lbs_send_iwevcustom_event(priv, buf);
104 lbs_deb_leave(LBS_DEB_CMD);
105}
106
107static int lbs_ret_reg_access(struct lbs_private *priv, 76static int lbs_ret_reg_access(struct lbs_private *priv,
108 u16 type, struct cmd_ds_command *resp) 77 u16 type, struct cmd_ds_command *resp)
109{ 78{
@@ -147,53 +116,6 @@ static int lbs_ret_reg_access(struct lbs_private *priv,
147 return ret; 116 return ret;
148} 117}
149 118
150static int lbs_ret_802_11_rssi(struct lbs_private *priv,
151 struct cmd_ds_command *resp)
152{
153 struct cmd_ds_802_11_rssi_rsp *rssirsp = &resp->params.rssirsp;
154
155 lbs_deb_enter(LBS_DEB_CMD);
156
157 /* store the non average value */
158 priv->SNR[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->SNR);
159 priv->NF[TYPE_BEACON][TYPE_NOAVG] = get_unaligned_le16(&rssirsp->noisefloor);
160
161 priv->SNR[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgSNR);
162 priv->NF[TYPE_BEACON][TYPE_AVG] = get_unaligned_le16(&rssirsp->avgnoisefloor);
163
164 priv->RSSI[TYPE_BEACON][TYPE_NOAVG] =
165 CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_NOAVG],
166 priv->NF[TYPE_BEACON][TYPE_NOAVG]);
167
168 priv->RSSI[TYPE_BEACON][TYPE_AVG] =
169 CAL_RSSI(priv->SNR[TYPE_BEACON][TYPE_AVG] / AVG_SCALE,
170 priv->NF[TYPE_BEACON][TYPE_AVG] / AVG_SCALE);
171
172 lbs_deb_cmd("RSSI: beacon %d, avg %d\n",
173 priv->RSSI[TYPE_BEACON][TYPE_NOAVG],
174 priv->RSSI[TYPE_BEACON][TYPE_AVG]);
175
176 lbs_deb_leave(LBS_DEB_CMD);
177 return 0;
178}
179
180static int lbs_ret_802_11_bcn_ctrl(struct lbs_private * priv,
181 struct cmd_ds_command *resp)
182{
183 struct cmd_ds_802_11_beacon_control *bcn_ctrl =
184 &resp->params.bcn_ctrl;
185
186 lbs_deb_enter(LBS_DEB_CMD);
187
188 if (bcn_ctrl->action == CMD_ACT_GET) {
189 priv->beacon_enable = (u8) le16_to_cpu(bcn_ctrl->beacon_enable);
190 priv->beacon_period = le16_to_cpu(bcn_ctrl->beacon_period);
191 }
192
193 lbs_deb_enter(LBS_DEB_CMD);
194 return 0;
195}
196
197static inline int handle_cmd_response(struct lbs_private *priv, 119static inline int handle_cmd_response(struct lbs_private *priv,
198 struct cmd_header *cmd_response) 120 struct cmd_header *cmd_response)
199{ 121{
@@ -227,29 +149,13 @@ static inline int handle_cmd_response(struct lbs_private *priv,
227 ret = lbs_ret_802_11_rssi(priv, resp); 149 ret = lbs_ret_802_11_rssi(priv, resp);
228 break; 150 break;
229 151
230 case CMD_RET(CMD_802_11D_DOMAIN_INFO):
231 ret = lbs_ret_802_11d_domain_info(resp);
232 break;
233
234 case CMD_RET(CMD_802_11_TPC_CFG): 152 case CMD_RET(CMD_802_11_TPC_CFG):
235 spin_lock_irqsave(&priv->driver_lock, flags); 153 spin_lock_irqsave(&priv->driver_lock, flags);
236 memmove((void *)priv->cur_cmd->callback_arg, &resp->params.tpccfg, 154 memmove((void *)priv->cur_cmd->callback_arg, &resp->params.tpccfg,
237 sizeof(struct cmd_ds_802_11_tpc_cfg)); 155 sizeof(struct cmd_ds_802_11_tpc_cfg));
238 spin_unlock_irqrestore(&priv->driver_lock, flags); 156 spin_unlock_irqrestore(&priv->driver_lock, flags);
239 break; 157 break;
240 case CMD_RET(CMD_802_11_LED_GPIO_CTRL):
241 spin_lock_irqsave(&priv->driver_lock, flags);
242 memmove((void *)priv->cur_cmd->callback_arg, &resp->params.ledgpio,
243 sizeof(struct cmd_ds_802_11_led_ctrl));
244 spin_unlock_irqrestore(&priv->driver_lock, flags);
245 break;
246 158
247 case CMD_RET(CMD_GET_TSF):
248 spin_lock_irqsave(&priv->driver_lock, flags);
249 memcpy((void *)priv->cur_cmd->callback_arg,
250 &resp->params.gettsf.tsfvalue, sizeof(u64));
251 spin_unlock_irqrestore(&priv->driver_lock, flags);
252 break;
253 case CMD_RET(CMD_BT_ACCESS): 159 case CMD_RET(CMD_BT_ACCESS):
254 spin_lock_irqsave(&priv->driver_lock, flags); 160 spin_lock_irqsave(&priv->driver_lock, flags);
255 if (priv->cur_cmd->callback_arg) 161 if (priv->cur_cmd->callback_arg)
@@ -505,9 +411,21 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
505 411
506 case MACREG_INT_CODE_HOST_AWAKE: 412 case MACREG_INT_CODE_HOST_AWAKE:
507 lbs_deb_cmd("EVENT: host awake\n"); 413 lbs_deb_cmd("EVENT: host awake\n");
414 if (priv->reset_deep_sleep_wakeup)
415 priv->reset_deep_sleep_wakeup(priv);
416 priv->is_deep_sleep = 0;
508 lbs_send_confirmwake(priv); 417 lbs_send_confirmwake(priv);
509 break; 418 break;
510 419
420 case MACREG_INT_CODE_DEEP_SLEEP_AWAKE:
421 if (priv->reset_deep_sleep_wakeup)
422 priv->reset_deep_sleep_wakeup(priv);
423 lbs_deb_cmd("EVENT: ds awake\n");
424 priv->is_deep_sleep = 0;
425 priv->wakeup_dev_required = 0;
426 wake_up_interruptible(&priv->ds_awake_q);
427 break;
428
511 case MACREG_INT_CODE_PS_AWAKE: 429 case MACREG_INT_CODE_PS_AWAKE:
512 lbs_deb_cmd("EVENT: ps awake\n"); 430 lbs_deb_cmd("EVENT: ps awake\n");
513 /* handle unexpected PS AWAKE event */ 431 /* handle unexpected PS AWAKE event */
@@ -533,12 +451,12 @@ int lbs_process_event(struct lbs_private *priv, u32 event)
533 451
534 case MACREG_INT_CODE_MIC_ERR_UNICAST: 452 case MACREG_INT_CODE_MIC_ERR_UNICAST:
535 lbs_deb_cmd("EVENT: UNICAST MIC ERROR\n"); 453 lbs_deb_cmd("EVENT: UNICAST MIC ERROR\n");
536 handle_mic_failureevent(priv, MACREG_INT_CODE_MIC_ERR_UNICAST); 454 lbs_send_mic_failureevent(priv, event);
537 break; 455 break;
538 456
539 case MACREG_INT_CODE_MIC_ERR_MULTICAST: 457 case MACREG_INT_CODE_MIC_ERR_MULTICAST:
540 lbs_deb_cmd("EVENT: MULTICAST MIC ERROR\n"); 458 lbs_deb_cmd("EVENT: MULTICAST MIC ERROR\n");
541 handle_mic_failureevent(priv, MACREG_INT_CODE_MIC_ERR_MULTICAST); 459 lbs_send_mic_failureevent(priv, event);
542 break; 460 break;
543 461
544 case MACREG_INT_CODE_MIB_CHANGED: 462 case MACREG_INT_CODE_MIB_CHANGED:
diff --git a/drivers/net/wireless/libertas/debugfs.c b/drivers/net/wireless/libertas/debugfs.c
index 893a55ca344a..587b0cb0088d 100644
--- a/drivers/net/wireless/libertas/debugfs.c
+++ b/drivers/net/wireless/libertas/debugfs.c
@@ -451,10 +451,12 @@ static ssize_t lbs_rdmac_read(struct file *file, char __user *userbuf,
451 CMD_MAC_REG_ACCESS, 0, 451 CMD_MAC_REG_ACCESS, 0,
452 CMD_OPTION_WAITFORRSP, 0, &offval); 452 CMD_OPTION_WAITFORRSP, 0, &offval);
453 mdelay(10); 453 mdelay(10);
454 pos += snprintf(buf+pos, len-pos, "MAC[0x%x] = 0x%08x\n", 454 if (!ret) {
455 pos += snprintf(buf+pos, len-pos, "MAC[0x%x] = 0x%08x\n",
455 priv->mac_offset, priv->offsetvalue.value); 456 priv->mac_offset, priv->offsetvalue.value);
456 457
457 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); 458 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
459 }
458 free_page(addr); 460 free_page(addr);
459 return ret; 461 return ret;
460} 462}
@@ -514,7 +516,8 @@ static ssize_t lbs_wrmac_write(struct file *file,
514 CMD_OPTION_WAITFORRSP, 0, &offval); 516 CMD_OPTION_WAITFORRSP, 0, &offval);
515 mdelay(10); 517 mdelay(10);
516 518
517 res = count; 519 if (!res)
520 res = count;
518out_unlock: 521out_unlock:
519 free_page(addr); 522 free_page(addr);
520 return res; 523 return res;
@@ -539,10 +542,12 @@ static ssize_t lbs_rdbbp_read(struct file *file, char __user *userbuf,
539 CMD_BBP_REG_ACCESS, 0, 542 CMD_BBP_REG_ACCESS, 0,
540 CMD_OPTION_WAITFORRSP, 0, &offval); 543 CMD_OPTION_WAITFORRSP, 0, &offval);
541 mdelay(10); 544 mdelay(10);
542 pos += snprintf(buf+pos, len-pos, "BBP[0x%x] = 0x%08x\n", 545 if (!ret) {
546 pos += snprintf(buf+pos, len-pos, "BBP[0x%x] = 0x%08x\n",
543 priv->bbp_offset, priv->offsetvalue.value); 547 priv->bbp_offset, priv->offsetvalue.value);
544 548
545 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); 549 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
550 }
546 free_page(addr); 551 free_page(addr);
547 552
548 return ret; 553 return ret;
@@ -603,7 +608,8 @@ static ssize_t lbs_wrbbp_write(struct file *file,
603 CMD_OPTION_WAITFORRSP, 0, &offval); 608 CMD_OPTION_WAITFORRSP, 0, &offval);
604 mdelay(10); 609 mdelay(10);
605 610
606 res = count; 611 if (!res)
612 res = count;
607out_unlock: 613out_unlock:
608 free_page(addr); 614 free_page(addr);
609 return res; 615 return res;
@@ -628,10 +634,12 @@ static ssize_t lbs_rdrf_read(struct file *file, char __user *userbuf,
628 CMD_RF_REG_ACCESS, 0, 634 CMD_RF_REG_ACCESS, 0,
629 CMD_OPTION_WAITFORRSP, 0, &offval); 635 CMD_OPTION_WAITFORRSP, 0, &offval);
630 mdelay(10); 636 mdelay(10);
631 pos += snprintf(buf+pos, len-pos, "RF[0x%x] = 0x%08x\n", 637 if (!ret) {
638 pos += snprintf(buf+pos, len-pos, "RF[0x%x] = 0x%08x\n",
632 priv->rf_offset, priv->offsetvalue.value); 639 priv->rf_offset, priv->offsetvalue.value);
633 640
634 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos); 641 ret = simple_read_from_buffer(userbuf, count, ppos, buf, pos);
642 }
635 free_page(addr); 643 free_page(addr);
636 644
637 return ret; 645 return ret;
@@ -692,7 +700,8 @@ static ssize_t lbs_wrrf_write(struct file *file,
692 CMD_OPTION_WAITFORRSP, 0, &offval); 700 CMD_OPTION_WAITFORRSP, 0, &offval);
693 mdelay(10); 701 mdelay(10);
694 702
695 res = count; 703 if (!res)
704 res = count;
696out_unlock: 705out_unlock:
697 free_page(addr); 706 free_page(addr);
698 return res; 707 return res;
diff --git a/drivers/net/wireless/libertas/decl.h b/drivers/net/wireless/libertas/decl.h
index 8b15380ae6e1..678f7c9f7503 100644
--- a/drivers/net/wireless/libertas/decl.h
+++ b/drivers/net/wireless/libertas/decl.h
@@ -8,71 +8,48 @@
8 8
9#include <linux/netdevice.h> 9#include <linux/netdevice.h>
10 10
11#include "defs.h"
12 11
13/** Function Prototype Declaration */
14struct lbs_private; 12struct lbs_private;
15struct sk_buff; 13struct sk_buff;
16struct net_device; 14struct net_device;
17struct cmd_ctrl_node;
18struct cmd_ds_command;
19 15
20void lbs_set_mac_control(struct lbs_private *priv);
21 16
22void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count); 17/* ethtool.c */
23 18extern const struct ethtool_ops lbs_ethtool_ops;
24int lbs_free_cmd_buffer(struct lbs_private *priv);
25
26int lbs_prepare_and_send_command(struct lbs_private *priv,
27 u16 cmd_no,
28 u16 cmd_action,
29 u16 wait_option, u32 cmd_oid, void *pdata_buf);
30 19
31int lbs_allocate_cmd_buffer(struct lbs_private *priv);
32int lbs_execute_next_command(struct lbs_private *priv);
33int lbs_process_event(struct lbs_private *priv, u32 event);
34void lbs_queue_event(struct lbs_private *priv, u32 event);
35void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx);
36 20
37u32 lbs_fw_index_to_data_rate(u8 index); 21/* tx.c */
38u8 lbs_data_rate_to_fw_index(u32 rate); 22void lbs_send_tx_feedback(struct lbs_private *priv, u32 try_count);
39
40/** The proc fs interface */
41int lbs_process_command_response(struct lbs_private *priv, u8 *data, u32 len);
42void lbs_complete_command(struct lbs_private *priv, struct cmd_ctrl_node *cmd,
43 int result);
44netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb, 23netdev_tx_t lbs_hard_start_xmit(struct sk_buff *skb,
45 struct net_device *dev); 24 struct net_device *dev);
46int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band);
47 25
26/* rx.c */
48int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *); 27int lbs_process_rxed_packet(struct lbs_private *priv, struct sk_buff *);
49 28
50void lbs_ps_sleep(struct lbs_private *priv, int wait_option);
51void lbs_ps_confirm_sleep(struct lbs_private *priv);
52void lbs_ps_wakeup(struct lbs_private *priv, int wait_option);
53
54struct chan_freq_power *lbs_find_cfp_by_band_and_channel(
55 struct lbs_private *priv,
56 u8 band,
57 u16 channel);
58
59void lbs_mac_event_disconnected(struct lbs_private *priv);
60
61void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str);
62 29
63/* persistcfg.c */ 30/* persistcfg.c */
64void lbs_persist_config_init(struct net_device *net); 31void lbs_persist_config_init(struct net_device *net);
65void lbs_persist_config_remove(struct net_device *net); 32void lbs_persist_config_remove(struct net_device *net);
66 33
34
67/* main.c */ 35/* main.c */
68struct chan_freq_power *lbs_get_region_cfp_table(u8 region,
69 int *cfp_no);
70struct lbs_private *lbs_add_card(void *card, struct device *dmdev); 36struct lbs_private *lbs_add_card(void *card, struct device *dmdev);
71void lbs_remove_card(struct lbs_private *priv); 37void lbs_remove_card(struct lbs_private *priv);
72int lbs_start_card(struct lbs_private *priv); 38int lbs_start_card(struct lbs_private *priv);
73void lbs_stop_card(struct lbs_private *priv); 39void lbs_stop_card(struct lbs_private *priv);
74void lbs_host_to_card_done(struct lbs_private *priv); 40void lbs_host_to_card_done(struct lbs_private *priv);
75 41
76int lbs_update_channel(struct lbs_private *priv); 42int lbs_suspend(struct lbs_private *priv);
43void lbs_resume(struct lbs_private *priv);
44
45void lbs_queue_event(struct lbs_private *priv, u32 event);
46void lbs_notify_command_response(struct lbs_private *priv, u8 resp_idx);
47
48int lbs_enter_auto_deep_sleep(struct lbs_private *priv);
49int lbs_exit_auto_deep_sleep(struct lbs_private *priv);
50
51u32 lbs_fw_index_to_data_rate(u8 index);
52u8 lbs_data_rate_to_fw_index(u32 rate);
53
77 54
78#endif 55#endif
diff --git a/drivers/net/wireless/libertas/defs.h b/drivers/net/wireless/libertas/defs.h
index 72f3479a4d70..6b6ea9f7bf5b 100644
--- a/drivers/net/wireless/libertas/defs.h
+++ b/drivers/net/wireless/libertas/defs.h
@@ -42,6 +42,7 @@
42#define LBS_DEB_SDIO 0x00400000 42#define LBS_DEB_SDIO 0x00400000
43#define LBS_DEB_SYSFS 0x00800000 43#define LBS_DEB_SYSFS 0x00800000
44#define LBS_DEB_SPI 0x01000000 44#define LBS_DEB_SPI 0x01000000
45#define LBS_DEB_CFG80211 0x02000000
45 46
46extern unsigned int lbs_debug; 47extern unsigned int lbs_debug;
47 48
@@ -86,6 +87,7 @@ do { if ((lbs_debug & (grp)) == (grp)) \
86#define lbs_deb_sdio(fmt, args...) LBS_DEB_LL(LBS_DEB_SDIO, " sdio", fmt, ##args) 87#define lbs_deb_sdio(fmt, args...) LBS_DEB_LL(LBS_DEB_SDIO, " sdio", fmt, ##args)
87#define lbs_deb_sysfs(fmt, args...) LBS_DEB_LL(LBS_DEB_SYSFS, " sysfs", fmt, ##args) 88#define lbs_deb_sysfs(fmt, args...) LBS_DEB_LL(LBS_DEB_SYSFS, " sysfs", fmt, ##args)
88#define lbs_deb_spi(fmt, args...) LBS_DEB_LL(LBS_DEB_SPI, " spi", fmt, ##args) 89#define lbs_deb_spi(fmt, args...) LBS_DEB_LL(LBS_DEB_SPI, " spi", fmt, ##args)
90#define lbs_deb_cfg80211(fmt, args...) LBS_DEB_LL(LBS_DEB_CFG80211, " cfg80211", fmt, ##args)
89 91
90#define lbs_pr_info(format, args...) \ 92#define lbs_pr_info(format, args...) \
91 printk(KERN_INFO DRV_NAME": " format, ## args) 93 printk(KERN_INFO DRV_NAME": " format, ## args)
@@ -320,7 +322,6 @@ static inline void lbs_deb_hex(unsigned int grp, const char *prompt, u8 *buf, in
320extern const char lbs_driver_version[]; 322extern const char lbs_driver_version[];
321extern u16 lbs_region_code_to_index[MRVDRV_MAX_REGION_CODE]; 323extern u16 lbs_region_code_to_index[MRVDRV_MAX_REGION_CODE];
322 324
323extern u8 lbs_bg_rates[MAX_RATES];
324 325
325/** ENUM definition*/ 326/** ENUM definition*/
326/** SNRNF_TYPE */ 327/** SNRNF_TYPE */
diff --git a/drivers/net/wireless/libertas/dev.h b/drivers/net/wireless/libertas/dev.h
index d3b69a4b4b5e..1a675111300d 100644
--- a/drivers/net/wireless/libertas/dev.h
+++ b/drivers/net/wireless/libertas/dev.h
@@ -6,75 +6,10 @@
6#ifndef _LBS_DEV_H_ 6#ifndef _LBS_DEV_H_
7#define _LBS_DEV_H_ 7#define _LBS_DEV_H_
8 8
9#include <linux/netdevice.h> 9#include "scan.h"
10#include <linux/wireless.h> 10#include "assoc.h"
11#include <linux/ethtool.h>
12#include <linux/debugfs.h>
13 11
14#include "defs.h"
15#include "hostcmd.h"
16 12
17extern const struct ethtool_ops lbs_ethtool_ops;
18
19#define MAX_BSSID_PER_CHANNEL 16
20
21#define NR_TX_QUEUE 3
22
23/* For the extended Scan */
24#define MAX_EXTENDED_SCAN_BSSID_LIST MAX_BSSID_PER_CHANNEL * \
25 MRVDRV_MAX_CHANNEL_SIZE + 1
26
27#define MAX_REGION_CHANNEL_NUM 2
28
29/** Chan-freq-TxPower mapping table*/
30struct chan_freq_power {
31 /** channel Number */
32 u16 channel;
33 /** frequency of this channel */
34 u32 freq;
35 /** Max allowed Tx power level */
36 u16 maxtxpower;
37 /** TRUE:channel unsupported; FLASE:supported*/
38 u8 unsupported;
39};
40
41/** region-band mapping table*/
42struct region_channel {
43 /** TRUE if this entry is valid */
44 u8 valid;
45 /** region code for US, Japan ... */
46 u8 region;
47 /** band B/G/A, used for BAND_CONFIG cmd */
48 u8 band;
49 /** Actual No. of elements in the array below */
50 u8 nrcfp;
51 /** chan-freq-txpower mapping table*/
52 struct chan_freq_power *CFP;
53};
54
55struct lbs_802_11_security {
56 u8 WPAenabled;
57 u8 WPA2enabled;
58 u8 wep_enabled;
59 u8 auth_mode;
60 u32 key_mgmt;
61};
62
63/** Current Basic Service Set State Structure */
64struct current_bss_params {
65 /** bssid */
66 u8 bssid[ETH_ALEN];
67 /** ssid */
68 u8 ssid[IW_ESSID_MAX_SIZE + 1];
69 u8 ssid_len;
70
71 /** band */
72 u8 band;
73 /** channel */
74 u8 channel;
75 /** zero-terminated array of supported data rates */
76 u8 rates[MAX_RATES + 1];
77};
78 13
79/** sleep_params */ 14/** sleep_params */
80struct sleep_params { 15struct sleep_params {
@@ -100,95 +35,96 @@ struct lbs_mesh_stats {
100 35
101/** Private structure for the MV device */ 36/** Private structure for the MV device */
102struct lbs_private { 37struct lbs_private {
103 int mesh_open;
104 int mesh_fw_ver;
105 int infra_open;
106 int mesh_autostart_enabled;
107 38
108 char name[DEV_NAME_LEN]; 39 /* Basic networking */
109
110 void *card;
111 struct net_device *dev; 40 struct net_device *dev;
41 u32 connect_status;
42 int infra_open;
43 struct work_struct mcast_work;
44 u32 nr_of_multicastmacaddr;
45 u8 multicastlist[MRVDRV_MAX_MULTICAST_LIST_SIZE][ETH_ALEN];
112 46
47 /* CFG80211 */
48 struct wireless_dev *wdev;
49
50 /* Mesh */
113 struct net_device *mesh_dev; /* Virtual device */ 51 struct net_device *mesh_dev; /* Virtual device */
52 u32 mesh_connect_status;
53 struct lbs_mesh_stats mstats;
54 int mesh_open;
55 int mesh_fw_ver;
56 int mesh_autostart_enabled;
57 uint16_t mesh_tlv;
58 u8 mesh_ssid[IEEE80211_MAX_SSID_LEN + 1];
59 u8 mesh_ssid_len;
60 struct work_struct sync_channel;
61
62 /* Monitor mode */
114 struct net_device *rtap_net_dev; 63 struct net_device *rtap_net_dev;
64 u32 monitormode;
115 65
116 struct iw_statistics wstats; 66 /* Debugfs */
117 struct lbs_mesh_stats mstats;
118 struct dentry *debugfs_dir; 67 struct dentry *debugfs_dir;
119 struct dentry *debugfs_debug; 68 struct dentry *debugfs_debug;
120 struct dentry *debugfs_files[6]; 69 struct dentry *debugfs_files[6];
121
122 struct dentry *events_dir; 70 struct dentry *events_dir;
123 struct dentry *debugfs_events_files[6]; 71 struct dentry *debugfs_events_files[6];
124
125 struct dentry *regs_dir; 72 struct dentry *regs_dir;
126 struct dentry *debugfs_regs_files[6]; 73 struct dentry *debugfs_regs_files[6];
127 74
75 /* Hardware debugging */
128 u32 mac_offset; 76 u32 mac_offset;
129 u32 bbp_offset; 77 u32 bbp_offset;
130 u32 rf_offset; 78 u32 rf_offset;
79 struct lbs_offset_value offsetvalue;
131 80
132 /* Download sent: 81 /* Power management */
133 bit0 1/0=data_sent/data_tx_done, 82 u16 psmode;
134 bit1 1/0=cmd_sent/cmd_tx_done, 83 u32 psstate;
135 all other bits reserved 0 */ 84 u8 needtowakeup;
136 u8 dnld_sent;
137
138 /** thread to service interrupts */
139 struct task_struct *main_thread;
140 wait_queue_head_t waitq;
141 struct workqueue_struct *work_thread;
142 85
143 struct work_struct mcast_work; 86 /* Deep sleep */
87 int is_deep_sleep;
88 int is_auto_deep_sleep_enabled;
89 int wakeup_dev_required;
90 int is_activity_detected;
91 int auto_deep_sleep_timeout; /* in ms */
92 wait_queue_head_t ds_awake_q;
93 struct timer_list auto_deepsleep_timer;
144 94
145 /** Scanning */ 95 /* Hardware access */
146 struct delayed_work scan_work; 96 void *card;
147 struct delayed_work assoc_work; 97 u8 fw_ready;
148 struct work_struct sync_channel; 98 u8 surpriseremoved;
149 /* remember which channel was scanned last, != 0 if currently scanning */
150 int scan_channel;
151 u8 scan_ssid[IW_ESSID_MAX_SIZE + 1];
152 u8 scan_ssid_len;
153
154 /** Hardware access */
155 int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb); 99 int (*hw_host_to_card) (struct lbs_private *priv, u8 type, u8 *payload, u16 nb);
156 void (*reset_card) (struct lbs_private *priv); 100 void (*reset_card) (struct lbs_private *priv);
101 int (*enter_deep_sleep) (struct lbs_private *priv);
102 int (*exit_deep_sleep) (struct lbs_private *priv);
103 int (*reset_deep_sleep_wakeup) (struct lbs_private *priv);
157 104
158 /* Wake On LAN */ 105 /* Adapter info (from EEPROM) */
159 uint32_t wol_criteria;
160 uint8_t wol_gpio;
161 uint8_t wol_gap;
162
163 /** Wlan adapter data structure*/
164 /** STATUS variables */
165 u32 fwrelease; 106 u32 fwrelease;
166 u32 fwcapinfo; 107 u32 fwcapinfo;
108 u16 regioncode;
109 u8 current_addr[ETH_ALEN];
167 110
168 struct mutex lock; 111 /* Command download */
169 112 u8 dnld_sent;
170 /* TX packet ready to be sent... */ 113 /* bit0 1/0=data_sent/data_tx_done,
171 int tx_pending_len; /* -1 while building packet */ 114 bit1 1/0=cmd_sent/cmd_tx_done,
172 115 all other bits reserved 0 */
173 u8 tx_pending_buf[LBS_UPLD_SIZE];
174 /* protected by hard_start_xmit serialization */
175
176 /** command-related variables */
177 u16 seqnum; 116 u16 seqnum;
178
179 struct cmd_ctrl_node *cmd_array; 117 struct cmd_ctrl_node *cmd_array;
180 /** Current command */
181 struct cmd_ctrl_node *cur_cmd; 118 struct cmd_ctrl_node *cur_cmd;
182 int cur_cmd_retcode; 119 struct list_head cmdfreeq; /* free command buffers */
183 /** command Queues */ 120 struct list_head cmdpendingq; /* pending command buffers */
184 /** Free command buffers */
185 struct list_head cmdfreeq;
186 /** Pending command buffers */
187 struct list_head cmdpendingq;
188
189 wait_queue_head_t cmd_pending; 121 wait_queue_head_t cmd_pending;
122 struct timer_list command_timer;
123 int nr_retries;
124 int cmd_timed_out;
190 125
191 /* Command responses sent from the hardware to the driver */ 126 /* Command responses sent from the hardware to the driver */
127 int cur_cmd_retcode;
192 u8 resp_idx; 128 u8 resp_idx;
193 u8 resp_buf[2][LBS_UPLD_SIZE]; 129 u8 resp_buf[2][LBS_UPLD_SIZE];
194 u32 resp_len[2]; 130 u32 resp_len[2];
@@ -196,95 +132,76 @@ struct lbs_private {
196 /* Events sent from hardware to driver */ 132 /* Events sent from hardware to driver */
197 struct kfifo *event_fifo; 133 struct kfifo *event_fifo;
198 134
199 /* nickname */ 135 /** thread to service interrupts */
200 u8 nodename[16]; 136 struct task_struct *main_thread;
201 137 wait_queue_head_t waitq;
202 /** spin locks */ 138 struct workqueue_struct *work_thread;
203 spinlock_t driver_lock;
204
205 /** Timers */
206 struct timer_list command_timer;
207 int nr_retries;
208 int cmd_timed_out;
209
210 /** current ssid/bssid related parameters*/
211 struct current_bss_params curbssparams;
212
213 uint16_t mesh_tlv;
214 u8 mesh_ssid[IW_ESSID_MAX_SIZE + 1];
215 u8 mesh_ssid_len;
216
217 /* IW_MODE_* */
218 u8 mode;
219
220 /* Scan results list */
221 struct list_head network_list;
222 struct list_head network_free_list;
223 struct bss_descriptor *networks;
224
225 u16 beacon_period;
226 u8 beacon_enable;
227 u8 adhoccreate;
228
229 /** capability Info used in Association, start, join */
230 u16 capability;
231
232 /** MAC address information */
233 u8 current_addr[ETH_ALEN];
234 u8 multicastlist[MRVDRV_MAX_MULTICAST_LIST_SIZE][ETH_ALEN];
235 u32 nr_of_multicastmacaddr;
236 139
237 /** 802.11 statistics */ 140 /** Encryption stuff */
238// struct cmd_DS_802_11_GET_STAT wlan802_11Stat; 141 struct lbs_802_11_security secinfo;
142 struct enc_key wpa_mcast_key;
143 struct enc_key wpa_unicast_key;
144 u8 wpa_ie[MAX_WPA_IE_LEN];
145 u8 wpa_ie_len;
146 u16 wep_tx_keyidx;
147 struct enc_key wep_keys[4];
239 148
240 uint16_t enablehwauto; 149 /* Wake On LAN */
241 uint16_t ratebitmap; 150 uint32_t wol_criteria;
151 uint8_t wol_gpio;
152 uint8_t wol_gap;
242 153
154 /* Transmitting */
155 int tx_pending_len; /* -1 while building packet */
156 u8 tx_pending_buf[LBS_UPLD_SIZE];
157 /* protected by hard_start_xmit serialization */
243 u8 txretrycount; 158 u8 txretrycount;
244
245 /** Tx-related variables (for single packet tx) */
246 struct sk_buff *currenttxskb; 159 struct sk_buff *currenttxskb;
247 160
248 /** NIC Operation characteristics */ 161 /* Locks */
162 struct mutex lock;
163 spinlock_t driver_lock;
164
165 /* NIC/link operation characteristics */
249 u16 mac_control; 166 u16 mac_control;
250 u32 connect_status; 167 u8 radio_on;
251 u32 mesh_connect_status; 168 u8 channel;
252 u16 regioncode;
253 s16 txpower_cur; 169 s16 txpower_cur;
254 s16 txpower_min; 170 s16 txpower_min;
255 s16 txpower_max; 171 s16 txpower_max;
256 172
257 /** POWER MANAGEMENT AND PnP SUPPORT */ 173 /** Scanning */
258 u8 surpriseremoved; 174 struct delayed_work scan_work;
259 175 int scan_channel;
260 u16 psmode; /* Wlan802_11PowermodeCAM=disable 176 /* remember which channel was scanned last, != 0 if currently scanning */
261 Wlan802_11PowermodeMAX_PSP=enable */ 177 u8 scan_ssid[IEEE80211_MAX_SSID_LEN + 1];
262 u32 psstate; 178 u8 scan_ssid_len;
263 u8 needtowakeup;
264 179
180 /* Associating */
181 struct delayed_work assoc_work;
182 struct current_bss_params curbssparams;
183 u8 mode;
184 struct list_head network_list;
185 struct list_head network_free_list;
186 struct bss_descriptor *networks;
265 struct assoc_request * pending_assoc_req; 187 struct assoc_request * pending_assoc_req;
266 struct assoc_request * in_progress_assoc_req; 188 struct assoc_request * in_progress_assoc_req;
189 u16 capability;
190 uint16_t enablehwauto;
191 uint16_t ratebitmap;
267 192
268 /** Encryption parameter */ 193 /* ADHOC */
269 struct lbs_802_11_security secinfo; 194 u16 beacon_period;
270 195 u8 beacon_enable;
271 /** WEP keys */ 196 u8 adhoccreate;
272 struct enc_key wep_keys[4];
273 u16 wep_tx_keyidx;
274
275 /** WPA keys */
276 struct enc_key wpa_mcast_key;
277 struct enc_key wpa_unicast_key;
278
279/*
280 * In theory, the IE is limited to the IE length, 255,
281 * but in practice 64 bytes are enough.
282 */
283#define MAX_WPA_IE_LEN 64
284 197
285 /** WPA Information Elements*/ 198 /* WEXT */
286 u8 wpa_ie[MAX_WPA_IE_LEN]; 199 char name[DEV_NAME_LEN];
287 u8 wpa_ie_len; 200 u8 nodename[16];
201 struct iw_statistics wstats;
202 u8 cur_rate;
203#define MAX_REGION_CHANNEL_NUM 2
204 struct region_channel region_channel[MAX_REGION_CHANNEL_NUM];
288 205
289 /** Requested Signal Strength*/ 206 /** Requested Signal Strength*/
290 u16 SNR[MAX_TYPE_B][MAX_TYPE_AVG]; 207 u16 SNR[MAX_TYPE_B][MAX_TYPE_AVG];
@@ -294,116 +211,8 @@ struct lbs_private {
294 u8 rawNF[DEFAULT_DATA_AVG_FACTOR]; 211 u8 rawNF[DEFAULT_DATA_AVG_FACTOR];
295 u16 nextSNRNF; 212 u16 nextSNRNF;
296 u16 numSNRNF; 213 u16 numSNRNF;
297
298 u8 radio_on;
299
300 /** data rate stuff */
301 u8 cur_rate;
302
303 /** RF calibration data */
304
305#define MAX_REGION_CHANNEL_NUM 2
306 /** region channel data */
307 struct region_channel region_channel[MAX_REGION_CHANNEL_NUM];
308
309 struct region_channel universal_channel[MAX_REGION_CHANNEL_NUM];
310
311 /** 11D and Domain Regulatory Data */
312 struct lbs_802_11d_domain_reg domainreg;
313 struct parsed_region_chan_11d parsed_region_chan;
314
315 /** FSM variable for 11d support */
316 u32 enable11d;
317
318 /** MISCELLANEOUS */
319 struct lbs_offset_value offsetvalue;
320
321 u32 monitormode;
322 u8 fw_ready;
323}; 214};
324 215
325extern struct cmd_confirm_sleep confirm_sleep; 216extern struct cmd_confirm_sleep confirm_sleep;
326 217
327/**
328 * @brief Structure used to store information for each beacon/probe response
329 */
330struct bss_descriptor {
331 u8 bssid[ETH_ALEN];
332
333 u8 ssid[IW_ESSID_MAX_SIZE + 1];
334 u8 ssid_len;
335
336 u16 capability;
337 u32 rssi;
338 u32 channel;
339 u16 beaconperiod;
340 __le16 atimwindow;
341
342 /* IW_MODE_AUTO, IW_MODE_ADHOC, IW_MODE_INFRA */
343 u8 mode;
344
345 /* zero-terminated array of supported data rates */
346 u8 rates[MAX_RATES + 1];
347
348 unsigned long last_scanned;
349
350 union ieee_phy_param_set phy;
351 union ieee_ss_param_set ss;
352
353 struct ieee_ie_country_info_full_set countryinfo;
354
355 u8 wpa_ie[MAX_WPA_IE_LEN];
356 size_t wpa_ie_len;
357 u8 rsn_ie[MAX_WPA_IE_LEN];
358 size_t rsn_ie_len;
359
360 u8 mesh;
361
362 struct list_head list;
363};
364
365/** Association request
366 *
367 * Encapsulates all the options that describe a specific assocation request
368 * or configuration of the wireless card's radio, mode, and security settings.
369 */
370struct assoc_request {
371#define ASSOC_FLAG_SSID 1
372#define ASSOC_FLAG_CHANNEL 2
373#define ASSOC_FLAG_BAND 3
374#define ASSOC_FLAG_MODE 4
375#define ASSOC_FLAG_BSSID 5
376#define ASSOC_FLAG_WEP_KEYS 6
377#define ASSOC_FLAG_WEP_TX_KEYIDX 7
378#define ASSOC_FLAG_WPA_MCAST_KEY 8
379#define ASSOC_FLAG_WPA_UCAST_KEY 9
380#define ASSOC_FLAG_SECINFO 10
381#define ASSOC_FLAG_WPA_IE 11
382 unsigned long flags;
383
384 u8 ssid[IW_ESSID_MAX_SIZE + 1];
385 u8 ssid_len;
386 u8 channel;
387 u8 band;
388 u8 mode;
389 u8 bssid[ETH_ALEN] __attribute__ ((aligned (2)));
390
391 /** WEP keys */
392 struct enc_key wep_keys[4];
393 u16 wep_tx_keyidx;
394
395 /** WPA keys */
396 struct enc_key wpa_mcast_key;
397 struct enc_key wpa_unicast_key;
398
399 struct lbs_802_11_security secinfo;
400
401 /** WPA Information Elements*/
402 u8 wpa_ie[MAX_WPA_IE_LEN];
403 u8 wpa_ie_len;
404
405 /* BSS to associate with for infrastructure of Ad-Hoc join */
406 struct bss_descriptor bss;
407};
408
409#endif 218#endif
diff --git a/drivers/net/wireless/libertas/host.h b/drivers/net/wireless/libertas/host.h
index fe8f0cb737bc..3809c0b49464 100644
--- a/drivers/net/wireless/libertas/host.h
+++ b/drivers/net/wireless/libertas/host.h
@@ -1,201 +1,190 @@
1/** 1/**
2 * This file contains definitions of WLAN commands. 2 * This file function prototypes, data structure
3 * and definitions for all the host/station commands
3 */ 4 */
4 5
5#ifndef _LBS_HOST_H_ 6#ifndef _LBS_HOST_H_
6#define _LBS_HOST_H_ 7#define _LBS_HOST_H_
7 8
8/** PUBLIC DEFINITIONS */ 9#include "types.h"
9#define DEFAULT_AD_HOC_CHANNEL 6 10#include "defs.h"
10#define DEFAULT_AD_HOC_CHANNEL_A 36
11 11
12#define CMD_OPTION_WAITFORRSP 0x0002 12#define DEFAULT_AD_HOC_CHANNEL 6
13
14#define CMD_OPTION_WAITFORRSP 0x0002
13 15
14/** Host command IDs */ 16/** Host command IDs */
15 17
16/* Return command are almost always the same as the host command, but with 18/* Return command are almost always the same as the host command, but with
17 * bit 15 set high. There are a few exceptions, though... 19 * bit 15 set high. There are a few exceptions, though...
18 */ 20 */
19#define CMD_RET(cmd) (0x8000 | cmd) 21#define CMD_RET(cmd) (0x8000 | cmd)
20 22
21/* Return command convention exceptions: */ 23/* Return command convention exceptions: */
22#define CMD_RET_802_11_ASSOCIATE 0x8012 24#define CMD_RET_802_11_ASSOCIATE 0x8012
23 25
24/* Command codes */ 26/* Command codes */
25#define CMD_GET_HW_SPEC 0x0003 27#define CMD_GET_HW_SPEC 0x0003
26#define CMD_EEPROM_UPDATE 0x0004 28#define CMD_EEPROM_UPDATE 0x0004
27#define CMD_802_11_RESET 0x0005 29#define CMD_802_11_RESET 0x0005
28#define CMD_802_11_SCAN 0x0006 30#define CMD_802_11_SCAN 0x0006
29#define CMD_802_11_GET_LOG 0x000b 31#define CMD_802_11_GET_LOG 0x000b
30#define CMD_MAC_MULTICAST_ADR 0x0010 32#define CMD_MAC_MULTICAST_ADR 0x0010
31#define CMD_802_11_AUTHENTICATE 0x0011 33#define CMD_802_11_AUTHENTICATE 0x0011
32#define CMD_802_11_EEPROM_ACCESS 0x0059 34#define CMD_802_11_EEPROM_ACCESS 0x0059
33#define CMD_802_11_ASSOCIATE 0x0050 35#define CMD_802_11_ASSOCIATE 0x0050
34#define CMD_802_11_SET_WEP 0x0013 36#define CMD_802_11_SET_WEP 0x0013
35#define CMD_802_11_GET_STAT 0x0014 37#define CMD_802_11_GET_STAT 0x0014
36#define CMD_802_3_GET_STAT 0x0015 38#define CMD_802_3_GET_STAT 0x0015
37#define CMD_802_11_SNMP_MIB 0x0016 39#define CMD_802_11_SNMP_MIB 0x0016
38#define CMD_MAC_REG_MAP 0x0017 40#define CMD_MAC_REG_MAP 0x0017
39#define CMD_BBP_REG_MAP 0x0018 41#define CMD_BBP_REG_MAP 0x0018
40#define CMD_MAC_REG_ACCESS 0x0019 42#define CMD_MAC_REG_ACCESS 0x0019
41#define CMD_BBP_REG_ACCESS 0x001a 43#define CMD_BBP_REG_ACCESS 0x001a
42#define CMD_RF_REG_ACCESS 0x001b 44#define CMD_RF_REG_ACCESS 0x001b
43#define CMD_802_11_RADIO_CONTROL 0x001c 45#define CMD_802_11_RADIO_CONTROL 0x001c
44#define CMD_802_11_RF_CHANNEL 0x001d 46#define CMD_802_11_RF_CHANNEL 0x001d
45#define CMD_802_11_RF_TX_POWER 0x001e 47#define CMD_802_11_RF_TX_POWER 0x001e
46#define CMD_802_11_RSSI 0x001f 48#define CMD_802_11_RSSI 0x001f
47#define CMD_802_11_RF_ANTENNA 0x0020 49#define CMD_802_11_RF_ANTENNA 0x0020
48#define CMD_802_11_PS_MODE 0x0021 50#define CMD_802_11_PS_MODE 0x0021
49#define CMD_802_11_DATA_RATE 0x0022 51#define CMD_802_11_DATA_RATE 0x0022
50#define CMD_RF_REG_MAP 0x0023 52#define CMD_RF_REG_MAP 0x0023
51#define CMD_802_11_DEAUTHENTICATE 0x0024 53#define CMD_802_11_DEAUTHENTICATE 0x0024
52#define CMD_802_11_REASSOCIATE 0x0025 54#define CMD_802_11_REASSOCIATE 0x0025
53#define CMD_MAC_CONTROL 0x0028 55#define CMD_MAC_CONTROL 0x0028
54#define CMD_802_11_AD_HOC_START 0x002b 56#define CMD_802_11_AD_HOC_START 0x002b
55#define CMD_802_11_AD_HOC_JOIN 0x002c 57#define CMD_802_11_AD_HOC_JOIN 0x002c
56#define CMD_802_11_QUERY_TKIP_REPLY_CNTRS 0x002e 58#define CMD_802_11_QUERY_TKIP_REPLY_CNTRS 0x002e
57#define CMD_802_11_ENABLE_RSN 0x002f 59#define CMD_802_11_ENABLE_RSN 0x002f
58#define CMD_802_11_SET_AFC 0x003c 60#define CMD_802_11_SET_AFC 0x003c
59#define CMD_802_11_GET_AFC 0x003d 61#define CMD_802_11_GET_AFC 0x003d
60#define CMD_802_11_AD_HOC_STOP 0x0040 62#define CMD_802_11_DEEP_SLEEP 0x003e
61#define CMD_802_11_HOST_SLEEP_CFG 0x0043 63#define CMD_802_11_AD_HOC_STOP 0x0040
62#define CMD_802_11_WAKEUP_CONFIRM 0x0044 64#define CMD_802_11_HOST_SLEEP_CFG 0x0043
63#define CMD_802_11_HOST_SLEEP_ACTIVATE 0x0045 65#define CMD_802_11_WAKEUP_CONFIRM 0x0044
64#define CMD_802_11_BEACON_STOP 0x0049 66#define CMD_802_11_HOST_SLEEP_ACTIVATE 0x0045
65#define CMD_802_11_MAC_ADDRESS 0x004d 67#define CMD_802_11_BEACON_STOP 0x0049
66#define CMD_802_11_LED_GPIO_CTRL 0x004e 68#define CMD_802_11_MAC_ADDRESS 0x004d
67#define CMD_802_11_EEPROM_ACCESS 0x0059 69#define CMD_802_11_LED_GPIO_CTRL 0x004e
68#define CMD_802_11_BAND_CONFIG 0x0058 70#define CMD_802_11_EEPROM_ACCESS 0x0059
69#define CMD_GSPI_BUS_CONFIG 0x005a 71#define CMD_802_11_BAND_CONFIG 0x0058
70#define CMD_802_11D_DOMAIN_INFO 0x005b 72#define CMD_GSPI_BUS_CONFIG 0x005a
71#define CMD_802_11_KEY_MATERIAL 0x005e 73#define CMD_802_11D_DOMAIN_INFO 0x005b
72#define CMD_802_11_SLEEP_PARAMS 0x0066 74#define CMD_802_11_KEY_MATERIAL 0x005e
73#define CMD_802_11_INACTIVITY_TIMEOUT 0x0067 75#define CMD_802_11_SLEEP_PARAMS 0x0066
74#define CMD_802_11_SLEEP_PERIOD 0x0068 76#define CMD_802_11_INACTIVITY_TIMEOUT 0x0067
75#define CMD_802_11_TPC_CFG 0x0072 77#define CMD_802_11_SLEEP_PERIOD 0x0068
76#define CMD_802_11_PA_CFG 0x0073 78#define CMD_802_11_TPC_CFG 0x0072
77#define CMD_802_11_FW_WAKE_METHOD 0x0074 79#define CMD_802_11_PA_CFG 0x0073
78#define CMD_802_11_SUBSCRIBE_EVENT 0x0075 80#define CMD_802_11_FW_WAKE_METHOD 0x0074
79#define CMD_802_11_RATE_ADAPT_RATESET 0x0076 81#define CMD_802_11_SUBSCRIBE_EVENT 0x0075
80#define CMD_802_11_TX_RATE_QUERY 0x007f 82#define CMD_802_11_RATE_ADAPT_RATESET 0x0076
81#define CMD_GET_TSF 0x0080 83#define CMD_802_11_TX_RATE_QUERY 0x007f
82#define CMD_BT_ACCESS 0x0087 84#define CMD_GET_TSF 0x0080
83#define CMD_FWT_ACCESS 0x0095 85#define CMD_BT_ACCESS 0x0087
84#define CMD_802_11_MONITOR_MODE 0x0098 86#define CMD_FWT_ACCESS 0x0095
85#define CMD_MESH_ACCESS 0x009b 87#define CMD_802_11_MONITOR_MODE 0x0098
86#define CMD_MESH_CONFIG_OLD 0x00a3 88#define CMD_MESH_ACCESS 0x009b
87#define CMD_MESH_CONFIG 0x00ac 89#define CMD_MESH_CONFIG_OLD 0x00a3
88#define CMD_SET_BOOT2_VER 0x00a5 90#define CMD_MESH_CONFIG 0x00ac
89#define CMD_FUNC_INIT 0x00a9 91#define CMD_SET_BOOT2_VER 0x00a5
90#define CMD_FUNC_SHUTDOWN 0x00aa 92#define CMD_FUNC_INIT 0x00a9
91#define CMD_802_11_BEACON_CTRL 0x00b0 93#define CMD_FUNC_SHUTDOWN 0x00aa
94#define CMD_802_11_BEACON_CTRL 0x00b0
92 95
93/* For the IEEE Power Save */ 96/* For the IEEE Power Save */
94#define CMD_SUBCMD_ENTER_PS 0x0030 97#define CMD_SUBCMD_ENTER_PS 0x0030
95#define CMD_SUBCMD_EXIT_PS 0x0031 98#define CMD_SUBCMD_EXIT_PS 0x0031
96#define CMD_SUBCMD_SLEEP_CONFIRMED 0x0034 99#define CMD_SUBCMD_SLEEP_CONFIRMED 0x0034
97#define CMD_SUBCMD_FULL_POWERDOWN 0x0035 100#define CMD_SUBCMD_FULL_POWERDOWN 0x0035
98#define CMD_SUBCMD_FULL_POWERUP 0x0036 101#define CMD_SUBCMD_FULL_POWERUP 0x0036
99 102
100#define CMD_ENABLE_RSN 0x0001 103#define CMD_ENABLE_RSN 0x0001
101#define CMD_DISABLE_RSN 0x0000 104#define CMD_DISABLE_RSN 0x0000
102 105
103#define CMD_ACT_GET 0x0000 106#define CMD_ACT_GET 0x0000
104#define CMD_ACT_SET 0x0001 107#define CMD_ACT_SET 0x0001
105#define CMD_ACT_GET_AES 0x0002
106#define CMD_ACT_SET_AES 0x0003
107#define CMD_ACT_REMOVE_AES 0x0004
108 108
109/* Define action or option for CMD_802_11_SET_WEP */ 109/* Define action or option for CMD_802_11_SET_WEP */
110#define CMD_ACT_ADD 0x0002 110#define CMD_ACT_ADD 0x0002
111#define CMD_ACT_REMOVE 0x0004 111#define CMD_ACT_REMOVE 0x0004
112#define CMD_ACT_USE_DEFAULT 0x0008
113
114#define CMD_TYPE_WEP_40_BIT 0x01
115#define CMD_TYPE_WEP_104_BIT 0x02
116 112
117#define CMD_NUM_OF_WEP_KEYS 4 113#define CMD_TYPE_WEP_40_BIT 0x01
114#define CMD_TYPE_WEP_104_BIT 0x02
118 115
119#define CMD_WEP_KEY_INDEX_MASK 0x3fff 116#define CMD_NUM_OF_WEP_KEYS 4
120 117
121/* Define action or option for CMD_802_11_RESET */ 118#define CMD_WEP_KEY_INDEX_MASK 0x3fff
122#define CMD_ACT_HALT 0x0003
123 119
124/* Define action or option for CMD_802_11_SCAN */ 120/* Define action or option for CMD_802_11_SCAN */
125#define CMD_BSS_TYPE_BSS 0x0001 121#define CMD_BSS_TYPE_BSS 0x0001
126#define CMD_BSS_TYPE_IBSS 0x0002 122#define CMD_BSS_TYPE_IBSS 0x0002
127#define CMD_BSS_TYPE_ANY 0x0003 123#define CMD_BSS_TYPE_ANY 0x0003
128 124
129/* Define action or option for CMD_802_11_SCAN */ 125/* Define action or option for CMD_802_11_SCAN */
130#define CMD_SCAN_TYPE_ACTIVE 0x0000 126#define CMD_SCAN_TYPE_ACTIVE 0x0000
131#define CMD_SCAN_TYPE_PASSIVE 0x0001 127#define CMD_SCAN_TYPE_PASSIVE 0x0001
132 128
133#define CMD_SCAN_RADIO_TYPE_BG 0 129#define CMD_SCAN_RADIO_TYPE_BG 0
134 130
135#define CMD_SCAN_PROBE_DELAY_TIME 0 131#define CMD_SCAN_PROBE_DELAY_TIME 0
136 132
137/* Define action or option for CMD_MAC_CONTROL */ 133/* Define action or option for CMD_MAC_CONTROL */
138#define CMD_ACT_MAC_RX_ON 0x0001 134#define CMD_ACT_MAC_RX_ON 0x0001
139#define CMD_ACT_MAC_TX_ON 0x0002 135#define CMD_ACT_MAC_TX_ON 0x0002
140#define CMD_ACT_MAC_LOOPBACK_ON 0x0004 136#define CMD_ACT_MAC_LOOPBACK_ON 0x0004
141#define CMD_ACT_MAC_WEP_ENABLE 0x0008 137#define CMD_ACT_MAC_WEP_ENABLE 0x0008
142#define CMD_ACT_MAC_INT_ENABLE 0x0010 138#define CMD_ACT_MAC_INT_ENABLE 0x0010
143#define CMD_ACT_MAC_MULTICAST_ENABLE 0x0020 139#define CMD_ACT_MAC_MULTICAST_ENABLE 0x0020
144#define CMD_ACT_MAC_BROADCAST_ENABLE 0x0040 140#define CMD_ACT_MAC_BROADCAST_ENABLE 0x0040
145#define CMD_ACT_MAC_PROMISCUOUS_ENABLE 0x0080 141#define CMD_ACT_MAC_PROMISCUOUS_ENABLE 0x0080
146#define CMD_ACT_MAC_ALL_MULTICAST_ENABLE 0x0100 142#define CMD_ACT_MAC_ALL_MULTICAST_ENABLE 0x0100
147#define CMD_ACT_MAC_STRICT_PROTECTION_ENABLE 0x0400 143#define CMD_ACT_MAC_STRICT_PROTECTION_ENABLE 0x0400
148 144
149/* Event flags for CMD_802_11_SUBSCRIBE_EVENT */ 145/* Event flags for CMD_802_11_SUBSCRIBE_EVENT */
150#define CMD_SUBSCRIBE_RSSI_LOW 0x0001 146#define CMD_SUBSCRIBE_RSSI_LOW 0x0001
151#define CMD_SUBSCRIBE_SNR_LOW 0x0002 147#define CMD_SUBSCRIBE_SNR_LOW 0x0002
152#define CMD_SUBSCRIBE_FAILCOUNT 0x0004 148#define CMD_SUBSCRIBE_FAILCOUNT 0x0004
153#define CMD_SUBSCRIBE_BCNMISS 0x0008 149#define CMD_SUBSCRIBE_BCNMISS 0x0008
154#define CMD_SUBSCRIBE_RSSI_HIGH 0x0010 150#define CMD_SUBSCRIBE_RSSI_HIGH 0x0010
155#define CMD_SUBSCRIBE_SNR_HIGH 0x0020 151#define CMD_SUBSCRIBE_SNR_HIGH 0x0020
156 152
157#define RADIO_PREAMBLE_LONG 0x00 153#define RADIO_PREAMBLE_LONG 0x00
158#define RADIO_PREAMBLE_SHORT 0x02 154#define RADIO_PREAMBLE_SHORT 0x02
159#define RADIO_PREAMBLE_AUTO 0x04 155#define RADIO_PREAMBLE_AUTO 0x04
160 156
161/* Define action or option for CMD_802_11_RF_CHANNEL */ 157/* Define action or option for CMD_802_11_RF_CHANNEL */
162#define CMD_OPT_802_11_RF_CHANNEL_GET 0x00 158#define CMD_OPT_802_11_RF_CHANNEL_GET 0x00
163#define CMD_OPT_802_11_RF_CHANNEL_SET 0x01 159#define CMD_OPT_802_11_RF_CHANNEL_SET 0x01
164 160
165/* Define action or option for CMD_802_11_DATA_RATE */ 161/* Define action or option for CMD_802_11_DATA_RATE */
166#define CMD_ACT_SET_TX_AUTO 0x0000 162#define CMD_ACT_SET_TX_AUTO 0x0000
167#define CMD_ACT_SET_TX_FIX_RATE 0x0001 163#define CMD_ACT_SET_TX_FIX_RATE 0x0001
168#define CMD_ACT_GET_TX_RATE 0x0002 164#define CMD_ACT_GET_TX_RATE 0x0002
169
170#define CMD_ACT_SET_RX 0x0001
171#define CMD_ACT_SET_TX 0x0002
172#define CMD_ACT_SET_BOTH 0x0003
173#define CMD_ACT_GET_RX 0x0004
174#define CMD_ACT_GET_TX 0x0008
175#define CMD_ACT_GET_BOTH 0x000c
176 165
177/* Define action or option for CMD_802_11_PS_MODE */ 166/* Define action or option for CMD_802_11_PS_MODE */
178#define CMD_TYPE_CAM 0x0000 167#define CMD_TYPE_CAM 0x0000
179#define CMD_TYPE_MAX_PSP 0x0001 168#define CMD_TYPE_MAX_PSP 0x0001
180#define CMD_TYPE_FAST_PSP 0x0002 169#define CMD_TYPE_FAST_PSP 0x0002
181 170
182/* Options for CMD_802_11_FW_WAKE_METHOD */ 171/* Options for CMD_802_11_FW_WAKE_METHOD */
183#define CMD_WAKE_METHOD_UNCHANGED 0x0000 172#define CMD_WAKE_METHOD_UNCHANGED 0x0000
184#define CMD_WAKE_METHOD_COMMAND_INT 0x0001 173#define CMD_WAKE_METHOD_COMMAND_INT 0x0001
185#define CMD_WAKE_METHOD_GPIO 0x0002 174#define CMD_WAKE_METHOD_GPIO 0x0002
186 175
187/* Object IDs for CMD_802_11_SNMP_MIB */ 176/* Object IDs for CMD_802_11_SNMP_MIB */
188#define SNMP_MIB_OID_BSS_TYPE 0x0000 177#define SNMP_MIB_OID_BSS_TYPE 0x0000
189#define SNMP_MIB_OID_OP_RATE_SET 0x0001 178#define SNMP_MIB_OID_OP_RATE_SET 0x0001
190#define SNMP_MIB_OID_BEACON_PERIOD 0x0002 /* Reserved on v9+ */ 179#define SNMP_MIB_OID_BEACON_PERIOD 0x0002 /* Reserved on v9+ */
191#define SNMP_MIB_OID_DTIM_PERIOD 0x0003 /* Reserved on v9+ */ 180#define SNMP_MIB_OID_DTIM_PERIOD 0x0003 /* Reserved on v9+ */
192#define SNMP_MIB_OID_ASSOC_TIMEOUT 0x0004 /* Reserved on v9+ */ 181#define SNMP_MIB_OID_ASSOC_TIMEOUT 0x0004 /* Reserved on v9+ */
193#define SNMP_MIB_OID_RTS_THRESHOLD 0x0005 182#define SNMP_MIB_OID_RTS_THRESHOLD 0x0005
194#define SNMP_MIB_OID_SHORT_RETRY_LIMIT 0x0006 183#define SNMP_MIB_OID_SHORT_RETRY_LIMIT 0x0006
195#define SNMP_MIB_OID_LONG_RETRY_LIMIT 0x0007 184#define SNMP_MIB_OID_LONG_RETRY_LIMIT 0x0007
196#define SNMP_MIB_OID_FRAG_THRESHOLD 0x0008 185#define SNMP_MIB_OID_FRAG_THRESHOLD 0x0008
197#define SNMP_MIB_OID_11D_ENABLE 0x0009 186#define SNMP_MIB_OID_11D_ENABLE 0x0009
198#define SNMP_MIB_OID_11H_ENABLE 0x000A 187#define SNMP_MIB_OID_11H_ENABLE 0x000A
199 188
200/* Define action or option for CMD_BT_ACCESS */ 189/* Define action or option for CMD_BT_ACCESS */
201enum cmd_bt_access_opts { 190enum cmd_bt_access_opts {
@@ -302,4 +291,672 @@ enum cmd_mesh_config_types {
302#define MACREG_INT_CODE_MESH_AUTO_STARTED 35 291#define MACREG_INT_CODE_MESH_AUTO_STARTED 35
303#define MACREG_INT_CODE_FIRMWARE_READY 48 292#define MACREG_INT_CODE_FIRMWARE_READY 48
304 293
294
295/* 802.11-related definitions */
296
297/* TxPD descriptor */
298struct txpd {
299 /* union to cope up with later FW revisions */
300 union {
301 /* Current Tx packet status */
302 __le32 tx_status;
303 struct {
304 /* BSS type: client, AP, etc. */
305 u8 bss_type;
306 /* BSS number */
307 u8 bss_num;
308 /* Reserved */
309 __le16 reserved;
310 } bss;
311 } u;
312 /* Tx control */
313 __le32 tx_control;
314 __le32 tx_packet_location;
315 /* Tx packet length */
316 __le16 tx_packet_length;
317 /* First 2 byte of destination MAC address */
318 u8 tx_dest_addr_high[2];
319 /* Last 4 byte of destination MAC address */
320 u8 tx_dest_addr_low[4];
321 /* Pkt Priority */
322 u8 priority;
323 /* Pkt Trasnit Power control */
324 u8 powermgmt;
325 /* Amount of time the packet has been queued (units = 2ms) */
326 u8 pktdelay_2ms;
327 /* reserved */
328 u8 reserved1;
329} __attribute__ ((packed));
330
331/* RxPD Descriptor */
332struct rxpd {
333 /* union to cope up with later FW revisions */
334 union {
335 /* Current Rx packet status */
336 __le16 status;
337 struct {
338 /* BSS type: client, AP, etc. */
339 u8 bss_type;
340 /* BSS number */
341 u8 bss_num;
342 } __attribute__ ((packed)) bss;
343 } __attribute__ ((packed)) u;
344
345 /* SNR */
346 u8 snr;
347
348 /* Tx control */
349 u8 rx_control;
350
351 /* Pkt length */
352 __le16 pkt_len;
353
354 /* Noise Floor */
355 u8 nf;
356
357 /* Rx Packet Rate */
358 u8 rx_rate;
359
360 /* Pkt addr */
361 __le32 pkt_ptr;
362
363 /* Next Rx RxPD addr */
364 __le32 next_rxpd_ptr;
365
366 /* Pkt Priority */
367 u8 priority;
368 u8 reserved[3];
369} __attribute__ ((packed));
370
371struct cmd_header {
372 __le16 command;
373 __le16 size;
374 __le16 seqnum;
375 __le16 result;
376} __attribute__ ((packed));
377
378/* Generic structure to hold all key types. */
379struct enc_key {
380 u16 len;
381 u16 flags; /* KEY_INFO_* from defs.h */
382 u16 type; /* KEY_TYPE_* from defs.h */
383 u8 key[32];
384};
385
386/* lbs_offset_value */
387struct lbs_offset_value {
388 u32 offset;
389 u32 value;
390} __attribute__ ((packed));
391
392/*
393 * Define data structure for CMD_GET_HW_SPEC
394 * This structure defines the response for the GET_HW_SPEC command
395 */
396struct cmd_ds_get_hw_spec {
397 struct cmd_header hdr;
398
399 /* HW Interface version number */
400 __le16 hwifversion;
401 /* HW version number */
402 __le16 version;
403 /* Max number of TxPD FW can handle */
404 __le16 nr_txpd;
405 /* Max no of Multicast address */
406 __le16 nr_mcast_adr;
407 /* MAC address */
408 u8 permanentaddr[6];
409
410 /* region Code */
411 __le16 regioncode;
412
413 /* Number of antenna used */
414 __le16 nr_antenna;
415
416 /* FW release number, example 0x01030304 = 2.3.4p1 */
417 __le32 fwrelease;
418
419 /* Base Address of TxPD queue */
420 __le32 wcb_base;
421 /* Read Pointer of RxPd queue */
422 __le32 rxpd_rdptr;
423
424 /* Write Pointer of RxPd queue */
425 __le32 rxpd_wrptr;
426
427 /*FW/HW capability */
428 __le32 fwcapinfo;
429} __attribute__ ((packed));
430
431struct cmd_ds_802_11_subscribe_event {
432 struct cmd_header hdr;
433
434 __le16 action;
435 __le16 events;
436
437 /* A TLV to the CMD_802_11_SUBSCRIBE_EVENT command can contain a
438 * number of TLVs. From the v5.1 manual, those TLVs would add up to
439 * 40 bytes. However, future firmware might add additional TLVs, so I
440 * bump this up a bit.
441 */
442 uint8_t tlv[128];
443} __attribute__ ((packed));
444
445/*
446 * This scan handle Country Information IE(802.11d compliant)
447 * Define data structure for CMD_802_11_SCAN
448 */
449struct cmd_ds_802_11_scan {
450 struct cmd_header hdr;
451
452 uint8_t bsstype;
453 uint8_t bssid[ETH_ALEN];
454 uint8_t tlvbuffer[0];
455} __attribute__ ((packed));
456
457struct cmd_ds_802_11_scan_rsp {
458 struct cmd_header hdr;
459
460 __le16 bssdescriptsize;
461 uint8_t nr_sets;
462 uint8_t bssdesc_and_tlvbuffer[0];
463} __attribute__ ((packed));
464
465struct cmd_ds_802_11_get_log {
466 struct cmd_header hdr;
467
468 __le32 mcasttxframe;
469 __le32 failed;
470 __le32 retry;
471 __le32 multiretry;
472 __le32 framedup;
473 __le32 rtssuccess;
474 __le32 rtsfailure;
475 __le32 ackfailure;
476 __le32 rxfrag;
477 __le32 mcastrxframe;
478 __le32 fcserror;
479 __le32 txframe;
480 __le32 wepundecryptable;
481} __attribute__ ((packed));
482
483struct cmd_ds_mac_control {
484 struct cmd_header hdr;
485 __le16 action;
486 u16 reserved;
487} __attribute__ ((packed));
488
489struct cmd_ds_mac_multicast_adr {
490 struct cmd_header hdr;
491 __le16 action;
492 __le16 nr_of_adrs;
493 u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
494} __attribute__ ((packed));
495
496struct cmd_ds_802_11_authenticate {
497 struct cmd_header hdr;
498
499 u8 bssid[ETH_ALEN];
500 u8 authtype;
501 u8 reserved[10];
502} __attribute__ ((packed));
503
504struct cmd_ds_802_11_deauthenticate {
505 struct cmd_header hdr;
506
507 u8 macaddr[ETH_ALEN];
508 __le16 reasoncode;
509} __attribute__ ((packed));
510
511struct cmd_ds_802_11_associate {
512 struct cmd_header hdr;
513
514 u8 bssid[6];
515 __le16 capability;
516 __le16 listeninterval;
517 __le16 bcnperiod;
518 u8 dtimperiod;
519 u8 iebuf[512]; /* Enough for required and most optional IEs */
520} __attribute__ ((packed));
521
522struct cmd_ds_802_11_associate_response {
523 struct cmd_header hdr;
524
525 __le16 capability;
526 __le16 statuscode;
527 __le16 aid;
528 u8 iebuf[512];
529} __attribute__ ((packed));
530
531struct cmd_ds_802_11_set_wep {
532 struct cmd_header hdr;
533
534 /* ACT_ADD, ACT_REMOVE or ACT_ENABLE */
535 __le16 action;
536
537 /* key Index selected for Tx */
538 __le16 keyindex;
539
540 /* 40, 128bit or TXWEP */
541 uint8_t keytype[4];
542 uint8_t keymaterial[4][16];
543} __attribute__ ((packed));
544
545struct cmd_ds_802_11_snmp_mib {
546 struct cmd_header hdr;
547
548 __le16 action;
549 __le16 oid;
550 __le16 bufsize;
551 u8 value[128];
552} __attribute__ ((packed));
553
554struct cmd_ds_mac_reg_access {
555 __le16 action;
556 __le16 offset;
557 __le32 value;
558} __attribute__ ((packed));
559
560struct cmd_ds_bbp_reg_access {
561 __le16 action;
562 __le16 offset;
563 u8 value;
564 u8 reserved[3];
565} __attribute__ ((packed));
566
567struct cmd_ds_rf_reg_access {
568 __le16 action;
569 __le16 offset;
570 u8 value;
571 u8 reserved[3];
572} __attribute__ ((packed));
573
574struct cmd_ds_802_11_radio_control {
575 struct cmd_header hdr;
576
577 __le16 action;
578 __le16 control;
579} __attribute__ ((packed));
580
581struct cmd_ds_802_11_beacon_control {
582 __le16 action;
583 __le16 beacon_enable;
584 __le16 beacon_period;
585} __attribute__ ((packed));
586
587struct cmd_ds_802_11_sleep_params {
588 struct cmd_header hdr;
589
590 /* ACT_GET/ACT_SET */
591 __le16 action;
592
593 /* Sleep clock error in ppm */
594 __le16 error;
595
596 /* Wakeup offset in usec */
597 __le16 offset;
598
599 /* Clock stabilization time in usec */
600 __le16 stabletime;
601
602 /* control periodic calibration */
603 uint8_t calcontrol;
604
605 /* control the use of external sleep clock */
606 uint8_t externalsleepclk;
607
608 /* reserved field, should be set to zero */
609 __le16 reserved;
610} __attribute__ ((packed));
611
612struct cmd_ds_802_11_rf_channel {
613 struct cmd_header hdr;
614
615 __le16 action;
616 __le16 channel;
617 __le16 rftype; /* unused */
618 __le16 reserved; /* unused */
619 u8 channellist[32]; /* unused */
620} __attribute__ ((packed));
621
622struct cmd_ds_802_11_rssi {
623 /* weighting factor */
624 __le16 N;
625
626 __le16 reserved_0;
627 __le16 reserved_1;
628 __le16 reserved_2;
629} __attribute__ ((packed));
630
631struct cmd_ds_802_11_rssi_rsp {
632 __le16 SNR;
633 __le16 noisefloor;
634 __le16 avgSNR;
635 __le16 avgnoisefloor;
636} __attribute__ ((packed));
637
638struct cmd_ds_802_11_mac_address {
639 struct cmd_header hdr;
640
641 __le16 action;
642 u8 macadd[ETH_ALEN];
643} __attribute__ ((packed));
644
645struct cmd_ds_802_11_rf_tx_power {
646 struct cmd_header hdr;
647
648 __le16 action;
649 __le16 curlevel;
650 s8 maxlevel;
651 s8 minlevel;
652} __attribute__ ((packed));
653
654struct cmd_ds_802_11_monitor_mode {
655 __le16 action;
656 __le16 mode;
657} __attribute__ ((packed));
658
659struct cmd_ds_set_boot2_ver {
660 struct cmd_header hdr;
661
662 __le16 action;
663 __le16 version;
664} __attribute__ ((packed));
665
666struct cmd_ds_802_11_fw_wake_method {
667 struct cmd_header hdr;
668
669 __le16 action;
670 __le16 method;
671} __attribute__ ((packed));
672
673struct cmd_ds_802_11_ps_mode {
674 __le16 action;
675 __le16 nullpktinterval;
676 __le16 multipledtim;
677 __le16 reserved;
678 __le16 locallisteninterval;
679} __attribute__ ((packed));
680
681struct cmd_confirm_sleep {
682 struct cmd_header hdr;
683
684 __le16 action;
685 __le16 nullpktinterval;
686 __le16 multipledtim;
687 __le16 reserved;
688 __le16 locallisteninterval;
689} __attribute__ ((packed));
690
691struct cmd_ds_802_11_data_rate {
692 struct cmd_header hdr;
693
694 __le16 action;
695 __le16 reserved;
696 u8 rates[MAX_RATES];
697} __attribute__ ((packed));
698
699struct cmd_ds_802_11_rate_adapt_rateset {
700 struct cmd_header hdr;
701 __le16 action;
702 __le16 enablehwauto;
703 __le16 bitmap;
704} __attribute__ ((packed));
705
706struct cmd_ds_802_11_ad_hoc_start {
707 struct cmd_header hdr;
708
709 u8 ssid[IEEE80211_MAX_SSID_LEN];
710 u8 bsstype;
711 __le16 beaconperiod;
712 u8 dtimperiod; /* Reserved on v9 and later */
713 struct ieee_ie_ibss_param_set ibss;
714 u8 reserved1[4];
715 struct ieee_ie_ds_param_set ds;
716 u8 reserved2[4];
717 __le16 probedelay; /* Reserved on v9 and later */
718 __le16 capability;
719 u8 rates[MAX_RATES];
720 u8 tlv_memory_size_pad[100];
721} __attribute__ ((packed));
722
723struct cmd_ds_802_11_ad_hoc_result {
724 struct cmd_header hdr;
725
726 u8 pad[3];
727 u8 bssid[ETH_ALEN];
728} __attribute__ ((packed));
729
730struct adhoc_bssdesc {
731 u8 bssid[ETH_ALEN];
732 u8 ssid[IEEE80211_MAX_SSID_LEN];
733 u8 type;
734 __le16 beaconperiod;
735 u8 dtimperiod;
736 __le64 timestamp;
737 __le64 localtime;
738 struct ieee_ie_ds_param_set ds;
739 u8 reserved1[4];
740 struct ieee_ie_ibss_param_set ibss;
741 u8 reserved2[4];
742 __le16 capability;
743 u8 rates[MAX_RATES];
744
745 /* DO NOT ADD ANY FIELDS TO THIS STRUCTURE. It is used below in the
746 * Adhoc join command and will cause a binary layout mismatch with
747 * the firmware
748 */
749} __attribute__ ((packed));
750
751struct cmd_ds_802_11_ad_hoc_join {
752 struct cmd_header hdr;
753
754 struct adhoc_bssdesc bss;
755 __le16 failtimeout; /* Reserved on v9 and later */
756 __le16 probedelay; /* Reserved on v9 and later */
757} __attribute__ ((packed));
758
759struct cmd_ds_802_11_ad_hoc_stop {
760 struct cmd_header hdr;
761} __attribute__ ((packed));
762
763struct cmd_ds_802_11_enable_rsn {
764 struct cmd_header hdr;
765
766 __le16 action;
767 __le16 enable;
768} __attribute__ ((packed));
769
770struct MrvlIEtype_keyParamSet {
771 /* type ID */
772 __le16 type;
773
774 /* length of Payload */
775 __le16 length;
776
777 /* type of key: WEP=0, TKIP=1, AES=2 */
778 __le16 keytypeid;
779
780 /* key control Info specific to a keytypeid */
781 __le16 keyinfo;
782
783 /* length of key */
784 __le16 keylen;
785
786 /* key material of size keylen */
787 u8 key[32];
788} __attribute__ ((packed));
789
790#define MAX_WOL_RULES 16
791
792struct host_wol_rule {
793 uint8_t rule_no;
794 uint8_t rule_ops;
795 __le16 sig_offset;
796 __le16 sig_length;
797 __le16 reserve;
798 __be32 sig_mask;
799 __be32 signature;
800} __attribute__ ((packed));
801
802struct wol_config {
803 uint8_t action;
804 uint8_t pattern;
805 uint8_t no_rules_in_cmd;
806 uint8_t result;
807 struct host_wol_rule rule[MAX_WOL_RULES];
808} __attribute__ ((packed));
809
810struct cmd_ds_host_sleep {
811 struct cmd_header hdr;
812 __le32 criteria;
813 uint8_t gpio;
814 uint16_t gap;
815 struct wol_config wol_conf;
816} __attribute__ ((packed));
817
818
819
820struct cmd_ds_802_11_key_material {
821 struct cmd_header hdr;
822
823 __le16 action;
824 struct MrvlIEtype_keyParamSet keyParamSet[2];
825} __attribute__ ((packed));
826
827struct cmd_ds_802_11_eeprom_access {
828 struct cmd_header hdr;
829 __le16 action;
830 __le16 offset;
831 __le16 len;
832 /* firmware says it returns a maximum of 20 bytes */
833#define LBS_EEPROM_READ_LEN 20
834 u8 value[LBS_EEPROM_READ_LEN];
835} __attribute__ ((packed));
836
837struct cmd_ds_802_11_tpc_cfg {
838 struct cmd_header hdr;
839
840 __le16 action;
841 uint8_t enable;
842 int8_t P0;
843 int8_t P1;
844 int8_t P2;
845 uint8_t usesnr;
846} __attribute__ ((packed));
847
848
849struct cmd_ds_802_11_pa_cfg {
850 struct cmd_header hdr;
851
852 __le16 action;
853 uint8_t enable;
854 int8_t P0;
855 int8_t P1;
856 int8_t P2;
857} __attribute__ ((packed));
858
859
860struct cmd_ds_802_11_led_ctrl {
861 __le16 action;
862 __le16 numled;
863 u8 data[256];
864} __attribute__ ((packed));
865
866struct cmd_ds_802_11_afc {
867 __le16 afc_auto;
868 union {
869 struct {
870 __le16 threshold;
871 __le16 period;
872 };
873 struct {
874 __le16 timing_offset; /* signed */
875 __le16 carrier_offset; /* signed */
876 };
877 };
878} __attribute__ ((packed));
879
880struct cmd_tx_rate_query {
881 __le16 txrate;
882} __attribute__ ((packed));
883
884struct cmd_ds_get_tsf {
885 __le64 tsfvalue;
886} __attribute__ ((packed));
887
888struct cmd_ds_bt_access {
889 __le16 action;
890 __le32 id;
891 u8 addr1[ETH_ALEN];
892 u8 addr2[ETH_ALEN];
893} __attribute__ ((packed));
894
895struct cmd_ds_fwt_access {
896 __le16 action;
897 __le32 id;
898 u8 valid;
899 u8 da[ETH_ALEN];
900 u8 dir;
901 u8 ra[ETH_ALEN];
902 __le32 ssn;
903 __le32 dsn;
904 __le32 metric;
905 u8 rate;
906 u8 hopcount;
907 u8 ttl;
908 __le32 expiration;
909 u8 sleepmode;
910 __le32 snr;
911 __le32 references;
912 u8 prec[ETH_ALEN];
913} __attribute__ ((packed));
914
915struct cmd_ds_mesh_config {
916 struct cmd_header hdr;
917
918 __le16 action;
919 __le16 channel;
920 __le16 type;
921 __le16 length;
922 u8 data[128]; /* last position reserved */
923} __attribute__ ((packed));
924
925struct cmd_ds_mesh_access {
926 struct cmd_header hdr;
927
928 __le16 action;
929 __le32 data[32]; /* last position reserved */
930} __attribute__ ((packed));
931
932/* Number of stats counters returned by the firmware */
933#define MESH_STATS_NUM 8
934
935struct cmd_ds_command {
936 /* command header */
937 __le16 command;
938 __le16 size;
939 __le16 seqnum;
940 __le16 result;
941
942 /* command Body */
943 union {
944 struct cmd_ds_802_11_ps_mode psmode;
945 struct cmd_ds_802_11_monitor_mode monitor;
946 struct cmd_ds_802_11_rssi rssi;
947 struct cmd_ds_802_11_rssi_rsp rssirsp;
948 struct cmd_ds_mac_reg_access macreg;
949 struct cmd_ds_bbp_reg_access bbpreg;
950 struct cmd_ds_rf_reg_access rfreg;
951
952 struct cmd_ds_802_11_tpc_cfg tpccfg;
953 struct cmd_ds_802_11_afc afc;
954 struct cmd_ds_802_11_led_ctrl ledgpio;
955
956 struct cmd_ds_bt_access bt;
957 struct cmd_ds_fwt_access fwt;
958 struct cmd_ds_802_11_beacon_control bcn_ctrl;
959 } params;
960} __attribute__ ((packed));
961
305#endif 962#endif
diff --git a/drivers/net/wireless/libertas/hostcmd.h b/drivers/net/wireless/libertas/hostcmd.h
deleted file mode 100644
index c8a1998d4744..000000000000
--- a/drivers/net/wireless/libertas/hostcmd.h
+++ /dev/null
@@ -1,800 +0,0 @@
1/*
2 * This file contains the function prototypes, data structure
3 * and defines for all the host/station commands
4 */
5#ifndef _LBS_HOSTCMD_H
6#define _LBS_HOSTCMD_H
7
8#include <linux/wireless.h>
9#include "11d.h"
10#include "types.h"
11
12/* 802.11-related definitions */
13
14/* TxPD descriptor */
15struct txpd {
16 /* union to cope up with later FW revisions */
17 union {
18 /* Current Tx packet status */
19 __le32 tx_status;
20 struct {
21 /* BSS type: client, AP, etc. */
22 u8 bss_type;
23 /* BSS number */
24 u8 bss_num;
25 /* Reserved */
26 __le16 reserved;
27 } bss;
28 } u;
29 /* Tx control */
30 __le32 tx_control;
31 __le32 tx_packet_location;
32 /* Tx packet length */
33 __le16 tx_packet_length;
34 /* First 2 byte of destination MAC address */
35 u8 tx_dest_addr_high[2];
36 /* Last 4 byte of destination MAC address */
37 u8 tx_dest_addr_low[4];
38 /* Pkt Priority */
39 u8 priority;
40 /* Pkt Trasnit Power control */
41 u8 powermgmt;
42 /* Amount of time the packet has been queued in the driver (units = 2ms) */
43 u8 pktdelay_2ms;
44 /* reserved */
45 u8 reserved1;
46} __attribute__ ((packed));
47
48/* RxPD Descriptor */
49struct rxpd {
50 /* union to cope up with later FW revisions */
51 union {
52 /* Current Rx packet status */
53 __le16 status;
54 struct {
55 /* BSS type: client, AP, etc. */
56 u8 bss_type;
57 /* BSS number */
58 u8 bss_num;
59 } __attribute__ ((packed)) bss;
60 } __attribute__ ((packed)) u;
61
62 /* SNR */
63 u8 snr;
64
65 /* Tx control */
66 u8 rx_control;
67
68 /* Pkt length */
69 __le16 pkt_len;
70
71 /* Noise Floor */
72 u8 nf;
73
74 /* Rx Packet Rate */
75 u8 rx_rate;
76
77 /* Pkt addr */
78 __le32 pkt_ptr;
79
80 /* Next Rx RxPD addr */
81 __le32 next_rxpd_ptr;
82
83 /* Pkt Priority */
84 u8 priority;
85 u8 reserved[3];
86} __attribute__ ((packed));
87
88struct cmd_header {
89 __le16 command;
90 __le16 size;
91 __le16 seqnum;
92 __le16 result;
93} __attribute__ ((packed));
94
95struct cmd_ctrl_node {
96 struct list_head list;
97 int result;
98 /* command response */
99 int (*callback)(struct lbs_private *, unsigned long, struct cmd_header *);
100 unsigned long callback_arg;
101 /* command data */
102 struct cmd_header *cmdbuf;
103 /* wait queue */
104 u16 cmdwaitqwoken;
105 wait_queue_head_t cmdwait_q;
106};
107
108/* Generic structure to hold all key types. */
109struct enc_key {
110 u16 len;
111 u16 flags; /* KEY_INFO_* from defs.h */
112 u16 type; /* KEY_TYPE_* from defs.h */
113 u8 key[32];
114};
115
116/* lbs_offset_value */
117struct lbs_offset_value {
118 u32 offset;
119 u32 value;
120} __attribute__ ((packed));
121
122/* Define general data structure */
123/* cmd_DS_GEN */
124struct cmd_ds_gen {
125 __le16 command;
126 __le16 size;
127 __le16 seqnum;
128 __le16 result;
129 void *cmdresp[0];
130} __attribute__ ((packed));
131
132#define S_DS_GEN sizeof(struct cmd_ds_gen)
133
134
135/*
136 * Define data structure for CMD_GET_HW_SPEC
137 * This structure defines the response for the GET_HW_SPEC command
138 */
139struct cmd_ds_get_hw_spec {
140 struct cmd_header hdr;
141
142 /* HW Interface version number */
143 __le16 hwifversion;
144 /* HW version number */
145 __le16 version;
146 /* Max number of TxPD FW can handle */
147 __le16 nr_txpd;
148 /* Max no of Multicast address */
149 __le16 nr_mcast_adr;
150 /* MAC address */
151 u8 permanentaddr[6];
152
153 /* region Code */
154 __le16 regioncode;
155
156 /* Number of antenna used */
157 __le16 nr_antenna;
158
159 /* FW release number, example 0x01030304 = 2.3.4p1 */
160 __le32 fwrelease;
161
162 /* Base Address of TxPD queue */
163 __le32 wcb_base;
164 /* Read Pointer of RxPd queue */
165 __le32 rxpd_rdptr;
166
167 /* Write Pointer of RxPd queue */
168 __le32 rxpd_wrptr;
169
170 /*FW/HW capability */
171 __le32 fwcapinfo;
172} __attribute__ ((packed));
173
174struct cmd_ds_802_11_subscribe_event {
175 struct cmd_header hdr;
176
177 __le16 action;
178 __le16 events;
179
180 /* A TLV to the CMD_802_11_SUBSCRIBE_EVENT command can contain a
181 * number of TLVs. From the v5.1 manual, those TLVs would add up to
182 * 40 bytes. However, future firmware might add additional TLVs, so I
183 * bump this up a bit.
184 */
185 uint8_t tlv[128];
186} __attribute__ ((packed));
187
188/*
189 * This scan handle Country Information IE(802.11d compliant)
190 * Define data structure for CMD_802_11_SCAN
191 */
192struct cmd_ds_802_11_scan {
193 struct cmd_header hdr;
194
195 uint8_t bsstype;
196 uint8_t bssid[ETH_ALEN];
197 uint8_t tlvbuffer[0];
198#if 0
199 mrvlietypes_ssidparamset_t ssidParamSet;
200 mrvlietypes_chanlistparamset_t ChanListParamSet;
201 mrvlietypes_ratesparamset_t OpRateSet;
202#endif
203} __attribute__ ((packed));
204
205struct cmd_ds_802_11_scan_rsp {
206 struct cmd_header hdr;
207
208 __le16 bssdescriptsize;
209 uint8_t nr_sets;
210 uint8_t bssdesc_and_tlvbuffer[0];
211} __attribute__ ((packed));
212
213struct cmd_ds_802_11_get_log {
214 struct cmd_header hdr;
215
216 __le32 mcasttxframe;
217 __le32 failed;
218 __le32 retry;
219 __le32 multiretry;
220 __le32 framedup;
221 __le32 rtssuccess;
222 __le32 rtsfailure;
223 __le32 ackfailure;
224 __le32 rxfrag;
225 __le32 mcastrxframe;
226 __le32 fcserror;
227 __le32 txframe;
228 __le32 wepundecryptable;
229} __attribute__ ((packed));
230
231struct cmd_ds_mac_control {
232 struct cmd_header hdr;
233 __le16 action;
234 u16 reserved;
235} __attribute__ ((packed));
236
237struct cmd_ds_mac_multicast_adr {
238 struct cmd_header hdr;
239 __le16 action;
240 __le16 nr_of_adrs;
241 u8 maclist[ETH_ALEN * MRVDRV_MAX_MULTICAST_LIST_SIZE];
242} __attribute__ ((packed));
243
244struct cmd_ds_gspi_bus_config {
245 struct cmd_header hdr;
246 __le16 action;
247 __le16 bus_delay_mode;
248 __le16 host_time_delay_to_read_port;
249 __le16 host_time_delay_to_read_register;
250} __attribute__ ((packed));
251
252struct cmd_ds_802_11_authenticate {
253 struct cmd_header hdr;
254
255 u8 bssid[ETH_ALEN];
256 u8 authtype;
257 u8 reserved[10];
258} __attribute__ ((packed));
259
260struct cmd_ds_802_11_deauthenticate {
261 struct cmd_header hdr;
262
263 u8 macaddr[ETH_ALEN];
264 __le16 reasoncode;
265} __attribute__ ((packed));
266
267struct cmd_ds_802_11_associate {
268 struct cmd_header hdr;
269
270 u8 bssid[6];
271 __le16 capability;
272 __le16 listeninterval;
273 __le16 bcnperiod;
274 u8 dtimperiod;
275 u8 iebuf[512]; /* Enough for required and most optional IEs */
276} __attribute__ ((packed));
277
278struct cmd_ds_802_11_associate_response {
279 struct cmd_header hdr;
280
281 __le16 capability;
282 __le16 statuscode;
283 __le16 aid;
284 u8 iebuf[512];
285} __attribute__ ((packed));
286
287struct cmd_ds_802_11_set_wep {
288 struct cmd_header hdr;
289
290 /* ACT_ADD, ACT_REMOVE or ACT_ENABLE */
291 __le16 action;
292
293 /* key Index selected for Tx */
294 __le16 keyindex;
295
296 /* 40, 128bit or TXWEP */
297 uint8_t keytype[4];
298 uint8_t keymaterial[4][16];
299} __attribute__ ((packed));
300
301struct cmd_ds_802_3_get_stat {
302 __le32 xmitok;
303 __le32 rcvok;
304 __le32 xmiterror;
305 __le32 rcverror;
306 __le32 rcvnobuffer;
307 __le32 rcvcrcerror;
308} __attribute__ ((packed));
309
310struct cmd_ds_802_11_get_stat {
311 __le32 txfragmentcnt;
312 __le32 mcasttxframecnt;
313 __le32 failedcnt;
314 __le32 retrycnt;
315 __le32 Multipleretrycnt;
316 __le32 rtssuccesscnt;
317 __le32 rtsfailurecnt;
318 __le32 ackfailurecnt;
319 __le32 frameduplicatecnt;
320 __le32 rxfragmentcnt;
321 __le32 mcastrxframecnt;
322 __le32 fcserrorcnt;
323 __le32 bcasttxframecnt;
324 __le32 bcastrxframecnt;
325 __le32 txbeacon;
326 __le32 rxbeacon;
327 __le32 wepundecryptable;
328} __attribute__ ((packed));
329
330struct cmd_ds_802_11_snmp_mib {
331 struct cmd_header hdr;
332
333 __le16 action;
334 __le16 oid;
335 __le16 bufsize;
336 u8 value[128];
337} __attribute__ ((packed));
338
339struct cmd_ds_mac_reg_map {
340 __le16 buffersize;
341 u8 regmap[128];
342 __le16 reserved;
343} __attribute__ ((packed));
344
345struct cmd_ds_bbp_reg_map {
346 __le16 buffersize;
347 u8 regmap[128];
348 __le16 reserved;
349} __attribute__ ((packed));
350
351struct cmd_ds_rf_reg_map {
352 __le16 buffersize;
353 u8 regmap[64];
354 __le16 reserved;
355} __attribute__ ((packed));
356
357struct cmd_ds_mac_reg_access {
358 __le16 action;
359 __le16 offset;
360 __le32 value;
361} __attribute__ ((packed));
362
363struct cmd_ds_bbp_reg_access {
364 __le16 action;
365 __le16 offset;
366 u8 value;
367 u8 reserved[3];
368} __attribute__ ((packed));
369
370struct cmd_ds_rf_reg_access {
371 __le16 action;
372 __le16 offset;
373 u8 value;
374 u8 reserved[3];
375} __attribute__ ((packed));
376
377struct cmd_ds_802_11_radio_control {
378 struct cmd_header hdr;
379
380 __le16 action;
381 __le16 control;
382} __attribute__ ((packed));
383
384struct cmd_ds_802_11_beacon_control {
385 __le16 action;
386 __le16 beacon_enable;
387 __le16 beacon_period;
388} __attribute__ ((packed));
389
390struct cmd_ds_802_11_sleep_params {
391 struct cmd_header hdr;
392
393 /* ACT_GET/ACT_SET */
394 __le16 action;
395
396 /* Sleep clock error in ppm */
397 __le16 error;
398
399 /* Wakeup offset in usec */
400 __le16 offset;
401
402 /* Clock stabilization time in usec */
403 __le16 stabletime;
404
405 /* control periodic calibration */
406 uint8_t calcontrol;
407
408 /* control the use of external sleep clock */
409 uint8_t externalsleepclk;
410
411 /* reserved field, should be set to zero */
412 __le16 reserved;
413} __attribute__ ((packed));
414
415struct cmd_ds_802_11_inactivity_timeout {
416 struct cmd_header hdr;
417
418 /* ACT_GET/ACT_SET */
419 __le16 action;
420
421 /* Inactivity timeout in msec */
422 __le16 timeout;
423} __attribute__ ((packed));
424
425struct cmd_ds_802_11_rf_channel {
426 struct cmd_header hdr;
427
428 __le16 action;
429 __le16 channel;
430 __le16 rftype; /* unused */
431 __le16 reserved; /* unused */
432 u8 channellist[32]; /* unused */
433} __attribute__ ((packed));
434
435struct cmd_ds_802_11_rssi {
436 /* weighting factor */
437 __le16 N;
438
439 __le16 reserved_0;
440 __le16 reserved_1;
441 __le16 reserved_2;
442} __attribute__ ((packed));
443
444struct cmd_ds_802_11_rssi_rsp {
445 __le16 SNR;
446 __le16 noisefloor;
447 __le16 avgSNR;
448 __le16 avgnoisefloor;
449} __attribute__ ((packed));
450
451struct cmd_ds_802_11_mac_address {
452 struct cmd_header hdr;
453
454 __le16 action;
455 u8 macadd[ETH_ALEN];
456} __attribute__ ((packed));
457
458struct cmd_ds_802_11_rf_tx_power {
459 struct cmd_header hdr;
460
461 __le16 action;
462 __le16 curlevel;
463 s8 maxlevel;
464 s8 minlevel;
465} __attribute__ ((packed));
466
467struct cmd_ds_802_11_rf_antenna {
468 __le16 action;
469
470 /* Number of antennas or 0xffff(diversity) */
471 __le16 antennamode;
472
473} __attribute__ ((packed));
474
475struct cmd_ds_802_11_monitor_mode {
476 __le16 action;
477 __le16 mode;
478} __attribute__ ((packed));
479
480struct cmd_ds_set_boot2_ver {
481 struct cmd_header hdr;
482
483 __le16 action;
484 __le16 version;
485} __attribute__ ((packed));
486
487struct cmd_ds_802_11_fw_wake_method {
488 struct cmd_header hdr;
489
490 __le16 action;
491 __le16 method;
492} __attribute__ ((packed));
493
494struct cmd_ds_802_11_sleep_period {
495 struct cmd_header hdr;
496
497 __le16 action;
498 __le16 period;
499} __attribute__ ((packed));
500
501struct cmd_ds_802_11_ps_mode {
502 __le16 action;
503 __le16 nullpktinterval;
504 __le16 multipledtim;
505 __le16 reserved;
506 __le16 locallisteninterval;
507} __attribute__ ((packed));
508
509struct cmd_confirm_sleep {
510 struct cmd_header hdr;
511
512 __le16 action;
513 __le16 nullpktinterval;
514 __le16 multipledtim;
515 __le16 reserved;
516 __le16 locallisteninterval;
517} __attribute__ ((packed));
518
519struct cmd_ds_802_11_data_rate {
520 struct cmd_header hdr;
521
522 __le16 action;
523 __le16 reserved;
524 u8 rates[MAX_RATES];
525} __attribute__ ((packed));
526
527struct cmd_ds_802_11_rate_adapt_rateset {
528 struct cmd_header hdr;
529 __le16 action;
530 __le16 enablehwauto;
531 __le16 bitmap;
532} __attribute__ ((packed));
533
534struct cmd_ds_802_11_ad_hoc_start {
535 struct cmd_header hdr;
536
537 u8 ssid[IW_ESSID_MAX_SIZE];
538 u8 bsstype;
539 __le16 beaconperiod;
540 u8 dtimperiod; /* Reserved on v9 and later */
541 struct ieee_ie_ibss_param_set ibss;
542 u8 reserved1[4];
543 struct ieee_ie_ds_param_set ds;
544 u8 reserved2[4];
545 __le16 probedelay; /* Reserved on v9 and later */
546 __le16 capability;
547 u8 rates[MAX_RATES];
548 u8 tlv_memory_size_pad[100];
549} __attribute__ ((packed));
550
551struct cmd_ds_802_11_ad_hoc_result {
552 struct cmd_header hdr;
553
554 u8 pad[3];
555 u8 bssid[ETH_ALEN];
556} __attribute__ ((packed));
557
558struct adhoc_bssdesc {
559 u8 bssid[ETH_ALEN];
560 u8 ssid[IW_ESSID_MAX_SIZE];
561 u8 type;
562 __le16 beaconperiod;
563 u8 dtimperiod;
564 __le64 timestamp;
565 __le64 localtime;
566 struct ieee_ie_ds_param_set ds;
567 u8 reserved1[4];
568 struct ieee_ie_ibss_param_set ibss;
569 u8 reserved2[4];
570 __le16 capability;
571 u8 rates[MAX_RATES];
572
573 /* DO NOT ADD ANY FIELDS TO THIS STRUCTURE. It is used below in the
574 * Adhoc join command and will cause a binary layout mismatch with
575 * the firmware
576 */
577} __attribute__ ((packed));
578
579struct cmd_ds_802_11_ad_hoc_join {
580 struct cmd_header hdr;
581
582 struct adhoc_bssdesc bss;
583 __le16 failtimeout; /* Reserved on v9 and later */
584 __le16 probedelay; /* Reserved on v9 and later */
585} __attribute__ ((packed));
586
587struct cmd_ds_802_11_ad_hoc_stop {
588 struct cmd_header hdr;
589} __attribute__ ((packed));
590
591struct cmd_ds_802_11_enable_rsn {
592 struct cmd_header hdr;
593
594 __le16 action;
595 __le16 enable;
596} __attribute__ ((packed));
597
598struct MrvlIEtype_keyParamSet {
599 /* type ID */
600 __le16 type;
601
602 /* length of Payload */
603 __le16 length;
604
605 /* type of key: WEP=0, TKIP=1, AES=2 */
606 __le16 keytypeid;
607
608 /* key control Info specific to a keytypeid */
609 __le16 keyinfo;
610
611 /* length of key */
612 __le16 keylen;
613
614 /* key material of size keylen */
615 u8 key[32];
616} __attribute__ ((packed));
617
618#define MAX_WOL_RULES 16
619
620struct host_wol_rule {
621 uint8_t rule_no;
622 uint8_t rule_ops;
623 __le16 sig_offset;
624 __le16 sig_length;
625 __le16 reserve;
626 __be32 sig_mask;
627 __be32 signature;
628} __attribute__ ((packed));
629
630struct wol_config {
631 uint8_t action;
632 uint8_t pattern;
633 uint8_t no_rules_in_cmd;
634 uint8_t result;
635 struct host_wol_rule rule[MAX_WOL_RULES];
636} __attribute__ ((packed));
637
638struct cmd_ds_host_sleep {
639 struct cmd_header hdr;
640 __le32 criteria;
641 uint8_t gpio;
642 uint16_t gap;
643 struct wol_config wol_conf;
644} __attribute__ ((packed));
645
646
647
648struct cmd_ds_802_11_key_material {
649 struct cmd_header hdr;
650
651 __le16 action;
652 struct MrvlIEtype_keyParamSet keyParamSet[2];
653} __attribute__ ((packed));
654
655struct cmd_ds_802_11_eeprom_access {
656 struct cmd_header hdr;
657 __le16 action;
658 __le16 offset;
659 __le16 len;
660 /* firmware says it returns a maximum of 20 bytes */
661#define LBS_EEPROM_READ_LEN 20
662 u8 value[LBS_EEPROM_READ_LEN];
663} __attribute__ ((packed));
664
665struct cmd_ds_802_11_tpc_cfg {
666 struct cmd_header hdr;
667
668 __le16 action;
669 uint8_t enable;
670 int8_t P0;
671 int8_t P1;
672 int8_t P2;
673 uint8_t usesnr;
674} __attribute__ ((packed));
675
676
677struct cmd_ds_802_11_pa_cfg {
678 struct cmd_header hdr;
679
680 __le16 action;
681 uint8_t enable;
682 int8_t P0;
683 int8_t P1;
684 int8_t P2;
685} __attribute__ ((packed));
686
687
688struct cmd_ds_802_11_led_ctrl {
689 __le16 action;
690 __le16 numled;
691 u8 data[256];
692} __attribute__ ((packed));
693
694struct cmd_ds_802_11_afc {
695 __le16 afc_auto;
696 union {
697 struct {
698 __le16 threshold;
699 __le16 period;
700 };
701 struct {
702 __le16 timing_offset; /* signed */
703 __le16 carrier_offset; /* signed */
704 };
705 };
706} __attribute__ ((packed));
707
708struct cmd_tx_rate_query {
709 __le16 txrate;
710} __attribute__ ((packed));
711
712struct cmd_ds_get_tsf {
713 __le64 tsfvalue;
714} __attribute__ ((packed));
715
716struct cmd_ds_bt_access {
717 __le16 action;
718 __le32 id;
719 u8 addr1[ETH_ALEN];
720 u8 addr2[ETH_ALEN];
721} __attribute__ ((packed));
722
723struct cmd_ds_fwt_access {
724 __le16 action;
725 __le32 id;
726 u8 valid;
727 u8 da[ETH_ALEN];
728 u8 dir;
729 u8 ra[ETH_ALEN];
730 __le32 ssn;
731 __le32 dsn;
732 __le32 metric;
733 u8 rate;
734 u8 hopcount;
735 u8 ttl;
736 __le32 expiration;
737 u8 sleepmode;
738 __le32 snr;
739 __le32 references;
740 u8 prec[ETH_ALEN];
741} __attribute__ ((packed));
742
743
744struct cmd_ds_mesh_config {
745 struct cmd_header hdr;
746
747 __le16 action;
748 __le16 channel;
749 __le16 type;
750 __le16 length;
751 u8 data[128]; /* last position reserved */
752} __attribute__ ((packed));
753
754
755struct cmd_ds_mesh_access {
756 struct cmd_header hdr;
757
758 __le16 action;
759 __le32 data[32]; /* last position reserved */
760} __attribute__ ((packed));
761
762/* Number of stats counters returned by the firmware */
763#define MESH_STATS_NUM 8
764
765struct cmd_ds_command {
766 /* command header */
767 __le16 command;
768 __le16 size;
769 __le16 seqnum;
770 __le16 result;
771
772 /* command Body */
773 union {
774 struct cmd_ds_802_11_ps_mode psmode;
775 struct cmd_ds_802_11_get_stat gstat;
776 struct cmd_ds_802_3_get_stat gstat_8023;
777 struct cmd_ds_802_11_rf_antenna rant;
778 struct cmd_ds_802_11_monitor_mode monitor;
779 struct cmd_ds_802_11_rssi rssi;
780 struct cmd_ds_802_11_rssi_rsp rssirsp;
781 struct cmd_ds_mac_reg_access macreg;
782 struct cmd_ds_bbp_reg_access bbpreg;
783 struct cmd_ds_rf_reg_access rfreg;
784
785 struct cmd_ds_802_11d_domain_info domaininfo;
786 struct cmd_ds_802_11d_domain_info domaininforesp;
787
788 struct cmd_ds_802_11_tpc_cfg tpccfg;
789 struct cmd_ds_802_11_afc afc;
790 struct cmd_ds_802_11_led_ctrl ledgpio;
791
792 struct cmd_tx_rate_query txrate;
793 struct cmd_ds_bt_access bt;
794 struct cmd_ds_fwt_access fwt;
795 struct cmd_ds_get_tsf gettsf;
796 struct cmd_ds_802_11_beacon_control bcn_ctrl;
797 } params;
798} __attribute__ ((packed));
799
800#endif
diff --git a/drivers/net/wireless/libertas/if_cs.c b/drivers/net/wireless/libertas/if_cs.c
index 62381768f2d5..465742f19ecb 100644
--- a/drivers/net/wireless/libertas/if_cs.c
+++ b/drivers/net/wireless/libertas/if_cs.c
@@ -946,6 +946,9 @@ static int if_cs_probe(struct pcmcia_device *p_dev)
946 card->priv = priv; 946 card->priv = priv;
947 priv->card = card; 947 priv->card = card;
948 priv->hw_host_to_card = if_cs_host_to_card; 948 priv->hw_host_to_card = if_cs_host_to_card;
949 priv->enter_deep_sleep = NULL;
950 priv->exit_deep_sleep = NULL;
951 priv->reset_deep_sleep_wakeup = NULL;
949 priv->fw_ready = 1; 952 priv->fw_ready = 1;
950 953
951 /* Now actually get the IRQ */ 954 /* Now actually get the IRQ */
diff --git a/drivers/net/wireless/libertas/if_sdio.c b/drivers/net/wireless/libertas/if_sdio.c
index 485a8d406525..9716728a33cb 100644
--- a/drivers/net/wireless/libertas/if_sdio.c
+++ b/drivers/net/wireless/libertas/if_sdio.c
@@ -831,6 +831,58 @@ out:
831 return ret; 831 return ret;
832} 832}
833 833
834static int if_sdio_enter_deep_sleep(struct lbs_private *priv)
835{
836 int ret = -1;
837 struct cmd_header cmd;
838
839 memset(&cmd, 0, sizeof(cmd));
840
841 lbs_deb_sdio("send DEEP_SLEEP command\n");
842 ret = __lbs_cmd(priv, CMD_802_11_DEEP_SLEEP, &cmd, sizeof(cmd),
843 lbs_cmd_copyback, (unsigned long) &cmd);
844 if (ret)
845 lbs_pr_err("DEEP_SLEEP cmd failed\n");
846
847 mdelay(200);
848 return ret;
849}
850
851static int if_sdio_exit_deep_sleep(struct lbs_private *priv)
852{
853 struct if_sdio_card *card = priv->card;
854 int ret = -1;
855
856 lbs_deb_enter(LBS_DEB_SDIO);
857 sdio_claim_host(card->func);
858
859 sdio_writeb(card->func, HOST_POWER_UP, CONFIGURATION_REG, &ret);
860 if (ret)
861 lbs_pr_err("sdio_writeb failed!\n");
862
863 sdio_release_host(card->func);
864 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
865 return ret;
866}
867
868static int if_sdio_reset_deep_sleep_wakeup(struct lbs_private *priv)
869{
870 struct if_sdio_card *card = priv->card;
871 int ret = -1;
872
873 lbs_deb_enter(LBS_DEB_SDIO);
874 sdio_claim_host(card->func);
875
876 sdio_writeb(card->func, 0, CONFIGURATION_REG, &ret);
877 if (ret)
878 lbs_pr_err("sdio_writeb failed!\n");
879
880 sdio_release_host(card->func);
881 lbs_deb_leave_args(LBS_DEB_SDIO, "ret %d", ret);
882 return ret;
883
884}
885
834/*******************************************************************/ 886/*******************************************************************/
835/* SDIO callbacks */ 887/* SDIO callbacks */
836/*******************************************************************/ 888/*******************************************************************/
@@ -859,6 +911,7 @@ static void if_sdio_interrupt(struct sdio_func *func)
859 * Ignore the define name, this really means the card has 911 * Ignore the define name, this really means the card has
860 * successfully received the command. 912 * successfully received the command.
861 */ 913 */
914 card->priv->is_activity_detected = 1;
862 if (cause & IF_SDIO_H_INT_DNLD) 915 if (cause & IF_SDIO_H_INT_DNLD)
863 lbs_host_to_card_done(card->priv); 916 lbs_host_to_card_done(card->priv);
864 917
@@ -998,6 +1051,9 @@ static int if_sdio_probe(struct sdio_func *func,
998 1051
999 priv->card = card; 1052 priv->card = card;
1000 priv->hw_host_to_card = if_sdio_host_to_card; 1053 priv->hw_host_to_card = if_sdio_host_to_card;
1054 priv->enter_deep_sleep = if_sdio_enter_deep_sleep;
1055 priv->exit_deep_sleep = if_sdio_exit_deep_sleep;
1056 priv->reset_deep_sleep_wakeup = if_sdio_reset_deep_sleep_wakeup;
1001 1057
1002 priv->fw_ready = 1; 1058 priv->fw_ready = 1;
1003 1059
diff --git a/drivers/net/wireless/libertas/if_sdio.h b/drivers/net/wireless/libertas/if_sdio.h
index 60c9b2fcef03..12179c1dc9c9 100644
--- a/drivers/net/wireless/libertas/if_sdio.h
+++ b/drivers/net/wireless/libertas/if_sdio.h
@@ -51,5 +51,6 @@
51#define IF_SDIO_EVENT 0x80fc 51#define IF_SDIO_EVENT 0x80fc
52 52
53#define IF_SDIO_BLOCK_SIZE 256 53#define IF_SDIO_BLOCK_SIZE 256
54 54#define CONFIGURATION_REG 0x03
55#define HOST_POWER_UP (0x1U << 1)
55#endif 56#endif
diff --git a/drivers/net/wireless/libertas/if_spi.c b/drivers/net/wireless/libertas/if_spi.c
index 5b3672c4d0cc..d6a48dd3652c 100644
--- a/drivers/net/wireless/libertas/if_spi.c
+++ b/drivers/net/wireless/libertas/if_spi.c
@@ -32,12 +32,6 @@
32#include "dev.h" 32#include "dev.h"
33#include "if_spi.h" 33#include "if_spi.h"
34 34
35struct if_spi_packet {
36 struct list_head list;
37 u16 blen;
38 u8 buffer[0] __attribute__((aligned(4)));
39};
40
41struct if_spi_card { 35struct if_spi_card {
42 struct spi_device *spi; 36 struct spi_device *spi;
43 struct lbs_private *priv; 37 struct lbs_private *priv;
@@ -66,33 +60,10 @@ struct if_spi_card {
66 struct semaphore spi_thread_terminated; 60 struct semaphore spi_thread_terminated;
67 61
68 u8 cmd_buffer[IF_SPI_CMD_BUF_SIZE]; 62 u8 cmd_buffer[IF_SPI_CMD_BUF_SIZE];
69
70 /* A buffer of incoming packets from libertas core.
71 * Since we can't sleep in hw_host_to_card, we have to buffer
72 * them. */
73 struct list_head cmd_packet_list;
74 struct list_head data_packet_list;
75
76 /* Protects cmd_packet_list and data_packet_list */
77 spinlock_t buffer_lock;
78}; 63};
79 64
80static void free_if_spi_card(struct if_spi_card *card) 65static void free_if_spi_card(struct if_spi_card *card)
81{ 66{
82 struct list_head *cursor, *next;
83 struct if_spi_packet *packet;
84
85 BUG_ON(card->run_thread);
86 list_for_each_safe(cursor, next, &card->cmd_packet_list) {
87 packet = container_of(cursor, struct if_spi_packet, list);
88 list_del(&packet->list);
89 kfree(packet);
90 }
91 list_for_each_safe(cursor, next, &card->data_packet_list) {
92 packet = container_of(cursor, struct if_spi_packet, list);
93 list_del(&packet->list);
94 kfree(packet);
95 }
96 spi_set_drvdata(card->spi, NULL); 67 spi_set_drvdata(card->spi, NULL);
97 kfree(card); 68 kfree(card);
98} 69}
@@ -774,40 +745,6 @@ out:
774 return err; 745 return err;
775} 746}
776 747
777/* Move data or a command from the host to the card. */
778static void if_spi_h2c(struct if_spi_card *card,
779 struct if_spi_packet *packet, int type)
780{
781 int err = 0;
782 u16 int_type, port_reg;
783
784 switch (type) {
785 case MVMS_DAT:
786 int_type = IF_SPI_CIC_TX_DOWNLOAD_OVER;
787 port_reg = IF_SPI_DATA_RDWRPORT_REG;
788 break;
789 case MVMS_CMD:
790 int_type = IF_SPI_CIC_CMD_DOWNLOAD_OVER;
791 port_reg = IF_SPI_CMD_RDWRPORT_REG;
792 break;
793 default:
794 lbs_pr_err("can't transfer buffer of type %d\n", type);
795 err = -EINVAL;
796 goto out;
797 }
798
799 /* Write the data to the card */
800 err = spu_write(card, port_reg, packet->buffer, packet->blen);
801 if (err)
802 goto out;
803
804out:
805 kfree(packet);
806
807 if (err)
808 lbs_pr_err("%s: error %d\n", __func__, err);
809}
810
811/* Inform the host about a card event */ 748/* Inform the host about a card event */
812static void if_spi_e2h(struct if_spi_card *card) 749static void if_spi_e2h(struct if_spi_card *card)
813{ 750{
@@ -837,8 +774,6 @@ static int lbs_spi_thread(void *data)
837 int err; 774 int err;
838 struct if_spi_card *card = data; 775 struct if_spi_card *card = data;
839 u16 hiStatus; 776 u16 hiStatus;
840 unsigned long flags;
841 struct if_spi_packet *packet;
842 777
843 while (1) { 778 while (1) {
844 /* Wait to be woken up by one of two things. First, our ISR 779 /* Wait to be woken up by one of two things. First, our ISR
@@ -877,43 +812,9 @@ static int lbs_spi_thread(void *data)
877 if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY || 812 if (hiStatus & IF_SPI_HIST_CMD_DOWNLOAD_RDY ||
878 (card->priv->psstate != PS_STATE_FULL_POWER && 813 (card->priv->psstate != PS_STATE_FULL_POWER &&
879 (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) { 814 (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY))) {
880 /* This means two things. First of all,
881 * if there was a previous command sent, the card has
882 * successfully received it.
883 * Secondly, it is now ready to download another
884 * command.
885 */
886 lbs_host_to_card_done(card->priv); 815 lbs_host_to_card_done(card->priv);
887
888 /* Do we have any command packets from the host to
889 * send? */
890 packet = NULL;
891 spin_lock_irqsave(&card->buffer_lock, flags);
892 if (!list_empty(&card->cmd_packet_list)) {
893 packet = (struct if_spi_packet *)(card->
894 cmd_packet_list.next);
895 list_del(&packet->list);
896 }
897 spin_unlock_irqrestore(&card->buffer_lock, flags);
898
899 if (packet)
900 if_spi_h2c(card, packet, MVMS_CMD);
901 } 816 }
902 if (hiStatus & IF_SPI_HIST_TX_DOWNLOAD_RDY) {
903 /* Do we have any data packets from the host to
904 * send? */
905 packet = NULL;
906 spin_lock_irqsave(&card->buffer_lock, flags);
907 if (!list_empty(&card->data_packet_list)) {
908 packet = (struct if_spi_packet *)(card->
909 data_packet_list.next);
910 list_del(&packet->list);
911 }
912 spin_unlock_irqrestore(&card->buffer_lock, flags);
913 817
914 if (packet)
915 if_spi_h2c(card, packet, MVMS_DAT);
916 }
917 if (hiStatus & IF_SPI_HIST_CARD_EVENT) 818 if (hiStatus & IF_SPI_HIST_CARD_EVENT)
918 if_spi_e2h(card); 819 if_spi_e2h(card);
919 820
@@ -942,40 +843,18 @@ static int if_spi_host_to_card(struct lbs_private *priv,
942 u8 type, u8 *buf, u16 nb) 843 u8 type, u8 *buf, u16 nb)
943{ 844{
944 int err = 0; 845 int err = 0;
945 unsigned long flags;
946 struct if_spi_card *card = priv->card; 846 struct if_spi_card *card = priv->card;
947 struct if_spi_packet *packet;
948 u16 blen;
949 847
950 lbs_deb_enter_args(LBS_DEB_SPI, "type %d, bytes %d", type, nb); 848 lbs_deb_enter_args(LBS_DEB_SPI, "type %d, bytes %d", type, nb);
951 849
952 if (nb == 0) { 850 nb = ALIGN(nb, 4);
953 lbs_pr_err("%s: invalid size requested: %d\n", __func__, nb);
954 err = -EINVAL;
955 goto out;
956 }
957 blen = ALIGN(nb, 4);
958 packet = kzalloc(sizeof(struct if_spi_packet) + blen, GFP_ATOMIC);
959 if (!packet) {
960 err = -ENOMEM;
961 goto out;
962 }
963 packet->blen = blen;
964 memcpy(packet->buffer, buf, nb);
965 memset(packet->buffer + nb, 0, blen - nb);
966 851
967 switch (type) { 852 switch (type) {
968 case MVMS_CMD: 853 case MVMS_CMD:
969 priv->dnld_sent = DNLD_CMD_SENT; 854 err = spu_write(card, IF_SPI_CMD_RDWRPORT_REG, buf, nb);
970 spin_lock_irqsave(&card->buffer_lock, flags);
971 list_add_tail(&packet->list, &card->cmd_packet_list);
972 spin_unlock_irqrestore(&card->buffer_lock, flags);
973 break; 855 break;
974 case MVMS_DAT: 856 case MVMS_DAT:
975 priv->dnld_sent = DNLD_DATA_SENT; 857 err = spu_write(card, IF_SPI_DATA_RDWRPORT_REG, buf, nb);
976 spin_lock_irqsave(&card->buffer_lock, flags);
977 list_add_tail(&packet->list, &card->data_packet_list);
978 spin_unlock_irqrestore(&card->buffer_lock, flags);
979 break; 858 break;
980 default: 859 default:
981 lbs_pr_err("can't transfer buffer of type %d", type); 860 lbs_pr_err("can't transfer buffer of type %d", type);
@@ -983,9 +862,6 @@ static int if_spi_host_to_card(struct lbs_private *priv,
983 break; 862 break;
984 } 863 }
985 864
986 /* Wake up the spi thread */
987 up(&card->spi_ready);
988out:
989 lbs_deb_leave_args(LBS_DEB_SPI, "err=%d", err); 865 lbs_deb_leave_args(LBS_DEB_SPI, "err=%d", err);
990 return err; 866 return err;
991} 867}
@@ -1062,9 +938,6 @@ static int __devinit if_spi_probe(struct spi_device *spi)
1062 938
1063 sema_init(&card->spi_ready, 0); 939 sema_init(&card->spi_ready, 0);
1064 sema_init(&card->spi_thread_terminated, 0); 940 sema_init(&card->spi_thread_terminated, 0);
1065 INIT_LIST_HEAD(&card->cmd_packet_list);
1066 INIT_LIST_HEAD(&card->data_packet_list);
1067 spin_lock_init(&card->buffer_lock);
1068 941
1069 /* Initialize the SPI Interface Unit */ 942 /* Initialize the SPI Interface Unit */
1070 err = spu_init(card, pdata->use_dummy_writes); 943 err = spu_init(card, pdata->use_dummy_writes);
@@ -1117,6 +990,9 @@ static int __devinit if_spi_probe(struct spi_device *spi)
1117 card->priv = priv; 990 card->priv = priv;
1118 priv->card = card; 991 priv->card = card;
1119 priv->hw_host_to_card = if_spi_host_to_card; 992 priv->hw_host_to_card = if_spi_host_to_card;
993 priv->enter_deep_sleep = NULL;
994 priv->exit_deep_sleep = NULL;
995 priv->reset_deep_sleep_wakeup = NULL;
1120 priv->fw_ready = 1; 996 priv->fw_ready = 1;
1121 997
1122 /* Initialize interrupt handling stuff. */ 998 /* Initialize interrupt handling stuff. */
@@ -1138,6 +1014,9 @@ static int __devinit if_spi_probe(struct spi_device *spi)
1138 goto terminate_thread; 1014 goto terminate_thread;
1139 } 1015 }
1140 1016
1017 /* poke the IRQ handler so that we don't miss the first interrupt */
1018 up(&card->spi_ready);
1019
1141 /* Start the card. 1020 /* Start the card.
1142 * This will call register_netdev, and we'll start 1021 * This will call register_netdev, and we'll start
1143 * getting interrupts... */ 1022 * getting interrupts... */
diff --git a/drivers/net/wireless/libertas/if_usb.c b/drivers/net/wireless/libertas/if_usb.c
index 3fac4efa5ac8..f12d667ba100 100644
--- a/drivers/net/wireless/libertas/if_usb.c
+++ b/drivers/net/wireless/libertas/if_usb.c
@@ -300,6 +300,9 @@ static int if_usb_probe(struct usb_interface *intf,
300 cardp->priv->fw_ready = 1; 300 cardp->priv->fw_ready = 1;
301 301
302 priv->hw_host_to_card = if_usb_host_to_card; 302 priv->hw_host_to_card = if_usb_host_to_card;
303 priv->enter_deep_sleep = NULL;
304 priv->exit_deep_sleep = NULL;
305 priv->reset_deep_sleep_wakeup = NULL;
303#ifdef CONFIG_OLPC 306#ifdef CONFIG_OLPC
304 if (machine_is_olpc()) 307 if (machine_is_olpc())
305 priv->reset_card = if_usb_reset_olpc_card; 308 priv->reset_card = if_usb_reset_olpc_card;
diff --git a/drivers/net/wireless/libertas/main.c b/drivers/net/wireless/libertas/main.c
index 87b4e497faa2..01f46cf288d7 100644
--- a/drivers/net/wireless/libertas/main.c
+++ b/drivers/net/wireless/libertas/main.c
@@ -14,11 +14,13 @@
14#include <linux/stddef.h> 14#include <linux/stddef.h>
15#include <linux/ieee80211.h> 15#include <linux/ieee80211.h>
16#include <net/iw_handler.h> 16#include <net/iw_handler.h>
17#include <net/cfg80211.h>
17 18
18#include "host.h" 19#include "host.h"
19#include "decl.h" 20#include "decl.h"
20#include "dev.h" 21#include "dev.h"
21#include "wext.h" 22#include "wext.h"
23#include "cfg.h"
22#include "debugfs.h" 24#include "debugfs.h"
23#include "scan.h" 25#include "scan.h"
24#include "assoc.h" 26#include "assoc.h"
@@ -43,119 +45,6 @@ module_param_named(libertas_debug, lbs_debug, int, 0644);
43struct cmd_confirm_sleep confirm_sleep; 45struct cmd_confirm_sleep confirm_sleep;
44 46
45 47
46#define LBS_TX_PWR_DEFAULT 20 /*100mW */
47#define LBS_TX_PWR_US_DEFAULT 20 /*100mW */
48#define LBS_TX_PWR_JP_DEFAULT 16 /*50mW */
49#define LBS_TX_PWR_FR_DEFAULT 20 /*100mW */
50#define LBS_TX_PWR_EMEA_DEFAULT 20 /*100mW */
51
52/* Format { channel, frequency (MHz), maxtxpower } */
53/* band: 'B/G', region: USA FCC/Canada IC */
54static struct chan_freq_power channel_freq_power_US_BG[] = {
55 {1, 2412, LBS_TX_PWR_US_DEFAULT},
56 {2, 2417, LBS_TX_PWR_US_DEFAULT},
57 {3, 2422, LBS_TX_PWR_US_DEFAULT},
58 {4, 2427, LBS_TX_PWR_US_DEFAULT},
59 {5, 2432, LBS_TX_PWR_US_DEFAULT},
60 {6, 2437, LBS_TX_PWR_US_DEFAULT},
61 {7, 2442, LBS_TX_PWR_US_DEFAULT},
62 {8, 2447, LBS_TX_PWR_US_DEFAULT},
63 {9, 2452, LBS_TX_PWR_US_DEFAULT},
64 {10, 2457, LBS_TX_PWR_US_DEFAULT},
65 {11, 2462, LBS_TX_PWR_US_DEFAULT}
66};
67
68/* band: 'B/G', region: Europe ETSI */
69static struct chan_freq_power channel_freq_power_EU_BG[] = {
70 {1, 2412, LBS_TX_PWR_EMEA_DEFAULT},
71 {2, 2417, LBS_TX_PWR_EMEA_DEFAULT},
72 {3, 2422, LBS_TX_PWR_EMEA_DEFAULT},
73 {4, 2427, LBS_TX_PWR_EMEA_DEFAULT},
74 {5, 2432, LBS_TX_PWR_EMEA_DEFAULT},
75 {6, 2437, LBS_TX_PWR_EMEA_DEFAULT},
76 {7, 2442, LBS_TX_PWR_EMEA_DEFAULT},
77 {8, 2447, LBS_TX_PWR_EMEA_DEFAULT},
78 {9, 2452, LBS_TX_PWR_EMEA_DEFAULT},
79 {10, 2457, LBS_TX_PWR_EMEA_DEFAULT},
80 {11, 2462, LBS_TX_PWR_EMEA_DEFAULT},
81 {12, 2467, LBS_TX_PWR_EMEA_DEFAULT},
82 {13, 2472, LBS_TX_PWR_EMEA_DEFAULT}
83};
84
85/* band: 'B/G', region: Spain */
86static struct chan_freq_power channel_freq_power_SPN_BG[] = {
87 {10, 2457, LBS_TX_PWR_DEFAULT},
88 {11, 2462, LBS_TX_PWR_DEFAULT}
89};
90
91/* band: 'B/G', region: France */
92static struct chan_freq_power channel_freq_power_FR_BG[] = {
93 {10, 2457, LBS_TX_PWR_FR_DEFAULT},
94 {11, 2462, LBS_TX_PWR_FR_DEFAULT},
95 {12, 2467, LBS_TX_PWR_FR_DEFAULT},
96 {13, 2472, LBS_TX_PWR_FR_DEFAULT}
97};
98
99/* band: 'B/G', region: Japan */
100static struct chan_freq_power channel_freq_power_JPN_BG[] = {
101 {1, 2412, LBS_TX_PWR_JP_DEFAULT},
102 {2, 2417, LBS_TX_PWR_JP_DEFAULT},
103 {3, 2422, LBS_TX_PWR_JP_DEFAULT},
104 {4, 2427, LBS_TX_PWR_JP_DEFAULT},
105 {5, 2432, LBS_TX_PWR_JP_DEFAULT},
106 {6, 2437, LBS_TX_PWR_JP_DEFAULT},
107 {7, 2442, LBS_TX_PWR_JP_DEFAULT},
108 {8, 2447, LBS_TX_PWR_JP_DEFAULT},
109 {9, 2452, LBS_TX_PWR_JP_DEFAULT},
110 {10, 2457, LBS_TX_PWR_JP_DEFAULT},
111 {11, 2462, LBS_TX_PWR_JP_DEFAULT},
112 {12, 2467, LBS_TX_PWR_JP_DEFAULT},
113 {13, 2472, LBS_TX_PWR_JP_DEFAULT},
114 {14, 2484, LBS_TX_PWR_JP_DEFAULT}
115};
116
117/**
118 * the structure for channel, frequency and power
119 */
120struct region_cfp_table {
121 u8 region;
122 struct chan_freq_power *cfp_BG;
123 int cfp_no_BG;
124};
125
126/**
127 * the structure for the mapping between region and CFP
128 */
129static struct region_cfp_table region_cfp_table[] = {
130 {0x10, /*US FCC */
131 channel_freq_power_US_BG,
132 ARRAY_SIZE(channel_freq_power_US_BG),
133 }
134 ,
135 {0x20, /*CANADA IC */
136 channel_freq_power_US_BG,
137 ARRAY_SIZE(channel_freq_power_US_BG),
138 }
139 ,
140 {0x30, /*EU*/ channel_freq_power_EU_BG,
141 ARRAY_SIZE(channel_freq_power_EU_BG),
142 }
143 ,
144 {0x31, /*SPAIN*/ channel_freq_power_SPN_BG,
145 ARRAY_SIZE(channel_freq_power_SPN_BG),
146 }
147 ,
148 {0x32, /*FRANCE*/ channel_freq_power_FR_BG,
149 ARRAY_SIZE(channel_freq_power_FR_BG),
150 }
151 ,
152 {0x40, /*JAPAN*/ channel_freq_power_JPN_BG,
153 ARRAY_SIZE(channel_freq_power_JPN_BG),
154 }
155 ,
156/*Add new region here */
157};
158
159/** 48/**
160 * the table to keep region code 49 * the table to keep region code
161 */ 50 */
@@ -163,13 +52,6 @@ u16 lbs_region_code_to_index[MRVDRV_MAX_REGION_CODE] =
163 { 0x10, 0x20, 0x30, 0x31, 0x32, 0x40 }; 52 { 0x10, 0x20, 0x30, 0x31, 0x32, 0x40 };
164 53
165/** 54/**
166 * 802.11b/g supported bitrates (in 500Kb/s units)
167 */
168u8 lbs_bg_rates[MAX_RATES] =
169 { 0x02, 0x04, 0x0b, 0x16, 0x0c, 0x12, 0x18, 0x24, 0x30, 0x48, 0x60, 0x6c,
1700x00, 0x00 };
171
172/**
173 * FW rate table. FW refers to rates by their index in this table, not by the 55 * FW rate table. FW refers to rates by their index in this table, not by the
174 * rate value itself. Values of 0x00 are 56 * rate value itself. Values of 0x00 are
175 * reserved positions. 57 * reserved positions.
@@ -403,7 +285,7 @@ static ssize_t lbs_mesh_set(struct device *dev,
403 return count; 285 return count;
404 if (enable) 286 if (enable)
405 action = CMD_ACT_MESH_CONFIG_START; 287 action = CMD_ACT_MESH_CONFIG_START;
406 ret = lbs_mesh_config(priv, action, priv->curbssparams.channel); 288 ret = lbs_mesh_config(priv, action, priv->channel);
407 if (ret) 289 if (ret)
408 return ret; 290 return ret;
409 291
@@ -574,8 +456,10 @@ void lbs_host_to_card_done(struct lbs_private *priv)
574 priv->dnld_sent = DNLD_RES_RECEIVED; 456 priv->dnld_sent = DNLD_RES_RECEIVED;
575 457
576 /* Wake main thread if commands are pending */ 458 /* Wake main thread if commands are pending */
577 if (!priv->cur_cmd || priv->tx_pending_len > 0) 459 if (!priv->cur_cmd || priv->tx_pending_len > 0) {
578 wake_up_interruptible(&priv->waitq); 460 if (!priv->wakeup_dev_required)
461 wake_up_interruptible(&priv->waitq);
462 }
579 463
580 spin_unlock_irqrestore(&priv->driver_lock, flags); 464 spin_unlock_irqrestore(&priv->driver_lock, flags);
581 lbs_deb_leave(LBS_DEB_THREAD); 465 lbs_deb_leave(LBS_DEB_THREAD);
@@ -770,7 +654,8 @@ static int lbs_thread(void *data)
770 shouldsleep = 0; /* We have a command response */ 654 shouldsleep = 0; /* We have a command response */
771 else if (priv->cur_cmd) 655 else if (priv->cur_cmd)
772 shouldsleep = 1; /* Can't send a command; one already running */ 656 shouldsleep = 1; /* Can't send a command; one already running */
773 else if (!list_empty(&priv->cmdpendingq)) 657 else if (!list_empty(&priv->cmdpendingq) &&
658 !(priv->wakeup_dev_required))
774 shouldsleep = 0; /* We have a command to send */ 659 shouldsleep = 0; /* We have a command to send */
775 else if (__kfifo_len(priv->event_fifo)) 660 else if (__kfifo_len(priv->event_fifo))
776 shouldsleep = 0; /* We have an event to process */ 661 shouldsleep = 0; /* We have an event to process */
@@ -822,6 +707,26 @@ static int lbs_thread(void *data)
822 } 707 }
823 spin_unlock_irq(&priv->driver_lock); 708 spin_unlock_irq(&priv->driver_lock);
824 709
710 /* Process hardware events, e.g. card removed, link lost */
711 spin_lock_irq(&priv->driver_lock);
712 while (__kfifo_len(priv->event_fifo)) {
713 u32 event;
714 __kfifo_get(priv->event_fifo, (unsigned char *) &event,
715 sizeof(event));
716 spin_unlock_irq(&priv->driver_lock);
717 lbs_process_event(priv, event);
718 spin_lock_irq(&priv->driver_lock);
719 }
720 spin_unlock_irq(&priv->driver_lock);
721
722 if (priv->wakeup_dev_required) {
723 lbs_deb_thread("Waking up device...\n");
724 /* Wake up device */
725 if (priv->exit_deep_sleep(priv))
726 lbs_deb_thread("Wakeup device failed\n");
727 continue;
728 }
729
825 /* command timeout stuff */ 730 /* command timeout stuff */
826 if (priv->cmd_timed_out && priv->cur_cmd) { 731 if (priv->cmd_timed_out && priv->cur_cmd) {
827 struct cmd_ctrl_node *cmdnode = priv->cur_cmd; 732 struct cmd_ctrl_node *cmdnode = priv->cur_cmd;
@@ -849,18 +754,7 @@ static int lbs_thread(void *data)
849 } 754 }
850 priv->cmd_timed_out = 0; 755 priv->cmd_timed_out = 0;
851 756
852 /* Process hardware events, e.g. card removed, link lost */
853 spin_lock_irq(&priv->driver_lock);
854 while (__kfifo_len(priv->event_fifo)) {
855 u32 event;
856 757
857 __kfifo_get(priv->event_fifo, (unsigned char *) &event,
858 sizeof(event));
859 spin_unlock_irq(&priv->driver_lock);
860 lbs_process_event(priv, event);
861 spin_lock_irq(&priv->driver_lock);
862 }
863 spin_unlock_irq(&priv->driver_lock);
864 758
865 if (!priv->fw_ready) 759 if (!priv->fw_ready)
866 continue; 760 continue;
@@ -894,6 +788,9 @@ static int lbs_thread(void *data)
894 (priv->psstate == PS_STATE_PRE_SLEEP)) 788 (priv->psstate == PS_STATE_PRE_SLEEP))
895 continue; 789 continue;
896 790
791 if (priv->is_deep_sleep)
792 continue;
793
897 /* Execute the next command */ 794 /* Execute the next command */
898 if (!priv->dnld_sent && !priv->cur_cmd) 795 if (!priv->dnld_sent && !priv->cur_cmd)
899 lbs_execute_next_command(priv); 796 lbs_execute_next_command(priv);
@@ -928,6 +825,7 @@ static int lbs_thread(void *data)
928 } 825 }
929 826
930 del_timer(&priv->command_timer); 827 del_timer(&priv->command_timer);
828 del_timer(&priv->auto_deepsleep_timer);
931 wake_up_all(&priv->cmd_pending); 829 wake_up_all(&priv->cmd_pending);
932 830
933 lbs_deb_leave(LBS_DEB_THREAD); 831 lbs_deb_leave(LBS_DEB_THREAD);
@@ -1050,6 +948,62 @@ out:
1050 lbs_deb_leave(LBS_DEB_CMD); 948 lbs_deb_leave(LBS_DEB_CMD);
1051} 949}
1052 950
951/**
952 * This function put the device back to deep sleep mode when timer expires
953 * and no activity (command, event, data etc.) is detected.
954 */
955static void auto_deepsleep_timer_fn(unsigned long data)
956{
957 struct lbs_private *priv = (struct lbs_private *)data;
958 int ret;
959
960 lbs_deb_enter(LBS_DEB_CMD);
961
962 if (priv->is_activity_detected) {
963 priv->is_activity_detected = 0;
964 } else {
965 if (priv->is_auto_deep_sleep_enabled &&
966 (!priv->wakeup_dev_required) &&
967 (priv->connect_status != LBS_CONNECTED)) {
968 lbs_deb_main("Entering auto deep sleep mode...\n");
969 ret = lbs_prepare_and_send_command(priv,
970 CMD_802_11_DEEP_SLEEP, 0,
971 0, 0, NULL);
972 if (ret)
973 lbs_pr_err("Enter Deep Sleep command failed\n");
974 }
975 }
976 mod_timer(&priv->auto_deepsleep_timer , jiffies +
977 (priv->auto_deep_sleep_timeout * HZ)/1000);
978 lbs_deb_leave(LBS_DEB_CMD);
979}
980
981int lbs_enter_auto_deep_sleep(struct lbs_private *priv)
982{
983 lbs_deb_enter(LBS_DEB_SDIO);
984
985 priv->is_auto_deep_sleep_enabled = 1;
986 if (priv->is_deep_sleep)
987 priv->wakeup_dev_required = 1;
988 mod_timer(&priv->auto_deepsleep_timer ,
989 jiffies + (priv->auto_deep_sleep_timeout * HZ)/1000);
990
991 lbs_deb_leave(LBS_DEB_SDIO);
992 return 0;
993}
994
995int lbs_exit_auto_deep_sleep(struct lbs_private *priv)
996{
997 lbs_deb_enter(LBS_DEB_SDIO);
998
999 priv->is_auto_deep_sleep_enabled = 0;
1000 priv->auto_deep_sleep_timeout = 0;
1001 del_timer(&priv->auto_deepsleep_timer);
1002
1003 lbs_deb_leave(LBS_DEB_SDIO);
1004 return 0;
1005}
1006
1053static void lbs_sync_channel_worker(struct work_struct *work) 1007static void lbs_sync_channel_worker(struct work_struct *work)
1054{ 1008{
1055 struct lbs_private *priv = container_of(work, struct lbs_private, 1009 struct lbs_private *priv = container_of(work, struct lbs_private,
@@ -1092,18 +1046,24 @@ static int lbs_init_adapter(struct lbs_private *priv)
1092 priv->mesh_connect_status = LBS_DISCONNECTED; 1046 priv->mesh_connect_status = LBS_DISCONNECTED;
1093 priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM; 1047 priv->secinfo.auth_mode = IW_AUTH_ALG_OPEN_SYSTEM;
1094 priv->mode = IW_MODE_INFRA; 1048 priv->mode = IW_MODE_INFRA;
1095 priv->curbssparams.channel = DEFAULT_AD_HOC_CHANNEL; 1049 priv->channel = DEFAULT_AD_HOC_CHANNEL;
1096 priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON; 1050 priv->mac_control = CMD_ACT_MAC_RX_ON | CMD_ACT_MAC_TX_ON;
1097 priv->radio_on = 1; 1051 priv->radio_on = 1;
1098 priv->enablehwauto = 1; 1052 priv->enablehwauto = 1;
1099 priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE; 1053 priv->capability = WLAN_CAPABILITY_SHORT_PREAMBLE;
1100 priv->psmode = LBS802_11POWERMODECAM; 1054 priv->psmode = LBS802_11POWERMODECAM;
1101 priv->psstate = PS_STATE_FULL_POWER; 1055 priv->psstate = PS_STATE_FULL_POWER;
1056 priv->is_deep_sleep = 0;
1057 priv->is_auto_deep_sleep_enabled = 0;
1058 priv->wakeup_dev_required = 0;
1059 init_waitqueue_head(&priv->ds_awake_q);
1102 1060
1103 mutex_init(&priv->lock); 1061 mutex_init(&priv->lock);
1104 1062
1105 setup_timer(&priv->command_timer, command_timer_fn, 1063 setup_timer(&priv->command_timer, command_timer_fn,
1106 (unsigned long)priv); 1064 (unsigned long)priv);
1065 setup_timer(&priv->auto_deepsleep_timer, auto_deepsleep_timer_fn,
1066 (unsigned long)priv);
1107 1067
1108 INIT_LIST_HEAD(&priv->cmdfreeq); 1068 INIT_LIST_HEAD(&priv->cmdfreeq);
1109 INIT_LIST_HEAD(&priv->cmdpendingq); 1069 INIT_LIST_HEAD(&priv->cmdpendingq);
@@ -1142,6 +1102,7 @@ static void lbs_free_adapter(struct lbs_private *priv)
1142 if (priv->event_fifo) 1102 if (priv->event_fifo)
1143 kfifo_free(priv->event_fifo); 1103 kfifo_free(priv->event_fifo);
1144 del_timer(&priv->command_timer); 1104 del_timer(&priv->command_timer);
1105 del_timer(&priv->auto_deepsleep_timer);
1145 kfree(priv->networks); 1106 kfree(priv->networks);
1146 priv->networks = NULL; 1107 priv->networks = NULL;
1147 1108
@@ -1168,31 +1129,41 @@ static const struct net_device_ops lbs_netdev_ops = {
1168 */ 1129 */
1169struct lbs_private *lbs_add_card(void *card, struct device *dmdev) 1130struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
1170{ 1131{
1171 struct net_device *dev = NULL; 1132 struct net_device *dev;
1133 struct wireless_dev *wdev;
1172 struct lbs_private *priv = NULL; 1134 struct lbs_private *priv = NULL;
1173 1135
1174 lbs_deb_enter(LBS_DEB_MAIN); 1136 lbs_deb_enter(LBS_DEB_MAIN);
1175 1137
1176 /* Allocate an Ethernet device and register it */ 1138 /* Allocate an Ethernet device and register it */
1177 dev = alloc_etherdev(sizeof(struct lbs_private)); 1139 wdev = lbs_cfg_alloc(dmdev);
1178 if (!dev) { 1140 if (IS_ERR(wdev)) {
1179 lbs_pr_err("init wlanX device failed\n"); 1141 lbs_pr_err("cfg80211 init failed\n");
1180 goto done; 1142 goto done;
1181 } 1143 }
1182 priv = netdev_priv(dev); 1144 /* TODO? */
1183 dev->ml_priv = priv; 1145 wdev->iftype = NL80211_IFTYPE_STATION;
1146 priv = wdev_priv(wdev);
1147 priv->wdev = wdev;
1184 1148
1185 if (lbs_init_adapter(priv)) { 1149 if (lbs_init_adapter(priv)) {
1186 lbs_pr_err("failed to initialize adapter structure.\n"); 1150 lbs_pr_err("failed to initialize adapter structure.\n");
1187 goto err_init_adapter; 1151 goto err_wdev;
1188 } 1152 }
1189 1153
1154 //TODO? dev = alloc_netdev_mq(0, "wlan%d", ether_setup, IWM_TX_QUEUES);
1155 dev = alloc_netdev(0, "wlan%d", ether_setup);
1156 if (!dev) {
1157 dev_err(dmdev, "no memory for network device instance\n");
1158 goto err_adapter;
1159 }
1160
1161 dev->ieee80211_ptr = wdev;
1162 dev->ml_priv = priv;
1163 SET_NETDEV_DEV(dev, dmdev);
1164 wdev->netdev = dev;
1190 priv->dev = dev; 1165 priv->dev = dev;
1191 priv->card = card;
1192 priv->mesh_open = 0;
1193 priv->infra_open = 0;
1194 1166
1195 /* Setup the OS Interface to our functions */
1196 dev->netdev_ops = &lbs_netdev_ops; 1167 dev->netdev_ops = &lbs_netdev_ops;
1197 dev->watchdog_timeo = 5 * HZ; 1168 dev->watchdog_timeo = 5 * HZ;
1198 dev->ethtool_ops = &lbs_ethtool_ops; 1169 dev->ethtool_ops = &lbs_ethtool_ops;
@@ -1201,7 +1172,14 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
1201#endif 1172#endif
1202 dev->flags |= IFF_BROADCAST | IFF_MULTICAST; 1173 dev->flags |= IFF_BROADCAST | IFF_MULTICAST;
1203 1174
1204 SET_NETDEV_DEV(dev, dmdev); 1175
1176 // TODO: kzalloc + iwm_init_default_profile(iwm, iwm->umac_profile); ??
1177
1178
1179 priv->card = card;
1180 priv->mesh_open = 0;
1181 priv->infra_open = 0;
1182
1205 1183
1206 priv->rtap_net_dev = NULL; 1184 priv->rtap_net_dev = NULL;
1207 strcpy(dev->name, "wlan%d"); 1185 strcpy(dev->name, "wlan%d");
@@ -1211,7 +1189,7 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
1211 priv->main_thread = kthread_run(lbs_thread, dev, "lbs_main"); 1189 priv->main_thread = kthread_run(lbs_thread, dev, "lbs_main");
1212 if (IS_ERR(priv->main_thread)) { 1190 if (IS_ERR(priv->main_thread)) {
1213 lbs_deb_thread("Error creating main thread.\n"); 1191 lbs_deb_thread("Error creating main thread.\n");
1214 goto err_init_adapter; 1192 goto err_ndev;
1215 } 1193 }
1216 1194
1217 priv->work_thread = create_singlethread_workqueue("lbs_worker"); 1195 priv->work_thread = create_singlethread_workqueue("lbs_worker");
@@ -1228,9 +1206,15 @@ struct lbs_private *lbs_add_card(void *card, struct device *dmdev)
1228 1206
1229 goto done; 1207 goto done;
1230 1208
1231err_init_adapter: 1209 err_ndev:
1232 lbs_free_adapter(priv);
1233 free_netdev(dev); 1210 free_netdev(dev);
1211
1212 err_adapter:
1213 lbs_free_adapter(priv);
1214
1215 err_wdev:
1216 lbs_cfg_free(priv);
1217
1234 priv = NULL; 1218 priv = NULL;
1235 1219
1236done: 1220done:
@@ -1243,7 +1227,6 @@ EXPORT_SYMBOL_GPL(lbs_add_card);
1243void lbs_remove_card(struct lbs_private *priv) 1227void lbs_remove_card(struct lbs_private *priv)
1244{ 1228{
1245 struct net_device *dev = priv->dev; 1229 struct net_device *dev = priv->dev;
1246 union iwreq_data wrqu;
1247 1230
1248 lbs_deb_enter(LBS_DEB_MAIN); 1231 lbs_deb_enter(LBS_DEB_MAIN);
1249 1232
@@ -1268,15 +1251,19 @@ void lbs_remove_card(struct lbs_private *priv)
1268 lbs_ps_wakeup(priv, CMD_OPTION_WAITFORRSP); 1251 lbs_ps_wakeup(priv, CMD_OPTION_WAITFORRSP);
1269 } 1252 }
1270 1253
1271 memset(wrqu.ap_addr.sa_data, 0xaa, ETH_ALEN); 1254 lbs_send_disconnect_notification(priv);
1272 wrqu.ap_addr.sa_family = ARPHRD_ETHER; 1255
1273 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL); 1256 if (priv->is_deep_sleep) {
1257 priv->is_deep_sleep = 0;
1258 wake_up_interruptible(&priv->ds_awake_q);
1259 }
1274 1260
1275 /* Stop the thread servicing the interrupts */ 1261 /* Stop the thread servicing the interrupts */
1276 priv->surpriseremoved = 1; 1262 priv->surpriseremoved = 1;
1277 kthread_stop(priv->main_thread); 1263 kthread_stop(priv->main_thread);
1278 1264
1279 lbs_free_adapter(priv); 1265 lbs_free_adapter(priv);
1266 lbs_cfg_free(priv);
1280 1267
1281 priv->dev = NULL; 1268 priv->dev = NULL;
1282 free_netdev(dev); 1269 free_netdev(dev);
@@ -1298,11 +1285,8 @@ int lbs_start_card(struct lbs_private *priv)
1298 if (ret) 1285 if (ret)
1299 goto done; 1286 goto done;
1300 1287
1301 /* init 802.11d */ 1288 if (lbs_cfg_register(priv)) {
1302 lbs_init_11d(priv); 1289 lbs_pr_err("cannot register device\n");
1303
1304 if (register_netdev(dev)) {
1305 lbs_pr_err("cannot register ethX device\n");
1306 goto done; 1290 goto done;
1307 } 1291 }
1308 1292
@@ -1327,10 +1311,10 @@ int lbs_start_card(struct lbs_private *priv)
1327 1311
1328 priv->mesh_tlv = TLV_TYPE_OLD_MESH_ID; 1312 priv->mesh_tlv = TLV_TYPE_OLD_MESH_ID;
1329 if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 1313 if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
1330 priv->curbssparams.channel)) { 1314 priv->channel)) {
1331 priv->mesh_tlv = TLV_TYPE_MESH_ID; 1315 priv->mesh_tlv = TLV_TYPE_MESH_ID;
1332 if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 1316 if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
1333 priv->curbssparams.channel)) 1317 priv->channel))
1334 priv->mesh_tlv = 0; 1318 priv->mesh_tlv = 0;
1335 } 1319 }
1336 } else if (priv->mesh_fw_ver == MESH_FW_NEW) { 1320 } else if (priv->mesh_fw_ver == MESH_FW_NEW) {
@@ -1339,7 +1323,7 @@ int lbs_start_card(struct lbs_private *priv)
1339 */ 1323 */
1340 priv->mesh_tlv = TLV_TYPE_MESH_ID; 1324 priv->mesh_tlv = TLV_TYPE_MESH_ID;
1341 if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 1325 if (lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
1342 priv->curbssparams.channel)) 1326 priv->channel))
1343 priv->mesh_tlv = 0; 1327 priv->mesh_tlv = 0;
1344 } 1328 }
1345 if (priv->mesh_tlv) { 1329 if (priv->mesh_tlv) {
@@ -1392,6 +1376,7 @@ void lbs_stop_card(struct lbs_private *priv)
1392 1376
1393 /* Delete the timeout of the currently processing command */ 1377 /* Delete the timeout of the currently processing command */
1394 del_timer_sync(&priv->command_timer); 1378 del_timer_sync(&priv->command_timer);
1379 del_timer_sync(&priv->auto_deepsleep_timer);
1395 1380
1396 /* Flush pending command nodes */ 1381 /* Flush pending command nodes */
1397 spin_lock_irqsave(&priv->driver_lock, flags); 1382 spin_lock_irqsave(&priv->driver_lock, flags);
@@ -1509,68 +1494,6 @@ static void lbs_remove_mesh(struct lbs_private *priv)
1509 lbs_deb_leave(LBS_DEB_MESH); 1494 lbs_deb_leave(LBS_DEB_MESH);
1510} 1495}
1511 1496
1512/**
1513 * @brief This function finds the CFP in
1514 * region_cfp_table based on region and band parameter.
1515 *
1516 * @param region The region code
1517 * @param band The band
1518 * @param cfp_no A pointer to CFP number
1519 * @return A pointer to CFP
1520 */
1521struct chan_freq_power *lbs_get_region_cfp_table(u8 region, int *cfp_no)
1522{
1523 int i, end;
1524
1525 lbs_deb_enter(LBS_DEB_MAIN);
1526
1527 end = ARRAY_SIZE(region_cfp_table);
1528
1529 for (i = 0; i < end ; i++) {
1530 lbs_deb_main("region_cfp_table[i].region=%d\n",
1531 region_cfp_table[i].region);
1532 if (region_cfp_table[i].region == region) {
1533 *cfp_no = region_cfp_table[i].cfp_no_BG;
1534 lbs_deb_leave(LBS_DEB_MAIN);
1535 return region_cfp_table[i].cfp_BG;
1536 }
1537 }
1538
1539 lbs_deb_leave_args(LBS_DEB_MAIN, "ret NULL");
1540 return NULL;
1541}
1542
1543int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band)
1544{
1545 int ret = 0;
1546 int i = 0;
1547
1548 struct chan_freq_power *cfp;
1549 int cfp_no;
1550
1551 lbs_deb_enter(LBS_DEB_MAIN);
1552
1553 memset(priv->region_channel, 0, sizeof(priv->region_channel));
1554
1555 cfp = lbs_get_region_cfp_table(region, &cfp_no);
1556 if (cfp != NULL) {
1557 priv->region_channel[i].nrcfp = cfp_no;
1558 priv->region_channel[i].CFP = cfp;
1559 } else {
1560 lbs_deb_main("wrong region code %#x in band B/G\n",
1561 region);
1562 ret = -1;
1563 goto out;
1564 }
1565 priv->region_channel[i].valid = 1;
1566 priv->region_channel[i].region = region;
1567 priv->region_channel[i].band = band;
1568 i++;
1569out:
1570 lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
1571 return ret;
1572}
1573
1574void lbs_queue_event(struct lbs_private *priv, u32 event) 1497void lbs_queue_event(struct lbs_private *priv, u32 event)
1575{ 1498{
1576 unsigned long flags; 1499 unsigned long flags;
diff --git a/drivers/net/wireless/libertas/persistcfg.c b/drivers/net/wireless/libertas/persistcfg.c
index 18fe29faf99b..871f914a75fc 100644
--- a/drivers/net/wireless/libertas/persistcfg.c
+++ b/drivers/net/wireless/libertas/persistcfg.c
@@ -187,9 +187,9 @@ static ssize_t mesh_id_get(struct device *dev, struct device_attribute *attr,
187 if (ret) 187 if (ret)
188 return ret; 188 return ret;
189 189
190 if (defs.meshie.val.mesh_id_len > IW_ESSID_MAX_SIZE) { 190 if (defs.meshie.val.mesh_id_len > IEEE80211_MAX_SSID_LEN) {
191 lbs_pr_err("inconsistent mesh ID length"); 191 lbs_pr_err("inconsistent mesh ID length");
192 defs.meshie.val.mesh_id_len = IW_ESSID_MAX_SIZE; 192 defs.meshie.val.mesh_id_len = IEEE80211_MAX_SSID_LEN;
193 } 193 }
194 194
195 /* SSID not null terminated: reserve room for \0 + \n */ 195 /* SSID not null terminated: reserve room for \0 + \n */
@@ -214,7 +214,7 @@ static ssize_t mesh_id_set(struct device *dev, struct device_attribute *attr,
214 int len; 214 int len;
215 int ret; 215 int ret;
216 216
217 if (count < 2 || count > IW_ESSID_MAX_SIZE + 1) 217 if (count < 2 || count > IEEE80211_MAX_SSID_LEN + 1)
218 return -EINVAL; 218 return -EINVAL;
219 219
220 memset(&cmd, 0, sizeof(struct cmd_ds_mesh_config)); 220 memset(&cmd, 0, sizeof(struct cmd_ds_mesh_config));
@@ -233,7 +233,7 @@ static ssize_t mesh_id_set(struct device *dev, struct device_attribute *attr,
233 /* SSID len */ 233 /* SSID len */
234 ie->val.mesh_id_len = len; 234 ie->val.mesh_id_len = len;
235 /* IE len */ 235 /* IE len */
236 ie->len = sizeof(struct mrvl_meshie_val) - IW_ESSID_MAX_SIZE + len; 236 ie->len = sizeof(struct mrvl_meshie_val) - IEEE80211_MAX_SSID_LEN + len;
237 237
238 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET, 238 ret = lbs_mesh_config_send(priv, &cmd, CMD_ACT_MESH_CONFIG_SET,
239 CMD_TYPE_MESH_SET_MESH_IE); 239 CMD_TYPE_MESH_SET_MESH_IE);
diff --git a/drivers/net/wireless/libertas/rx.c b/drivers/net/wireless/libertas/rx.c
index 65f02cc6752f..9f18a19cc49d 100644
--- a/drivers/net/wireless/libertas/rx.c
+++ b/drivers/net/wireless/libertas/rx.c
@@ -4,7 +4,7 @@
4#include <linux/etherdevice.h> 4#include <linux/etherdevice.h>
5#include <linux/types.h> 5#include <linux/types.h>
6 6
7#include "hostcmd.h" 7#include "host.h"
8#include "radiotap.h" 8#include "radiotap.h"
9#include "decl.h" 9#include "decl.h"
10#include "dev.h" 10#include "dev.h"
diff --git a/drivers/net/wireless/libertas/scan.c b/drivers/net/wireless/libertas/scan.c
index 6c95af3023cc..c6a6c042b82f 100644
--- a/drivers/net/wireless/libertas/scan.c
+++ b/drivers/net/wireless/libertas/scan.c
@@ -12,18 +12,19 @@
12#include <net/lib80211.h> 12#include <net/lib80211.h>
13 13
14#include "host.h" 14#include "host.h"
15#include "decl.h"
16#include "dev.h" 15#include "dev.h"
17#include "scan.h" 16#include "scan.h"
17#include "assoc.h"
18#include "wext.h"
18#include "cmd.h" 19#include "cmd.h"
19 20
20//! Approximate amount of data needed to pass a scan result back to iwlist 21//! Approximate amount of data needed to pass a scan result back to iwlist
21#define MAX_SCAN_CELL_SIZE (IW_EV_ADDR_LEN \ 22#define MAX_SCAN_CELL_SIZE (IW_EV_ADDR_LEN \
22 + IW_ESSID_MAX_SIZE \ 23 + IEEE80211_MAX_SSID_LEN \
23 + IW_EV_UINT_LEN \ 24 + IW_EV_UINT_LEN \
24 + IW_EV_FREQ_LEN \ 25 + IW_EV_FREQ_LEN \
25 + IW_EV_QUAL_LEN \ 26 + IW_EV_QUAL_LEN \
26 + IW_ESSID_MAX_SIZE \ 27 + IEEE80211_MAX_SSID_LEN \
27 + IW_EV_PARAM_LEN \ 28 + IW_EV_PARAM_LEN \
28 + 40) /* 40 for WPAIE */ 29 + 40) /* 40 for WPAIE */
29 30
@@ -121,6 +122,189 @@ static inline int is_same_network(struct bss_descriptor *src,
121 122
122 123
123 124
125/*********************************************************************/
126/* */
127/* Region channel support */
128/* */
129/*********************************************************************/
130
131#define LBS_TX_PWR_DEFAULT 20 /*100mW */
132#define LBS_TX_PWR_US_DEFAULT 20 /*100mW */
133#define LBS_TX_PWR_JP_DEFAULT 16 /*50mW */
134#define LBS_TX_PWR_FR_DEFAULT 20 /*100mW */
135#define LBS_TX_PWR_EMEA_DEFAULT 20 /*100mW */
136
137/* Format { channel, frequency (MHz), maxtxpower } */
138/* band: 'B/G', region: USA FCC/Canada IC */
139static struct chan_freq_power channel_freq_power_US_BG[] = {
140 {1, 2412, LBS_TX_PWR_US_DEFAULT},
141 {2, 2417, LBS_TX_PWR_US_DEFAULT},
142 {3, 2422, LBS_TX_PWR_US_DEFAULT},
143 {4, 2427, LBS_TX_PWR_US_DEFAULT},
144 {5, 2432, LBS_TX_PWR_US_DEFAULT},
145 {6, 2437, LBS_TX_PWR_US_DEFAULT},
146 {7, 2442, LBS_TX_PWR_US_DEFAULT},
147 {8, 2447, LBS_TX_PWR_US_DEFAULT},
148 {9, 2452, LBS_TX_PWR_US_DEFAULT},
149 {10, 2457, LBS_TX_PWR_US_DEFAULT},
150 {11, 2462, LBS_TX_PWR_US_DEFAULT}
151};
152
153/* band: 'B/G', region: Europe ETSI */
154static struct chan_freq_power channel_freq_power_EU_BG[] = {
155 {1, 2412, LBS_TX_PWR_EMEA_DEFAULT},
156 {2, 2417, LBS_TX_PWR_EMEA_DEFAULT},
157 {3, 2422, LBS_TX_PWR_EMEA_DEFAULT},
158 {4, 2427, LBS_TX_PWR_EMEA_DEFAULT},
159 {5, 2432, LBS_TX_PWR_EMEA_DEFAULT},
160 {6, 2437, LBS_TX_PWR_EMEA_DEFAULT},
161 {7, 2442, LBS_TX_PWR_EMEA_DEFAULT},
162 {8, 2447, LBS_TX_PWR_EMEA_DEFAULT},
163 {9, 2452, LBS_TX_PWR_EMEA_DEFAULT},
164 {10, 2457, LBS_TX_PWR_EMEA_DEFAULT},
165 {11, 2462, LBS_TX_PWR_EMEA_DEFAULT},
166 {12, 2467, LBS_TX_PWR_EMEA_DEFAULT},
167 {13, 2472, LBS_TX_PWR_EMEA_DEFAULT}
168};
169
170/* band: 'B/G', region: Spain */
171static struct chan_freq_power channel_freq_power_SPN_BG[] = {
172 {10, 2457, LBS_TX_PWR_DEFAULT},
173 {11, 2462, LBS_TX_PWR_DEFAULT}
174};
175
176/* band: 'B/G', region: France */
177static struct chan_freq_power channel_freq_power_FR_BG[] = {
178 {10, 2457, LBS_TX_PWR_FR_DEFAULT},
179 {11, 2462, LBS_TX_PWR_FR_DEFAULT},
180 {12, 2467, LBS_TX_PWR_FR_DEFAULT},
181 {13, 2472, LBS_TX_PWR_FR_DEFAULT}
182};
183
184/* band: 'B/G', region: Japan */
185static struct chan_freq_power channel_freq_power_JPN_BG[] = {
186 {1, 2412, LBS_TX_PWR_JP_DEFAULT},
187 {2, 2417, LBS_TX_PWR_JP_DEFAULT},
188 {3, 2422, LBS_TX_PWR_JP_DEFAULT},
189 {4, 2427, LBS_TX_PWR_JP_DEFAULT},
190 {5, 2432, LBS_TX_PWR_JP_DEFAULT},
191 {6, 2437, LBS_TX_PWR_JP_DEFAULT},
192 {7, 2442, LBS_TX_PWR_JP_DEFAULT},
193 {8, 2447, LBS_TX_PWR_JP_DEFAULT},
194 {9, 2452, LBS_TX_PWR_JP_DEFAULT},
195 {10, 2457, LBS_TX_PWR_JP_DEFAULT},
196 {11, 2462, LBS_TX_PWR_JP_DEFAULT},
197 {12, 2467, LBS_TX_PWR_JP_DEFAULT},
198 {13, 2472, LBS_TX_PWR_JP_DEFAULT},
199 {14, 2484, LBS_TX_PWR_JP_DEFAULT}
200};
201
202/**
203 * the structure for channel, frequency and power
204 */
205struct region_cfp_table {
206 u8 region;
207 struct chan_freq_power *cfp_BG;
208 int cfp_no_BG;
209};
210
211/**
212 * the structure for the mapping between region and CFP
213 */
214static struct region_cfp_table region_cfp_table[] = {
215 {0x10, /*US FCC */
216 channel_freq_power_US_BG,
217 ARRAY_SIZE(channel_freq_power_US_BG),
218 }
219 ,
220 {0x20, /*CANADA IC */
221 channel_freq_power_US_BG,
222 ARRAY_SIZE(channel_freq_power_US_BG),
223 }
224 ,
225 {0x30, /*EU*/ channel_freq_power_EU_BG,
226 ARRAY_SIZE(channel_freq_power_EU_BG),
227 }
228 ,
229 {0x31, /*SPAIN*/ channel_freq_power_SPN_BG,
230 ARRAY_SIZE(channel_freq_power_SPN_BG),
231 }
232 ,
233 {0x32, /*FRANCE*/ channel_freq_power_FR_BG,
234 ARRAY_SIZE(channel_freq_power_FR_BG),
235 }
236 ,
237 {0x40, /*JAPAN*/ channel_freq_power_JPN_BG,
238 ARRAY_SIZE(channel_freq_power_JPN_BG),
239 }
240 ,
241/*Add new region here */
242};
243
244/**
245 * @brief This function finds the CFP in
246 * region_cfp_table based on region and band parameter.
247 *
248 * @param region The region code
249 * @param band The band
250 * @param cfp_no A pointer to CFP number
251 * @return A pointer to CFP
252 */
253static struct chan_freq_power *lbs_get_region_cfp_table(u8 region, int *cfp_no)
254{
255 int i, end;
256
257 lbs_deb_enter(LBS_DEB_MAIN);
258
259 end = ARRAY_SIZE(region_cfp_table);
260
261 for (i = 0; i < end ; i++) {
262 lbs_deb_main("region_cfp_table[i].region=%d\n",
263 region_cfp_table[i].region);
264 if (region_cfp_table[i].region == region) {
265 *cfp_no = region_cfp_table[i].cfp_no_BG;
266 lbs_deb_leave(LBS_DEB_MAIN);
267 return region_cfp_table[i].cfp_BG;
268 }
269 }
270
271 lbs_deb_leave_args(LBS_DEB_MAIN, "ret NULL");
272 return NULL;
273}
274
275int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band)
276{
277 int ret = 0;
278 int i = 0;
279
280 struct chan_freq_power *cfp;
281 int cfp_no;
282
283 lbs_deb_enter(LBS_DEB_MAIN);
284
285 memset(priv->region_channel, 0, sizeof(priv->region_channel));
286
287 cfp = lbs_get_region_cfp_table(region, &cfp_no);
288 if (cfp != NULL) {
289 priv->region_channel[i].nrcfp = cfp_no;
290 priv->region_channel[i].CFP = cfp;
291 } else {
292 lbs_deb_main("wrong region code %#x in band B/G\n",
293 region);
294 ret = -1;
295 goto out;
296 }
297 priv->region_channel[i].valid = 1;
298 priv->region_channel[i].region = region;
299 priv->region_channel[i].band = band;
300 i++;
301out:
302 lbs_deb_leave_args(LBS_DEB_MAIN, "ret %d", ret);
303 return ret;
304}
305
306
307
124 308
125/*********************************************************************/ 309/*********************************************************************/
126/* */ 310/* */
@@ -161,31 +345,15 @@ static int lbs_scan_create_channel_list(struct lbs_private *priv,
161 scantype = CMD_SCAN_TYPE_ACTIVE; 345 scantype = CMD_SCAN_TYPE_ACTIVE;
162 346
163 for (rgnidx = 0; rgnidx < ARRAY_SIZE(priv->region_channel); rgnidx++) { 347 for (rgnidx = 0; rgnidx < ARRAY_SIZE(priv->region_channel); rgnidx++) {
164 if (priv->enable11d && (priv->connect_status != LBS_CONNECTED) 348 if (!priv->region_channel[rgnidx].valid)
165 && (priv->mesh_connect_status != LBS_CONNECTED)) { 349 continue;
166 /* Scan all the supported chan for the first scan */ 350 scanregion = &priv->region_channel[rgnidx];
167 if (!priv->universal_channel[rgnidx].valid)
168 continue;
169 scanregion = &priv->universal_channel[rgnidx];
170
171 /* clear the parsed_region_chan for the first scan */
172 memset(&priv->parsed_region_chan, 0x00,
173 sizeof(priv->parsed_region_chan));
174 } else {
175 if (!priv->region_channel[rgnidx].valid)
176 continue;
177 scanregion = &priv->region_channel[rgnidx];
178 }
179 351
180 for (nextchan = 0; nextchan < scanregion->nrcfp; nextchan++, chanidx++) { 352 for (nextchan = 0; nextchan < scanregion->nrcfp; nextchan++, chanidx++) {
181 struct chanscanparamset *chan = &scanchanlist[chanidx]; 353 struct chanscanparamset *chan = &scanchanlist[chanidx];
182 354
183 cfp = scanregion->CFP + nextchan; 355 cfp = scanregion->CFP + nextchan;
184 356
185 if (priv->enable11d)
186 scantype = lbs_get_scan_type_11d(cfp->channel,
187 &priv->parsed_region_chan);
188
189 if (scanregion->band == BAND_B || scanregion->band == BAND_G) 357 if (scanregion->band == BAND_B || scanregion->band == BAND_G)
190 chan->radiotype = CMD_SCAN_RADIO_TYPE_BG; 358 chan->radiotype = CMD_SCAN_RADIO_TYPE_BG;
191 359
@@ -519,7 +687,6 @@ static int lbs_process_bss(struct bss_descriptor *bss,
519 struct ieee_ie_cf_param_set *cf; 687 struct ieee_ie_cf_param_set *cf;
520 struct ieee_ie_ibss_param_set *ibss; 688 struct ieee_ie_ibss_param_set *ibss;
521 DECLARE_SSID_BUF(ssid); 689 DECLARE_SSID_BUF(ssid);
522 struct ieee_ie_country_info_set *pcountryinfo;
523 uint8_t *pos, *end, *p; 690 uint8_t *pos, *end, *p;
524 uint8_t n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0; 691 uint8_t n_ex_rates = 0, got_basic_rates = 0, n_basic_rates = 0;
525 uint16_t beaconsize = 0; 692 uint16_t beaconsize = 0;
@@ -642,26 +809,6 @@ static int lbs_process_bss(struct bss_descriptor *bss,
642 lbs_deb_scan("got IBSS IE\n"); 809 lbs_deb_scan("got IBSS IE\n");
643 break; 810 break;
644 811
645 case WLAN_EID_COUNTRY:
646 pcountryinfo = (struct ieee_ie_country_info_set *) pos;
647 lbs_deb_scan("got COUNTRY IE\n");
648 if (pcountryinfo->header.len < sizeof(pcountryinfo->countrycode)
649 || pcountryinfo->header.len > 254) {
650 lbs_deb_scan("%s: 11D- Err CountryInfo len %d, min %zd, max 254\n",
651 __func__,
652 pcountryinfo->header.len,
653 sizeof(pcountryinfo->countrycode));
654 ret = -1;
655 goto done;
656 }
657
658 memcpy(&bss->countryinfo, pcountryinfo,
659 pcountryinfo->header.len + 2);
660 lbs_deb_hex(LBS_DEB_SCAN, "process_bss: 11d countryinfo",
661 (uint8_t *) pcountryinfo,
662 (int) (pcountryinfo->header.len + 2));
663 break;
664
665 case WLAN_EID_EXT_SUPP_RATES: 812 case WLAN_EID_EXT_SUPP_RATES:
666 /* only process extended supported rate if data rate is 813 /* only process extended supported rate if data rate is
667 * already found. Data rate IE should come before 814 * already found. Data rate IE should come before
@@ -812,7 +959,7 @@ static inline char *lbs_translate_scan(struct lbs_private *priv,
812 /* SSID */ 959 /* SSID */
813 iwe.cmd = SIOCGIWESSID; 960 iwe.cmd = SIOCGIWESSID;
814 iwe.u.data.flags = 1; 961 iwe.u.data.flags = 1;
815 iwe.u.data.length = min((uint32_t) bss->ssid_len, (uint32_t) IW_ESSID_MAX_SIZE); 962 iwe.u.data.length = min((uint32_t) bss->ssid_len, (uint32_t) IEEE80211_MAX_SSID_LEN);
816 start = iwe_stream_add_point(info, start, stop, &iwe, bss->ssid); 963 start = iwe_stream_add_point(info, start, stop, &iwe, bss->ssid);
817 964
818 /* Mode */ 965 /* Mode */
@@ -1022,9 +1169,12 @@ int lbs_get_scan(struct net_device *dev, struct iw_request_info *info,
1022 return -EAGAIN; 1169 return -EAGAIN;
1023 1170
1024 /* Update RSSI if current BSS is a locally created ad-hoc BSS */ 1171 /* Update RSSI if current BSS is a locally created ad-hoc BSS */
1025 if ((priv->mode == IW_MODE_ADHOC) && priv->adhoccreate) 1172 if ((priv->mode == IW_MODE_ADHOC) && priv->adhoccreate) {
1026 lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0, 1173 err = lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
1027 CMD_OPTION_WAITFORRSP, 0, NULL); 1174 CMD_OPTION_WAITFORRSP, 0, NULL);
1175 if (err)
1176 goto out;
1177 }
1028 1178
1029 mutex_lock(&priv->lock); 1179 mutex_lock(&priv->lock);
1030 list_for_each_entry_safe (iter_bss, safe, &priv->network_list, list) { 1180 list_for_each_entry_safe (iter_bss, safe, &priv->network_list, list) {
@@ -1058,7 +1208,7 @@ int lbs_get_scan(struct net_device *dev, struct iw_request_info *info,
1058 1208
1059 dwrq->length = (ev - extra); 1209 dwrq->length = (ev - extra);
1060 dwrq->flags = 0; 1210 dwrq->flags = 0;
1061 1211out:
1062 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", err); 1212 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", err);
1063 return err; 1213 return err;
1064} 1214}
@@ -1141,11 +1291,11 @@ static int lbs_ret_80211_scan(struct lbs_private *priv, unsigned long dummy,
1141 /* The size of the TLV buffer is equal to the entire command response 1291 /* The size of the TLV buffer is equal to the entire command response
1142 * size (scanrespsize) minus the fixed fields (sizeof()'s), the 1292 * size (scanrespsize) minus the fixed fields (sizeof()'s), the
1143 * BSS Descriptions (bssdescriptsize as bytesLef) and the command 1293 * BSS Descriptions (bssdescriptsize as bytesLef) and the command
1144 * response header (S_DS_GEN) 1294 * response header (sizeof(struct cmd_header))
1145 */ 1295 */
1146 tlvbufsize = scanrespsize - (bytesleft + sizeof(scanresp->bssdescriptsize) 1296 tlvbufsize = scanrespsize - (bytesleft + sizeof(scanresp->bssdescriptsize)
1147 + sizeof(scanresp->nr_sets) 1297 + sizeof(scanresp->nr_sets)
1148 + S_DS_GEN); 1298 + sizeof(struct cmd_header));
1149 1299
1150 /* 1300 /*
1151 * Process each scan response returned (scanresp->nr_sets). Save 1301 * Process each scan response returned (scanresp->nr_sets). Save
diff --git a/drivers/net/wireless/libertas/scan.h b/drivers/net/wireless/libertas/scan.h
index fab7d5d097fc..8fb1706d7526 100644
--- a/drivers/net/wireless/libertas/scan.h
+++ b/drivers/net/wireless/libertas/scan.h
@@ -9,8 +9,36 @@
9 9
10#include <net/iw_handler.h> 10#include <net/iw_handler.h>
11 11
12struct lbs_private;
13
12#define MAX_NETWORK_COUNT 128 14#define MAX_NETWORK_COUNT 128
13 15
16/** Chan-freq-TxPower mapping table*/
17struct chan_freq_power {
18 /** channel Number */
19 u16 channel;
20 /** frequency of this channel */
21 u32 freq;
22 /** Max allowed Tx power level */
23 u16 maxtxpower;
24 /** TRUE:channel unsupported; FLASE:supported*/
25 u8 unsupported;
26};
27
28/** region-band mapping table*/
29struct region_channel {
30 /** TRUE if this entry is valid */
31 u8 valid;
32 /** region code for US, Japan ... */
33 u8 region;
34 /** band B/G/A, used for BAND_CONFIG cmd */
35 u8 band;
36 /** Actual No. of elements in the array below */
37 u8 nrcfp;
38 /** chan-freq-txpower mapping table*/
39 struct chan_freq_power *CFP;
40};
41
14/** 42/**
15 * @brief Maximum number of channels that can be sent in a setuserscan ioctl 43 * @brief Maximum number of channels that can be sent in a setuserscan ioctl
16 */ 44 */
@@ -18,6 +46,8 @@
18 46
19int lbs_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len); 47int lbs_ssid_cmp(u8 *ssid1, u8 ssid1_len, u8 *ssid2, u8 ssid2_len);
20 48
49int lbs_set_regiontable(struct lbs_private *priv, u8 region, u8 band);
50
21int lbs_send_specific_ssid_scan(struct lbs_private *priv, u8 *ssid, 51int lbs_send_specific_ssid_scan(struct lbs_private *priv, u8 *ssid,
22 u8 ssid_len); 52 u8 ssid_len);
23 53
diff --git a/drivers/net/wireless/libertas/tx.c b/drivers/net/wireless/libertas/tx.c
index 4c018f7a0a8d..5d7c011fe296 100644
--- a/drivers/net/wireless/libertas/tx.c
+++ b/drivers/net/wireless/libertas/tx.c
@@ -4,7 +4,7 @@
4#include <linux/netdevice.h> 4#include <linux/netdevice.h>
5#include <linux/etherdevice.h> 5#include <linux/etherdevice.h>
6 6
7#include "hostcmd.h" 7#include "host.h"
8#include "radiotap.h" 8#include "radiotap.h"
9#include "decl.h" 9#include "decl.h"
10#include "defs.h" 10#include "defs.h"
diff --git a/drivers/net/wireless/libertas/types.h b/drivers/net/wireless/libertas/types.h
index 99905df65b25..3e72c86ceca8 100644
--- a/drivers/net/wireless/libertas/types.h
+++ b/drivers/net/wireless/libertas/types.h
@@ -5,8 +5,8 @@
5#define _LBS_TYPES_H_ 5#define _LBS_TYPES_H_
6 6
7#include <linux/if_ether.h> 7#include <linux/if_ether.h>
8#include <linux/ieee80211.h>
8#include <asm/byteorder.h> 9#include <asm/byteorder.h>
9#include <linux/wireless.h>
10 10
11struct ieee_ie_header { 11struct ieee_ie_header {
12 u8 id; 12 u8 id;
@@ -247,7 +247,7 @@ struct mrvl_meshie_val {
247 uint8_t active_metric_id; 247 uint8_t active_metric_id;
248 uint8_t mesh_capability; 248 uint8_t mesh_capability;
249 uint8_t mesh_id_len; 249 uint8_t mesh_id_len;
250 uint8_t mesh_id[IW_ESSID_MAX_SIZE]; 250 uint8_t mesh_id[IEEE80211_MAX_SSID_LEN];
251} __attribute__ ((packed)); 251} __attribute__ ((packed));
252 252
253struct mrvl_meshie { 253struct mrvl_meshie {
diff --git a/drivers/net/wireless/libertas/wext.c b/drivers/net/wireless/libertas/wext.c
index be837a0d2517..a8eb9e1fcf36 100644
--- a/drivers/net/wireless/libertas/wext.c
+++ b/drivers/net/wireless/libertas/wext.c
@@ -45,6 +45,63 @@ static inline void lbs_cancel_association_work(struct lbs_private *priv)
45 priv->pending_assoc_req = NULL; 45 priv->pending_assoc_req = NULL;
46} 46}
47 47
48void lbs_send_disconnect_notification(struct lbs_private *priv)
49{
50 union iwreq_data wrqu;
51
52 memset(wrqu.ap_addr.sa_data, 0x00, ETH_ALEN);
53 wrqu.ap_addr.sa_family = ARPHRD_ETHER;
54 wireless_send_event(priv->dev, SIOCGIWAP, &wrqu, NULL);
55}
56
57static void lbs_send_iwevcustom_event(struct lbs_private *priv, s8 *str)
58{
59 union iwreq_data iwrq;
60 u8 buf[50];
61
62 lbs_deb_enter(LBS_DEB_WEXT);
63
64 memset(&iwrq, 0, sizeof(union iwreq_data));
65 memset(buf, 0, sizeof(buf));
66
67 snprintf(buf, sizeof(buf) - 1, "%s", str);
68
69 iwrq.data.length = strlen(buf) + 1 + IW_EV_LCP_LEN;
70
71 /* Send Event to upper layer */
72 lbs_deb_wext("event indication string %s\n", (char *)buf);
73 lbs_deb_wext("event indication length %d\n", iwrq.data.length);
74 lbs_deb_wext("sending wireless event IWEVCUSTOM for %s\n", str);
75
76 wireless_send_event(priv->dev, IWEVCUSTOM, &iwrq, buf);
77
78 lbs_deb_leave(LBS_DEB_WEXT);
79}
80
81/**
82 * @brief This function handles MIC failure event.
83 *
84 * @param priv A pointer to struct lbs_private structure
85 * @para event the event id
86 * @return n/a
87 */
88void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event)
89{
90 char buf[50];
91
92 lbs_deb_enter(LBS_DEB_CMD);
93 memset(buf, 0, sizeof(buf));
94
95 sprintf(buf, "%s", "MLME-MICHAELMICFAILURE.indication ");
96
97 if (event == MACREG_INT_CODE_MIC_ERR_UNICAST)
98 strcat(buf, "unicast ");
99 else
100 strcat(buf, "multicast ");
101
102 lbs_send_iwevcustom_event(priv, buf);
103 lbs_deb_leave(LBS_DEB_CMD);
104}
48 105
49/** 106/**
50 * @brief Find the channel frequency power info with specific channel 107 * @brief Find the channel frequency power info with specific channel
@@ -66,8 +123,6 @@ struct chan_freq_power *lbs_find_cfp_by_band_and_channel(
66 for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) { 123 for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) {
67 rc = &priv->region_channel[j]; 124 rc = &priv->region_channel[j];
68 125
69 if (priv->enable11d)
70 rc = &priv->universal_channel[j];
71 if (!rc->valid || !rc->CFP) 126 if (!rc->valid || !rc->CFP)
72 continue; 127 continue;
73 if (rc->band != band) 128 if (rc->band != band)
@@ -107,8 +162,6 @@ static struct chan_freq_power *find_cfp_by_band_and_freq(
107 for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) { 162 for (j = 0; !cfp && (j < ARRAY_SIZE(priv->region_channel)); j++) {
108 rc = &priv->region_channel[j]; 163 rc = &priv->region_channel[j];
109 164
110 if (priv->enable11d)
111 rc = &priv->universal_channel[j];
112 if (!rc->valid || !rc->CFP) 165 if (!rc->valid || !rc->CFP)
113 continue; 166 continue;
114 if (rc->band != band) 167 if (rc->band != band)
@@ -169,12 +222,12 @@ static int lbs_get_freq(struct net_device *dev, struct iw_request_info *info,
169 lbs_deb_enter(LBS_DEB_WEXT); 222 lbs_deb_enter(LBS_DEB_WEXT);
170 223
171 cfp = lbs_find_cfp_by_band_and_channel(priv, 0, 224 cfp = lbs_find_cfp_by_band_and_channel(priv, 0,
172 priv->curbssparams.channel); 225 priv->channel);
173 226
174 if (!cfp) { 227 if (!cfp) {
175 if (priv->curbssparams.channel) 228 if (priv->channel)
176 lbs_deb_wext("invalid channel %d\n", 229 lbs_deb_wext("invalid channel %d\n",
177 priv->curbssparams.channel); 230 priv->channel);
178 return -EINVAL; 231 return -EINVAL;
179 } 232 }
180 233
@@ -547,8 +600,6 @@ static int lbs_get_range(struct net_device *dev, struct iw_request_info *info,
547 struct chan_freq_power *cfp; 600 struct chan_freq_power *cfp;
548 u8 rates[MAX_RATES + 1]; 601 u8 rates[MAX_RATES + 1];
549 602
550 u8 flag = 0;
551
552 lbs_deb_enter(LBS_DEB_WEXT); 603 lbs_deb_enter(LBS_DEB_WEXT);
553 604
554 dwrq->length = sizeof(struct iw_range); 605 dwrq->length = sizeof(struct iw_range);
@@ -570,52 +621,21 @@ static int lbs_get_range(struct net_device *dev, struct iw_request_info *info,
570 621
571 range->scan_capa = IW_SCAN_CAPA_ESSID; 622 range->scan_capa = IW_SCAN_CAPA_ESSID;
572 623
573 if (priv->enable11d && 624 for (j = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
574 (priv->connect_status == LBS_CONNECTED || 625 && (j < ARRAY_SIZE(priv->region_channel)); j++) {
575 priv->mesh_connect_status == LBS_CONNECTED)) { 626 cfp = priv->region_channel[j].CFP;
576 u8 chan_no;
577 u8 band;
578
579 struct parsed_region_chan_11d *parsed_region_chan =
580 &priv->parsed_region_chan;
581
582 if (parsed_region_chan == NULL) {
583 lbs_deb_wext("11d: parsed_region_chan is NULL\n");
584 goto out;
585 }
586 band = parsed_region_chan->band;
587 lbs_deb_wext("band %d, nr_char %d\n", band,
588 parsed_region_chan->nr_chan);
589
590 for (i = 0; (range->num_frequency < IW_MAX_FREQUENCIES) 627 for (i = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
591 && (i < parsed_region_chan->nr_chan); i++) { 628 && priv->region_channel[j].valid
592 chan_no = parsed_region_chan->chanpwr[i].chan; 629 && cfp
593 lbs_deb_wext("chan_no %d\n", chan_no); 630 && (i < priv->region_channel[j].nrcfp); i++) {
594 range->freq[range->num_frequency].i = (long)chan_no; 631 range->freq[range->num_frequency].i =
632 (long)cfp->channel;
595 range->freq[range->num_frequency].m = 633 range->freq[range->num_frequency].m =
596 (long)lbs_chan_2_freq(chan_no) * 100000; 634 (long)cfp->freq * 100000;
597 range->freq[range->num_frequency].e = 1; 635 range->freq[range->num_frequency].e = 1;
636 cfp++;
598 range->num_frequency++; 637 range->num_frequency++;
599 } 638 }
600 flag = 1;
601 }
602 if (!flag) {
603 for (j = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
604 && (j < ARRAY_SIZE(priv->region_channel)); j++) {
605 cfp = priv->region_channel[j].CFP;
606 for (i = 0; (range->num_frequency < IW_MAX_FREQUENCIES)
607 && priv->region_channel[j].valid
608 && cfp
609 && (i < priv->region_channel[j].nrcfp); i++) {
610 range->freq[range->num_frequency].i =
611 (long)cfp->channel;
612 range->freq[range->num_frequency].m =
613 (long)cfp->freq * 100000;
614 range->freq[range->num_frequency].e = 1;
615 cfp++;
616 range->num_frequency++;
617 }
618 }
619 } 639 }
620 640
621 lbs_deb_wext("IW_MAX_FREQUENCIES %d, num_frequency %d\n", 641 lbs_deb_wext("IW_MAX_FREQUENCIES %d, num_frequency %d\n",
@@ -700,7 +720,6 @@ static int lbs_get_range(struct net_device *dev, struct iw_request_info *info,
700 | IW_ENC_CAPA_CIPHER_CCMP; 720 | IW_ENC_CAPA_CIPHER_CCMP;
701 } 721 }
702 722
703out:
704 lbs_deb_leave(LBS_DEB_WEXT); 723 lbs_deb_leave(LBS_DEB_WEXT);
705 return 0; 724 return 0;
706} 725}
@@ -709,6 +728,7 @@ static int lbs_set_power(struct net_device *dev, struct iw_request_info *info,
709 struct iw_param *vwrq, char *extra) 728 struct iw_param *vwrq, char *extra)
710{ 729{
711 struct lbs_private *priv = dev->ml_priv; 730 struct lbs_private *priv = dev->ml_priv;
731 int ret = 0;
712 732
713 lbs_deb_enter(LBS_DEB_WEXT); 733 lbs_deb_enter(LBS_DEB_WEXT);
714 734
@@ -737,8 +757,54 @@ static int lbs_set_power(struct net_device *dev, struct iw_request_info *info,
737 "setting power timeout is not supported\n"); 757 "setting power timeout is not supported\n");
738 return -EINVAL; 758 return -EINVAL;
739 } else if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_PERIOD) { 759 } else if ((vwrq->flags & IW_POWER_TYPE) == IW_POWER_PERIOD) {
740 lbs_deb_wext("setting power period not supported\n"); 760 vwrq->value = vwrq->value / 1000;
741 return -EINVAL; 761 if (!priv->enter_deep_sleep) {
762 lbs_pr_err("deep sleep feature is not implemented "
763 "for this interface driver\n");
764 return -EINVAL;
765 }
766
767 if (priv->connect_status == LBS_CONNECTED) {
768 if ((priv->is_auto_deep_sleep_enabled) &&
769 (vwrq->value == -1000)) {
770 lbs_exit_auto_deep_sleep(priv);
771 return 0;
772 } else {
773 lbs_pr_err("can't use deep sleep cmd in "
774 "connected state\n");
775 return -EINVAL;
776 }
777 }
778
779 if ((vwrq->value < 0) && (vwrq->value != -1000)) {
780 lbs_pr_err("unknown option\n");
781 return -EINVAL;
782 }
783
784 if (vwrq->value > 0) {
785 if (!priv->is_auto_deep_sleep_enabled) {
786 priv->is_activity_detected = 0;
787 priv->auto_deep_sleep_timeout = vwrq->value;
788 lbs_enter_auto_deep_sleep(priv);
789 } else {
790 priv->auto_deep_sleep_timeout = vwrq->value;
791 lbs_deb_debugfs("auto deep sleep: "
792 "already enabled\n");
793 }
794 return 0;
795 } else {
796 if (priv->is_auto_deep_sleep_enabled) {
797 lbs_exit_auto_deep_sleep(priv);
798 /* Try to exit deep sleep if auto */
799 /*deep sleep disabled */
800 ret = lbs_set_deep_sleep(priv, 0);
801 }
802 if (vwrq->value == 0)
803 ret = lbs_set_deep_sleep(priv, 1);
804 else if (vwrq->value == -1000)
805 ret = lbs_set_deep_sleep(priv, 0);
806 return ret;
807 }
742 } 808 }
743 809
744 if (priv->psmode != LBS802_11POWERMODECAM) { 810 if (priv->psmode != LBS802_11POWERMODECAM) {
@@ -752,6 +818,7 @@ static int lbs_set_power(struct net_device *dev, struct iw_request_info *info,
752 } 818 }
753 819
754 lbs_deb_leave(LBS_DEB_WEXT); 820 lbs_deb_leave(LBS_DEB_WEXT);
821
755 return 0; 822 return 0;
756} 823}
757 824
@@ -785,7 +852,7 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
785 u32 rssi_qual; 852 u32 rssi_qual;
786 u32 tx_qual; 853 u32 tx_qual;
787 u32 quality = 0; 854 u32 quality = 0;
788 int stats_valid = 0; 855 int ret, stats_valid = 0;
789 u8 rssi; 856 u8 rssi;
790 u32 tx_retries; 857 u32 tx_retries;
791 struct cmd_ds_802_11_get_log log; 858 struct cmd_ds_802_11_get_log log;
@@ -834,7 +901,9 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
834 901
835 memset(&log, 0, sizeof(log)); 902 memset(&log, 0, sizeof(log));
836 log.hdr.size = cpu_to_le16(sizeof(log)); 903 log.hdr.size = cpu_to_le16(sizeof(log));
837 lbs_cmd_with_response(priv, CMD_802_11_GET_LOG, &log); 904 ret = lbs_cmd_with_response(priv, CMD_802_11_GET_LOG, &log);
905 if (ret)
906 goto out;
838 907
839 tx_retries = le32_to_cpu(log.retry); 908 tx_retries = le32_to_cpu(log.retry);
840 909
@@ -862,8 +931,10 @@ static struct iw_statistics *lbs_get_wireless_stats(struct net_device *dev)
862 stats_valid = 1; 931 stats_valid = 1;
863 932
864 /* update stats asynchronously for future calls */ 933 /* update stats asynchronously for future calls */
865 lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0, 934 ret = lbs_prepare_and_send_command(priv, CMD_802_11_RSSI, 0,
866 0, 0, NULL); 935 0, 0, NULL);
936 if (ret)
937 lbs_pr_err("RSSI command failed\n");
867out: 938out:
868 if (!stats_valid) { 939 if (!stats_valid) {
869 priv->wstats.miss.beacon = 0; 940 priv->wstats.miss.beacon = 0;
@@ -973,7 +1044,7 @@ static int lbs_mesh_set_freq(struct net_device *dev,
973 goto out; 1044 goto out;
974 } 1045 }
975 1046
976 if (fwrq->m != priv->curbssparams.channel) { 1047 if (fwrq->m != priv->channel) {
977 lbs_deb_wext("mesh channel change forces eth disconnect\n"); 1048 lbs_deb_wext("mesh channel change forces eth disconnect\n");
978 if (priv->mode == IW_MODE_INFRA) 1049 if (priv->mode == IW_MODE_INFRA)
979 lbs_cmd_80211_deauthenticate(priv, 1050 lbs_cmd_80211_deauthenticate(priv,
@@ -1000,6 +1071,7 @@ static int lbs_set_rate(struct net_device *dev, struct iw_request_info *info,
1000 u8 rates[MAX_RATES + 1]; 1071 u8 rates[MAX_RATES + 1];
1001 1072
1002 lbs_deb_enter(LBS_DEB_WEXT); 1073 lbs_deb_enter(LBS_DEB_WEXT);
1074
1003 lbs_deb_wext("vwrq->value %d\n", vwrq->value); 1075 lbs_deb_wext("vwrq->value %d\n", vwrq->value);
1004 lbs_deb_wext("vwrq->fixed %d\n", vwrq->fixed); 1076 lbs_deb_wext("vwrq->fixed %d\n", vwrq->fixed);
1005 1077
@@ -1975,7 +2047,7 @@ static int lbs_set_essid(struct net_device *dev, struct iw_request_info *info,
1975{ 2047{
1976 struct lbs_private *priv = dev->ml_priv; 2048 struct lbs_private *priv = dev->ml_priv;
1977 int ret = 0; 2049 int ret = 0;
1978 u8 ssid[IW_ESSID_MAX_SIZE]; 2050 u8 ssid[IEEE80211_MAX_SSID_LEN];
1979 u8 ssid_len = 0; 2051 u8 ssid_len = 0;
1980 struct assoc_request * assoc_req; 2052 struct assoc_request * assoc_req;
1981 int in_ssid_len = dwrq->length; 2053 int in_ssid_len = dwrq->length;
@@ -1989,7 +2061,7 @@ static int lbs_set_essid(struct net_device *dev, struct iw_request_info *info,
1989 } 2061 }
1990 2062
1991 /* Check the size of the string */ 2063 /* Check the size of the string */
1992 if (in_ssid_len > IW_ESSID_MAX_SIZE) { 2064 if (in_ssid_len > IEEE80211_MAX_SSID_LEN) {
1993 ret = -E2BIG; 2065 ret = -E2BIG;
1994 goto out; 2066 goto out;
1995 } 2067 }
@@ -2020,7 +2092,7 @@ out:
2020 ret = -ENOMEM; 2092 ret = -ENOMEM;
2021 } else { 2093 } else {
2022 /* Copy the SSID to the association request */ 2094 /* Copy the SSID to the association request */
2023 memcpy(&assoc_req->ssid, &ssid, IW_ESSID_MAX_SIZE); 2095 memcpy(&assoc_req->ssid, &ssid, IEEE80211_MAX_SSID_LEN);
2024 assoc_req->ssid_len = ssid_len; 2096 assoc_req->ssid_len = ssid_len;
2025 set_bit(ASSOC_FLAG_SSID, &assoc_req->flags); 2097 set_bit(ASSOC_FLAG_SSID, &assoc_req->flags);
2026 lbs_postpone_association_work(priv); 2098 lbs_postpone_association_work(priv);
@@ -2071,7 +2143,7 @@ static int lbs_mesh_set_essid(struct net_device *dev,
2071 } 2143 }
2072 2144
2073 /* Check the size of the string */ 2145 /* Check the size of the string */
2074 if (dwrq->length > IW_ESSID_MAX_SIZE) { 2146 if (dwrq->length > IEEE80211_MAX_SSID_LEN) {
2075 ret = -E2BIG; 2147 ret = -E2BIG;
2076 goto out; 2148 goto out;
2077 } 2149 }
@@ -2086,7 +2158,7 @@ static int lbs_mesh_set_essid(struct net_device *dev,
2086 } 2158 }
2087 2159
2088 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START, 2160 lbs_mesh_config(priv, CMD_ACT_MESH_CONFIG_START,
2089 priv->curbssparams.channel); 2161 priv->channel);
2090 out: 2162 out:
2091 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret); 2163 lbs_deb_leave_args(LBS_DEB_WEXT, "ret %d", ret);
2092 return ret; 2164 return ret;
diff --git a/drivers/net/wireless/libertas/wext.h b/drivers/net/wireless/libertas/wext.h
index 4c08db497606..7863baf7d234 100644
--- a/drivers/net/wireless/libertas/wext.h
+++ b/drivers/net/wireless/libertas/wext.h
@@ -4,7 +4,15 @@
4#ifndef _LBS_WEXT_H_ 4#ifndef _LBS_WEXT_H_
5#define _LBS_WEXT_H_ 5#define _LBS_WEXT_H_
6 6
7void lbs_send_disconnect_notification(struct lbs_private *priv);
8void lbs_send_mic_failureevent(struct lbs_private *priv, u32 event);
9
7extern struct iw_handler_def lbs_handler_def; 10extern struct iw_handler_def lbs_handler_def;
8extern struct iw_handler_def mesh_handler_def; 11extern struct iw_handler_def mesh_handler_def;
9 12
13struct chan_freq_power *lbs_find_cfp_by_band_and_channel(
14 struct lbs_private *priv,
15 u8 band,
16 u16 channel);
17
10#endif 18#endif
diff --git a/drivers/net/wireless/mac80211_hwsim.c b/drivers/net/wireless/mac80211_hwsim.c
index 38cfd79e0590..fc4ec48eda12 100644
--- a/drivers/net/wireless/mac80211_hwsim.c
+++ b/drivers/net/wireless/mac80211_hwsim.c
@@ -284,7 +284,7 @@ struct mac80211_hwsim_data {
284 struct ieee80211_channel *channel; 284 struct ieee80211_channel *channel;
285 unsigned long beacon_int; /* in jiffies unit */ 285 unsigned long beacon_int; /* in jiffies unit */
286 unsigned int rx_filter; 286 unsigned int rx_filter;
287 int started; 287 bool started, idle;
288 struct timer_list beacon_timer; 288 struct timer_list beacon_timer;
289 enum ps_mode { 289 enum ps_mode {
290 PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL 290 PS_DISABLED, PS_ENABLED, PS_AUTO_POLL, PS_MANUAL_POLL
@@ -365,6 +365,49 @@ static void mac80211_hwsim_monitor_rx(struct ieee80211_hw *hw,
365} 365}
366 366
367 367
368static void mac80211_hwsim_monitor_ack(struct ieee80211_hw *hw, const u8 *addr)
369{
370 struct mac80211_hwsim_data *data = hw->priv;
371 struct sk_buff *skb;
372 struct hwsim_radiotap_hdr *hdr;
373 u16 flags;
374 struct ieee80211_hdr *hdr11;
375
376 if (!netif_running(hwsim_mon))
377 return;
378
379 skb = dev_alloc_skb(100);
380 if (skb == NULL)
381 return;
382
383 hdr = (struct hwsim_radiotap_hdr *) skb_put(skb, sizeof(*hdr));
384 hdr->hdr.it_version = PKTHDR_RADIOTAP_VERSION;
385 hdr->hdr.it_pad = 0;
386 hdr->hdr.it_len = cpu_to_le16(sizeof(*hdr));
387 hdr->hdr.it_present = cpu_to_le32((1 << IEEE80211_RADIOTAP_FLAGS) |
388 (1 << IEEE80211_RADIOTAP_CHANNEL));
389 hdr->rt_flags = 0;
390 hdr->rt_rate = 0;
391 hdr->rt_channel = cpu_to_le16(data->channel->center_freq);
392 flags = IEEE80211_CHAN_2GHZ;
393 hdr->rt_chbitmask = cpu_to_le16(flags);
394
395 hdr11 = (struct ieee80211_hdr *) skb_put(skb, 10);
396 hdr11->frame_control = cpu_to_le16(IEEE80211_FTYPE_CTL |
397 IEEE80211_STYPE_ACK);
398 hdr11->duration_id = cpu_to_le16(0);
399 memcpy(hdr11->addr1, addr, ETH_ALEN);
400
401 skb->dev = hwsim_mon;
402 skb_set_mac_header(skb, 0);
403 skb->ip_summed = CHECKSUM_UNNECESSARY;
404 skb->pkt_type = PACKET_OTHERHOST;
405 skb->protocol = htons(ETH_P_802_2);
406 memset(skb->cb, 0, sizeof(skb->cb));
407 netif_rx(skb);
408}
409
410
368static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data, 411static bool hwsim_ps_rx_ok(struct mac80211_hwsim_data *data,
369 struct sk_buff *skb) 412 struct sk_buff *skb)
370{ 413{
@@ -402,6 +445,12 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
402 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 445 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
403 struct ieee80211_rx_status rx_status; 446 struct ieee80211_rx_status rx_status;
404 447
448 if (data->idle) {
449 printk(KERN_DEBUG "%s: Trying to TX when idle - reject\n",
450 wiphy_name(hw->wiphy));
451 return false;
452 }
453
405 memset(&rx_status, 0, sizeof(rx_status)); 454 memset(&rx_status, 0, sizeof(rx_status));
406 /* TODO: set mactime */ 455 /* TODO: set mactime */
407 rx_status.freq = data->channel->center_freq; 456 rx_status.freq = data->channel->center_freq;
@@ -428,7 +477,8 @@ static bool mac80211_hwsim_tx_frame(struct ieee80211_hw *hw,
428 if (data == data2) 477 if (data == data2)
429 continue; 478 continue;
430 479
431 if (!data2->started || !hwsim_ps_rx_ok(data2, skb) || 480 if (data2->idle || !data2->started ||
481 !hwsim_ps_rx_ok(data2, skb) ||
432 !data->channel || !data2->channel || 482 !data->channel || !data2->channel ||
433 data->channel->center_freq != data2->channel->center_freq || 483 data->channel->center_freq != data2->channel->center_freq ||
434 !(data->group & data2->group)) 484 !(data->group & data2->group))
@@ -464,6 +514,10 @@ static int mac80211_hwsim_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
464 } 514 }
465 515
466 ack = mac80211_hwsim_tx_frame(hw, skb); 516 ack = mac80211_hwsim_tx_frame(hw, skb);
517 if (ack && skb->len >= 16) {
518 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *) skb->data;
519 mac80211_hwsim_monitor_ack(hw, hdr->addr2);
520 }
467 521
468 txi = IEEE80211_SKB_CB(skb); 522 txi = IEEE80211_SKB_CB(skb);
469 523
@@ -571,6 +625,8 @@ static int mac80211_hwsim_config(struct ieee80211_hw *hw, u32 changed)
571 !!(conf->flags & IEEE80211_CONF_IDLE), 625 !!(conf->flags & IEEE80211_CONF_IDLE),
572 !!(conf->flags & IEEE80211_CONF_PS)); 626 !!(conf->flags & IEEE80211_CONF_PS));
573 627
628 data->idle = !!(conf->flags & IEEE80211_CONF_IDLE);
629
574 data->channel = conf->channel; 630 data->channel = conf->channel;
575 if (!data->started || !data->beacon_int) 631 if (!data->started || !data->beacon_int)
576 del_timer(&data->beacon_timer); 632 del_timer(&data->beacon_timer);
@@ -1045,19 +1101,20 @@ static int __init init_mac80211_hwsim(void)
1045 sband->channels = data->channels_2ghz; 1101 sband->channels = data->channels_2ghz;
1046 sband->n_channels = 1102 sband->n_channels =
1047 ARRAY_SIZE(hwsim_channels_2ghz); 1103 ARRAY_SIZE(hwsim_channels_2ghz);
1104 sband->bitrates = data->rates;
1105 sband->n_bitrates = ARRAY_SIZE(hwsim_rates);
1048 break; 1106 break;
1049 case IEEE80211_BAND_5GHZ: 1107 case IEEE80211_BAND_5GHZ:
1050 sband->channels = data->channels_5ghz; 1108 sband->channels = data->channels_5ghz;
1051 sband->n_channels = 1109 sband->n_channels =
1052 ARRAY_SIZE(hwsim_channels_5ghz); 1110 ARRAY_SIZE(hwsim_channels_5ghz);
1111 sband->bitrates = data->rates + 4;
1112 sband->n_bitrates = ARRAY_SIZE(hwsim_rates) - 4;
1053 break; 1113 break;
1054 default: 1114 default:
1055 break; 1115 break;
1056 } 1116 }
1057 1117
1058 sband->bitrates = data->rates;
1059 sband->n_bitrates = ARRAY_SIZE(hwsim_rates);
1060
1061 sband->ht_cap.ht_supported = true; 1118 sband->ht_cap.ht_supported = true;
1062 sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 | 1119 sband->ht_cap.cap = IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
1063 IEEE80211_HT_CAP_GRN_FLD | 1120 IEEE80211_HT_CAP_GRN_FLD |
diff --git a/drivers/net/wireless/mwl8k.c b/drivers/net/wireless/mwl8k.c
index 746532ebe5a8..2ebfee4da3fa 100644
--- a/drivers/net/wireless/mwl8k.c
+++ b/drivers/net/wireless/mwl8k.c
@@ -12,6 +12,7 @@
12#include <linux/init.h> 12#include <linux/init.h>
13#include <linux/module.h> 13#include <linux/module.h>
14#include <linux/kernel.h> 14#include <linux/kernel.h>
15#include <linux/sched.h>
15#include <linux/spinlock.h> 16#include <linux/spinlock.h>
16#include <linux/list.h> 17#include <linux/list.h>
17#include <linux/pci.h> 18#include <linux/pci.h>
@@ -27,18 +28,6 @@
27#define MWL8K_NAME KBUILD_MODNAME 28#define MWL8K_NAME KBUILD_MODNAME
28#define MWL8K_VERSION "0.10" 29#define MWL8K_VERSION "0.10"
29 30
30MODULE_DESCRIPTION(MWL8K_DESC);
31MODULE_VERSION(MWL8K_VERSION);
32MODULE_AUTHOR("Lennert Buytenhek <buytenh@marvell.com>");
33MODULE_LICENSE("GPL");
34
35static DEFINE_PCI_DEVICE_TABLE(mwl8k_table) = {
36 { PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = 8687, },
37 { PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = 8687, },
38 { }
39};
40MODULE_DEVICE_TABLE(pci, mwl8k_table);
41
42/* Register definitions */ 31/* Register definitions */
43#define MWL8K_HIU_GEN_PTR 0x00000c10 32#define MWL8K_HIU_GEN_PTR 0x00000c10
44#define MWL8K_MODE_STA 0x0000005a 33#define MWL8K_MODE_STA 0x0000005a
@@ -88,72 +77,89 @@ MODULE_DEVICE_TABLE(pci, mwl8k_table);
88 MWL8K_A2H_INT_RX_READY | \ 77 MWL8K_A2H_INT_RX_READY | \
89 MWL8K_A2H_INT_TX_DONE) 78 MWL8K_A2H_INT_TX_DONE)
90 79
91/* WME stream classes */
92#define WME_AC_BE 0 /* best effort */
93#define WME_AC_BK 1 /* background */
94#define WME_AC_VI 2 /* video */
95#define WME_AC_VO 3 /* voice */
96
97#define MWL8K_RX_QUEUES 1 80#define MWL8K_RX_QUEUES 1
98#define MWL8K_TX_QUEUES 4 81#define MWL8K_TX_QUEUES 4
99 82
83struct rxd_ops {
84 int rxd_size;
85 void (*rxd_init)(void *rxd, dma_addr_t next_dma_addr);
86 void (*rxd_refill)(void *rxd, dma_addr_t addr, int len);
87 int (*rxd_process)(void *rxd, struct ieee80211_rx_status *status);
88};
89
90struct mwl8k_device_info {
91 char *part_name;
92 char *helper_image;
93 char *fw_image;
94 struct rxd_ops *rxd_ops;
95 u16 modes;
96};
97
100struct mwl8k_rx_queue { 98struct mwl8k_rx_queue {
101 int rx_desc_count; 99 int rxd_count;
102 100
103 /* hw receives here */ 101 /* hw receives here */
104 int rx_head; 102 int head;
105 103
106 /* refill descs here */ 104 /* refill descs here */
107 int rx_tail; 105 int tail;
108 106
109 struct mwl8k_rx_desc *rx_desc_area; 107 void *rxd;
110 dma_addr_t rx_desc_dma; 108 dma_addr_t rxd_dma;
111 struct sk_buff **rx_skb; 109 struct {
110 struct sk_buff *skb;
111 DECLARE_PCI_UNMAP_ADDR(dma)
112 } *buf;
112}; 113};
113 114
114struct mwl8k_tx_queue { 115struct mwl8k_tx_queue {
115 /* hw transmits here */ 116 /* hw transmits here */
116 int tx_head; 117 int head;
117 118
118 /* sw appends here */ 119 /* sw appends here */
119 int tx_tail; 120 int tail;
120 121
121 struct ieee80211_tx_queue_stats tx_stats; 122 struct ieee80211_tx_queue_stats stats;
122 struct mwl8k_tx_desc *tx_desc_area; 123 struct mwl8k_tx_desc *txd;
123 dma_addr_t tx_desc_dma; 124 dma_addr_t txd_dma;
124 struct sk_buff **tx_skb; 125 struct sk_buff **skb;
125}; 126};
126 127
127/* Pointers to the firmware data and meta information about it. */ 128/* Pointers to the firmware data and meta information about it. */
128struct mwl8k_firmware { 129struct mwl8k_firmware {
129 /* Microcode */
130 struct firmware *ucode;
131
132 /* Boot helper code */ 130 /* Boot helper code */
133 struct firmware *helper; 131 struct firmware *helper;
132
133 /* Microcode */
134 struct firmware *ucode;
134}; 135};
135 136
136struct mwl8k_priv { 137struct mwl8k_priv {
138 void __iomem *sram;
137 void __iomem *regs; 139 void __iomem *regs;
138 struct ieee80211_hw *hw; 140 struct ieee80211_hw *hw;
139 141
140 struct pci_dev *pdev; 142 struct pci_dev *pdev;
141 u8 name[16]; 143
144 struct mwl8k_device_info *device_info;
145 bool ap_fw;
146 struct rxd_ops *rxd_ops;
142 147
143 /* firmware files and meta data */ 148 /* firmware files and meta data */
144 struct mwl8k_firmware fw; 149 struct mwl8k_firmware fw;
145 u32 part_num;
146 150
147 /* firmware access */ 151 /* firmware access */
148 struct mutex fw_mutex; 152 struct mutex fw_mutex;
149 struct task_struct *fw_mutex_owner; 153 struct task_struct *fw_mutex_owner;
150 int fw_mutex_depth; 154 int fw_mutex_depth;
151 struct completion *tx_wait;
152 struct completion *hostcmd_wait; 155 struct completion *hostcmd_wait;
153 156
154 /* lock held over TX and TX reap */ 157 /* lock held over TX and TX reap */
155 spinlock_t tx_lock; 158 spinlock_t tx_lock;
156 159
160 /* TX quiesce completion, protected by fw_mutex and tx_lock */
161 struct completion *tx_wait;
162
157 struct ieee80211_vif *vif; 163 struct ieee80211_vif *vif;
158 164
159 struct ieee80211_channel *current_channel; 165 struct ieee80211_channel *current_channel;
@@ -178,10 +184,11 @@ struct mwl8k_priv {
178 /* PHY parameters */ 184 /* PHY parameters */
179 struct ieee80211_supported_band band; 185 struct ieee80211_supported_band band;
180 struct ieee80211_channel channels[14]; 186 struct ieee80211_channel channels[14];
181 struct ieee80211_rate rates[12]; 187 struct ieee80211_rate rates[13];
182 188
183 bool radio_on; 189 bool radio_on;
184 bool radio_short_preamble; 190 bool radio_short_preamble;
191 bool sniffer_enabled;
185 bool wmm_enabled; 192 bool wmm_enabled;
186 193
187 /* XXX need to convert this to handle multiple interfaces */ 194 /* XXX need to convert this to handle multiple interfaces */
@@ -199,9 +206,6 @@ struct mwl8k_priv {
199 206
200 /* Tasklet to reclaim TX descriptors and buffers after tx */ 207 /* Tasklet to reclaim TX descriptors and buffers after tx */
201 struct tasklet_struct tx_reclaim_task; 208 struct tasklet_struct tx_reclaim_task;
202
203 /* Work thread to serialize configuration requests */
204 struct workqueue_struct *config_wq;
205}; 209};
206 210
207/* Per interface specific private data */ 211/* Per interface specific private data */
@@ -220,7 +224,7 @@ struct mwl8k_vif {
220 * Subset of supported legacy rates. 224 * Subset of supported legacy rates.
221 * Intersection of AP and STA supported rates. 225 * Intersection of AP and STA supported rates.
222 */ 226 */
223 struct ieee80211_rate legacy_rates[12]; 227 struct ieee80211_rate legacy_rates[13];
224 228
225 /* number of supported legacy rates */ 229 /* number of supported legacy rates */
226 u8 legacy_nrates; 230 u8 legacy_nrates;
@@ -252,9 +256,10 @@ static const struct ieee80211_rate mwl8k_rates[] = {
252 { .bitrate = 10, .hw_value = 2, }, 256 { .bitrate = 10, .hw_value = 2, },
253 { .bitrate = 20, .hw_value = 4, }, 257 { .bitrate = 20, .hw_value = 4, },
254 { .bitrate = 55, .hw_value = 11, }, 258 { .bitrate = 55, .hw_value = 11, },
259 { .bitrate = 110, .hw_value = 22, },
260 { .bitrate = 220, .hw_value = 44, },
255 { .bitrate = 60, .hw_value = 12, }, 261 { .bitrate = 60, .hw_value = 12, },
256 { .bitrate = 90, .hw_value = 18, }, 262 { .bitrate = 90, .hw_value = 18, },
257 { .bitrate = 110, .hw_value = 22, },
258 { .bitrate = 120, .hw_value = 24, }, 263 { .bitrate = 120, .hw_value = 24, },
259 { .bitrate = 180, .hw_value = 36, }, 264 { .bitrate = 180, .hw_value = 36, },
260 { .bitrate = 240, .hw_value = 48, }, 265 { .bitrate = 240, .hw_value = 48, },
@@ -270,10 +275,12 @@ static const struct ieee80211_rate mwl8k_rates[] = {
270/* Firmware command codes */ 275/* Firmware command codes */
271#define MWL8K_CMD_CODE_DNLD 0x0001 276#define MWL8K_CMD_CODE_DNLD 0x0001
272#define MWL8K_CMD_GET_HW_SPEC 0x0003 277#define MWL8K_CMD_GET_HW_SPEC 0x0003
278#define MWL8K_CMD_SET_HW_SPEC 0x0004
273#define MWL8K_CMD_MAC_MULTICAST_ADR 0x0010 279#define MWL8K_CMD_MAC_MULTICAST_ADR 0x0010
274#define MWL8K_CMD_GET_STAT 0x0014 280#define MWL8K_CMD_GET_STAT 0x0014
275#define MWL8K_CMD_RADIO_CONTROL 0x001c 281#define MWL8K_CMD_RADIO_CONTROL 0x001c
276#define MWL8K_CMD_RF_TX_POWER 0x001e 282#define MWL8K_CMD_RF_TX_POWER 0x001e
283#define MWL8K_CMD_RF_ANTENNA 0x0020
277#define MWL8K_CMD_SET_PRE_SCAN 0x0107 284#define MWL8K_CMD_SET_PRE_SCAN 0x0107
278#define MWL8K_CMD_SET_POST_SCAN 0x0108 285#define MWL8K_CMD_SET_POST_SCAN 0x0108
279#define MWL8K_CMD_SET_RF_CHANNEL 0x010a 286#define MWL8K_CMD_SET_RF_CHANNEL 0x010a
@@ -287,6 +294,7 @@ static const struct ieee80211_rate mwl8k_rates[] = {
287#define MWL8K_CMD_MIMO_CONFIG 0x0125 294#define MWL8K_CMD_MIMO_CONFIG 0x0125
288#define MWL8K_CMD_USE_FIXED_RATE 0x0126 295#define MWL8K_CMD_USE_FIXED_RATE 0x0126
289#define MWL8K_CMD_ENABLE_SNIFFER 0x0150 296#define MWL8K_CMD_ENABLE_SNIFFER 0x0150
297#define MWL8K_CMD_SET_MAC_ADDR 0x0202
290#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203 298#define MWL8K_CMD_SET_RATEADAPT_MODE 0x0203
291#define MWL8K_CMD_UPDATE_STADB 0x1123 299#define MWL8K_CMD_UPDATE_STADB 0x1123
292 300
@@ -299,10 +307,12 @@ static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
299 switch (cmd & ~0x8000) { 307 switch (cmd & ~0x8000) {
300 MWL8K_CMDNAME(CODE_DNLD); 308 MWL8K_CMDNAME(CODE_DNLD);
301 MWL8K_CMDNAME(GET_HW_SPEC); 309 MWL8K_CMDNAME(GET_HW_SPEC);
310 MWL8K_CMDNAME(SET_HW_SPEC);
302 MWL8K_CMDNAME(MAC_MULTICAST_ADR); 311 MWL8K_CMDNAME(MAC_MULTICAST_ADR);
303 MWL8K_CMDNAME(GET_STAT); 312 MWL8K_CMDNAME(GET_STAT);
304 MWL8K_CMDNAME(RADIO_CONTROL); 313 MWL8K_CMDNAME(RADIO_CONTROL);
305 MWL8K_CMDNAME(RF_TX_POWER); 314 MWL8K_CMDNAME(RF_TX_POWER);
315 MWL8K_CMDNAME(RF_ANTENNA);
306 MWL8K_CMDNAME(SET_PRE_SCAN); 316 MWL8K_CMDNAME(SET_PRE_SCAN);
307 MWL8K_CMDNAME(SET_POST_SCAN); 317 MWL8K_CMDNAME(SET_POST_SCAN);
308 MWL8K_CMDNAME(SET_RF_CHANNEL); 318 MWL8K_CMDNAME(SET_RF_CHANNEL);
@@ -316,6 +326,7 @@ static const char *mwl8k_cmd_name(u16 cmd, char *buf, int bufsize)
316 MWL8K_CMDNAME(MIMO_CONFIG); 326 MWL8K_CMDNAME(MIMO_CONFIG);
317 MWL8K_CMDNAME(USE_FIXED_RATE); 327 MWL8K_CMDNAME(USE_FIXED_RATE);
318 MWL8K_CMDNAME(ENABLE_SNIFFER); 328 MWL8K_CMDNAME(ENABLE_SNIFFER);
329 MWL8K_CMDNAME(SET_MAC_ADDR);
319 MWL8K_CMDNAME(SET_RATEADAPT_MODE); 330 MWL8K_CMDNAME(SET_RATEADAPT_MODE);
320 MWL8K_CMDNAME(UPDATE_STADB); 331 MWL8K_CMDNAME(UPDATE_STADB);
321 default: 332 default:
@@ -353,41 +364,35 @@ static void mwl8k_release_firmware(struct mwl8k_priv *priv)
353 364
354/* Request fw image */ 365/* Request fw image */
355static int mwl8k_request_fw(struct mwl8k_priv *priv, 366static int mwl8k_request_fw(struct mwl8k_priv *priv,
356 const char *fname, struct firmware **fw) 367 const char *fname, struct firmware **fw)
357{ 368{
358 /* release current image */ 369 /* release current image */
359 if (*fw != NULL) 370 if (*fw != NULL)
360 mwl8k_release_fw(fw); 371 mwl8k_release_fw(fw);
361 372
362 return request_firmware((const struct firmware **)fw, 373 return request_firmware((const struct firmware **)fw,
363 fname, &priv->pdev->dev); 374 fname, &priv->pdev->dev);
364} 375}
365 376
366static int mwl8k_request_firmware(struct mwl8k_priv *priv, u32 part_num) 377static int mwl8k_request_firmware(struct mwl8k_priv *priv)
367{ 378{
368 u8 filename[64]; 379 struct mwl8k_device_info *di = priv->device_info;
369 int rc; 380 int rc;
370 381
371 priv->part_num = part_num; 382 if (di->helper_image != NULL) {
372 383 rc = mwl8k_request_fw(priv, di->helper_image, &priv->fw.helper);
373 snprintf(filename, sizeof(filename), 384 if (rc) {
374 "mwl8k/helper_%u.fw", priv->part_num); 385 printk(KERN_ERR "%s: Error requesting helper "
375 386 "firmware file %s\n", pci_name(priv->pdev),
376 rc = mwl8k_request_fw(priv, filename, &priv->fw.helper); 387 di->helper_image);
377 if (rc) { 388 return rc;
378 printk(KERN_ERR 389 }
379 "%s Error requesting helper firmware file %s\n",
380 pci_name(priv->pdev), filename);
381 return rc;
382 } 390 }
383 391
384 snprintf(filename, sizeof(filename), 392 rc = mwl8k_request_fw(priv, di->fw_image, &priv->fw.ucode);
385 "mwl8k/fmimage_%u.fw", priv->part_num);
386
387 rc = mwl8k_request_fw(priv, filename, &priv->fw.ucode);
388 if (rc) { 393 if (rc) {
389 printk(KERN_ERR "%s Error requesting firmware file %s\n", 394 printk(KERN_ERR "%s: Error requesting firmware file %s\n",
390 pci_name(priv->pdev), filename); 395 pci_name(priv->pdev), di->fw_image);
391 mwl8k_release_fw(&priv->fw.helper); 396 mwl8k_release_fw(&priv->fw.helper);
392 return rc; 397 return rc;
393 } 398 }
@@ -434,6 +439,7 @@ mwl8k_send_fw_load_cmd(struct mwl8k_priv *priv, void *data, int length)
434 break; 439 break;
435 } 440 }
436 441
442 cond_resched();
437 udelay(1); 443 udelay(1);
438 } while (--loops); 444 } while (--loops);
439 445
@@ -542,43 +548,62 @@ static int mwl8k_feed_fw_image(struct mwl8k_priv *priv,
542 return rc; 548 return rc;
543} 549}
544 550
545static int mwl8k_load_firmware(struct mwl8k_priv *priv) 551static int mwl8k_load_firmware(struct ieee80211_hw *hw)
546{ 552{
547 int loops, rc; 553 struct mwl8k_priv *priv = hw->priv;
554 struct firmware *fw = priv->fw.ucode;
555 struct mwl8k_device_info *di = priv->device_info;
556 int rc;
557 int loops;
558
559 if (!memcmp(fw->data, "\x01\x00\x00\x00", 4)) {
560 struct firmware *helper = priv->fw.helper;
548 561
549 const u8 *ucode = priv->fw.ucode->data; 562 if (helper == NULL) {
550 size_t ucode_len = priv->fw.ucode->size; 563 printk(KERN_ERR "%s: helper image needed but none "
551 const u8 *helper = priv->fw.helper->data; 564 "given\n", pci_name(priv->pdev));
552 size_t helper_len = priv->fw.helper->size; 565 return -EINVAL;
566 }
553 567
554 if (!memcmp(ucode, "\x01\x00\x00\x00", 4)) { 568 rc = mwl8k_load_fw_image(priv, helper->data, helper->size);
555 rc = mwl8k_load_fw_image(priv, helper, helper_len);
556 if (rc) { 569 if (rc) {
557 printk(KERN_ERR "%s: unable to load firmware " 570 printk(KERN_ERR "%s: unable to load firmware "
558 "helper image\n", pci_name(priv->pdev)); 571 "helper image\n", pci_name(priv->pdev));
559 return rc; 572 return rc;
560 } 573 }
561 msleep(1); 574 msleep(1);
562 575
563 rc = mwl8k_feed_fw_image(priv, ucode, ucode_len); 576 rc = mwl8k_feed_fw_image(priv, fw->data, fw->size);
564 } else { 577 } else {
565 rc = mwl8k_load_fw_image(priv, ucode, ucode_len); 578 rc = mwl8k_load_fw_image(priv, fw->data, fw->size);
566 } 579 }
567 580
568 if (rc) { 581 if (rc) {
569 printk(KERN_ERR "%s: unable to load firmware data\n", 582 printk(KERN_ERR "%s: unable to load firmware image\n",
570 pci_name(priv->pdev)); 583 pci_name(priv->pdev));
571 return rc; 584 return rc;
572 } 585 }
573 586
574 iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR); 587 if (di->modes & BIT(NL80211_IFTYPE_AP))
588 iowrite32(MWL8K_MODE_AP, priv->regs + MWL8K_HIU_GEN_PTR);
589 else
590 iowrite32(MWL8K_MODE_STA, priv->regs + MWL8K_HIU_GEN_PTR);
575 msleep(1); 591 msleep(1);
576 592
577 loops = 200000; 593 loops = 200000;
578 do { 594 do {
579 if (ioread32(priv->regs + MWL8K_HIU_INT_CODE) 595 u32 ready_code;
580 == MWL8K_FWSTA_READY) 596
597 ready_code = ioread32(priv->regs + MWL8K_HIU_INT_CODE);
598 if (ready_code == MWL8K_FWAP_READY) {
599 priv->ap_fw = 1;
581 break; 600 break;
601 } else if (ready_code == MWL8K_FWSTA_READY) {
602 priv->ap_fw = 0;
603 break;
604 }
605
606 cond_resched();
582 udelay(1); 607 udelay(1);
583 } while (--loops); 608 } while (--loops);
584 609
@@ -605,7 +630,7 @@ struct ewc_ht_info {
605/* Peer Entry flags - used to define the type of the peer node */ 630/* Peer Entry flags - used to define the type of the peer node */
606#define MWL8K_PEER_TYPE_ACCESSPOINT 2 631#define MWL8K_PEER_TYPE_ACCESSPOINT 2
607 632
608#define MWL8K_IEEE_LEGACY_DATA_RATES 12 633#define MWL8K_IEEE_LEGACY_DATA_RATES 13
609#define MWL8K_MCS_BITMAP_SIZE 16 634#define MWL8K_MCS_BITMAP_SIZE 16
610 635
611struct peer_capability_info { 636struct peer_capability_info {
@@ -731,16 +756,96 @@ static inline void mwl8k_add_dma_header(struct sk_buff *skb)
731 756
732 757
733/* 758/*
734 * Packet reception. 759 * Packet reception for 88w8366.
735 */ 760 */
736#define MWL8K_RX_CTRL_OWNED_BY_HOST 0x02 761struct mwl8k_rxd_8366 {
762 __le16 pkt_len;
763 __u8 sq2;
764 __u8 rate;
765 __le32 pkt_phys_addr;
766 __le32 next_rxd_phys_addr;
767 __le16 qos_control;
768 __le16 htsig2;
769 __le32 hw_rssi_info;
770 __le32 hw_noise_floor_info;
771 __u8 noise_floor;
772 __u8 pad0[3];
773 __u8 rssi;
774 __u8 rx_status;
775 __u8 channel;
776 __u8 rx_ctrl;
777} __attribute__((packed));
737 778
738struct mwl8k_rx_desc { 779#define MWL8K_8366_RX_CTRL_OWNED_BY_HOST 0x80
780
781static void mwl8k_rxd_8366_init(void *_rxd, dma_addr_t next_dma_addr)
782{
783 struct mwl8k_rxd_8366 *rxd = _rxd;
784
785 rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
786 rxd->rx_ctrl = MWL8K_8366_RX_CTRL_OWNED_BY_HOST;
787}
788
789static void mwl8k_rxd_8366_refill(void *_rxd, dma_addr_t addr, int len)
790{
791 struct mwl8k_rxd_8366 *rxd = _rxd;
792
793 rxd->pkt_len = cpu_to_le16(len);
794 rxd->pkt_phys_addr = cpu_to_le32(addr);
795 wmb();
796 rxd->rx_ctrl = 0;
797}
798
799static int
800mwl8k_rxd_8366_process(void *_rxd, struct ieee80211_rx_status *status)
801{
802 struct mwl8k_rxd_8366 *rxd = _rxd;
803
804 if (!(rxd->rx_ctrl & MWL8K_8366_RX_CTRL_OWNED_BY_HOST))
805 return -1;
806 rmb();
807
808 memset(status, 0, sizeof(*status));
809
810 status->signal = -rxd->rssi;
811 status->noise = -rxd->noise_floor;
812
813 if (rxd->rate & 0x80) {
814 status->flag |= RX_FLAG_HT;
815 status->rate_idx = rxd->rate & 0x7f;
816 } else {
817 int i;
818
819 for (i = 0; i < ARRAY_SIZE(mwl8k_rates); i++) {
820 if (mwl8k_rates[i].hw_value == rxd->rate) {
821 status->rate_idx = i;
822 break;
823 }
824 }
825 }
826
827 status->band = IEEE80211_BAND_2GHZ;
828 status->freq = ieee80211_channel_to_frequency(rxd->channel);
829
830 return le16_to_cpu(rxd->pkt_len);
831}
832
833static struct rxd_ops rxd_8366_ops = {
834 .rxd_size = sizeof(struct mwl8k_rxd_8366),
835 .rxd_init = mwl8k_rxd_8366_init,
836 .rxd_refill = mwl8k_rxd_8366_refill,
837 .rxd_process = mwl8k_rxd_8366_process,
838};
839
840/*
841 * Packet reception for 88w8687.
842 */
843struct mwl8k_rxd_8687 {
739 __le16 pkt_len; 844 __le16 pkt_len;
740 __u8 link_quality; 845 __u8 link_quality;
741 __u8 noise_level; 846 __u8 noise_level;
742 __le32 pkt_phys_addr; 847 __le32 pkt_phys_addr;
743 __le32 next_rx_desc_phys_addr; 848 __le32 next_rxd_phys_addr;
744 __le16 qos_control; 849 __le16 qos_control;
745 __le16 rate_info; 850 __le16 rate_info;
746 __le32 pad0[4]; 851 __le32 pad0[4];
@@ -752,6 +857,76 @@ struct mwl8k_rx_desc {
752 __u8 pad2[2]; 857 __u8 pad2[2];
753} __attribute__((packed)); 858} __attribute__((packed));
754 859
860#define MWL8K_8687_RATE_INFO_SHORTPRE 0x8000
861#define MWL8K_8687_RATE_INFO_ANTSELECT(x) (((x) >> 11) & 0x3)
862#define MWL8K_8687_RATE_INFO_RATEID(x) (((x) >> 3) & 0x3f)
863#define MWL8K_8687_RATE_INFO_40MHZ 0x0004
864#define MWL8K_8687_RATE_INFO_SHORTGI 0x0002
865#define MWL8K_8687_RATE_INFO_MCS_FORMAT 0x0001
866
867#define MWL8K_8687_RX_CTRL_OWNED_BY_HOST 0x02
868
869static void mwl8k_rxd_8687_init(void *_rxd, dma_addr_t next_dma_addr)
870{
871 struct mwl8k_rxd_8687 *rxd = _rxd;
872
873 rxd->next_rxd_phys_addr = cpu_to_le32(next_dma_addr);
874 rxd->rx_ctrl = MWL8K_8687_RX_CTRL_OWNED_BY_HOST;
875}
876
877static void mwl8k_rxd_8687_refill(void *_rxd, dma_addr_t addr, int len)
878{
879 struct mwl8k_rxd_8687 *rxd = _rxd;
880
881 rxd->pkt_len = cpu_to_le16(len);
882 rxd->pkt_phys_addr = cpu_to_le32(addr);
883 wmb();
884 rxd->rx_ctrl = 0;
885}
886
887static int
888mwl8k_rxd_8687_process(void *_rxd, struct ieee80211_rx_status *status)
889{
890 struct mwl8k_rxd_8687 *rxd = _rxd;
891 u16 rate_info;
892
893 if (!(rxd->rx_ctrl & MWL8K_8687_RX_CTRL_OWNED_BY_HOST))
894 return -1;
895 rmb();
896
897 rate_info = le16_to_cpu(rxd->rate_info);
898
899 memset(status, 0, sizeof(*status));
900
901 status->signal = -rxd->rssi;
902 status->noise = -rxd->noise_level;
903 status->qual = rxd->link_quality;
904 status->antenna = MWL8K_8687_RATE_INFO_ANTSELECT(rate_info);
905 status->rate_idx = MWL8K_8687_RATE_INFO_RATEID(rate_info);
906
907 if (rate_info & MWL8K_8687_RATE_INFO_SHORTPRE)
908 status->flag |= RX_FLAG_SHORTPRE;
909 if (rate_info & MWL8K_8687_RATE_INFO_40MHZ)
910 status->flag |= RX_FLAG_40MHZ;
911 if (rate_info & MWL8K_8687_RATE_INFO_SHORTGI)
912 status->flag |= RX_FLAG_SHORT_GI;
913 if (rate_info & MWL8K_8687_RATE_INFO_MCS_FORMAT)
914 status->flag |= RX_FLAG_HT;
915
916 status->band = IEEE80211_BAND_2GHZ;
917 status->freq = ieee80211_channel_to_frequency(rxd->channel);
918
919 return le16_to_cpu(rxd->pkt_len);
920}
921
922static struct rxd_ops rxd_8687_ops = {
923 .rxd_size = sizeof(struct mwl8k_rxd_8687),
924 .rxd_init = mwl8k_rxd_8687_init,
925 .rxd_refill = mwl8k_rxd_8687_refill,
926 .rxd_process = mwl8k_rxd_8687_process,
927};
928
929
755#define MWL8K_RX_DESCS 256 930#define MWL8K_RX_DESCS 256
756#define MWL8K_RX_MAXSZ 3800 931#define MWL8K_RX_MAXSZ 3800
757 932
@@ -762,43 +937,44 @@ static int mwl8k_rxq_init(struct ieee80211_hw *hw, int index)
762 int size; 937 int size;
763 int i; 938 int i;
764 939
765 rxq->rx_desc_count = 0; 940 rxq->rxd_count = 0;
766 rxq->rx_head = 0; 941 rxq->head = 0;
767 rxq->rx_tail = 0; 942 rxq->tail = 0;
768 943
769 size = MWL8K_RX_DESCS * sizeof(struct mwl8k_rx_desc); 944 size = MWL8K_RX_DESCS * priv->rxd_ops->rxd_size;
770 945
771 rxq->rx_desc_area = 946 rxq->rxd = pci_alloc_consistent(priv->pdev, size, &rxq->rxd_dma);
772 pci_alloc_consistent(priv->pdev, size, &rxq->rx_desc_dma); 947 if (rxq->rxd == NULL) {
773 if (rxq->rx_desc_area == NULL) {
774 printk(KERN_ERR "%s: failed to alloc RX descriptors\n", 948 printk(KERN_ERR "%s: failed to alloc RX descriptors\n",
775 priv->name); 949 wiphy_name(hw->wiphy));
776 return -ENOMEM; 950 return -ENOMEM;
777 } 951 }
778 memset(rxq->rx_desc_area, 0, size); 952 memset(rxq->rxd, 0, size);
779 953
780 rxq->rx_skb = kmalloc(MWL8K_RX_DESCS * 954 rxq->buf = kmalloc(MWL8K_RX_DESCS * sizeof(*rxq->buf), GFP_KERNEL);
781 sizeof(*rxq->rx_skb), GFP_KERNEL); 955 if (rxq->buf == NULL) {
782 if (rxq->rx_skb == NULL) {
783 printk(KERN_ERR "%s: failed to alloc RX skbuff list\n", 956 printk(KERN_ERR "%s: failed to alloc RX skbuff list\n",
784 priv->name); 957 wiphy_name(hw->wiphy));
785 pci_free_consistent(priv->pdev, size, 958 pci_free_consistent(priv->pdev, size, rxq->rxd, rxq->rxd_dma);
786 rxq->rx_desc_area, rxq->rx_desc_dma);
787 return -ENOMEM; 959 return -ENOMEM;
788 } 960 }
789 memset(rxq->rx_skb, 0, MWL8K_RX_DESCS * sizeof(*rxq->rx_skb)); 961 memset(rxq->buf, 0, MWL8K_RX_DESCS * sizeof(*rxq->buf));
790 962
791 for (i = 0; i < MWL8K_RX_DESCS; i++) { 963 for (i = 0; i < MWL8K_RX_DESCS; i++) {
792 struct mwl8k_rx_desc *rx_desc; 964 int desc_size;
965 void *rxd;
793 int nexti; 966 int nexti;
967 dma_addr_t next_dma_addr;
794 968
795 rx_desc = rxq->rx_desc_area + i; 969 desc_size = priv->rxd_ops->rxd_size;
796 nexti = (i + 1) % MWL8K_RX_DESCS; 970 rxd = rxq->rxd + (i * priv->rxd_ops->rxd_size);
797 971
798 rx_desc->next_rx_desc_phys_addr = 972 nexti = i + 1;
799 cpu_to_le32(rxq->rx_desc_dma 973 if (nexti == MWL8K_RX_DESCS)
800 + nexti * sizeof(*rx_desc)); 974 nexti = 0;
801 rx_desc->rx_ctrl = MWL8K_RX_CTRL_OWNED_BY_HOST; 975 next_dma_addr = rxq->rxd_dma + (nexti * desc_size);
976
977 priv->rxd_ops->rxd_init(rxd, next_dma_addr);
802 } 978 }
803 979
804 return 0; 980 return 0;
@@ -811,27 +987,28 @@ static int rxq_refill(struct ieee80211_hw *hw, int index, int limit)
811 int refilled; 987 int refilled;
812 988
813 refilled = 0; 989 refilled = 0;
814 while (rxq->rx_desc_count < MWL8K_RX_DESCS && limit--) { 990 while (rxq->rxd_count < MWL8K_RX_DESCS && limit--) {
815 struct sk_buff *skb; 991 struct sk_buff *skb;
992 dma_addr_t addr;
816 int rx; 993 int rx;
994 void *rxd;
817 995
818 skb = dev_alloc_skb(MWL8K_RX_MAXSZ); 996 skb = dev_alloc_skb(MWL8K_RX_MAXSZ);
819 if (skb == NULL) 997 if (skb == NULL)
820 break; 998 break;
821 999
822 rxq->rx_desc_count++; 1000 addr = pci_map_single(priv->pdev, skb->data,
823 1001 MWL8K_RX_MAXSZ, DMA_FROM_DEVICE);
824 rx = rxq->rx_tail;
825 rxq->rx_tail = (rx + 1) % MWL8K_RX_DESCS;
826 1002
827 rxq->rx_desc_area[rx].pkt_phys_addr = 1003 rxq->rxd_count++;
828 cpu_to_le32(pci_map_single(priv->pdev, skb->data, 1004 rx = rxq->tail++;
829 MWL8K_RX_MAXSZ, DMA_FROM_DEVICE)); 1005 if (rxq->tail == MWL8K_RX_DESCS)
1006 rxq->tail = 0;
1007 rxq->buf[rx].skb = skb;
1008 pci_unmap_addr_set(&rxq->buf[rx], dma, addr);
830 1009
831 rxq->rx_desc_area[rx].pkt_len = cpu_to_le16(MWL8K_RX_MAXSZ); 1010 rxd = rxq->rxd + (rx * priv->rxd_ops->rxd_size);
832 rxq->rx_skb[rx] = skb; 1011 priv->rxd_ops->rxd_refill(rxd, addr, MWL8K_RX_MAXSZ);
833 wmb();
834 rxq->rx_desc_area[rx].rx_ctrl = 0;
835 1012
836 refilled++; 1013 refilled++;
837 } 1014 }
@@ -847,24 +1024,24 @@ static void mwl8k_rxq_deinit(struct ieee80211_hw *hw, int index)
847 int i; 1024 int i;
848 1025
849 for (i = 0; i < MWL8K_RX_DESCS; i++) { 1026 for (i = 0; i < MWL8K_RX_DESCS; i++) {
850 if (rxq->rx_skb[i] != NULL) { 1027 if (rxq->buf[i].skb != NULL) {
851 unsigned long addr; 1028 pci_unmap_single(priv->pdev,
852 1029 pci_unmap_addr(&rxq->buf[i], dma),
853 addr = le32_to_cpu(rxq->rx_desc_area[i].pkt_phys_addr); 1030 MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE);
854 pci_unmap_single(priv->pdev, addr, MWL8K_RX_MAXSZ, 1031 pci_unmap_addr_set(&rxq->buf[i], dma, 0);
855 PCI_DMA_FROMDEVICE); 1032
856 kfree_skb(rxq->rx_skb[i]); 1033 kfree_skb(rxq->buf[i].skb);
857 rxq->rx_skb[i] = NULL; 1034 rxq->buf[i].skb = NULL;
858 } 1035 }
859 } 1036 }
860 1037
861 kfree(rxq->rx_skb); 1038 kfree(rxq->buf);
862 rxq->rx_skb = NULL; 1039 rxq->buf = NULL;
863 1040
864 pci_free_consistent(priv->pdev, 1041 pci_free_consistent(priv->pdev,
865 MWL8K_RX_DESCS * sizeof(struct mwl8k_rx_desc), 1042 MWL8K_RX_DESCS * priv->rxd_ops->rxd_size,
866 rxq->rx_desc_area, rxq->rx_desc_dma); 1043 rxq->rxd, rxq->rxd_dma);
867 rxq->rx_desc_area = NULL; 1044 rxq->rxd = NULL;
868} 1045}
869 1046
870 1047
@@ -880,9 +1057,11 @@ mwl8k_capture_bssid(struct mwl8k_priv *priv, struct ieee80211_hdr *wh)
880 !compare_ether_addr(wh->addr3, priv->capture_bssid); 1057 !compare_ether_addr(wh->addr3, priv->capture_bssid);
881} 1058}
882 1059
883static inline void mwl8k_save_beacon(struct mwl8k_priv *priv, 1060static inline void mwl8k_save_beacon(struct ieee80211_hw *hw,
884 struct sk_buff *skb) 1061 struct sk_buff *skb)
885{ 1062{
1063 struct mwl8k_priv *priv = hw->priv;
1064
886 priv->capture_beacon = false; 1065 priv->capture_beacon = false;
887 memset(priv->capture_bssid, 0, ETH_ALEN); 1066 memset(priv->capture_bssid, 0, ETH_ALEN);
888 1067
@@ -893,8 +1072,7 @@ static inline void mwl8k_save_beacon(struct mwl8k_priv *priv,
893 */ 1072 */
894 priv->beacon_skb = skb_copy(skb, GFP_ATOMIC); 1073 priv->beacon_skb = skb_copy(skb, GFP_ATOMIC);
895 if (priv->beacon_skb != NULL) 1074 if (priv->beacon_skb != NULL)
896 queue_work(priv->config_wq, 1075 ieee80211_queue_work(hw, &priv->finalize_join_worker);
897 &priv->finalize_join_worker);
898} 1076}
899 1077
900static int rxq_process(struct ieee80211_hw *hw, int index, int limit) 1078static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
@@ -904,53 +1082,46 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
904 int processed; 1082 int processed;
905 1083
906 processed = 0; 1084 processed = 0;
907 while (rxq->rx_desc_count && limit--) { 1085 while (rxq->rxd_count && limit--) {
908 struct mwl8k_rx_desc *rx_desc;
909 struct sk_buff *skb; 1086 struct sk_buff *skb;
1087 void *rxd;
1088 int pkt_len;
910 struct ieee80211_rx_status status; 1089 struct ieee80211_rx_status status;
911 unsigned long addr;
912 struct ieee80211_hdr *wh;
913 1090
914 rx_desc = rxq->rx_desc_area + rxq->rx_head; 1091 skb = rxq->buf[rxq->head].skb;
915 if (!(rx_desc->rx_ctrl & MWL8K_RX_CTRL_OWNED_BY_HOST)) 1092 if (skb == NULL)
916 break; 1093 break;
917 rmb();
918 1094
919 skb = rxq->rx_skb[rxq->rx_head]; 1095 rxd = rxq->rxd + (rxq->head * priv->rxd_ops->rxd_size);
920 if (skb == NULL) 1096
1097 pkt_len = priv->rxd_ops->rxd_process(rxd, &status);
1098 if (pkt_len < 0)
921 break; 1099 break;
922 rxq->rx_skb[rxq->rx_head] = NULL;
923 1100
924 rxq->rx_head = (rxq->rx_head + 1) % MWL8K_RX_DESCS; 1101 rxq->buf[rxq->head].skb = NULL;
925 rxq->rx_desc_count--;
926 1102
927 addr = le32_to_cpu(rx_desc->pkt_phys_addr); 1103 pci_unmap_single(priv->pdev,
928 pci_unmap_single(priv->pdev, addr, 1104 pci_unmap_addr(&rxq->buf[rxq->head], dma),
929 MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE); 1105 MWL8K_RX_MAXSZ, PCI_DMA_FROMDEVICE);
1106 pci_unmap_addr_set(&rxq->buf[rxq->head], dma, 0);
930 1107
931 skb_put(skb, le16_to_cpu(rx_desc->pkt_len)); 1108 rxq->head++;
932 mwl8k_remove_dma_header(skb); 1109 if (rxq->head == MWL8K_RX_DESCS)
1110 rxq->head = 0;
1111
1112 rxq->rxd_count--;
933 1113
934 wh = (struct ieee80211_hdr *)skb->data; 1114 skb_put(skb, pkt_len);
1115 mwl8k_remove_dma_header(skb);
935 1116
936 /* 1117 /*
937 * Check for pending join operation. save a copy of 1118 * Check for a pending join operation. Save a
938 * the beacon and schedule a tasklet to send finalize 1119 * copy of the beacon and schedule a tasklet to
939 * join command to the firmware. 1120 * send a FINALIZE_JOIN command to the firmware.
940 */ 1121 */
941 if (mwl8k_capture_bssid(priv, wh)) 1122 if (mwl8k_capture_bssid(priv, (void *)skb->data))
942 mwl8k_save_beacon(priv, skb); 1123 mwl8k_save_beacon(hw, skb);
943 1124
944 memset(&status, 0, sizeof(status));
945 status.mactime = 0;
946 status.signal = -rx_desc->rssi;
947 status.noise = -rx_desc->noise_level;
948 status.qual = rx_desc->link_quality;
949 status.antenna = 1;
950 status.rate_idx = 1;
951 status.flag = 0;
952 status.band = IEEE80211_BAND_2GHZ;
953 status.freq = ieee80211_channel_to_frequency(rx_desc->channel);
954 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); 1125 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
955 ieee80211_rx_irqsafe(hw, skb); 1126 ieee80211_rx_irqsafe(hw, skb);
956 1127
@@ -965,24 +1136,10 @@ static int rxq_process(struct ieee80211_hw *hw, int index, int limit)
965 * Packet transmission. 1136 * Packet transmission.
966 */ 1137 */
967 1138
968/* Transmit queue assignment. */
969enum {
970 MWL8K_WME_AC_BK = 0, /* background access */
971 MWL8K_WME_AC_BE = 1, /* best effort access */
972 MWL8K_WME_AC_VI = 2, /* video access */
973 MWL8K_WME_AC_VO = 3, /* voice access */
974};
975
976/* Transmit packet ACK policy */ 1139/* Transmit packet ACK policy */
977#define MWL8K_TXD_ACK_POLICY_NORMAL 0 1140#define MWL8K_TXD_ACK_POLICY_NORMAL 0
978#define MWL8K_TXD_ACK_POLICY_BLOCKACK 3 1141#define MWL8K_TXD_ACK_POLICY_BLOCKACK 3
979 1142
980#define GET_TXQ(_ac) (\
981 ((_ac) == WME_AC_VO) ? MWL8K_WME_AC_VO : \
982 ((_ac) == WME_AC_VI) ? MWL8K_WME_AC_VI : \
983 ((_ac) == WME_AC_BK) ? MWL8K_WME_AC_BK : \
984 MWL8K_WME_AC_BE)
985
986#define MWL8K_TXD_STATUS_OK 0x00000001 1143#define MWL8K_TXD_STATUS_OK 0x00000001
987#define MWL8K_TXD_STATUS_OK_RETRY 0x00000002 1144#define MWL8K_TXD_STATUS_OK_RETRY 0x00000002
988#define MWL8K_TXD_STATUS_OK_MORE_RETRY 0x00000004 1145#define MWL8K_TXD_STATUS_OK_MORE_RETRY 0x00000004
@@ -997,7 +1154,7 @@ struct mwl8k_tx_desc {
997 __le32 pkt_phys_addr; 1154 __le32 pkt_phys_addr;
998 __le16 pkt_len; 1155 __le16 pkt_len;
999 __u8 dest_MAC_addr[ETH_ALEN]; 1156 __u8 dest_MAC_addr[ETH_ALEN];
1000 __le32 next_tx_desc_phys_addr; 1157 __le32 next_txd_phys_addr;
1001 __le32 reserved; 1158 __le32 reserved;
1002 __le16 rate_info; 1159 __le16 rate_info;
1003 __u8 peer_id; 1160 __u8 peer_id;
@@ -1013,44 +1170,40 @@ static int mwl8k_txq_init(struct ieee80211_hw *hw, int index)
1013 int size; 1170 int size;
1014 int i; 1171 int i;
1015 1172
1016 memset(&txq->tx_stats, 0, sizeof(struct ieee80211_tx_queue_stats)); 1173 memset(&txq->stats, 0, sizeof(struct ieee80211_tx_queue_stats));
1017 txq->tx_stats.limit = MWL8K_TX_DESCS; 1174 txq->stats.limit = MWL8K_TX_DESCS;
1018 txq->tx_head = 0; 1175 txq->head = 0;
1019 txq->tx_tail = 0; 1176 txq->tail = 0;
1020 1177
1021 size = MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc); 1178 size = MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc);
1022 1179
1023 txq->tx_desc_area = 1180 txq->txd = pci_alloc_consistent(priv->pdev, size, &txq->txd_dma);
1024 pci_alloc_consistent(priv->pdev, size, &txq->tx_desc_dma); 1181 if (txq->txd == NULL) {
1025 if (txq->tx_desc_area == NULL) {
1026 printk(KERN_ERR "%s: failed to alloc TX descriptors\n", 1182 printk(KERN_ERR "%s: failed to alloc TX descriptors\n",
1027 priv->name); 1183 wiphy_name(hw->wiphy));
1028 return -ENOMEM; 1184 return -ENOMEM;
1029 } 1185 }
1030 memset(txq->tx_desc_area, 0, size); 1186 memset(txq->txd, 0, size);
1031 1187
1032 txq->tx_skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->tx_skb), 1188 txq->skb = kmalloc(MWL8K_TX_DESCS * sizeof(*txq->skb), GFP_KERNEL);
1033 GFP_KERNEL); 1189 if (txq->skb == NULL) {
1034 if (txq->tx_skb == NULL) {
1035 printk(KERN_ERR "%s: failed to alloc TX skbuff list\n", 1190 printk(KERN_ERR "%s: failed to alloc TX skbuff list\n",
1036 priv->name); 1191 wiphy_name(hw->wiphy));
1037 pci_free_consistent(priv->pdev, size, 1192 pci_free_consistent(priv->pdev, size, txq->txd, txq->txd_dma);
1038 txq->tx_desc_area, txq->tx_desc_dma);
1039 return -ENOMEM; 1193 return -ENOMEM;
1040 } 1194 }
1041 memset(txq->tx_skb, 0, MWL8K_TX_DESCS * sizeof(*txq->tx_skb)); 1195 memset(txq->skb, 0, MWL8K_TX_DESCS * sizeof(*txq->skb));
1042 1196
1043 for (i = 0; i < MWL8K_TX_DESCS; i++) { 1197 for (i = 0; i < MWL8K_TX_DESCS; i++) {
1044 struct mwl8k_tx_desc *tx_desc; 1198 struct mwl8k_tx_desc *tx_desc;
1045 int nexti; 1199 int nexti;
1046 1200
1047 tx_desc = txq->tx_desc_area + i; 1201 tx_desc = txq->txd + i;
1048 nexti = (i + 1) % MWL8K_TX_DESCS; 1202 nexti = (i + 1) % MWL8K_TX_DESCS;
1049 1203
1050 tx_desc->status = 0; 1204 tx_desc->status = 0;
1051 tx_desc->next_tx_desc_phys_addr = 1205 tx_desc->next_txd_phys_addr =
1052 cpu_to_le32(txq->tx_desc_dma + 1206 cpu_to_le32(txq->txd_dma + nexti * sizeof(*tx_desc));
1053 nexti * sizeof(*tx_desc));
1054 } 1207 }
1055 1208
1056 return 0; 1209 return 0;
@@ -1065,11 +1218,6 @@ static inline void mwl8k_tx_start(struct mwl8k_priv *priv)
1065 ioread32(priv->regs + MWL8K_HIU_INT_CODE); 1218 ioread32(priv->regs + MWL8K_HIU_INT_CODE);
1066} 1219}
1067 1220
1068static inline int mwl8k_txq_busy(struct mwl8k_priv *priv)
1069{
1070 return priv->pending_tx_pkts;
1071}
1072
1073struct mwl8k_txq_info { 1221struct mwl8k_txq_info {
1074 u32 fw_owned; 1222 u32 fw_owned;
1075 u32 drv_owned; 1223 u32 drv_owned;
@@ -1089,14 +1237,13 @@ static int mwl8k_scan_tx_ring(struct mwl8k_priv *priv,
1089 1237
1090 memset(txinfo, 0, MWL8K_TX_QUEUES * sizeof(struct mwl8k_txq_info)); 1238 memset(txinfo, 0, MWL8K_TX_QUEUES * sizeof(struct mwl8k_txq_info));
1091 1239
1092 spin_lock_bh(&priv->tx_lock);
1093 for (count = 0; count < MWL8K_TX_QUEUES; count++) { 1240 for (count = 0; count < MWL8K_TX_QUEUES; count++) {
1094 txq = priv->txq + count; 1241 txq = priv->txq + count;
1095 txinfo[count].len = txq->tx_stats.len; 1242 txinfo[count].len = txq->stats.len;
1096 txinfo[count].head = txq->tx_head; 1243 txinfo[count].head = txq->head;
1097 txinfo[count].tail = txq->tx_tail; 1244 txinfo[count].tail = txq->tail;
1098 for (desc = 0; desc < MWL8K_TX_DESCS; desc++) { 1245 for (desc = 0; desc < MWL8K_TX_DESCS; desc++) {
1099 tx_desc = txq->tx_desc_area + desc; 1246 tx_desc = txq->txd + desc;
1100 status = le32_to_cpu(tx_desc->status); 1247 status = le32_to_cpu(tx_desc->status);
1101 1248
1102 if (status & MWL8K_TXD_STATUS_FW_OWNED) 1249 if (status & MWL8K_TXD_STATUS_FW_OWNED)
@@ -1108,30 +1255,26 @@ static int mwl8k_scan_tx_ring(struct mwl8k_priv *priv,
1108 txinfo[count].unused++; 1255 txinfo[count].unused++;
1109 } 1256 }
1110 } 1257 }
1111 spin_unlock_bh(&priv->tx_lock);
1112 1258
1113 return ndescs; 1259 return ndescs;
1114} 1260}
1115 1261
1116/* 1262/*
1117 * Must be called with hw->fw_mutex held and tx queues stopped. 1263 * Must be called with priv->fw_mutex held and tx queues stopped.
1118 */ 1264 */
1119static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw) 1265static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
1120{ 1266{
1121 struct mwl8k_priv *priv = hw->priv; 1267 struct mwl8k_priv *priv = hw->priv;
1122 DECLARE_COMPLETION_ONSTACK(cmd_wait); 1268 DECLARE_COMPLETION_ONSTACK(tx_wait);
1123 u32 count; 1269 u32 count;
1124 unsigned long timeout; 1270 unsigned long timeout;
1125 1271
1126 might_sleep(); 1272 might_sleep();
1127 1273
1128 spin_lock_bh(&priv->tx_lock); 1274 spin_lock_bh(&priv->tx_lock);
1129 count = mwl8k_txq_busy(priv); 1275 count = priv->pending_tx_pkts;
1130 if (count) { 1276 if (count)
1131 priv->tx_wait = &cmd_wait; 1277 priv->tx_wait = &tx_wait;
1132 if (priv->radio_on)
1133 mwl8k_tx_start(priv);
1134 }
1135 spin_unlock_bh(&priv->tx_lock); 1278 spin_unlock_bh(&priv->tx_lock);
1136 1279
1137 if (count) { 1280 if (count) {
@@ -1139,23 +1282,23 @@ static int mwl8k_tx_wait_empty(struct ieee80211_hw *hw)
1139 int index; 1282 int index;
1140 int newcount; 1283 int newcount;
1141 1284
1142 timeout = wait_for_completion_timeout(&cmd_wait, 1285 timeout = wait_for_completion_timeout(&tx_wait,
1143 msecs_to_jiffies(5000)); 1286 msecs_to_jiffies(5000));
1144 if (timeout) 1287 if (timeout)
1145 return 0; 1288 return 0;
1146 1289
1147 spin_lock_bh(&priv->tx_lock); 1290 spin_lock_bh(&priv->tx_lock);
1148 priv->tx_wait = NULL; 1291 priv->tx_wait = NULL;
1149 newcount = mwl8k_txq_busy(priv); 1292 newcount = priv->pending_tx_pkts;
1293 mwl8k_scan_tx_ring(priv, txinfo);
1150 spin_unlock_bh(&priv->tx_lock); 1294 spin_unlock_bh(&priv->tx_lock);
1151 1295
1152 printk(KERN_ERR "%s(%u) TIMEDOUT:5000ms Pend:%u-->%u\n", 1296 printk(KERN_ERR "%s(%u) TIMEDOUT:5000ms Pend:%u-->%u\n",
1153 __func__, __LINE__, count, newcount); 1297 __func__, __LINE__, count, newcount);
1154 1298
1155 mwl8k_scan_tx_ring(priv, txinfo);
1156 for (index = 0; index < MWL8K_TX_QUEUES; index++) 1299 for (index = 0; index < MWL8K_TX_QUEUES; index++)
1157 printk(KERN_ERR 1300 printk(KERN_ERR "TXQ:%u L:%u H:%u T:%u FW:%u "
1158 "TXQ:%u L:%u H:%u T:%u FW:%u DRV:%u U:%u\n", 1301 "DRV:%u U:%u\n",
1159 index, 1302 index,
1160 txinfo[index].len, 1303 txinfo[index].len,
1161 txinfo[index].head, 1304 txinfo[index].head,
@@ -1181,7 +1324,7 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
1181 struct mwl8k_tx_queue *txq = priv->txq + index; 1324 struct mwl8k_tx_queue *txq = priv->txq + index;
1182 int wake = 0; 1325 int wake = 0;
1183 1326
1184 while (txq->tx_stats.len > 0) { 1327 while (txq->stats.len > 0) {
1185 int tx; 1328 int tx;
1186 struct mwl8k_tx_desc *tx_desc; 1329 struct mwl8k_tx_desc *tx_desc;
1187 unsigned long addr; 1330 unsigned long addr;
@@ -1190,8 +1333,8 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
1190 struct ieee80211_tx_info *info; 1333 struct ieee80211_tx_info *info;
1191 u32 status; 1334 u32 status;
1192 1335
1193 tx = txq->tx_head; 1336 tx = txq->head;
1194 tx_desc = txq->tx_desc_area + tx; 1337 tx_desc = txq->txd + tx;
1195 1338
1196 status = le32_to_cpu(tx_desc->status); 1339 status = le32_to_cpu(tx_desc->status);
1197 1340
@@ -1202,15 +1345,15 @@ static void mwl8k_txq_reclaim(struct ieee80211_hw *hw, int index, int force)
1202 ~cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED); 1345 ~cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED);
1203 } 1346 }
1204 1347
1205 txq->tx_head = (tx + 1) % MWL8K_TX_DESCS; 1348 txq->head = (tx + 1) % MWL8K_TX_DESCS;
1206 BUG_ON(txq->tx_stats.len == 0); 1349 BUG_ON(txq->stats.len == 0);
1207 txq->tx_stats.len--; 1350 txq->stats.len--;
1208 priv->pending_tx_pkts--; 1351 priv->pending_tx_pkts--;
1209 1352
1210 addr = le32_to_cpu(tx_desc->pkt_phys_addr); 1353 addr = le32_to_cpu(tx_desc->pkt_phys_addr);
1211 size = le16_to_cpu(tx_desc->pkt_len); 1354 size = le16_to_cpu(tx_desc->pkt_len);
1212 skb = txq->tx_skb[tx]; 1355 skb = txq->skb[tx];
1213 txq->tx_skb[tx] = NULL; 1356 txq->skb[tx] = NULL;
1214 1357
1215 BUG_ON(skb == NULL); 1358 BUG_ON(skb == NULL);
1216 pci_unmap_single(priv->pdev, addr, size, PCI_DMA_TODEVICE); 1359 pci_unmap_single(priv->pdev, addr, size, PCI_DMA_TODEVICE);
@@ -1243,13 +1386,13 @@ static void mwl8k_txq_deinit(struct ieee80211_hw *hw, int index)
1243 1386
1244 mwl8k_txq_reclaim(hw, index, 1); 1387 mwl8k_txq_reclaim(hw, index, 1);
1245 1388
1246 kfree(txq->tx_skb); 1389 kfree(txq->skb);
1247 txq->tx_skb = NULL; 1390 txq->skb = NULL;
1248 1391
1249 pci_free_consistent(priv->pdev, 1392 pci_free_consistent(priv->pdev,
1250 MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc), 1393 MWL8K_TX_DESCS * sizeof(struct mwl8k_tx_desc),
1251 txq->tx_desc_area, txq->tx_desc_dma); 1394 txq->txd, txq->txd_dma);
1252 txq->tx_desc_area = NULL; 1395 txq->txd = NULL;
1253} 1396}
1254 1397
1255static int 1398static int
@@ -1317,7 +1460,7 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1317 1460
1318 if (pci_dma_mapping_error(priv->pdev, dma)) { 1461 if (pci_dma_mapping_error(priv->pdev, dma)) {
1319 printk(KERN_DEBUG "%s: failed to dma map skb, " 1462 printk(KERN_DEBUG "%s: failed to dma map skb, "
1320 "dropping TX frame.\n", priv->name); 1463 "dropping TX frame.\n", wiphy_name(hw->wiphy));
1321 dev_kfree_skb(skb); 1464 dev_kfree_skb(skb);
1322 return NETDEV_TX_OK; 1465 return NETDEV_TX_OK;
1323 } 1466 }
@@ -1326,10 +1469,10 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1326 1469
1327 txq = priv->txq + index; 1470 txq = priv->txq + index;
1328 1471
1329 BUG_ON(txq->tx_skb[txq->tx_tail] != NULL); 1472 BUG_ON(txq->skb[txq->tail] != NULL);
1330 txq->tx_skb[txq->tx_tail] = skb; 1473 txq->skb[txq->tail] = skb;
1331 1474
1332 tx = txq->tx_desc_area + txq->tx_tail; 1475 tx = txq->txd + txq->tail;
1333 tx->data_rate = txdatarate; 1476 tx->data_rate = txdatarate;
1334 tx->tx_priority = index; 1477 tx->tx_priority = index;
1335 tx->qos_control = cpu_to_le16(qos); 1478 tx->qos_control = cpu_to_le16(qos);
@@ -1340,15 +1483,15 @@ mwl8k_txq_xmit(struct ieee80211_hw *hw, int index, struct sk_buff *skb)
1340 wmb(); 1483 wmb();
1341 tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus); 1484 tx->status = cpu_to_le32(MWL8K_TXD_STATUS_FW_OWNED | txstatus);
1342 1485
1343 txq->tx_stats.count++; 1486 txq->stats.count++;
1344 txq->tx_stats.len++; 1487 txq->stats.len++;
1345 priv->pending_tx_pkts++; 1488 priv->pending_tx_pkts++;
1346 1489
1347 txq->tx_tail++; 1490 txq->tail++;
1348 if (txq->tx_tail == MWL8K_TX_DESCS) 1491 if (txq->tail == MWL8K_TX_DESCS)
1349 txq->tx_tail = 0; 1492 txq->tail = 0;
1350 1493
1351 if (txq->tx_head == txq->tx_tail) 1494 if (txq->head == txq->tail)
1352 ieee80211_stop_queue(hw, index); 1495 ieee80211_stop_queue(hw, index);
1353 1496
1354 mwl8k_tx_start(priv); 1497 mwl8k_tx_start(priv);
@@ -1431,7 +1574,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
1431 unsigned long timeout = 0; 1574 unsigned long timeout = 0;
1432 u8 buf[32]; 1575 u8 buf[32];
1433 1576
1434 cmd->result = 0xFFFF; 1577 cmd->result = 0xffff;
1435 dma_size = le16_to_cpu(cmd->length); 1578 dma_size = le16_to_cpu(cmd->length);
1436 dma_addr = pci_map_single(priv->pdev, cmd, dma_size, 1579 dma_addr = pci_map_single(priv->pdev, cmd, dma_size,
1437 PCI_DMA_BIDIRECTIONAL); 1580 PCI_DMA_BIDIRECTIONAL);
@@ -1464,7 +1607,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
1464 1607
1465 if (!timeout) { 1608 if (!timeout) {
1466 printk(KERN_ERR "%s: Command %s timeout after %u ms\n", 1609 printk(KERN_ERR "%s: Command %s timeout after %u ms\n",
1467 priv->name, 1610 wiphy_name(hw->wiphy),
1468 mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), 1611 mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
1469 MWL8K_CMD_TIMEOUT_MS); 1612 MWL8K_CMD_TIMEOUT_MS);
1470 rc = -ETIMEDOUT; 1613 rc = -ETIMEDOUT;
@@ -1472,7 +1615,7 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
1472 rc = cmd->result ? -EINVAL : 0; 1615 rc = cmd->result ? -EINVAL : 0;
1473 if (rc) 1616 if (rc)
1474 printk(KERN_ERR "%s: Command %s error 0x%x\n", 1617 printk(KERN_ERR "%s: Command %s error 0x%x\n",
1475 priv->name, 1618 wiphy_name(hw->wiphy),
1476 mwl8k_cmd_name(cmd->code, buf, sizeof(buf)), 1619 mwl8k_cmd_name(cmd->code, buf, sizeof(buf)),
1477 le16_to_cpu(cmd->result)); 1620 le16_to_cpu(cmd->result));
1478 } 1621 }
@@ -1481,9 +1624,9 @@ static int mwl8k_post_cmd(struct ieee80211_hw *hw, struct mwl8k_cmd_pkt *cmd)
1481} 1624}
1482 1625
1483/* 1626/*
1484 * GET_HW_SPEC. 1627 * CMD_GET_HW_SPEC (STA version).
1485 */ 1628 */
1486struct mwl8k_cmd_get_hw_spec { 1629struct mwl8k_cmd_get_hw_spec_sta {
1487 struct mwl8k_cmd_pkt header; 1630 struct mwl8k_cmd_pkt header;
1488 __u8 hw_rev; 1631 __u8 hw_rev;
1489 __u8 host_interface; 1632 __u8 host_interface;
@@ -1499,13 +1642,13 @@ struct mwl8k_cmd_get_hw_spec {
1499 __le32 tx_queue_ptrs[MWL8K_TX_QUEUES]; 1642 __le32 tx_queue_ptrs[MWL8K_TX_QUEUES];
1500 __le32 caps2; 1643 __le32 caps2;
1501 __le32 num_tx_desc_per_queue; 1644 __le32 num_tx_desc_per_queue;
1502 __le32 total_rx_desc; 1645 __le32 total_rxd;
1503} __attribute__((packed)); 1646} __attribute__((packed));
1504 1647
1505static int mwl8k_cmd_get_hw_spec(struct ieee80211_hw *hw) 1648static int mwl8k_cmd_get_hw_spec_sta(struct ieee80211_hw *hw)
1506{ 1649{
1507 struct mwl8k_priv *priv = hw->priv; 1650 struct mwl8k_priv *priv = hw->priv;
1508 struct mwl8k_cmd_get_hw_spec *cmd; 1651 struct mwl8k_cmd_get_hw_spec_sta *cmd;
1509 int rc; 1652 int rc;
1510 int i; 1653 int i;
1511 1654
@@ -1518,12 +1661,12 @@ static int mwl8k_cmd_get_hw_spec(struct ieee80211_hw *hw)
1518 1661
1519 memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr)); 1662 memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr));
1520 cmd->ps_cookie = cpu_to_le32(priv->cookie_dma); 1663 cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
1521 cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rx_desc_dma); 1664 cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma);
1522 cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES); 1665 cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
1523 for (i = 0; i < MWL8K_TX_QUEUES; i++) 1666 for (i = 0; i < MWL8K_TX_QUEUES; i++)
1524 cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].tx_desc_dma); 1667 cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma);
1525 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS); 1668 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
1526 cmd->total_rx_desc = cpu_to_le32(MWL8K_RX_DESCS); 1669 cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
1527 1670
1528 rc = mwl8k_post_cmd(hw, &cmd->header); 1671 rc = mwl8k_post_cmd(hw, &cmd->header);
1529 1672
@@ -1539,6 +1682,129 @@ static int mwl8k_cmd_get_hw_spec(struct ieee80211_hw *hw)
1539} 1682}
1540 1683
1541/* 1684/*
1685 * CMD_GET_HW_SPEC (AP version).
1686 */
1687struct mwl8k_cmd_get_hw_spec_ap {
1688 struct mwl8k_cmd_pkt header;
1689 __u8 hw_rev;
1690 __u8 host_interface;
1691 __le16 num_wcb;
1692 __le16 num_mcaddrs;
1693 __u8 perm_addr[ETH_ALEN];
1694 __le16 region_code;
1695 __le16 num_antenna;
1696 __le32 fw_rev;
1697 __le32 wcbbase0;
1698 __le32 rxwrptr;
1699 __le32 rxrdptr;
1700 __le32 ps_cookie;
1701 __le32 wcbbase1;
1702 __le32 wcbbase2;
1703 __le32 wcbbase3;
1704} __attribute__((packed));
1705
1706static int mwl8k_cmd_get_hw_spec_ap(struct ieee80211_hw *hw)
1707{
1708 struct mwl8k_priv *priv = hw->priv;
1709 struct mwl8k_cmd_get_hw_spec_ap *cmd;
1710 int rc;
1711
1712 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1713 if (cmd == NULL)
1714 return -ENOMEM;
1715
1716 cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_HW_SPEC);
1717 cmd->header.length = cpu_to_le16(sizeof(*cmd));
1718
1719 memset(cmd->perm_addr, 0xff, sizeof(cmd->perm_addr));
1720 cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
1721
1722 rc = mwl8k_post_cmd(hw, &cmd->header);
1723
1724 if (!rc) {
1725 int off;
1726
1727 SET_IEEE80211_PERM_ADDR(hw, cmd->perm_addr);
1728 priv->num_mcaddrs = le16_to_cpu(cmd->num_mcaddrs);
1729 priv->fw_rev = le32_to_cpu(cmd->fw_rev);
1730 priv->hw_rev = cmd->hw_rev;
1731
1732 off = le32_to_cpu(cmd->wcbbase0) & 0xffff;
1733 iowrite32(cpu_to_le32(priv->txq[0].txd_dma), priv->sram + off);
1734
1735 off = le32_to_cpu(cmd->rxwrptr) & 0xffff;
1736 iowrite32(cpu_to_le32(priv->rxq[0].rxd_dma), priv->sram + off);
1737
1738 off = le32_to_cpu(cmd->rxrdptr) & 0xffff;
1739 iowrite32(cpu_to_le32(priv->rxq[0].rxd_dma), priv->sram + off);
1740
1741 off = le32_to_cpu(cmd->wcbbase1) & 0xffff;
1742 iowrite32(cpu_to_le32(priv->txq[1].txd_dma), priv->sram + off);
1743
1744 off = le32_to_cpu(cmd->wcbbase2) & 0xffff;
1745 iowrite32(cpu_to_le32(priv->txq[2].txd_dma), priv->sram + off);
1746
1747 off = le32_to_cpu(cmd->wcbbase3) & 0xffff;
1748 iowrite32(cpu_to_le32(priv->txq[3].txd_dma), priv->sram + off);
1749 }
1750
1751 kfree(cmd);
1752 return rc;
1753}
1754
1755/*
1756 * CMD_SET_HW_SPEC.
1757 */
1758struct mwl8k_cmd_set_hw_spec {
1759 struct mwl8k_cmd_pkt header;
1760 __u8 hw_rev;
1761 __u8 host_interface;
1762 __le16 num_mcaddrs;
1763 __u8 perm_addr[ETH_ALEN];
1764 __le16 region_code;
1765 __le32 fw_rev;
1766 __le32 ps_cookie;
1767 __le32 caps;
1768 __le32 rx_queue_ptr;
1769 __le32 num_tx_queues;
1770 __le32 tx_queue_ptrs[MWL8K_TX_QUEUES];
1771 __le32 flags;
1772 __le32 num_tx_desc_per_queue;
1773 __le32 total_rxd;
1774} __attribute__((packed));
1775
1776#define MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT 0x00000080
1777
1778static int mwl8k_cmd_set_hw_spec(struct ieee80211_hw *hw)
1779{
1780 struct mwl8k_priv *priv = hw->priv;
1781 struct mwl8k_cmd_set_hw_spec *cmd;
1782 int rc;
1783 int i;
1784
1785 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
1786 if (cmd == NULL)
1787 return -ENOMEM;
1788
1789 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_HW_SPEC);
1790 cmd->header.length = cpu_to_le16(sizeof(*cmd));
1791
1792 cmd->ps_cookie = cpu_to_le32(priv->cookie_dma);
1793 cmd->rx_queue_ptr = cpu_to_le32(priv->rxq[0].rxd_dma);
1794 cmd->num_tx_queues = cpu_to_le32(MWL8K_TX_QUEUES);
1795 for (i = 0; i < MWL8K_TX_QUEUES; i++)
1796 cmd->tx_queue_ptrs[i] = cpu_to_le32(priv->txq[i].txd_dma);
1797 cmd->flags = cpu_to_le32(MWL8K_SET_HW_SPEC_FLAG_HOST_DECR_MGMT);
1798 cmd->num_tx_desc_per_queue = cpu_to_le32(MWL8K_TX_DESCS);
1799 cmd->total_rxd = cpu_to_le32(MWL8K_RX_DESCS);
1800
1801 rc = mwl8k_post_cmd(hw, &cmd->header);
1802 kfree(cmd);
1803
1804 return rc;
1805}
1806
1807/*
1542 * CMD_MAC_MULTICAST_ADR. 1808 * CMD_MAC_MULTICAST_ADR.
1543 */ 1809 */
1544struct mwl8k_cmd_mac_multicast_adr { 1810struct mwl8k_cmd_mac_multicast_adr {
@@ -1548,19 +1814,23 @@ struct mwl8k_cmd_mac_multicast_adr {
1548 __u8 addr[0][ETH_ALEN]; 1814 __u8 addr[0][ETH_ALEN];
1549}; 1815};
1550 1816
1551#define MWL8K_ENABLE_RX_MULTICAST 0x000F 1817#define MWL8K_ENABLE_RX_DIRECTED 0x0001
1818#define MWL8K_ENABLE_RX_MULTICAST 0x0002
1819#define MWL8K_ENABLE_RX_ALL_MULTICAST 0x0004
1820#define MWL8K_ENABLE_RX_BROADCAST 0x0008
1552 1821
1553static struct mwl8k_cmd_pkt * 1822static struct mwl8k_cmd_pkt *
1554__mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, 1823__mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw, int allmulti,
1555 int mc_count, struct dev_addr_list *mclist) 1824 int mc_count, struct dev_addr_list *mclist)
1556{ 1825{
1557 struct mwl8k_priv *priv = hw->priv; 1826 struct mwl8k_priv *priv = hw->priv;
1558 struct mwl8k_cmd_mac_multicast_adr *cmd; 1827 struct mwl8k_cmd_mac_multicast_adr *cmd;
1559 int size; 1828 int size;
1560 int i;
1561 1829
1562 if (mc_count > priv->num_mcaddrs) 1830 if (allmulti || mc_count > priv->num_mcaddrs) {
1563 mc_count = priv->num_mcaddrs; 1831 allmulti = 1;
1832 mc_count = 0;
1833 }
1564 1834
1565 size = sizeof(*cmd) + mc_count * ETH_ALEN; 1835 size = sizeof(*cmd) + mc_count * ETH_ALEN;
1566 1836
@@ -1570,16 +1840,24 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw,
1570 1840
1571 cmd->header.code = cpu_to_le16(MWL8K_CMD_MAC_MULTICAST_ADR); 1841 cmd->header.code = cpu_to_le16(MWL8K_CMD_MAC_MULTICAST_ADR);
1572 cmd->header.length = cpu_to_le16(size); 1842 cmd->header.length = cpu_to_le16(size);
1573 cmd->action = cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST); 1843 cmd->action = cpu_to_le16(MWL8K_ENABLE_RX_DIRECTED |
1574 cmd->numaddr = cpu_to_le16(mc_count); 1844 MWL8K_ENABLE_RX_BROADCAST);
1575 1845
1576 for (i = 0; i < mc_count && mclist; i++) { 1846 if (allmulti) {
1577 if (mclist->da_addrlen != ETH_ALEN) { 1847 cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_ALL_MULTICAST);
1578 kfree(cmd); 1848 } else if (mc_count) {
1579 return NULL; 1849 int i;
1850
1851 cmd->action |= cpu_to_le16(MWL8K_ENABLE_RX_MULTICAST);
1852 cmd->numaddr = cpu_to_le16(mc_count);
1853 for (i = 0; i < mc_count && mclist; i++) {
1854 if (mclist->da_addrlen != ETH_ALEN) {
1855 kfree(cmd);
1856 return NULL;
1857 }
1858 memcpy(cmd->addr[i], mclist->da_addr, ETH_ALEN);
1859 mclist = mclist->next;
1580 } 1860 }
1581 memcpy(cmd->addr[i], mclist->da_addr, ETH_ALEN);
1582 mclist = mclist->next;
1583 } 1861 }
1584 1862
1585 return &cmd->header; 1863 return &cmd->header;
@@ -1590,7 +1868,6 @@ __mwl8k_cmd_mac_multicast_adr(struct ieee80211_hw *hw,
1590 */ 1868 */
1591struct mwl8k_cmd_802_11_get_stat { 1869struct mwl8k_cmd_802_11_get_stat {
1592 struct mwl8k_cmd_pkt header; 1870 struct mwl8k_cmd_pkt header;
1593 __le16 action;
1594 __le32 stats[64]; 1871 __le32 stats[64];
1595} __attribute__((packed)); 1872} __attribute__((packed));
1596 1873
@@ -1611,7 +1888,6 @@ static int mwl8k_cmd_802_11_get_stat(struct ieee80211_hw *hw,
1611 1888
1612 cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_STAT); 1889 cmd->header.code = cpu_to_le16(MWL8K_CMD_GET_STAT);
1613 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 1890 cmd->header.length = cpu_to_le16(sizeof(*cmd));
1614 cmd->action = cpu_to_le16(MWL8K_CMD_GET);
1615 1891
1616 rc = mwl8k_post_cmd(hw, &cmd->header); 1892 rc = mwl8k_post_cmd(hw, &cmd->header);
1617 if (!rc) { 1893 if (!rc) {
@@ -1727,6 +2003,39 @@ static int mwl8k_cmd_802_11_rf_tx_power(struct ieee80211_hw *hw, int dBm)
1727} 2003}
1728 2004
1729/* 2005/*
2006 * CMD_RF_ANTENNA.
2007 */
2008struct mwl8k_cmd_rf_antenna {
2009 struct mwl8k_cmd_pkt header;
2010 __le16 antenna;
2011 __le16 mode;
2012} __attribute__((packed));
2013
2014#define MWL8K_RF_ANTENNA_RX 1
2015#define MWL8K_RF_ANTENNA_TX 2
2016
2017static int
2018mwl8k_cmd_rf_antenna(struct ieee80211_hw *hw, int antenna, int mask)
2019{
2020 struct mwl8k_cmd_rf_antenna *cmd;
2021 int rc;
2022
2023 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2024 if (cmd == NULL)
2025 return -ENOMEM;
2026
2027 cmd->header.code = cpu_to_le16(MWL8K_CMD_RF_ANTENNA);
2028 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2029 cmd->antenna = cpu_to_le16(antenna);
2030 cmd->mode = cpu_to_le16(mask);
2031
2032 rc = mwl8k_post_cmd(hw, &cmd->header);
2033 kfree(cmd);
2034
2035 return rc;
2036}
2037
2038/*
1730 * CMD_SET_PRE_SCAN. 2039 * CMD_SET_PRE_SCAN.
1731 */ 2040 */
1732struct mwl8k_cmd_set_pre_scan { 2041struct mwl8k_cmd_set_pre_scan {
@@ -1904,6 +2213,46 @@ static int mwl8k_enable_sniffer(struct ieee80211_hw *hw, bool enable)
1904} 2213}
1905 2214
1906/* 2215/*
2216 * CMD_SET_MAC_ADDR.
2217 */
2218struct mwl8k_cmd_set_mac_addr {
2219 struct mwl8k_cmd_pkt header;
2220 union {
2221 struct {
2222 __le16 mac_type;
2223 __u8 mac_addr[ETH_ALEN];
2224 } mbss;
2225 __u8 mac_addr[ETH_ALEN];
2226 };
2227} __attribute__((packed));
2228
2229static int mwl8k_set_mac_addr(struct ieee80211_hw *hw, u8 *mac)
2230{
2231 struct mwl8k_priv *priv = hw->priv;
2232 struct mwl8k_cmd_set_mac_addr *cmd;
2233 int rc;
2234
2235 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
2236 if (cmd == NULL)
2237 return -ENOMEM;
2238
2239 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_MAC_ADDR);
2240 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2241 if (priv->ap_fw) {
2242 cmd->mbss.mac_type = 0;
2243 memcpy(cmd->mbss.mac_addr, mac, ETH_ALEN);
2244 } else {
2245 memcpy(cmd->mac_addr, mac, ETH_ALEN);
2246 }
2247
2248 rc = mwl8k_post_cmd(hw, &cmd->header);
2249 kfree(cmd);
2250
2251 return rc;
2252}
2253
2254
2255/*
1907 * CMD_SET_RATEADAPT_MODE. 2256 * CMD_SET_RATEADAPT_MODE.
1908 */ 2257 */
1909struct mwl8k_cmd_set_rate_adapt_mode { 2258struct mwl8k_cmd_set_rate_adapt_mode {
@@ -2005,17 +2354,34 @@ struct mwl8k_cmd_set_edca_params {
2005 /* TX opportunity in units of 32 us */ 2354 /* TX opportunity in units of 32 us */
2006 __le16 txop; 2355 __le16 txop;
2007 2356
2008 /* Log exponent of max contention period: 0...15*/ 2357 union {
2009 __u8 log_cw_max; 2358 struct {
2359 /* Log exponent of max contention period: 0...15 */
2360 __le32 log_cw_max;
2361
2362 /* Log exponent of min contention period: 0...15 */
2363 __le32 log_cw_min;
2364
2365 /* Adaptive interframe spacing in units of 32us */
2366 __u8 aifs;
2367
2368 /* TX queue to configure */
2369 __u8 txq;
2370 } ap;
2371 struct {
2372 /* Log exponent of max contention period: 0...15 */
2373 __u8 log_cw_max;
2010 2374
2011 /* Log exponent of min contention period: 0...15 */ 2375 /* Log exponent of min contention period: 0...15 */
2012 __u8 log_cw_min; 2376 __u8 log_cw_min;
2013 2377
2014 /* Adaptive interframe spacing in units of 32us */ 2378 /* Adaptive interframe spacing in units of 32us */
2015 __u8 aifs; 2379 __u8 aifs;
2016 2380
2017 /* TX queue to configure */ 2381 /* TX queue to configure */
2018 __u8 txq; 2382 __u8 txq;
2383 } sta;
2384 };
2019} __attribute__((packed)); 2385} __attribute__((packed));
2020 2386
2021#define MWL8K_SET_EDCA_CW 0x01 2387#define MWL8K_SET_EDCA_CW 0x01
@@ -2031,6 +2397,7 @@ mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
2031 __u16 cw_min, __u16 cw_max, 2397 __u16 cw_min, __u16 cw_max,
2032 __u8 aifs, __u16 txop) 2398 __u8 aifs, __u16 txop)
2033{ 2399{
2400 struct mwl8k_priv *priv = hw->priv;
2034 struct mwl8k_cmd_set_edca_params *cmd; 2401 struct mwl8k_cmd_set_edca_params *cmd;
2035 int rc; 2402 int rc;
2036 2403
@@ -2038,14 +2405,27 @@ mwl8k_set_edca_params(struct ieee80211_hw *hw, __u8 qnum,
2038 if (cmd == NULL) 2405 if (cmd == NULL)
2039 return -ENOMEM; 2406 return -ENOMEM;
2040 2407
2408 /*
2409 * Queues 0 (BE) and 1 (BK) are swapped in hardware for
2410 * this call.
2411 */
2412 qnum ^= !(qnum >> 1);
2413
2041 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_EDCA_PARAMS); 2414 cmd->header.code = cpu_to_le16(MWL8K_CMD_SET_EDCA_PARAMS);
2042 cmd->header.length = cpu_to_le16(sizeof(*cmd)); 2415 cmd->header.length = cpu_to_le16(sizeof(*cmd));
2043 cmd->action = cpu_to_le16(MWL8K_SET_EDCA_ALL); 2416 cmd->action = cpu_to_le16(MWL8K_SET_EDCA_ALL);
2044 cmd->txop = cpu_to_le16(txop); 2417 cmd->txop = cpu_to_le16(txop);
2045 cmd->log_cw_max = (u8)ilog2(cw_max + 1); 2418 if (priv->ap_fw) {
2046 cmd->log_cw_min = (u8)ilog2(cw_min + 1); 2419 cmd->ap.log_cw_max = cpu_to_le32(ilog2(cw_max + 1));
2047 cmd->aifs = aifs; 2420 cmd->ap.log_cw_min = cpu_to_le32(ilog2(cw_min + 1));
2048 cmd->txq = qnum; 2421 cmd->ap.aifs = aifs;
2422 cmd->ap.txq = qnum;
2423 } else {
2424 cmd->sta.log_cw_max = (u8)ilog2(cw_max + 1);
2425 cmd->sta.log_cw_min = (u8)ilog2(cw_min + 1);
2426 cmd->sta.aifs = aifs;
2427 cmd->sta.txq = qnum;
2428 }
2049 2429
2050 rc = mwl8k_post_cmd(hw, &cmd->header); 2430 rc = mwl8k_post_cmd(hw, &cmd->header);
2051 kfree(cmd); 2431 kfree(cmd);
@@ -2093,8 +2473,8 @@ static int mwl8k_finalize_join(struct ieee80211_hw *hw, void *frame,
2093 /* XXX TBD Might just have to abort and return an error */ 2473 /* XXX TBD Might just have to abort and return an error */
2094 if (payload_len > MWL8K_FJ_BEACON_MAXLEN) 2474 if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
2095 printk(KERN_ERR "%s(): WARNING: Incomplete beacon " 2475 printk(KERN_ERR "%s(): WARNING: Incomplete beacon "
2096 "sent to firmware. Sz=%u MAX=%u\n", __func__, 2476 "sent to firmware. Sz=%u MAX=%u\n", __func__,
2097 payload_len, MWL8K_FJ_BEACON_MAXLEN); 2477 payload_len, MWL8K_FJ_BEACON_MAXLEN);
2098 2478
2099 if (payload_len > MWL8K_FJ_BEACON_MAXLEN) 2479 if (payload_len > MWL8K_FJ_BEACON_MAXLEN)
2100 payload_len = MWL8K_FJ_BEACON_MAXLEN; 2480 payload_len = MWL8K_FJ_BEACON_MAXLEN;
@@ -2341,9 +2721,10 @@ static int mwl8k_cmd_use_fixed_rate(struct ieee80211_hw *hw,
2341 cmd->rate_type = cpu_to_le32(rate_type); 2721 cmd->rate_type = cpu_to_le32(rate_type);
2342 2722
2343 if (rate_table != NULL) { 2723 if (rate_table != NULL) {
2344 /* Copy over each field manually so 2724 /*
2345 * that bitflipping can be done 2725 * Copy over each field manually so that endian
2346 */ 2726 * conversion can be done.
2727 */
2347 cmd->rate_table.allow_rate_drop = 2728 cmd->rate_table.allow_rate_drop =
2348 cpu_to_le32(rate_table->allow_rate_drop); 2729 cpu_to_le32(rate_table->allow_rate_drop);
2349 cmd->rate_table.num_rates = 2730 cmd->rate_table.num_rates =
@@ -2399,7 +2780,7 @@ static irqreturn_t mwl8k_interrupt(int irq, void *dev_id)
2399 2780
2400 if (status & MWL8K_A2H_INT_QUEUE_EMPTY) { 2781 if (status & MWL8K_A2H_INT_QUEUE_EMPTY) {
2401 if (!mutex_is_locked(&priv->fw_mutex) && 2782 if (!mutex_is_locked(&priv->fw_mutex) &&
2402 priv->radio_on && mwl8k_txq_busy(priv)) 2783 priv->radio_on && priv->pending_tx_pkts)
2403 mwl8k_tx_start(priv); 2784 mwl8k_tx_start(priv);
2404 } 2785 }
2405 2786
@@ -2418,7 +2799,7 @@ static int mwl8k_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
2418 2799
2419 if (priv->current_channel == NULL) { 2800 if (priv->current_channel == NULL) {
2420 printk(KERN_DEBUG "%s: dropped TX frame since radio " 2801 printk(KERN_DEBUG "%s: dropped TX frame since radio "
2421 "disabled\n", priv->name); 2802 "disabled\n", wiphy_name(hw->wiphy));
2422 dev_kfree_skb(skb); 2803 dev_kfree_skb(skb);
2423 return NETDEV_TX_OK; 2804 return NETDEV_TX_OK;
2424 } 2805 }
@@ -2437,7 +2818,7 @@ static int mwl8k_start(struct ieee80211_hw *hw)
2437 IRQF_SHARED, MWL8K_NAME, hw); 2818 IRQF_SHARED, MWL8K_NAME, hw);
2438 if (rc) { 2819 if (rc) {
2439 printk(KERN_ERR "%s: failed to register IRQ handler\n", 2820 printk(KERN_ERR "%s: failed to register IRQ handler\n",
2440 priv->name); 2821 wiphy_name(hw->wiphy));
2441 return -EIO; 2822 return -EIO;
2442 } 2823 }
2443 2824
@@ -2451,12 +2832,17 @@ static int mwl8k_start(struct ieee80211_hw *hw)
2451 if (!rc) { 2832 if (!rc) {
2452 rc = mwl8k_cmd_802_11_radio_enable(hw); 2833 rc = mwl8k_cmd_802_11_radio_enable(hw);
2453 2834
2454 if (!rc) 2835 if (!priv->ap_fw) {
2455 rc = mwl8k_cmd_set_pre_scan(hw); 2836 if (!rc)
2837 rc = mwl8k_enable_sniffer(hw, 0);
2456 2838
2457 if (!rc) 2839 if (!rc)
2458 rc = mwl8k_cmd_set_post_scan(hw, 2840 rc = mwl8k_cmd_set_pre_scan(hw);
2459 "\x00\x00\x00\x00\x00\x00"); 2841
2842 if (!rc)
2843 rc = mwl8k_cmd_set_post_scan(hw,
2844 "\x00\x00\x00\x00\x00\x00");
2845 }
2460 2846
2461 if (!rc) 2847 if (!rc)
2462 rc = mwl8k_cmd_setrateadaptmode(hw, 0); 2848 rc = mwl8k_cmd_setrateadaptmode(hw, 0);
@@ -2464,9 +2850,6 @@ static int mwl8k_start(struct ieee80211_hw *hw)
2464 if (!rc) 2850 if (!rc)
2465 rc = mwl8k_set_wmm(hw, 0); 2851 rc = mwl8k_set_wmm(hw, 0);
2466 2852
2467 if (!rc)
2468 rc = mwl8k_enable_sniffer(hw, 0);
2469
2470 mwl8k_fw_unlock(hw); 2853 mwl8k_fw_unlock(hw);
2471 } 2854 }
2472 2855
@@ -2500,9 +2883,6 @@ static void mwl8k_stop(struct ieee80211_hw *hw)
2500 /* Stop tx reclaim tasklet */ 2883 /* Stop tx reclaim tasklet */
2501 tasklet_disable(&priv->tx_reclaim_task); 2884 tasklet_disable(&priv->tx_reclaim_task);
2502 2885
2503 /* Stop config thread */
2504 flush_workqueue(priv->config_wq);
2505
2506 /* Return all skbs to mac80211 */ 2886 /* Return all skbs to mac80211 */
2507 for (i = 0; i < MWL8K_TX_QUEUES; i++) 2887 for (i = 0; i < MWL8K_TX_QUEUES; i++)
2508 mwl8k_txq_reclaim(hw, i, 1); 2888 mwl8k_txq_reclaim(hw, i, 1);
@@ -2526,11 +2906,24 @@ static int mwl8k_add_interface(struct ieee80211_hw *hw,
2526 if (conf->type != NL80211_IFTYPE_STATION) 2906 if (conf->type != NL80211_IFTYPE_STATION)
2527 return -EINVAL; 2907 return -EINVAL;
2528 2908
2909 /*
2910 * Reject interface creation if sniffer mode is active, as
2911 * STA operation is mutually exclusive with hardware sniffer
2912 * mode.
2913 */
2914 if (priv->sniffer_enabled) {
2915 printk(KERN_INFO "%s: unable to create STA "
2916 "interface due to sniffer mode being enabled\n",
2917 wiphy_name(hw->wiphy));
2918 return -EINVAL;
2919 }
2920
2529 /* Clean out driver private area */ 2921 /* Clean out driver private area */
2530 mwl8k_vif = MWL8K_VIF(conf->vif); 2922 mwl8k_vif = MWL8K_VIF(conf->vif);
2531 memset(mwl8k_vif, 0, sizeof(*mwl8k_vif)); 2923 memset(mwl8k_vif, 0, sizeof(*mwl8k_vif));
2532 2924
2533 /* Save the mac address */ 2925 /* Set and save the mac address */
2926 mwl8k_set_mac_addr(hw, conf->mac_addr);
2534 memcpy(mwl8k_vif->mac_addr, conf->mac_addr, ETH_ALEN); 2927 memcpy(mwl8k_vif->mac_addr, conf->mac_addr, ETH_ALEN);
2535 2928
2536 /* Back pointer to parent config block */ 2929 /* Back pointer to parent config block */
@@ -2558,6 +2951,8 @@ static void mwl8k_remove_interface(struct ieee80211_hw *hw,
2558 if (priv->vif == NULL) 2951 if (priv->vif == NULL)
2559 return; 2952 return;
2560 2953
2954 mwl8k_set_mac_addr(hw, "\x00\x00\x00\x00\x00\x00");
2955
2561 priv->vif = NULL; 2956 priv->vif = NULL;
2562} 2957}
2563 2958
@@ -2593,8 +2988,13 @@ static int mwl8k_config(struct ieee80211_hw *hw, u32 changed)
2593 if (rc) 2988 if (rc)
2594 goto out; 2989 goto out;
2595 2990
2596 if (mwl8k_cmd_mimo_config(hw, 0x7, 0x7)) 2991 if (priv->ap_fw) {
2597 rc = -EINVAL; 2992 rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_RX, 0x7);
2993 if (!rc)
2994 rc = mwl8k_cmd_rf_antenna(hw, MWL8K_RF_ANTENNA_TX, 0x7);
2995 } else {
2996 rc = mwl8k_cmd_mimo_config(hw, 0x7, 0x7);
2997 }
2598 2998
2599out: 2999out:
2600 mwl8k_fw_unlock(hw); 3000 mwl8k_fw_unlock(hw);
@@ -2681,32 +3081,108 @@ static u64 mwl8k_prepare_multicast(struct ieee80211_hw *hw,
2681{ 3081{
2682 struct mwl8k_cmd_pkt *cmd; 3082 struct mwl8k_cmd_pkt *cmd;
2683 3083
2684 cmd = __mwl8k_cmd_mac_multicast_adr(hw, mc_count, mclist); 3084 /*
3085 * Synthesize and return a command packet that programs the
3086 * hardware multicast address filter. At this point we don't
3087 * know whether FIF_ALLMULTI is being requested, but if it is,
3088 * we'll end up throwing this packet away and creating a new
3089 * one in mwl8k_configure_filter().
3090 */
3091 cmd = __mwl8k_cmd_mac_multicast_adr(hw, 0, mc_count, mclist);
2685 3092
2686 return (unsigned long)cmd; 3093 return (unsigned long)cmd;
2687} 3094}
2688 3095
3096static int
3097mwl8k_configure_filter_sniffer(struct ieee80211_hw *hw,
3098 unsigned int changed_flags,
3099 unsigned int *total_flags)
3100{
3101 struct mwl8k_priv *priv = hw->priv;
3102
3103 /*
3104 * Hardware sniffer mode is mutually exclusive with STA
3105 * operation, so refuse to enable sniffer mode if a STA
3106 * interface is active.
3107 */
3108 if (priv->vif != NULL) {
3109 if (net_ratelimit())
3110 printk(KERN_INFO "%s: not enabling sniffer "
3111 "mode because STA interface is active\n",
3112 wiphy_name(hw->wiphy));
3113 return 0;
3114 }
3115
3116 if (!priv->sniffer_enabled) {
3117 if (mwl8k_enable_sniffer(hw, 1))
3118 return 0;
3119 priv->sniffer_enabled = true;
3120 }
3121
3122 *total_flags &= FIF_PROMISC_IN_BSS | FIF_ALLMULTI |
3123 FIF_BCN_PRBRESP_PROMISC | FIF_CONTROL |
3124 FIF_OTHER_BSS;
3125
3126 return 1;
3127}
3128
2689static void mwl8k_configure_filter(struct ieee80211_hw *hw, 3129static void mwl8k_configure_filter(struct ieee80211_hw *hw,
2690 unsigned int changed_flags, 3130 unsigned int changed_flags,
2691 unsigned int *total_flags, 3131 unsigned int *total_flags,
2692 u64 multicast) 3132 u64 multicast)
2693{ 3133{
2694 struct mwl8k_priv *priv = hw->priv; 3134 struct mwl8k_priv *priv = hw->priv;
2695 struct mwl8k_cmd_pkt *multicast_adr_cmd; 3135 struct mwl8k_cmd_pkt *cmd = (void *)(unsigned long)multicast;
3136
3137 /*
3138 * AP firmware doesn't allow fine-grained control over
3139 * the receive filter.
3140 */
3141 if (priv->ap_fw) {
3142 *total_flags &= FIF_ALLMULTI | FIF_BCN_PRBRESP_PROMISC;
3143 kfree(cmd);
3144 return;
3145 }
3146
3147 /*
3148 * Enable hardware sniffer mode if FIF_CONTROL or
3149 * FIF_OTHER_BSS is requested.
3150 */
3151 if (*total_flags & (FIF_CONTROL | FIF_OTHER_BSS) &&
3152 mwl8k_configure_filter_sniffer(hw, changed_flags, total_flags)) {
3153 kfree(cmd);
3154 return;
3155 }
2696 3156
2697 /* Clear unsupported feature flags */ 3157 /* Clear unsupported feature flags */
2698 *total_flags &= FIF_BCN_PRBRESP_PROMISC; 3158 *total_flags &= FIF_ALLMULTI | FIF_BCN_PRBRESP_PROMISC;
2699 3159
2700 if (mwl8k_fw_lock(hw)) 3160 if (mwl8k_fw_lock(hw))
2701 return; 3161 return;
2702 3162
3163 if (priv->sniffer_enabled) {
3164 mwl8k_enable_sniffer(hw, 0);
3165 priv->sniffer_enabled = false;
3166 }
3167
2703 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) { 3168 if (changed_flags & FIF_BCN_PRBRESP_PROMISC) {
2704 if (*total_flags & FIF_BCN_PRBRESP_PROMISC) 3169 if (*total_flags & FIF_BCN_PRBRESP_PROMISC) {
3170 /*
3171 * Disable the BSS filter.
3172 */
2705 mwl8k_cmd_set_pre_scan(hw); 3173 mwl8k_cmd_set_pre_scan(hw);
2706 else { 3174 } else {
2707 u8 *bssid; 3175 u8 *bssid;
2708 3176
2709 bssid = "\x00\x00\x00\x00\x00\x00"; 3177 /*
3178 * Enable the BSS filter.
3179 *
3180 * If there is an active STA interface, use that
3181 * interface's BSSID, otherwise use a dummy one
3182 * (where the OUI part needs to be nonzero for
3183 * the BSSID to be accepted by POST_SCAN).
3184 */
3185 bssid = "\x01\x00\x00\x00\x00\x00";
2710 if (priv->vif != NULL) 3186 if (priv->vif != NULL)
2711 bssid = MWL8K_VIF(priv->vif)->bssid; 3187 bssid = MWL8K_VIF(priv->vif)->bssid;
2712 3188
@@ -2714,10 +3190,20 @@ static void mwl8k_configure_filter(struct ieee80211_hw *hw,
2714 } 3190 }
2715 } 3191 }
2716 3192
2717 multicast_adr_cmd = (void *)(unsigned long)multicast; 3193 /*
2718 if (multicast_adr_cmd != NULL) { 3194 * If FIF_ALLMULTI is being requested, throw away the command
2719 mwl8k_post_cmd(hw, multicast_adr_cmd); 3195 * packet that ->prepare_multicast() built and replace it with
2720 kfree(multicast_adr_cmd); 3196 * a command packet that enables reception of all multicast
3197 * packets.
3198 */
3199 if (*total_flags & FIF_ALLMULTI) {
3200 kfree(cmd);
3201 cmd = __mwl8k_cmd_mac_multicast_adr(hw, 1, 0, NULL);
3202 }
3203
3204 if (cmd != NULL) {
3205 mwl8k_post_cmd(hw, cmd);
3206 kfree(cmd);
2721 } 3207 }
2722 3208
2723 mwl8k_fw_unlock(hw); 3209 mwl8k_fw_unlock(hw);
@@ -2762,7 +3248,7 @@ static int mwl8k_get_tx_stats(struct ieee80211_hw *hw,
2762 spin_lock_bh(&priv->tx_lock); 3248 spin_lock_bh(&priv->tx_lock);
2763 for (index = 0; index < MWL8K_TX_QUEUES; index++) { 3249 for (index = 0; index < MWL8K_TX_QUEUES; index++) {
2764 txq = priv->txq + index; 3250 txq = priv->txq + index;
2765 memcpy(&stats[index], &txq->tx_stats, 3251 memcpy(&stats[index], &txq->stats,
2766 sizeof(struct ieee80211_tx_queue_stats)); 3252 sizeof(struct ieee80211_tx_queue_stats));
2767 } 3253 }
2768 spin_unlock_bh(&priv->tx_lock); 3254 spin_unlock_bh(&priv->tx_lock);
@@ -2802,7 +3288,7 @@ static void mwl8k_tx_reclaim_handler(unsigned long data)
2802 for (i = 0; i < MWL8K_TX_QUEUES; i++) 3288 for (i = 0; i < MWL8K_TX_QUEUES; i++)
2803 mwl8k_txq_reclaim(hw, i, 0); 3289 mwl8k_txq_reclaim(hw, i, 0);
2804 3290
2805 if (priv->tx_wait != NULL && mwl8k_txq_busy(priv) == 0) { 3291 if (priv->tx_wait != NULL && !priv->pending_tx_pkts) {
2806 complete(priv->tx_wait); 3292 complete(priv->tx_wait);
2807 priv->tx_wait = NULL; 3293 priv->tx_wait = NULL;
2808 } 3294 }
@@ -2822,6 +3308,36 @@ static void mwl8k_finalize_join_worker(struct work_struct *work)
2822 priv->beacon_skb = NULL; 3308 priv->beacon_skb = NULL;
2823} 3309}
2824 3310
3311enum {
3312 MWL8687 = 0,
3313 MWL8366,
3314};
3315
3316static struct mwl8k_device_info mwl8k_info_tbl[] __devinitdata = {
3317 {
3318 .part_name = "88w8687",
3319 .helper_image = "mwl8k/helper_8687.fw",
3320 .fw_image = "mwl8k/fmimage_8687.fw",
3321 .rxd_ops = &rxd_8687_ops,
3322 .modes = BIT(NL80211_IFTYPE_STATION),
3323 },
3324 {
3325 .part_name = "88w8366",
3326 .helper_image = "mwl8k/helper_8366.fw",
3327 .fw_image = "mwl8k/fmimage_8366.fw",
3328 .rxd_ops = &rxd_8366_ops,
3329 .modes = 0,
3330 },
3331};
3332
3333static DEFINE_PCI_DEVICE_TABLE(mwl8k_pci_id_table) = {
3334 { PCI_VDEVICE(MARVELL, 0x2a2b), .driver_data = MWL8687, },
3335 { PCI_VDEVICE(MARVELL, 0x2a30), .driver_data = MWL8687, },
3336 { PCI_VDEVICE(MARVELL, 0x2a40), .driver_data = MWL8366, },
3337 { },
3338};
3339MODULE_DEVICE_TABLE(pci, mwl8k_pci_id_table);
3340
2825static int __devinit mwl8k_probe(struct pci_dev *pdev, 3341static int __devinit mwl8k_probe(struct pci_dev *pdev,
2826 const struct pci_device_id *id) 3342 const struct pci_device_id *id)
2827{ 3343{
@@ -2862,17 +3378,34 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
2862 priv = hw->priv; 3378 priv = hw->priv;
2863 priv->hw = hw; 3379 priv->hw = hw;
2864 priv->pdev = pdev; 3380 priv->pdev = pdev;
3381 priv->device_info = &mwl8k_info_tbl[id->driver_data];
3382 priv->rxd_ops = priv->device_info->rxd_ops;
3383 priv->sniffer_enabled = false;
2865 priv->wmm_enabled = false; 3384 priv->wmm_enabled = false;
2866 priv->pending_tx_pkts = 0; 3385 priv->pending_tx_pkts = 0;
2867 strncpy(priv->name, MWL8K_NAME, sizeof(priv->name));
2868 3386
2869 SET_IEEE80211_DEV(hw, &pdev->dev); 3387 SET_IEEE80211_DEV(hw, &pdev->dev);
2870 pci_set_drvdata(pdev, hw); 3388 pci_set_drvdata(pdev, hw);
2871 3389
3390 priv->sram = pci_iomap(pdev, 0, 0x10000);
3391 if (priv->sram == NULL) {
3392 printk(KERN_ERR "%s: Cannot map device SRAM\n",
3393 wiphy_name(hw->wiphy));
3394 goto err_iounmap;
3395 }
3396
3397 /*
3398 * If BAR0 is a 32 bit BAR, the register BAR will be BAR1.
3399 * If BAR0 is a 64 bit BAR, the register BAR will be BAR2.
3400 */
2872 priv->regs = pci_iomap(pdev, 1, 0x10000); 3401 priv->regs = pci_iomap(pdev, 1, 0x10000);
2873 if (priv->regs == NULL) { 3402 if (priv->regs == NULL) {
2874 printk(KERN_ERR "%s: Cannot map device memory\n", priv->name); 3403 priv->regs = pci_iomap(pdev, 2, 0x10000);
2875 goto err_iounmap; 3404 if (priv->regs == NULL) {
3405 printk(KERN_ERR "%s: Cannot map device registers\n",
3406 wiphy_name(hw->wiphy));
3407 goto err_iounmap;
3408 }
2876 } 3409 }
2877 3410
2878 memcpy(priv->channels, mwl8k_channels, sizeof(mwl8k_channels)); 3411 memcpy(priv->channels, mwl8k_channels, sizeof(mwl8k_channels));
@@ -2897,7 +3430,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
2897 3430
2898 hw->queues = MWL8K_TX_QUEUES; 3431 hw->queues = MWL8K_TX_QUEUES;
2899 3432
2900 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 3433 hw->wiphy->interface_modes = priv->device_info->modes;
2901 3434
2902 /* Set rssi and noise values to dBm */ 3435 /* Set rssi and noise values to dBm */
2903 hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM; 3436 hw->flags |= IEEE80211_HW_SIGNAL_DBM | IEEE80211_HW_NOISE_DBM;
@@ -2916,11 +3449,6 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
2916 mwl8k_tx_reclaim_handler, (unsigned long)hw); 3449 mwl8k_tx_reclaim_handler, (unsigned long)hw);
2917 tasklet_disable(&priv->tx_reclaim_task); 3450 tasklet_disable(&priv->tx_reclaim_task);
2918 3451
2919 /* Config workthread */
2920 priv->config_wq = create_singlethread_workqueue("mwl8k_config");
2921 if (priv->config_wq == NULL)
2922 goto err_iounmap;
2923
2924 /* Power management cookie */ 3452 /* Power management cookie */
2925 priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma); 3453 priv->cookie = pci_alloc_consistent(priv->pdev, 4, &priv->cookie_dma);
2926 if (priv->cookie == NULL) 3454 if (priv->cookie == NULL)
@@ -2934,11 +3462,12 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
2934 mutex_init(&priv->fw_mutex); 3462 mutex_init(&priv->fw_mutex);
2935 priv->fw_mutex_owner = NULL; 3463 priv->fw_mutex_owner = NULL;
2936 priv->fw_mutex_depth = 0; 3464 priv->fw_mutex_depth = 0;
2937 priv->tx_wait = NULL;
2938 priv->hostcmd_wait = NULL; 3465 priv->hostcmd_wait = NULL;
2939 3466
2940 spin_lock_init(&priv->tx_lock); 3467 spin_lock_init(&priv->tx_lock);
2941 3468
3469 priv->tx_wait = NULL;
3470
2942 for (i = 0; i < MWL8K_TX_QUEUES; i++) { 3471 for (i = 0; i < MWL8K_TX_QUEUES; i++) {
2943 rc = mwl8k_txq_init(hw, i); 3472 rc = mwl8k_txq_init(hw, i);
2944 if (rc) 3473 if (rc)
@@ -2954,7 +3483,7 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
2954 IRQF_SHARED, MWL8K_NAME, hw); 3483 IRQF_SHARED, MWL8K_NAME, hw);
2955 if (rc) { 3484 if (rc) {
2956 printk(KERN_ERR "%s: failed to register IRQ handler\n", 3485 printk(KERN_ERR "%s: failed to register IRQ handler\n",
2957 priv->name); 3486 wiphy_name(hw->wiphy));
2958 goto err_free_queues; 3487 goto err_free_queues;
2959 } 3488 }
2960 3489
@@ -2962,16 +3491,18 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
2962 mwl8k_hw_reset(priv); 3491 mwl8k_hw_reset(priv);
2963 3492
2964 /* Ask userland hotplug daemon for the device firmware */ 3493 /* Ask userland hotplug daemon for the device firmware */
2965 rc = mwl8k_request_firmware(priv, (u32)id->driver_data); 3494 rc = mwl8k_request_firmware(priv);
2966 if (rc) { 3495 if (rc) {
2967 printk(KERN_ERR "%s: Firmware files not found\n", priv->name); 3496 printk(KERN_ERR "%s: Firmware files not found\n",
3497 wiphy_name(hw->wiphy));
2968 goto err_free_irq; 3498 goto err_free_irq;
2969 } 3499 }
2970 3500
2971 /* Load firmware into hardware */ 3501 /* Load firmware into hardware */
2972 rc = mwl8k_load_firmware(priv); 3502 rc = mwl8k_load_firmware(hw);
2973 if (rc) { 3503 if (rc) {
2974 printk(KERN_ERR "%s: Cannot start firmware\n", priv->name); 3504 printk(KERN_ERR "%s: Cannot start firmware\n",
3505 wiphy_name(hw->wiphy));
2975 goto err_stop_firmware; 3506 goto err_stop_firmware;
2976 } 3507 }
2977 3508
@@ -2986,16 +3517,31 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
2986 iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK); 3517 iowrite32(MWL8K_A2H_EVENTS, priv->regs + MWL8K_HIU_A2H_INTERRUPT_MASK);
2987 3518
2988 /* Get config data, mac addrs etc */ 3519 /* Get config data, mac addrs etc */
2989 rc = mwl8k_cmd_get_hw_spec(hw); 3520 if (priv->ap_fw) {
3521 rc = mwl8k_cmd_get_hw_spec_ap(hw);
3522 if (!rc)
3523 rc = mwl8k_cmd_set_hw_spec(hw);
3524 } else {
3525 rc = mwl8k_cmd_get_hw_spec_sta(hw);
3526 }
2990 if (rc) { 3527 if (rc) {
2991 printk(KERN_ERR "%s: Cannot initialise firmware\n", priv->name); 3528 printk(KERN_ERR "%s: Cannot initialise firmware\n",
3529 wiphy_name(hw->wiphy));
2992 goto err_stop_firmware; 3530 goto err_stop_firmware;
2993 } 3531 }
2994 3532
2995 /* Turn radio off */ 3533 /* Turn radio off */
2996 rc = mwl8k_cmd_802_11_radio_disable(hw); 3534 rc = mwl8k_cmd_802_11_radio_disable(hw);
2997 if (rc) { 3535 if (rc) {
2998 printk(KERN_ERR "%s: Cannot disable\n", priv->name); 3536 printk(KERN_ERR "%s: Cannot disable\n", wiphy_name(hw->wiphy));
3537 goto err_stop_firmware;
3538 }
3539
3540 /* Clear MAC address */
3541 rc = mwl8k_set_mac_addr(hw, "\x00\x00\x00\x00\x00\x00");
3542 if (rc) {
3543 printk(KERN_ERR "%s: Cannot clear MAC address\n",
3544 wiphy_name(hw->wiphy));
2999 goto err_stop_firmware; 3545 goto err_stop_firmware;
3000 } 3546 }
3001 3547
@@ -3005,13 +3551,15 @@ static int __devinit mwl8k_probe(struct pci_dev *pdev,
3005 3551
3006 rc = ieee80211_register_hw(hw); 3552 rc = ieee80211_register_hw(hw);
3007 if (rc) { 3553 if (rc) {
3008 printk(KERN_ERR "%s: Cannot register device\n", priv->name); 3554 printk(KERN_ERR "%s: Cannot register device\n",
3555 wiphy_name(hw->wiphy));
3009 goto err_stop_firmware; 3556 goto err_stop_firmware;
3010 } 3557 }
3011 3558
3012 printk(KERN_INFO "%s: 88w%u v%d, %pM, firmware version %u.%u.%u.%u\n", 3559 printk(KERN_INFO "%s: %s v%d, %pM, %s firmware %u.%u.%u.%u\n",
3013 wiphy_name(hw->wiphy), priv->part_num, priv->hw_rev, 3560 wiphy_name(hw->wiphy), priv->device_info->part_name,
3014 hw->wiphy->perm_addr, 3561 priv->hw_rev, hw->wiphy->perm_addr,
3562 priv->ap_fw ? "AP" : "STA",
3015 (priv->fw_rev >> 24) & 0xff, (priv->fw_rev >> 16) & 0xff, 3563 (priv->fw_rev >> 24) & 0xff, (priv->fw_rev >> 16) & 0xff,
3016 (priv->fw_rev >> 8) & 0xff, priv->fw_rev & 0xff); 3564 (priv->fw_rev >> 8) & 0xff, priv->fw_rev & 0xff);
3017 3565
@@ -3038,8 +3586,8 @@ err_iounmap:
3038 if (priv->regs != NULL) 3586 if (priv->regs != NULL)
3039 pci_iounmap(pdev, priv->regs); 3587 pci_iounmap(pdev, priv->regs);
3040 3588
3041 if (priv->config_wq != NULL) 3589 if (priv->sram != NULL)
3042 destroy_workqueue(priv->config_wq); 3590 pci_iounmap(pdev, priv->sram);
3043 3591
3044 pci_set_drvdata(pdev, NULL); 3592 pci_set_drvdata(pdev, NULL);
3045 ieee80211_free_hw(hw); 3593 ieee80211_free_hw(hw);
@@ -3073,9 +3621,6 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
3073 /* Remove tx reclaim tasklet */ 3621 /* Remove tx reclaim tasklet */
3074 tasklet_kill(&priv->tx_reclaim_task); 3622 tasklet_kill(&priv->tx_reclaim_task);
3075 3623
3076 /* Stop config thread */
3077 destroy_workqueue(priv->config_wq);
3078
3079 /* Stop hardware */ 3624 /* Stop hardware */
3080 mwl8k_hw_reset(priv); 3625 mwl8k_hw_reset(priv);
3081 3626
@@ -3088,10 +3633,10 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
3088 3633
3089 mwl8k_rxq_deinit(hw, 0); 3634 mwl8k_rxq_deinit(hw, 0);
3090 3635
3091 pci_free_consistent(priv->pdev, 4, 3636 pci_free_consistent(priv->pdev, 4, priv->cookie, priv->cookie_dma);
3092 priv->cookie, priv->cookie_dma);
3093 3637
3094 pci_iounmap(pdev, priv->regs); 3638 pci_iounmap(pdev, priv->regs);
3639 pci_iounmap(pdev, priv->sram);
3095 pci_set_drvdata(pdev, NULL); 3640 pci_set_drvdata(pdev, NULL);
3096 ieee80211_free_hw(hw); 3641 ieee80211_free_hw(hw);
3097 pci_release_regions(pdev); 3642 pci_release_regions(pdev);
@@ -3100,7 +3645,7 @@ static void __devexit mwl8k_remove(struct pci_dev *pdev)
3100 3645
3101static struct pci_driver mwl8k_driver = { 3646static struct pci_driver mwl8k_driver = {
3102 .name = MWL8K_NAME, 3647 .name = MWL8K_NAME,
3103 .id_table = mwl8k_table, 3648 .id_table = mwl8k_pci_id_table,
3104 .probe = mwl8k_probe, 3649 .probe = mwl8k_probe,
3105 .remove = __devexit_p(mwl8k_remove), 3650 .remove = __devexit_p(mwl8k_remove),
3106 .shutdown = __devexit_p(mwl8k_shutdown), 3651 .shutdown = __devexit_p(mwl8k_shutdown),
@@ -3118,3 +3663,8 @@ static void __exit mwl8k_exit(void)
3118 3663
3119module_init(mwl8k_init); 3664module_init(mwl8k_init);
3120module_exit(mwl8k_exit); 3665module_exit(mwl8k_exit);
3666
3667MODULE_DESCRIPTION(MWL8K_DESC);
3668MODULE_VERSION(MWL8K_VERSION);
3669MODULE_AUTHOR("Lennert Buytenhek <buytenh@marvell.com>");
3670MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/orinoco/Kconfig b/drivers/net/wireless/orinoco/Kconfig
index 83b635fd7784..e2a2c18920aa 100644
--- a/drivers/net/wireless/orinoco/Kconfig
+++ b/drivers/net/wireless/orinoco/Kconfig
@@ -1,8 +1,10 @@
1config HERMES 1config HERMES
2 tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)" 2 tristate "Hermes chipset 802.11b support (Orinoco/Prism2/Symbol)"
3 depends on (PPC_PMAC || PCI || PCMCIA) && WLAN_80211 3 depends on (PPC_PMAC || PCI || PCMCIA)
4 depends on CFG80211 4 depends on CFG80211 && CFG80211_WEXT
5 select WIRELESS_EXT 5 select WIRELESS_EXT
6 select WEXT_SPY
7 select WEXT_PRIV
6 select FW_LOADER 8 select FW_LOADER
7 select CRYPTO 9 select CRYPTO
8 select CRYPTO_MICHAEL_MIC 10 select CRYPTO_MICHAEL_MIC
diff --git a/drivers/net/wireless/orinoco/hw.c b/drivers/net/wireless/orinoco/hw.c
index 359652d35e63..404830f47ab2 100644
--- a/drivers/net/wireless/orinoco/hw.c
+++ b/drivers/net/wireless/orinoco/hw.c
@@ -60,8 +60,15 @@ static inline fwtype_t determine_firmware_type(struct comp_id *nic_id)
60/* Set priv->firmware type, determine firmware properties 60/* Set priv->firmware type, determine firmware properties
61 * This function can be called before we have registerred with netdev, 61 * This function can be called before we have registerred with netdev,
62 * so all errors go out with dev_* rather than printk 62 * so all errors go out with dev_* rather than printk
63 *
64 * If non-NULL stores a firmware description in fw_name.
65 * If non-NULL stores a HW version in hw_ver
66 *
67 * These are output via generic cfg80211 ethtool support.
63 */ 68 */
64int determine_fw_capabilities(struct orinoco_private *priv) 69int determine_fw_capabilities(struct orinoco_private *priv,
70 char *fw_name, size_t fw_name_len,
71 u32 *hw_ver)
65{ 72{
66 struct device *dev = priv->dev; 73 struct device *dev = priv->dev;
67 hermes_t *hw = &priv->hw; 74 hermes_t *hw = &priv->hw;
@@ -85,6 +92,12 @@ int determine_fw_capabilities(struct orinoco_private *priv)
85 dev_info(dev, "Hardware identity %04x:%04x:%04x:%04x\n", 92 dev_info(dev, "Hardware identity %04x:%04x:%04x:%04x\n",
86 nic_id.id, nic_id.variant, nic_id.major, nic_id.minor); 93 nic_id.id, nic_id.variant, nic_id.major, nic_id.minor);
87 94
95 if (hw_ver)
96 *hw_ver = (((nic_id.id & 0xff) << 24) |
97 ((nic_id.variant & 0xff) << 16) |
98 ((nic_id.major & 0xff) << 8) |
99 (nic_id.minor & 0xff));
100
88 priv->firmware_type = determine_firmware_type(&nic_id); 101 priv->firmware_type = determine_firmware_type(&nic_id);
89 102
90 /* Get the firmware version */ 103 /* Get the firmware version */
@@ -135,8 +148,9 @@ int determine_fw_capabilities(struct orinoco_private *priv)
135 case FIRMWARE_TYPE_AGERE: 148 case FIRMWARE_TYPE_AGERE:
136 /* Lucent Wavelan IEEE, Lucent Orinoco, Cabletron RoamAbout, 149 /* Lucent Wavelan IEEE, Lucent Orinoco, Cabletron RoamAbout,
137 ELSA, Melco, HP, IBM, Dell 1150, Compaq 110/210 */ 150 ELSA, Melco, HP, IBM, Dell 1150, Compaq 110/210 */
138 snprintf(priv->fw_name, sizeof(priv->fw_name) - 1, 151 if (fw_name)
139 "Lucent/Agere %d.%02d", sta_id.major, sta_id.minor); 152 snprintf(fw_name, fw_name_len, "Lucent/Agere %d.%02d",
153 sta_id.major, sta_id.minor);
140 154
141 firmver = ((unsigned long)sta_id.major << 16) | sta_id.minor; 155 firmver = ((unsigned long)sta_id.major << 16) | sta_id.minor;
142 156
@@ -185,8 +199,8 @@ int determine_fw_capabilities(struct orinoco_private *priv)
185 tmp[SYMBOL_MAX_VER_LEN] = '\0'; 199 tmp[SYMBOL_MAX_VER_LEN] = '\0';
186 } 200 }
187 201
188 snprintf(priv->fw_name, sizeof(priv->fw_name) - 1, 202 if (fw_name)
189 "Symbol %s", tmp); 203 snprintf(fw_name, fw_name_len, "Symbol %s", tmp);
190 204
191 priv->has_ibss = (firmver >= 0x20000); 205 priv->has_ibss = (firmver >= 0x20000);
192 priv->has_wep = (firmver >= 0x15012); 206 priv->has_wep = (firmver >= 0x15012);
@@ -224,9 +238,9 @@ int determine_fw_capabilities(struct orinoco_private *priv)
224 * different and less well tested */ 238 * different and less well tested */
225 /* D-Link MAC : 00:40:05:* */ 239 /* D-Link MAC : 00:40:05:* */
226 /* Addtron MAC : 00:90:D1:* */ 240 /* Addtron MAC : 00:90:D1:* */
227 snprintf(priv->fw_name, sizeof(priv->fw_name) - 1, 241 if (fw_name)
228 "Intersil %d.%d.%d", sta_id.major, sta_id.minor, 242 snprintf(fw_name, fw_name_len, "Intersil %d.%d.%d",
229 sta_id.variant); 243 sta_id.major, sta_id.minor, sta_id.variant);
230 244
231 firmver = ((unsigned long)sta_id.major << 16) | 245 firmver = ((unsigned long)sta_id.major << 16) |
232 ((unsigned long)sta_id.minor << 8) | sta_id.variant; 246 ((unsigned long)sta_id.minor << 8) | sta_id.variant;
@@ -245,7 +259,8 @@ int determine_fw_capabilities(struct orinoco_private *priv)
245 } 259 }
246 break; 260 break;
247 } 261 }
248 dev_info(dev, "Firmware determined as %s\n", priv->fw_name); 262 if (fw_name)
263 dev_info(dev, "Firmware determined as %s\n", fw_name);
249 264
250 return 0; 265 return 0;
251} 266}
diff --git a/drivers/net/wireless/orinoco/hw.h b/drivers/net/wireless/orinoco/hw.h
index 8df6e8752be6..e2f7fdc4d45a 100644
--- a/drivers/net/wireless/orinoco/hw.h
+++ b/drivers/net/wireless/orinoco/hw.h
@@ -24,7 +24,8 @@
24struct orinoco_private; 24struct orinoco_private;
25struct dev_addr_list; 25struct dev_addr_list;
26 26
27int determine_fw_capabilities(struct orinoco_private *priv); 27int determine_fw_capabilities(struct orinoco_private *priv, char *fw_name,
28 size_t fw_name_len, u32 *hw_ver);
28int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr); 29int orinoco_hw_read_card_settings(struct orinoco_private *priv, u8 *dev_addr);
29int orinoco_hw_allocate_fid(struct orinoco_private *priv); 30int orinoco_hw_allocate_fid(struct orinoco_private *priv);
30int orinoco_get_bitratemode(int bitrate, int automatic); 31int orinoco_get_bitratemode(int bitrate, int automatic);
diff --git a/drivers/net/wireless/orinoco/main.c b/drivers/net/wireless/orinoco/main.c
index 7a32bcb0c037..753a1804eee7 100644
--- a/drivers/net/wireless/orinoco/main.c
+++ b/drivers/net/wireless/orinoco/main.c
@@ -83,7 +83,6 @@
83#include <linux/device.h> 83#include <linux/device.h>
84#include <linux/netdevice.h> 84#include <linux/netdevice.h>
85#include <linux/etherdevice.h> 85#include <linux/etherdevice.h>
86#include <linux/ethtool.h>
87#include <linux/suspend.h> 86#include <linux/suspend.h>
88#include <linux/if_arp.h> 87#include <linux/if_arp.h>
89#include <linux/wireless.h> 88#include <linux/wireless.h>
@@ -162,8 +161,6 @@ static const u8 encaps_hdr[] = {0xaa, 0xaa, 0x03, 0x00, 0x00, 0x00};
162 | HERMES_EV_WTERR | HERMES_EV_INFO \ 161 | HERMES_EV_WTERR | HERMES_EV_INFO \
163 | HERMES_EV_INFDROP) 162 | HERMES_EV_INFDROP)
164 163
165static const struct ethtool_ops orinoco_ethtool_ops;
166
167/********************************************************************/ 164/********************************************************************/
168/* Data types */ 165/* Data types */
169/********************************************************************/ 166/********************************************************************/
@@ -1994,7 +1991,9 @@ int orinoco_init(struct orinoco_private *priv)
1994 goto out; 1991 goto out;
1995 } 1992 }
1996 1993
1997 err = determine_fw_capabilities(priv); 1994 err = determine_fw_capabilities(priv, wiphy->fw_version,
1995 sizeof(wiphy->fw_version),
1996 &wiphy->hw_version);
1998 if (err != 0) { 1997 if (err != 0) {
1999 dev_err(dev, "Incompatible firmware, aborting\n"); 1998 dev_err(dev, "Incompatible firmware, aborting\n");
2000 goto out; 1999 goto out;
@@ -2010,7 +2009,9 @@ int orinoco_init(struct orinoco_private *priv)
2010 priv->do_fw_download = 0; 2009 priv->do_fw_download = 0;
2011 2010
2012 /* Check firmware version again */ 2011 /* Check firmware version again */
2013 err = determine_fw_capabilities(priv); 2012 err = determine_fw_capabilities(priv, wiphy->fw_version,
2013 sizeof(wiphy->fw_version),
2014 &wiphy->hw_version);
2014 if (err != 0) { 2015 if (err != 0) {
2015 dev_err(dev, "Incompatible firmware, aborting\n"); 2016 dev_err(dev, "Incompatible firmware, aborting\n");
2016 goto out; 2017 goto out;
@@ -2212,7 +2213,6 @@ int orinoco_if_add(struct orinoco_private *priv,
2212 dev->ieee80211_ptr = wdev; 2213 dev->ieee80211_ptr = wdev;
2213 dev->netdev_ops = &orinoco_netdev_ops; 2214 dev->netdev_ops = &orinoco_netdev_ops;
2214 dev->watchdog_timeo = HZ; /* 1 second timeout */ 2215 dev->watchdog_timeo = HZ; /* 1 second timeout */
2215 dev->ethtool_ops = &orinoco_ethtool_ops;
2216 dev->wireless_handlers = &orinoco_handler_def; 2216 dev->wireless_handlers = &orinoco_handler_def;
2217#ifdef WIRELESS_SPY 2217#ifdef WIRELESS_SPY
2218 dev->wireless_data = &priv->wireless_data; 2218 dev->wireless_data = &priv->wireless_data;
@@ -2225,6 +2225,7 @@ int orinoco_if_add(struct orinoco_private *priv,
2225 netif_carrier_off(dev); 2225 netif_carrier_off(dev);
2226 2226
2227 memcpy(dev->dev_addr, wiphy->perm_addr, ETH_ALEN); 2227 memcpy(dev->dev_addr, wiphy->perm_addr, ETH_ALEN);
2228 memcpy(dev->perm_addr, wiphy->perm_addr, ETH_ALEN);
2228 2229
2229 dev->base_addr = base_addr; 2230 dev->base_addr = base_addr;
2230 dev->irq = irq; 2231 dev->irq = irq;
@@ -2348,27 +2349,6 @@ void orinoco_down(struct orinoco_private *priv)
2348} 2349}
2349EXPORT_SYMBOL(orinoco_down); 2350EXPORT_SYMBOL(orinoco_down);
2350 2351
2351static void orinoco_get_drvinfo(struct net_device *dev,
2352 struct ethtool_drvinfo *info)
2353{
2354 struct orinoco_private *priv = ndev_priv(dev);
2355
2356 strncpy(info->driver, DRIVER_NAME, sizeof(info->driver) - 1);
2357 strncpy(info->version, DRIVER_VERSION, sizeof(info->version) - 1);
2358 strncpy(info->fw_version, priv->fw_name, sizeof(info->fw_version) - 1);
2359 if (dev->dev.parent)
2360 strncpy(info->bus_info, dev_name(dev->dev.parent),
2361 sizeof(info->bus_info) - 1);
2362 else
2363 snprintf(info->bus_info, sizeof(info->bus_info) - 1,
2364 "PCMCIA %p", priv->hw.iobase);
2365}
2366
2367static const struct ethtool_ops orinoco_ethtool_ops = {
2368 .get_drvinfo = orinoco_get_drvinfo,
2369 .get_link = ethtool_op_get_link,
2370};
2371
2372/********************************************************************/ 2352/********************************************************************/
2373/* Module initialization */ 2353/* Module initialization */
2374/********************************************************************/ 2354/********************************************************************/
diff --git a/drivers/net/wireless/orinoco/orinoco.h b/drivers/net/wireless/orinoco/orinoco.h
index 9ac6f1dda4b0..665ef56f8382 100644
--- a/drivers/net/wireless/orinoco/orinoco.h
+++ b/drivers/net/wireless/orinoco/orinoco.h
@@ -93,7 +93,6 @@ struct orinoco_private {
93 93
94 /* Capabilities of the hardware/firmware */ 94 /* Capabilities of the hardware/firmware */
95 fwtype_t firmware_type; 95 fwtype_t firmware_type;
96 char fw_name[32];
97 int ibss_port; 96 int ibss_port;
98 int nicbuf_size; 97 int nicbuf_size;
99 u16 channel_mask; 98 u16 channel_mask;
diff --git a/drivers/net/wireless/p54/Kconfig b/drivers/net/wireless/p54/Kconfig
index b45d6a4ed1e8..b0342a520bf1 100644
--- a/drivers/net/wireless/p54/Kconfig
+++ b/drivers/net/wireless/p54/Kconfig
@@ -1,6 +1,6 @@
1config P54_COMMON 1config P54_COMMON
2 tristate "Softmac Prism54 support" 2 tristate "Softmac Prism54 support"
3 depends on MAC80211 && WLAN_80211 && EXPERIMENTAL 3 depends on MAC80211 && EXPERIMENTAL
4 select FW_LOADER 4 select FW_LOADER
5 ---help--- 5 ---help---
6 This is common code for isl38xx/stlc45xx based modules. 6 This is common code for isl38xx/stlc45xx based modules.
diff --git a/drivers/net/wireless/p54/eeprom.c b/drivers/net/wireless/p54/eeprom.c
index 0efe67deedee..8e3818f6832e 100644
--- a/drivers/net/wireless/p54/eeprom.c
+++ b/drivers/net/wireless/p54/eeprom.c
@@ -126,7 +126,7 @@ static int p54_generate_band(struct ieee80211_hw *dev,
126 int ret = -ENOMEM; 126 int ret = -ENOMEM;
127 127
128 if ((!list->entries) || (!list->band_channel_num[band])) 128 if ((!list->entries) || (!list->band_channel_num[band]))
129 return 0; 129 return -EINVAL;
130 130
131 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL); 131 tmp = kzalloc(sizeof(*tmp), GFP_KERNEL);
132 if (!tmp) 132 if (!tmp)
@@ -158,6 +158,7 @@ static int p54_generate_band(struct ieee80211_hw *dev,
158 (list->channels[i].data & CHAN_HAS_CURVE ? "" : 158 (list->channels[i].data & CHAN_HAS_CURVE ? "" :
159 " [curve data]"), 159 " [curve data]"),
160 list->channels[i].index, list->channels[i].freq); 160 list->channels[i].index, list->channels[i].freq);
161 continue;
161 } 162 }
162 163
163 tmp->channels[j].band = list->channels[i].band; 164 tmp->channels[j].band = list->channels[i].band;
@@ -165,7 +166,16 @@ static int p54_generate_band(struct ieee80211_hw *dev,
165 j++; 166 j++;
166 } 167 }
167 168
168 tmp->n_channels = list->band_channel_num[band]; 169 if (j == 0) {
170 printk(KERN_ERR "%s: Disabling totally damaged %s band.\n",
171 wiphy_name(dev->wiphy), (band == IEEE80211_BAND_2GHZ) ?
172 "2 GHz" : "5 GHz");
173
174 ret = -ENODATA;
175 goto err_out;
176 }
177
178 tmp->n_channels = j;
169 old = priv->band_table[band]; 179 old = priv->band_table[band];
170 priv->band_table[band] = tmp; 180 priv->band_table[band] = tmp;
171 if (old) { 181 if (old) {
@@ -228,13 +238,13 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
228 struct p54_common *priv = dev->priv; 238 struct p54_common *priv = dev->priv;
229 struct p54_channel_list *list; 239 struct p54_channel_list *list;
230 unsigned int i, j, max_channel_num; 240 unsigned int i, j, max_channel_num;
231 int ret = -ENOMEM; 241 int ret = 0;
232 u16 freq; 242 u16 freq;
233 243
234 if ((priv->iq_autocal_len != priv->curve_data->entries) || 244 if ((priv->iq_autocal_len != priv->curve_data->entries) ||
235 (priv->iq_autocal_len != priv->output_limit->entries)) 245 (priv->iq_autocal_len != priv->output_limit->entries))
236 printk(KERN_ERR "%s: EEPROM is damaged... you may not be able" 246 printk(KERN_ERR "%s: Unsupported or damaged EEPROM detected. "
237 "to use all channels with this device.\n", 247 "You may not be able to use all channels.\n",
238 wiphy_name(dev->wiphy)); 248 wiphy_name(dev->wiphy));
239 249
240 max_channel_num = max_t(unsigned int, priv->output_limit->entries, 250 max_channel_num = max_t(unsigned int, priv->output_limit->entries,
@@ -243,8 +253,10 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
243 priv->curve_data->entries); 253 priv->curve_data->entries);
244 254
245 list = kzalloc(sizeof(*list), GFP_KERNEL); 255 list = kzalloc(sizeof(*list), GFP_KERNEL);
246 if (!list) 256 if (!list) {
257 ret = -ENOMEM;
247 goto free; 258 goto free;
259 }
248 260
249 list->max_entries = max_channel_num; 261 list->max_entries = max_channel_num;
250 list->channels = kzalloc(sizeof(struct p54_channel_entry) * 262 list->channels = kzalloc(sizeof(struct p54_channel_entry) *
@@ -282,13 +294,8 @@ static int p54_generate_channel_lists(struct ieee80211_hw *dev)
282 p54_compare_channels, NULL); 294 p54_compare_channels, NULL);
283 295
284 for (i = 0, j = 0; i < IEEE80211_NUM_BANDS; i++) { 296 for (i = 0, j = 0; i < IEEE80211_NUM_BANDS; i++) {
285 if (list->band_channel_num[i]) { 297 if (p54_generate_band(dev, list, i) == 0)
286 ret = p54_generate_band(dev, list, i);
287 if (ret)
288 goto free;
289
290 j++; 298 j++;
291 }
292 } 299 }
293 if (j == 0) { 300 if (j == 0) {
294 /* no useable band available. */ 301 /* no useable band available. */
diff --git a/drivers/net/wireless/rt2x00/Kconfig b/drivers/net/wireless/rt2x00/Kconfig
index ed1f997e3521..bf60689aaabb 100644
--- a/drivers/net/wireless/rt2x00/Kconfig
+++ b/drivers/net/wireless/rt2x00/Kconfig
@@ -1,6 +1,6 @@
1menuconfig RT2X00 1menuconfig RT2X00
2 tristate "Ralink driver support" 2 tristate "Ralink driver support"
3 depends on MAC80211 && WLAN_80211 3 depends on MAC80211
4 ---help--- 4 ---help---
5 This will enable the support for the Ralink drivers, 5 This will enable the support for the Ralink drivers,
6 developed in the rt2x00 project <http://rt2x00.serialmonkey.com>. 6 developed in the rt2x00 project <http://rt2x00.serialmonkey.com>.
@@ -53,6 +53,36 @@ config RT61PCI
53 53
54 When compiled as a module, this driver will be called rt61pci. 54 When compiled as a module, this driver will be called rt61pci.
55 55
56config RT2800PCI_PCI
57 tristate
58 depends on PCI
59 default y
60
61config RT2800PCI_SOC
62 tristate
63 depends on RALINK_RT288X || RALINK_RT305X
64 default y
65
66config RT2800PCI
67 tristate "Ralink rt2800 (PCI/PCMCIA) support (VERY EXPERIMENTAL)"
68 depends on (RT2800PCI_PCI || RT2800PCI_SOC) && EXPERIMENTAL
69 select RT2800_LIB
70 select RT2X00_LIB_PCI if RT2800PCI_PCI
71 select RT2X00_LIB_SOC if RT2800PCI_SOC
72 select RT2X00_LIB_HT
73 select RT2X00_LIB_FIRMWARE
74 select RT2X00_LIB_CRYPTO
75 select CRC_CCITT
76 select EEPROM_93CX6
77 ---help---
78 This adds support for rt2800 wireless chipset family.
79 Supported chips: RT2760, RT2790, RT2860, RT2880, RT2890 & RT3052
80
81 This driver is non-functional at the moment and is intended for
82 developers.
83
84 When compiled as a module, this driver will be called "rt2800pci.ko".
85
56config RT2500USB 86config RT2500USB
57 tristate "Ralink rt2500 (USB) support" 87 tristate "Ralink rt2500 (USB) support"
58 depends on USB 88 depends on USB
@@ -78,8 +108,9 @@ config RT73USB
78 When compiled as a module, this driver will be called rt73usb. 108 When compiled as a module, this driver will be called rt73usb.
79 109
80config RT2800USB 110config RT2800USB
81 tristate "Ralink rt2800 (USB) support" 111 tristate "Ralink rt2800 (USB) support (EXPERIMENTAL)"
82 depends on USB && EXPERIMENTAL 112 depends on USB && EXPERIMENTAL
113 select RT2800_LIB
83 select RT2X00_LIB_USB 114 select RT2X00_LIB_USB
84 select RT2X00_LIB_HT 115 select RT2X00_LIB_HT
85 select RT2X00_LIB_FIRMWARE 116 select RT2X00_LIB_FIRMWARE
@@ -89,12 +120,23 @@ config RT2800USB
89 This adds experimental support for rt2800 wireless chipset family. 120 This adds experimental support for rt2800 wireless chipset family.
90 Supported chips: RT2770, RT2870 & RT3070. 121 Supported chips: RT2770, RT2870 & RT3070.
91 122
123 Known issues:
124 - support for RT2870 chips doesn't work with 802.11n APs yet
125 - support for RT3070 chips is non-functional at the moment
126
92 When compiled as a module, this driver will be called "rt2800usb.ko". 127 When compiled as a module, this driver will be called "rt2800usb.ko".
93 128
129config RT2800_LIB
130 tristate
131
94config RT2X00_LIB_PCI 132config RT2X00_LIB_PCI
95 tristate 133 tristate
96 select RT2X00_LIB 134 select RT2X00_LIB
97 135
136config RT2X00_LIB_SOC
137 tristate
138 select RT2X00_LIB
139
98config RT2X00_LIB_USB 140config RT2X00_LIB_USB
99 tristate 141 tristate
100 select RT2X00_LIB 142 select RT2X00_LIB
diff --git a/drivers/net/wireless/rt2x00/Makefile b/drivers/net/wireless/rt2x00/Makefile
index 13043ea97667..971339858297 100644
--- a/drivers/net/wireless/rt2x00/Makefile
+++ b/drivers/net/wireless/rt2x00/Makefile
@@ -11,10 +11,13 @@ rt2x00lib-$(CONFIG_RT2X00_LIB_HT) += rt2x00ht.o
11 11
12obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o 12obj-$(CONFIG_RT2X00_LIB) += rt2x00lib.o
13obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o 13obj-$(CONFIG_RT2X00_LIB_PCI) += rt2x00pci.o
14obj-$(CONFIG_RT2X00_LIB_SOC) += rt2x00soc.o
14obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o 15obj-$(CONFIG_RT2X00_LIB_USB) += rt2x00usb.o
16obj-$(CONFIG_RT2800_LIB) += rt2800lib.o
15obj-$(CONFIG_RT2400PCI) += rt2400pci.o 17obj-$(CONFIG_RT2400PCI) += rt2400pci.o
16obj-$(CONFIG_RT2500PCI) += rt2500pci.o 18obj-$(CONFIG_RT2500PCI) += rt2500pci.o
17obj-$(CONFIG_RT61PCI) += rt61pci.o 19obj-$(CONFIG_RT61PCI) += rt61pci.o
20obj-$(CONFIG_RT2800PCI) += rt2800pci.o
18obj-$(CONFIG_RT2500USB) += rt2500usb.o 21obj-$(CONFIG_RT2500USB) += rt2500usb.o
19obj-$(CONFIG_RT73USB) += rt73usb.o 22obj-$(CONFIG_RT73USB) += rt73usb.o
20obj-$(CONFIG_RT2800USB) += rt2800usb.o 23obj-$(CONFIG_RT2800USB) += rt2800usb.o
diff --git a/drivers/net/wireless/rt2x00/rt2800.h b/drivers/net/wireless/rt2x00/rt2800.h
new file mode 100644
index 000000000000..d9b6a72e6d27
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800.h
@@ -0,0 +1,1816 @@
1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2800
23 Abstract: Data structures and registers for the rt2800 modules.
24 Supported chipsets: RT2800E, RT2800ED & RT2800U.
25 */
26
27#ifndef RT2800_H
28#define RT2800_H
29
30/*
31 * RF chip defines.
32 *
33 * RF2820 2.4G 2T3R
34 * RF2850 2.4G/5G 2T3R
35 * RF2720 2.4G 1T2R
36 * RF2750 2.4G/5G 1T2R
37 * RF3020 2.4G 1T1R
38 * RF2020 2.4G B/G
39 * RF3021 2.4G 1T2R
40 * RF3022 2.4G 2T2R
41 * RF3052 2.4G 2T2R
42 */
43#define RF2820 0x0001
44#define RF2850 0x0002
45#define RF2720 0x0003
46#define RF2750 0x0004
47#define RF3020 0x0005
48#define RF2020 0x0006
49#define RF3021 0x0007
50#define RF3022 0x0008
51#define RF3052 0x0009
52
53/*
54 * Chipset version.
55 */
56#define RT2860C_VERSION 0x28600100
57#define RT2860D_VERSION 0x28600101
58#define RT2880E_VERSION 0x28720200
59#define RT2883_VERSION 0x28830300
60#define RT3070_VERSION 0x30700200
61
62/*
63 * Signal information.
64 * Default offset is required for RSSI <-> dBm conversion.
65 */
66#define DEFAULT_RSSI_OFFSET 120 /* FIXME */
67
68/*
69 * Register layout information.
70 */
71#define CSR_REG_BASE 0x1000
72#define CSR_REG_SIZE 0x0800
73#define EEPROM_BASE 0x0000
74#define EEPROM_SIZE 0x0110
75#define BBP_BASE 0x0000
76#define BBP_SIZE 0x0080
77#define RF_BASE 0x0004
78#define RF_SIZE 0x0010
79
80/*
81 * Number of TX queues.
82 */
83#define NUM_TX_QUEUES 4
84
85/*
86 * USB registers.
87 */
88
89/*
90 * INT_SOURCE_CSR: Interrupt source register.
91 * Write one to clear corresponding bit.
92 * TX_FIFO_STATUS: FIFO Statistics is full, sw should read 0x171c
93 */
94#define INT_SOURCE_CSR 0x0200
95#define INT_SOURCE_CSR_RXDELAYINT FIELD32(0x00000001)
96#define INT_SOURCE_CSR_TXDELAYINT FIELD32(0x00000002)
97#define INT_SOURCE_CSR_RX_DONE FIELD32(0x00000004)
98#define INT_SOURCE_CSR_AC0_DMA_DONE FIELD32(0x00000008)
99#define INT_SOURCE_CSR_AC1_DMA_DONE FIELD32(0x00000010)
100#define INT_SOURCE_CSR_AC2_DMA_DONE FIELD32(0x00000020)
101#define INT_SOURCE_CSR_AC3_DMA_DONE FIELD32(0x00000040)
102#define INT_SOURCE_CSR_HCCA_DMA_DONE FIELD32(0x00000080)
103#define INT_SOURCE_CSR_MGMT_DMA_DONE FIELD32(0x00000100)
104#define INT_SOURCE_CSR_MCU_COMMAND FIELD32(0x00000200)
105#define INT_SOURCE_CSR_RXTX_COHERENT FIELD32(0x00000400)
106#define INT_SOURCE_CSR_TBTT FIELD32(0x00000800)
107#define INT_SOURCE_CSR_PRE_TBTT FIELD32(0x00001000)
108#define INT_SOURCE_CSR_TX_FIFO_STATUS FIELD32(0x00002000)
109#define INT_SOURCE_CSR_AUTO_WAKEUP FIELD32(0x00004000)
110#define INT_SOURCE_CSR_GPTIMER FIELD32(0x00008000)
111#define INT_SOURCE_CSR_RX_COHERENT FIELD32(0x00010000)
112#define INT_SOURCE_CSR_TX_COHERENT FIELD32(0x00020000)
113
114/*
115 * INT_MASK_CSR: Interrupt MASK register. 1: the interrupt is mask OFF.
116 */
117#define INT_MASK_CSR 0x0204
118#define INT_MASK_CSR_RXDELAYINT FIELD32(0x00000001)
119#define INT_MASK_CSR_TXDELAYINT FIELD32(0x00000002)
120#define INT_MASK_CSR_RX_DONE FIELD32(0x00000004)
121#define INT_MASK_CSR_AC0_DMA_DONE FIELD32(0x00000008)
122#define INT_MASK_CSR_AC1_DMA_DONE FIELD32(0x00000010)
123#define INT_MASK_CSR_AC2_DMA_DONE FIELD32(0x00000020)
124#define INT_MASK_CSR_AC3_DMA_DONE FIELD32(0x00000040)
125#define INT_MASK_CSR_HCCA_DMA_DONE FIELD32(0x00000080)
126#define INT_MASK_CSR_MGMT_DMA_DONE FIELD32(0x00000100)
127#define INT_MASK_CSR_MCU_COMMAND FIELD32(0x00000200)
128#define INT_MASK_CSR_RXTX_COHERENT FIELD32(0x00000400)
129#define INT_MASK_CSR_TBTT FIELD32(0x00000800)
130#define INT_MASK_CSR_PRE_TBTT FIELD32(0x00001000)
131#define INT_MASK_CSR_TX_FIFO_STATUS FIELD32(0x00002000)
132#define INT_MASK_CSR_AUTO_WAKEUP FIELD32(0x00004000)
133#define INT_MASK_CSR_GPTIMER FIELD32(0x00008000)
134#define INT_MASK_CSR_RX_COHERENT FIELD32(0x00010000)
135#define INT_MASK_CSR_TX_COHERENT FIELD32(0x00020000)
136
137/*
138 * WPDMA_GLO_CFG
139 */
140#define WPDMA_GLO_CFG 0x0208
141#define WPDMA_GLO_CFG_ENABLE_TX_DMA FIELD32(0x00000001)
142#define WPDMA_GLO_CFG_TX_DMA_BUSY FIELD32(0x00000002)
143#define WPDMA_GLO_CFG_ENABLE_RX_DMA FIELD32(0x00000004)
144#define WPDMA_GLO_CFG_RX_DMA_BUSY FIELD32(0x00000008)
145#define WPDMA_GLO_CFG_WP_DMA_BURST_SIZE FIELD32(0x00000030)
146#define WPDMA_GLO_CFG_TX_WRITEBACK_DONE FIELD32(0x00000040)
147#define WPDMA_GLO_CFG_BIG_ENDIAN FIELD32(0x00000080)
148#define WPDMA_GLO_CFG_RX_HDR_SCATTER FIELD32(0x0000ff00)
149#define WPDMA_GLO_CFG_HDR_SEG_LEN FIELD32(0xffff0000)
150
151/*
152 * WPDMA_RST_IDX
153 */
154#define WPDMA_RST_IDX 0x020c
155#define WPDMA_RST_IDX_DTX_IDX0 FIELD32(0x00000001)
156#define WPDMA_RST_IDX_DTX_IDX1 FIELD32(0x00000002)
157#define WPDMA_RST_IDX_DTX_IDX2 FIELD32(0x00000004)
158#define WPDMA_RST_IDX_DTX_IDX3 FIELD32(0x00000008)
159#define WPDMA_RST_IDX_DTX_IDX4 FIELD32(0x00000010)
160#define WPDMA_RST_IDX_DTX_IDX5 FIELD32(0x00000020)
161#define WPDMA_RST_IDX_DRX_IDX0 FIELD32(0x00010000)
162
163/*
164 * DELAY_INT_CFG
165 */
166#define DELAY_INT_CFG 0x0210
167#define DELAY_INT_CFG_RXMAX_PTIME FIELD32(0x000000ff)
168#define DELAY_INT_CFG_RXMAX_PINT FIELD32(0x00007f00)
169#define DELAY_INT_CFG_RXDLY_INT_EN FIELD32(0x00008000)
170#define DELAY_INT_CFG_TXMAX_PTIME FIELD32(0x00ff0000)
171#define DELAY_INT_CFG_TXMAX_PINT FIELD32(0x7f000000)
172#define DELAY_INT_CFG_TXDLY_INT_EN FIELD32(0x80000000)
173
174/*
175 * WMM_AIFSN_CFG: Aifsn for each EDCA AC
176 * AIFSN0: AC_BE
177 * AIFSN1: AC_BK
178 * AIFSN2: AC_VI
179 * AIFSN3: AC_VO
180 */
181#define WMM_AIFSN_CFG 0x0214
182#define WMM_AIFSN_CFG_AIFSN0 FIELD32(0x0000000f)
183#define WMM_AIFSN_CFG_AIFSN1 FIELD32(0x000000f0)
184#define WMM_AIFSN_CFG_AIFSN2 FIELD32(0x00000f00)
185#define WMM_AIFSN_CFG_AIFSN3 FIELD32(0x0000f000)
186
187/*
188 * WMM_CWMIN_CSR: CWmin for each EDCA AC
189 * CWMIN0: AC_BE
190 * CWMIN1: AC_BK
191 * CWMIN2: AC_VI
192 * CWMIN3: AC_VO
193 */
194#define WMM_CWMIN_CFG 0x0218
195#define WMM_CWMIN_CFG_CWMIN0 FIELD32(0x0000000f)
196#define WMM_CWMIN_CFG_CWMIN1 FIELD32(0x000000f0)
197#define WMM_CWMIN_CFG_CWMIN2 FIELD32(0x00000f00)
198#define WMM_CWMIN_CFG_CWMIN3 FIELD32(0x0000f000)
199
200/*
201 * WMM_CWMAX_CSR: CWmax for each EDCA AC
202 * CWMAX0: AC_BE
203 * CWMAX1: AC_BK
204 * CWMAX2: AC_VI
205 * CWMAX3: AC_VO
206 */
207#define WMM_CWMAX_CFG 0x021c
208#define WMM_CWMAX_CFG_CWMAX0 FIELD32(0x0000000f)
209#define WMM_CWMAX_CFG_CWMAX1 FIELD32(0x000000f0)
210#define WMM_CWMAX_CFG_CWMAX2 FIELD32(0x00000f00)
211#define WMM_CWMAX_CFG_CWMAX3 FIELD32(0x0000f000)
212
213/*
214 * AC_TXOP0: AC_BK/AC_BE TXOP register
215 * AC0TXOP: AC_BK in unit of 32us
216 * AC1TXOP: AC_BE in unit of 32us
217 */
218#define WMM_TXOP0_CFG 0x0220
219#define WMM_TXOP0_CFG_AC0TXOP FIELD32(0x0000ffff)
220#define WMM_TXOP0_CFG_AC1TXOP FIELD32(0xffff0000)
221
222/*
223 * AC_TXOP1: AC_VO/AC_VI TXOP register
224 * AC2TXOP: AC_VI in unit of 32us
225 * AC3TXOP: AC_VO in unit of 32us
226 */
227#define WMM_TXOP1_CFG 0x0224
228#define WMM_TXOP1_CFG_AC2TXOP FIELD32(0x0000ffff)
229#define WMM_TXOP1_CFG_AC3TXOP FIELD32(0xffff0000)
230
231/*
232 * GPIO_CTRL_CFG:
233 */
234#define GPIO_CTRL_CFG 0x0228
235#define GPIO_CTRL_CFG_BIT0 FIELD32(0x00000001)
236#define GPIO_CTRL_CFG_BIT1 FIELD32(0x00000002)
237#define GPIO_CTRL_CFG_BIT2 FIELD32(0x00000004)
238#define GPIO_CTRL_CFG_BIT3 FIELD32(0x00000008)
239#define GPIO_CTRL_CFG_BIT4 FIELD32(0x00000010)
240#define GPIO_CTRL_CFG_BIT5 FIELD32(0x00000020)
241#define GPIO_CTRL_CFG_BIT6 FIELD32(0x00000040)
242#define GPIO_CTRL_CFG_BIT7 FIELD32(0x00000080)
243#define GPIO_CTRL_CFG_BIT8 FIELD32(0x00000100)
244
245/*
246 * MCU_CMD_CFG
247 */
248#define MCU_CMD_CFG 0x022c
249
250/*
251 * AC_BK register offsets
252 */
253#define TX_BASE_PTR0 0x0230
254#define TX_MAX_CNT0 0x0234
255#define TX_CTX_IDX0 0x0238
256#define TX_DTX_IDX0 0x023c
257
258/*
259 * AC_BE register offsets
260 */
261#define TX_BASE_PTR1 0x0240
262#define TX_MAX_CNT1 0x0244
263#define TX_CTX_IDX1 0x0248
264#define TX_DTX_IDX1 0x024c
265
266/*
267 * AC_VI register offsets
268 */
269#define TX_BASE_PTR2 0x0250
270#define TX_MAX_CNT2 0x0254
271#define TX_CTX_IDX2 0x0258
272#define TX_DTX_IDX2 0x025c
273
274/*
275 * AC_VO register offsets
276 */
277#define TX_BASE_PTR3 0x0260
278#define TX_MAX_CNT3 0x0264
279#define TX_CTX_IDX3 0x0268
280#define TX_DTX_IDX3 0x026c
281
282/*
283 * HCCA register offsets
284 */
285#define TX_BASE_PTR4 0x0270
286#define TX_MAX_CNT4 0x0274
287#define TX_CTX_IDX4 0x0278
288#define TX_DTX_IDX4 0x027c
289
290/*
291 * MGMT register offsets
292 */
293#define TX_BASE_PTR5 0x0280
294#define TX_MAX_CNT5 0x0284
295#define TX_CTX_IDX5 0x0288
296#define TX_DTX_IDX5 0x028c
297
298/*
299 * RX register offsets
300 */
301#define RX_BASE_PTR 0x0290
302#define RX_MAX_CNT 0x0294
303#define RX_CRX_IDX 0x0298
304#define RX_DRX_IDX 0x029c
305
306/*
307 * PBF_SYS_CTRL
308 * HOST_RAM_WRITE: enable Host program ram write selection
309 */
310#define PBF_SYS_CTRL 0x0400
311#define PBF_SYS_CTRL_READY FIELD32(0x00000080)
312#define PBF_SYS_CTRL_HOST_RAM_WRITE FIELD32(0x00010000)
313
314/*
315 * HOST-MCU shared memory
316 */
317#define HOST_CMD_CSR 0x0404
318#define HOST_CMD_CSR_HOST_COMMAND FIELD32(0x000000ff)
319
320/*
321 * PBF registers
322 * Most are for debug. Driver doesn't touch PBF register.
323 */
324#define PBF_CFG 0x0408
325#define PBF_MAX_PCNT 0x040c
326#define PBF_CTRL 0x0410
327#define PBF_INT_STA 0x0414
328#define PBF_INT_ENA 0x0418
329
330/*
331 * BCN_OFFSET0:
332 */
333#define BCN_OFFSET0 0x042c
334#define BCN_OFFSET0_BCN0 FIELD32(0x000000ff)
335#define BCN_OFFSET0_BCN1 FIELD32(0x0000ff00)
336#define BCN_OFFSET0_BCN2 FIELD32(0x00ff0000)
337#define BCN_OFFSET0_BCN3 FIELD32(0xff000000)
338
339/*
340 * BCN_OFFSET1:
341 */
342#define BCN_OFFSET1 0x0430
343#define BCN_OFFSET1_BCN4 FIELD32(0x000000ff)
344#define BCN_OFFSET1_BCN5 FIELD32(0x0000ff00)
345#define BCN_OFFSET1_BCN6 FIELD32(0x00ff0000)
346#define BCN_OFFSET1_BCN7 FIELD32(0xff000000)
347
348/*
349 * PBF registers
350 * Most are for debug. Driver doesn't touch PBF register.
351 */
352#define TXRXQ_PCNT 0x0438
353#define PBF_DBG 0x043c
354
355/*
356 * RF registers
357 */
358#define RF_CSR_CFG 0x0500
359#define RF_CSR_CFG_DATA FIELD32(0x000000ff)
360#define RF_CSR_CFG_REGNUM FIELD32(0x00001f00)
361#define RF_CSR_CFG_WRITE FIELD32(0x00010000)
362#define RF_CSR_CFG_BUSY FIELD32(0x00020000)
363
364/*
365 * MAC Control/Status Registers(CSR).
366 * Some values are set in TU, whereas 1 TU == 1024 us.
367 */
368
369/*
370 * MAC_CSR0: ASIC revision number.
371 * ASIC_REV: 0
372 * ASIC_VER: 2860 or 2870
373 */
374#define MAC_CSR0 0x1000
375#define MAC_CSR0_ASIC_REV FIELD32(0x0000ffff)
376#define MAC_CSR0_ASIC_VER FIELD32(0xffff0000)
377
378/*
379 * MAC_SYS_CTRL:
380 */
381#define MAC_SYS_CTRL 0x1004
382#define MAC_SYS_CTRL_RESET_CSR FIELD32(0x00000001)
383#define MAC_SYS_CTRL_RESET_BBP FIELD32(0x00000002)
384#define MAC_SYS_CTRL_ENABLE_TX FIELD32(0x00000004)
385#define MAC_SYS_CTRL_ENABLE_RX FIELD32(0x00000008)
386#define MAC_SYS_CTRL_CONTINUOUS_TX FIELD32(0x00000010)
387#define MAC_SYS_CTRL_LOOPBACK FIELD32(0x00000020)
388#define MAC_SYS_CTRL_WLAN_HALT FIELD32(0x00000040)
389#define MAC_SYS_CTRL_RX_TIMESTAMP FIELD32(0x00000080)
390
391/*
392 * MAC_ADDR_DW0: STA MAC register 0
393 */
394#define MAC_ADDR_DW0 0x1008
395#define MAC_ADDR_DW0_BYTE0 FIELD32(0x000000ff)
396#define MAC_ADDR_DW0_BYTE1 FIELD32(0x0000ff00)
397#define MAC_ADDR_DW0_BYTE2 FIELD32(0x00ff0000)
398#define MAC_ADDR_DW0_BYTE3 FIELD32(0xff000000)
399
400/*
401 * MAC_ADDR_DW1: STA MAC register 1
402 * UNICAST_TO_ME_MASK:
403 * Used to mask off bits from byte 5 of the MAC address
404 * to determine the UNICAST_TO_ME bit for RX frames.
405 * The full mask is complemented by BSS_ID_MASK:
406 * MASK = BSS_ID_MASK & UNICAST_TO_ME_MASK
407 */
408#define MAC_ADDR_DW1 0x100c
409#define MAC_ADDR_DW1_BYTE4 FIELD32(0x000000ff)
410#define MAC_ADDR_DW1_BYTE5 FIELD32(0x0000ff00)
411#define MAC_ADDR_DW1_UNICAST_TO_ME_MASK FIELD32(0x00ff0000)
412
413/*
414 * MAC_BSSID_DW0: BSSID register 0
415 */
416#define MAC_BSSID_DW0 0x1010
417#define MAC_BSSID_DW0_BYTE0 FIELD32(0x000000ff)
418#define MAC_BSSID_DW0_BYTE1 FIELD32(0x0000ff00)
419#define MAC_BSSID_DW0_BYTE2 FIELD32(0x00ff0000)
420#define MAC_BSSID_DW0_BYTE3 FIELD32(0xff000000)
421
422/*
423 * MAC_BSSID_DW1: BSSID register 1
424 * BSS_ID_MASK:
425 * 0: 1-BSSID mode (BSS index = 0)
426 * 1: 2-BSSID mode (BSS index: Byte5, bit 0)
427 * 2: 4-BSSID mode (BSS index: byte5, bit 0 - 1)
428 * 3: 8-BSSID mode (BSS index: byte5, bit 0 - 2)
429 * This mask is used to mask off bits 0, 1 and 2 of byte 5 of the
430 * BSSID. This will make sure that those bits will be ignored
431 * when determining the MY_BSS of RX frames.
432 */
433#define MAC_BSSID_DW1 0x1014
434#define MAC_BSSID_DW1_BYTE4 FIELD32(0x000000ff)
435#define MAC_BSSID_DW1_BYTE5 FIELD32(0x0000ff00)
436#define MAC_BSSID_DW1_BSS_ID_MASK FIELD32(0x00030000)
437#define MAC_BSSID_DW1_BSS_BCN_NUM FIELD32(0x001c0000)
438
439/*
440 * MAX_LEN_CFG: Maximum frame length register.
441 * MAX_MPDU: rt2860b max 16k bytes
442 * MAX_PSDU: Maximum PSDU length
443 * (power factor) 0:2^13, 1:2^14, 2:2^15, 3:2^16
444 */
445#define MAX_LEN_CFG 0x1018
446#define MAX_LEN_CFG_MAX_MPDU FIELD32(0x00000fff)
447#define MAX_LEN_CFG_MAX_PSDU FIELD32(0x00003000)
448#define MAX_LEN_CFG_MIN_PSDU FIELD32(0x0000c000)
449#define MAX_LEN_CFG_MIN_MPDU FIELD32(0x000f0000)
450
451/*
452 * BBP_CSR_CFG: BBP serial control register
453 * VALUE: Register value to program into BBP
454 * REG_NUM: Selected BBP register
455 * READ_CONTROL: 0 write BBP, 1 read BBP
456 * BUSY: ASIC is busy executing BBP commands
457 * BBP_PAR_DUR: 0 4 MAC clocks, 1 8 MAC clocks
458 * BBP_RW_MODE: 0 serial, 1 paralell
459 */
460#define BBP_CSR_CFG 0x101c
461#define BBP_CSR_CFG_VALUE FIELD32(0x000000ff)
462#define BBP_CSR_CFG_REGNUM FIELD32(0x0000ff00)
463#define BBP_CSR_CFG_READ_CONTROL FIELD32(0x00010000)
464#define BBP_CSR_CFG_BUSY FIELD32(0x00020000)
465#define BBP_CSR_CFG_BBP_PAR_DUR FIELD32(0x00040000)
466#define BBP_CSR_CFG_BBP_RW_MODE FIELD32(0x00080000)
467
468/*
469 * RF_CSR_CFG0: RF control register
470 * REGID_AND_VALUE: Register value to program into RF
471 * BITWIDTH: Selected RF register
472 * STANDBYMODE: 0 high when standby, 1 low when standby
473 * SEL: 0 RF_LE0 activate, 1 RF_LE1 activate
474 * BUSY: ASIC is busy executing RF commands
475 */
476#define RF_CSR_CFG0 0x1020
477#define RF_CSR_CFG0_REGID_AND_VALUE FIELD32(0x00ffffff)
478#define RF_CSR_CFG0_BITWIDTH FIELD32(0x1f000000)
479#define RF_CSR_CFG0_REG_VALUE_BW FIELD32(0x1fffffff)
480#define RF_CSR_CFG0_STANDBYMODE FIELD32(0x20000000)
481#define RF_CSR_CFG0_SEL FIELD32(0x40000000)
482#define RF_CSR_CFG0_BUSY FIELD32(0x80000000)
483
484/*
485 * RF_CSR_CFG1: RF control register
486 * REGID_AND_VALUE: Register value to program into RF
487 * RFGAP: Gap between BB_CONTROL_RF and RF_LE
488 * 0: 3 system clock cycle (37.5usec)
489 * 1: 5 system clock cycle (62.5usec)
490 */
491#define RF_CSR_CFG1 0x1024
492#define RF_CSR_CFG1_REGID_AND_VALUE FIELD32(0x00ffffff)
493#define RF_CSR_CFG1_RFGAP FIELD32(0x1f000000)
494
495/*
496 * RF_CSR_CFG2: RF control register
497 * VALUE: Register value to program into RF
498 */
499#define RF_CSR_CFG2 0x1028
500#define RF_CSR_CFG2_VALUE FIELD32(0x00ffffff)
501
502/*
503 * LED_CFG: LED control
504 * color LED's:
505 * 0: off
506 * 1: blinking upon TX2
507 * 2: periodic slow blinking
508 * 3: always on
509 * LED polarity:
510 * 0: active low
511 * 1: active high
512 */
513#define LED_CFG 0x102c
514#define LED_CFG_ON_PERIOD FIELD32(0x000000ff)
515#define LED_CFG_OFF_PERIOD FIELD32(0x0000ff00)
516#define LED_CFG_SLOW_BLINK_PERIOD FIELD32(0x003f0000)
517#define LED_CFG_R_LED_MODE FIELD32(0x03000000)
518#define LED_CFG_G_LED_MODE FIELD32(0x0c000000)
519#define LED_CFG_Y_LED_MODE FIELD32(0x30000000)
520#define LED_CFG_LED_POLAR FIELD32(0x40000000)
521
522/*
523 * XIFS_TIME_CFG: MAC timing
524 * CCKM_SIFS_TIME: unit 1us. Applied after CCK RX/TX
525 * OFDM_SIFS_TIME: unit 1us. Applied after OFDM RX/TX
526 * OFDM_XIFS_TIME: unit 1us. Applied after OFDM RX
527 * when MAC doesn't reference BBP signal BBRXEND
528 * EIFS: unit 1us
529 * BB_RXEND_ENABLE: reference RXEND signal to begin XIFS defer
530 *
531 */
532#define XIFS_TIME_CFG 0x1100
533#define XIFS_TIME_CFG_CCKM_SIFS_TIME FIELD32(0x000000ff)
534#define XIFS_TIME_CFG_OFDM_SIFS_TIME FIELD32(0x0000ff00)
535#define XIFS_TIME_CFG_OFDM_XIFS_TIME FIELD32(0x000f0000)
536#define XIFS_TIME_CFG_EIFS FIELD32(0x1ff00000)
537#define XIFS_TIME_CFG_BB_RXEND_ENABLE FIELD32(0x20000000)
538
539/*
540 * BKOFF_SLOT_CFG:
541 */
542#define BKOFF_SLOT_CFG 0x1104
543#define BKOFF_SLOT_CFG_SLOT_TIME FIELD32(0x000000ff)
544#define BKOFF_SLOT_CFG_CC_DELAY_TIME FIELD32(0x0000ff00)
545
546/*
547 * NAV_TIME_CFG:
548 */
549#define NAV_TIME_CFG 0x1108
550#define NAV_TIME_CFG_SIFS FIELD32(0x000000ff)
551#define NAV_TIME_CFG_SLOT_TIME FIELD32(0x0000ff00)
552#define NAV_TIME_CFG_EIFS FIELD32(0x01ff0000)
553#define NAV_TIME_ZERO_SIFS FIELD32(0x02000000)
554
555/*
556 * CH_TIME_CFG: count as channel busy
557 */
558#define CH_TIME_CFG 0x110c
559
560/*
561 * PBF_LIFE_TIMER: TX/RX MPDU timestamp timer (free run) Unit: 1us
562 */
563#define PBF_LIFE_TIMER 0x1110
564
565/*
566 * BCN_TIME_CFG:
567 * BEACON_INTERVAL: in unit of 1/16 TU
568 * TSF_TICKING: Enable TSF auto counting
569 * TSF_SYNC: Enable TSF sync, 00: disable, 01: infra mode, 10: ad-hoc mode
570 * BEACON_GEN: Enable beacon generator
571 */
572#define BCN_TIME_CFG 0x1114
573#define BCN_TIME_CFG_BEACON_INTERVAL FIELD32(0x0000ffff)
574#define BCN_TIME_CFG_TSF_TICKING FIELD32(0x00010000)
575#define BCN_TIME_CFG_TSF_SYNC FIELD32(0x00060000)
576#define BCN_TIME_CFG_TBTT_ENABLE FIELD32(0x00080000)
577#define BCN_TIME_CFG_BEACON_GEN FIELD32(0x00100000)
578#define BCN_TIME_CFG_TX_TIME_COMPENSATE FIELD32(0xf0000000)
579
580/*
581 * TBTT_SYNC_CFG:
582 */
583#define TBTT_SYNC_CFG 0x1118
584
585/*
586 * TSF_TIMER_DW0: Local lsb TSF timer, read-only
587 */
588#define TSF_TIMER_DW0 0x111c
589#define TSF_TIMER_DW0_LOW_WORD FIELD32(0xffffffff)
590
591/*
592 * TSF_TIMER_DW1: Local msb TSF timer, read-only
593 */
594#define TSF_TIMER_DW1 0x1120
595#define TSF_TIMER_DW1_HIGH_WORD FIELD32(0xffffffff)
596
597/*
598 * TBTT_TIMER: TImer remains till next TBTT, read-only
599 */
600#define TBTT_TIMER 0x1124
601
602/*
603 * INT_TIMER_CFG:
604 */
605#define INT_TIMER_CFG 0x1128
606
607/*
608 * INT_TIMER_EN: GP-timer and pre-tbtt Int enable
609 */
610#define INT_TIMER_EN 0x112c
611
612/*
613 * CH_IDLE_STA: channel idle time
614 */
615#define CH_IDLE_STA 0x1130
616
617/*
618 * CH_BUSY_STA: channel busy time
619 */
620#define CH_BUSY_STA 0x1134
621
622/*
623 * MAC_STATUS_CFG:
624 * BBP_RF_BUSY: When set to 0, BBP and RF are stable.
625 * if 1 or higher one of the 2 registers is busy.
626 */
627#define MAC_STATUS_CFG 0x1200
628#define MAC_STATUS_CFG_BBP_RF_BUSY FIELD32(0x00000003)
629
630/*
631 * PWR_PIN_CFG:
632 */
633#define PWR_PIN_CFG 0x1204
634
635/*
636 * AUTOWAKEUP_CFG: Manual power control / status register
637 * TBCN_BEFORE_WAKE: ForceWake has high privilege than PutToSleep when both set
638 * AUTOWAKE: 0:sleep, 1:awake
639 */
640#define AUTOWAKEUP_CFG 0x1208
641#define AUTOWAKEUP_CFG_AUTO_LEAD_TIME FIELD32(0x000000ff)
642#define AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE FIELD32(0x00007f00)
643#define AUTOWAKEUP_CFG_AUTOWAKE FIELD32(0x00008000)
644
645/*
646 * EDCA_AC0_CFG:
647 */
648#define EDCA_AC0_CFG 0x1300
649#define EDCA_AC0_CFG_TX_OP FIELD32(0x000000ff)
650#define EDCA_AC0_CFG_AIFSN FIELD32(0x00000f00)
651#define EDCA_AC0_CFG_CWMIN FIELD32(0x0000f000)
652#define EDCA_AC0_CFG_CWMAX FIELD32(0x000f0000)
653
654/*
655 * EDCA_AC1_CFG:
656 */
657#define EDCA_AC1_CFG 0x1304
658#define EDCA_AC1_CFG_TX_OP FIELD32(0x000000ff)
659#define EDCA_AC1_CFG_AIFSN FIELD32(0x00000f00)
660#define EDCA_AC1_CFG_CWMIN FIELD32(0x0000f000)
661#define EDCA_AC1_CFG_CWMAX FIELD32(0x000f0000)
662
663/*
664 * EDCA_AC2_CFG:
665 */
666#define EDCA_AC2_CFG 0x1308
667#define EDCA_AC2_CFG_TX_OP FIELD32(0x000000ff)
668#define EDCA_AC2_CFG_AIFSN FIELD32(0x00000f00)
669#define EDCA_AC2_CFG_CWMIN FIELD32(0x0000f000)
670#define EDCA_AC2_CFG_CWMAX FIELD32(0x000f0000)
671
672/*
673 * EDCA_AC3_CFG:
674 */
675#define EDCA_AC3_CFG 0x130c
676#define EDCA_AC3_CFG_TX_OP FIELD32(0x000000ff)
677#define EDCA_AC3_CFG_AIFSN FIELD32(0x00000f00)
678#define EDCA_AC3_CFG_CWMIN FIELD32(0x0000f000)
679#define EDCA_AC3_CFG_CWMAX FIELD32(0x000f0000)
680
681/*
682 * EDCA_TID_AC_MAP:
683 */
684#define EDCA_TID_AC_MAP 0x1310
685
686/*
687 * TX_PWR_CFG_0:
688 */
689#define TX_PWR_CFG_0 0x1314
690#define TX_PWR_CFG_0_1MBS FIELD32(0x0000000f)
691#define TX_PWR_CFG_0_2MBS FIELD32(0x000000f0)
692#define TX_PWR_CFG_0_55MBS FIELD32(0x00000f00)
693#define TX_PWR_CFG_0_11MBS FIELD32(0x0000f000)
694#define TX_PWR_CFG_0_6MBS FIELD32(0x000f0000)
695#define TX_PWR_CFG_0_9MBS FIELD32(0x00f00000)
696#define TX_PWR_CFG_0_12MBS FIELD32(0x0f000000)
697#define TX_PWR_CFG_0_18MBS FIELD32(0xf0000000)
698
699/*
700 * TX_PWR_CFG_1:
701 */
702#define TX_PWR_CFG_1 0x1318
703#define TX_PWR_CFG_1_24MBS FIELD32(0x0000000f)
704#define TX_PWR_CFG_1_36MBS FIELD32(0x000000f0)
705#define TX_PWR_CFG_1_48MBS FIELD32(0x00000f00)
706#define TX_PWR_CFG_1_54MBS FIELD32(0x0000f000)
707#define TX_PWR_CFG_1_MCS0 FIELD32(0x000f0000)
708#define TX_PWR_CFG_1_MCS1 FIELD32(0x00f00000)
709#define TX_PWR_CFG_1_MCS2 FIELD32(0x0f000000)
710#define TX_PWR_CFG_1_MCS3 FIELD32(0xf0000000)
711
712/*
713 * TX_PWR_CFG_2:
714 */
715#define TX_PWR_CFG_2 0x131c
716#define TX_PWR_CFG_2_MCS4 FIELD32(0x0000000f)
717#define TX_PWR_CFG_2_MCS5 FIELD32(0x000000f0)
718#define TX_PWR_CFG_2_MCS6 FIELD32(0x00000f00)
719#define TX_PWR_CFG_2_MCS7 FIELD32(0x0000f000)
720#define TX_PWR_CFG_2_MCS8 FIELD32(0x000f0000)
721#define TX_PWR_CFG_2_MCS9 FIELD32(0x00f00000)
722#define TX_PWR_CFG_2_MCS10 FIELD32(0x0f000000)
723#define TX_PWR_CFG_2_MCS11 FIELD32(0xf0000000)
724
725/*
726 * TX_PWR_CFG_3:
727 */
728#define TX_PWR_CFG_3 0x1320
729#define TX_PWR_CFG_3_MCS12 FIELD32(0x0000000f)
730#define TX_PWR_CFG_3_MCS13 FIELD32(0x000000f0)
731#define TX_PWR_CFG_3_MCS14 FIELD32(0x00000f00)
732#define TX_PWR_CFG_3_MCS15 FIELD32(0x0000f000)
733#define TX_PWR_CFG_3_UKNOWN1 FIELD32(0x000f0000)
734#define TX_PWR_CFG_3_UKNOWN2 FIELD32(0x00f00000)
735#define TX_PWR_CFG_3_UKNOWN3 FIELD32(0x0f000000)
736#define TX_PWR_CFG_3_UKNOWN4 FIELD32(0xf0000000)
737
738/*
739 * TX_PWR_CFG_4:
740 */
741#define TX_PWR_CFG_4 0x1324
742#define TX_PWR_CFG_4_UKNOWN5 FIELD32(0x0000000f)
743#define TX_PWR_CFG_4_UKNOWN6 FIELD32(0x000000f0)
744#define TX_PWR_CFG_4_UKNOWN7 FIELD32(0x00000f00)
745#define TX_PWR_CFG_4_UKNOWN8 FIELD32(0x0000f000)
746
747/*
748 * TX_PIN_CFG:
749 */
750#define TX_PIN_CFG 0x1328
751#define TX_PIN_CFG_PA_PE_A0_EN FIELD32(0x00000001)
752#define TX_PIN_CFG_PA_PE_G0_EN FIELD32(0x00000002)
753#define TX_PIN_CFG_PA_PE_A1_EN FIELD32(0x00000004)
754#define TX_PIN_CFG_PA_PE_G1_EN FIELD32(0x00000008)
755#define TX_PIN_CFG_PA_PE_A0_POL FIELD32(0x00000010)
756#define TX_PIN_CFG_PA_PE_G0_POL FIELD32(0x00000020)
757#define TX_PIN_CFG_PA_PE_A1_POL FIELD32(0x00000040)
758#define TX_PIN_CFG_PA_PE_G1_POL FIELD32(0x00000080)
759#define TX_PIN_CFG_LNA_PE_A0_EN FIELD32(0x00000100)
760#define TX_PIN_CFG_LNA_PE_G0_EN FIELD32(0x00000200)
761#define TX_PIN_CFG_LNA_PE_A1_EN FIELD32(0x00000400)
762#define TX_PIN_CFG_LNA_PE_G1_EN FIELD32(0x00000800)
763#define TX_PIN_CFG_LNA_PE_A0_POL FIELD32(0x00001000)
764#define TX_PIN_CFG_LNA_PE_G0_POL FIELD32(0x00002000)
765#define TX_PIN_CFG_LNA_PE_A1_POL FIELD32(0x00004000)
766#define TX_PIN_CFG_LNA_PE_G1_POL FIELD32(0x00008000)
767#define TX_PIN_CFG_RFTR_EN FIELD32(0x00010000)
768#define TX_PIN_CFG_RFTR_POL FIELD32(0x00020000)
769#define TX_PIN_CFG_TRSW_EN FIELD32(0x00040000)
770#define TX_PIN_CFG_TRSW_POL FIELD32(0x00080000)
771
772/*
773 * TX_BAND_CFG: 0x1 use upper 20MHz, 0x0 use lower 20MHz
774 */
775#define TX_BAND_CFG 0x132c
776#define TX_BAND_CFG_HT40_PLUS FIELD32(0x00000001)
777#define TX_BAND_CFG_A FIELD32(0x00000002)
778#define TX_BAND_CFG_BG FIELD32(0x00000004)
779
780/*
781 * TX_SW_CFG0:
782 */
783#define TX_SW_CFG0 0x1330
784
785/*
786 * TX_SW_CFG1:
787 */
788#define TX_SW_CFG1 0x1334
789
790/*
791 * TX_SW_CFG2:
792 */
793#define TX_SW_CFG2 0x1338
794
795/*
796 * TXOP_THRES_CFG:
797 */
798#define TXOP_THRES_CFG 0x133c
799
800/*
801 * TXOP_CTRL_CFG:
802 */
803#define TXOP_CTRL_CFG 0x1340
804
805/*
806 * TX_RTS_CFG:
807 * RTS_THRES: unit:byte
808 * RTS_FBK_EN: enable rts rate fallback
809 */
810#define TX_RTS_CFG 0x1344
811#define TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT FIELD32(0x000000ff)
812#define TX_RTS_CFG_RTS_THRES FIELD32(0x00ffff00)
813#define TX_RTS_CFG_RTS_FBK_EN FIELD32(0x01000000)
814
815/*
816 * TX_TIMEOUT_CFG:
817 * MPDU_LIFETIME: expiration time = 2^(9+MPDU LIFE TIME) us
818 * RX_ACK_TIMEOUT: unit:slot. Used for TX procedure
819 * TX_OP_TIMEOUT: TXOP timeout value for TXOP truncation.
820 * it is recommended that:
821 * (SLOT_TIME) > (TX_OP_TIMEOUT) > (RX_ACK_TIMEOUT)
822 */
823#define TX_TIMEOUT_CFG 0x1348
824#define TX_TIMEOUT_CFG_MPDU_LIFETIME FIELD32(0x000000f0)
825#define TX_TIMEOUT_CFG_RX_ACK_TIMEOUT FIELD32(0x0000ff00)
826#define TX_TIMEOUT_CFG_TX_OP_TIMEOUT FIELD32(0x00ff0000)
827
828/*
829 * TX_RTY_CFG:
830 * SHORT_RTY_LIMIT: short retry limit
831 * LONG_RTY_LIMIT: long retry limit
 * LONG_RTY_THRE: Long retry threshold
833 * NON_AGG_RTY_MODE: Non-Aggregate MPDU retry mode
834 * 0:expired by retry limit, 1: expired by mpdu life timer
835 * AGG_RTY_MODE: Aggregate MPDU retry mode
836 * 0:expired by retry limit, 1: expired by mpdu life timer
837 * TX_AUTO_FB_ENABLE: Tx retry PHY rate auto fallback enable
838 */
839#define TX_RTY_CFG 0x134c
840#define TX_RTY_CFG_SHORT_RTY_LIMIT FIELD32(0x000000ff)
841#define TX_RTY_CFG_LONG_RTY_LIMIT FIELD32(0x0000ff00)
842#define TX_RTY_CFG_LONG_RTY_THRE FIELD32(0x0fff0000)
843#define TX_RTY_CFG_NON_AGG_RTY_MODE FIELD32(0x10000000)
844#define TX_RTY_CFG_AGG_RTY_MODE FIELD32(0x20000000)
845#define TX_RTY_CFG_TX_AUTO_FB_ENABLE FIELD32(0x40000000)
846
847/*
848 * TX_LINK_CFG:
849 * REMOTE_MFB_LIFETIME: remote MFB life time. unit: 32us
850 * MFB_ENABLE: TX apply remote MFB 1:enable
851 * REMOTE_UMFS_ENABLE: remote unsolicit MFB enable
852 * 0: not apply remote remote unsolicit (MFS=7)
853 * TX_MRQ_EN: MCS request TX enable
854 * TX_RDG_EN: RDG TX enable
855 * TX_CF_ACK_EN: Piggyback CF-ACK enable
856 * REMOTE_MFB: remote MCS feedback
857 * REMOTE_MFS: remote MCS feedback sequence number
858 */
859#define TX_LINK_CFG 0x1350
860#define TX_LINK_CFG_REMOTE_MFB_LIFETIME FIELD32(0x000000ff)
861#define TX_LINK_CFG_MFB_ENABLE FIELD32(0x00000100)
862#define TX_LINK_CFG_REMOTE_UMFS_ENABLE FIELD32(0x00000200)
863#define TX_LINK_CFG_TX_MRQ_EN FIELD32(0x00000400)
864#define TX_LINK_CFG_TX_RDG_EN FIELD32(0x00000800)
865#define TX_LINK_CFG_TX_CF_ACK_EN FIELD32(0x00001000)
866#define TX_LINK_CFG_REMOTE_MFB FIELD32(0x00ff0000)
867#define TX_LINK_CFG_REMOTE_MFS FIELD32(0xff000000)
868
869/*
870 * HT_FBK_CFG0:
871 */
872#define HT_FBK_CFG0 0x1354
873#define HT_FBK_CFG0_HTMCS0FBK FIELD32(0x0000000f)
874#define HT_FBK_CFG0_HTMCS1FBK FIELD32(0x000000f0)
875#define HT_FBK_CFG0_HTMCS2FBK FIELD32(0x00000f00)
876#define HT_FBK_CFG0_HTMCS3FBK FIELD32(0x0000f000)
877#define HT_FBK_CFG0_HTMCS4FBK FIELD32(0x000f0000)
878#define HT_FBK_CFG0_HTMCS5FBK FIELD32(0x00f00000)
879#define HT_FBK_CFG0_HTMCS6FBK FIELD32(0x0f000000)
880#define HT_FBK_CFG0_HTMCS7FBK FIELD32(0xf0000000)
881
882/*
883 * HT_FBK_CFG1:
884 */
885#define HT_FBK_CFG1 0x1358
886#define HT_FBK_CFG1_HTMCS8FBK FIELD32(0x0000000f)
887#define HT_FBK_CFG1_HTMCS9FBK FIELD32(0x000000f0)
888#define HT_FBK_CFG1_HTMCS10FBK FIELD32(0x00000f00)
889#define HT_FBK_CFG1_HTMCS11FBK FIELD32(0x0000f000)
890#define HT_FBK_CFG1_HTMCS12FBK FIELD32(0x000f0000)
891#define HT_FBK_CFG1_HTMCS13FBK FIELD32(0x00f00000)
892#define HT_FBK_CFG1_HTMCS14FBK FIELD32(0x0f000000)
893#define HT_FBK_CFG1_HTMCS15FBK FIELD32(0xf0000000)
894
895/*
896 * LG_FBK_CFG0:
897 */
898#define LG_FBK_CFG0 0x135c
899#define LG_FBK_CFG0_OFDMMCS0FBK FIELD32(0x0000000f)
900#define LG_FBK_CFG0_OFDMMCS1FBK FIELD32(0x000000f0)
901#define LG_FBK_CFG0_OFDMMCS2FBK FIELD32(0x00000f00)
902#define LG_FBK_CFG0_OFDMMCS3FBK FIELD32(0x0000f000)
903#define LG_FBK_CFG0_OFDMMCS4FBK FIELD32(0x000f0000)
904#define LG_FBK_CFG0_OFDMMCS5FBK FIELD32(0x00f00000)
905#define LG_FBK_CFG0_OFDMMCS6FBK FIELD32(0x0f000000)
906#define LG_FBK_CFG0_OFDMMCS7FBK FIELD32(0xf0000000)
907
/*
 * LG_FBK_CFG1: CCK legacy fallback rates, one 4-bit nibble per rate.
 * NOTE(review): the field names below carry the LG_FBK_CFG0_ prefix even
 * though they belong to the LG_FBK_CFG1 register — presumably a historical
 * naming slip; kept verbatim because callers reference these names as-is.
 */
#define LG_FBK_CFG1				0x1360
#define LG_FBK_CFG0_CCKMCS0FBK			FIELD32(0x0000000f)
#define LG_FBK_CFG0_CCKMCS1FBK			FIELD32(0x000000f0)
#define LG_FBK_CFG0_CCKMCS2FBK			FIELD32(0x00000f00)
#define LG_FBK_CFG0_CCKMCS3FBK			FIELD32(0x0000f000)
916
917/*
918 * CCK_PROT_CFG: CCK Protection
919 * PROTECT_RATE: Protection control frame rate for CCK TX(RTS/CTS/CFEnd)
920 * PROTECT_CTRL: Protection control frame type for CCK TX
921 * 0:none, 1:RTS/CTS, 2:CTS-to-self
922 * PROTECT_NAV: TXOP protection type for CCK TX
923 * 0:none, 1:ShortNAVprotect, 2:LongNAVProtect
924 * TX_OP_ALLOW_CCK: CCK TXOP allowance, 0:disallow
925 * TX_OP_ALLOW_OFDM: CCK TXOP allowance, 0:disallow
926 * TX_OP_ALLOW_MM20: CCK TXOP allowance, 0:disallow
927 * TX_OP_ALLOW_MM40: CCK TXOP allowance, 0:disallow
928 * TX_OP_ALLOW_GF20: CCK TXOP allowance, 0:disallow
929 * TX_OP_ALLOW_GF40: CCK TXOP allowance, 0:disallow
930 * RTS_TH_EN: RTS threshold enable on CCK TX
931 */
932#define CCK_PROT_CFG 0x1364
933#define CCK_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
934#define CCK_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
935#define CCK_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
936#define CCK_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
937#define CCK_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
938#define CCK_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
939#define CCK_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
940#define CCK_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
941#define CCK_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
942#define CCK_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
943
944/*
945 * OFDM_PROT_CFG: OFDM Protection
946 */
947#define OFDM_PROT_CFG 0x1368
948#define OFDM_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
949#define OFDM_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
950#define OFDM_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
951#define OFDM_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
952#define OFDM_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
953#define OFDM_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
954#define OFDM_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
955#define OFDM_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
956#define OFDM_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
957#define OFDM_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
958
959/*
960 * MM20_PROT_CFG: MM20 Protection
961 */
962#define MM20_PROT_CFG 0x136c
963#define MM20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
964#define MM20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
965#define MM20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
966#define MM20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
967#define MM20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
968#define MM20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
969#define MM20_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
970#define MM20_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
971#define MM20_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
972#define MM20_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
973
974/*
975 * MM40_PROT_CFG: MM40 Protection
976 */
977#define MM40_PROT_CFG 0x1370
978#define MM40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
979#define MM40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
980#define MM40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
981#define MM40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
982#define MM40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
983#define MM40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
984#define MM40_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
985#define MM40_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
986#define MM40_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
987#define MM40_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
988
989/*
990 * GF20_PROT_CFG: GF20 Protection
991 */
992#define GF20_PROT_CFG 0x1374
993#define GF20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
994#define GF20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
995#define GF20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
996#define GF20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
997#define GF20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
998#define GF20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
999#define GF20_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
1000#define GF20_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
1001#define GF20_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
1002#define GF20_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
1003
1004/*
1005 * GF40_PROT_CFG: GF40 Protection
1006 */
1007#define GF40_PROT_CFG 0x1378
1008#define GF40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1009#define GF40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1010#define GF40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
1011#define GF40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1012#define GF40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1013#define GF40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
1014#define GF40_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
1015#define GF40_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
1016#define GF40_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
1017#define GF40_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
1018
1019/*
1020 * EXP_CTS_TIME:
1021 */
1022#define EXP_CTS_TIME 0x137c
1023
1024/*
1025 * EXP_ACK_TIME:
1026 */
1027#define EXP_ACK_TIME 0x1380
1028
1029/*
1030 * RX_FILTER_CFG: RX configuration register.
1031 */
1032#define RX_FILTER_CFG 0x1400
1033#define RX_FILTER_CFG_DROP_CRC_ERROR FIELD32(0x00000001)
1034#define RX_FILTER_CFG_DROP_PHY_ERROR FIELD32(0x00000002)
1035#define RX_FILTER_CFG_DROP_NOT_TO_ME FIELD32(0x00000004)
1036#define RX_FILTER_CFG_DROP_NOT_MY_BSSD FIELD32(0x00000008)
1037#define RX_FILTER_CFG_DROP_VER_ERROR FIELD32(0x00000010)
1038#define RX_FILTER_CFG_DROP_MULTICAST FIELD32(0x00000020)
1039#define RX_FILTER_CFG_DROP_BROADCAST FIELD32(0x00000040)
1040#define RX_FILTER_CFG_DROP_DUPLICATE FIELD32(0x00000080)
1041#define RX_FILTER_CFG_DROP_CF_END_ACK FIELD32(0x00000100)
1042#define RX_FILTER_CFG_DROP_CF_END FIELD32(0x00000200)
1043#define RX_FILTER_CFG_DROP_ACK FIELD32(0x00000400)
1044#define RX_FILTER_CFG_DROP_CTS FIELD32(0x00000800)
1045#define RX_FILTER_CFG_DROP_RTS FIELD32(0x00001000)
1046#define RX_FILTER_CFG_DROP_PSPOLL FIELD32(0x00002000)
1047#define RX_FILTER_CFG_DROP_BA FIELD32(0x00004000)
1048#define RX_FILTER_CFG_DROP_BAR FIELD32(0x00008000)
1049#define RX_FILTER_CFG_DROP_CNTL FIELD32(0x00010000)
1050
1051/*
1052 * AUTO_RSP_CFG:
1053 * AUTORESPONDER: 0: disable, 1: enable
1054 * BAC_ACK_POLICY: 0:long, 1:short preamble
1055 * CTS_40_MMODE: Response CTS 40MHz duplicate mode
1056 * CTS_40_MREF: Response CTS 40MHz duplicate mode
1057 * AR_PREAMBLE: Auto responder preamble 0:long, 1:short preamble
1058 * DUAL_CTS_EN: Power bit value in control frame
1059 * ACK_CTS_PSM_BIT:Power bit value in control frame
1060 */
1061#define AUTO_RSP_CFG 0x1404
1062#define AUTO_RSP_CFG_AUTORESPONDER FIELD32(0x00000001)
1063#define AUTO_RSP_CFG_BAC_ACK_POLICY FIELD32(0x00000002)
1064#define AUTO_RSP_CFG_CTS_40_MMODE FIELD32(0x00000004)
1065#define AUTO_RSP_CFG_CTS_40_MREF FIELD32(0x00000008)
1066#define AUTO_RSP_CFG_AR_PREAMBLE FIELD32(0x00000010)
1067#define AUTO_RSP_CFG_DUAL_CTS_EN FIELD32(0x00000040)
1068#define AUTO_RSP_CFG_ACK_CTS_PSM_BIT FIELD32(0x00000080)
1069
1070/*
1071 * LEGACY_BASIC_RATE:
1072 */
1073#define LEGACY_BASIC_RATE 0x1408
1074
1075/*
1076 * HT_BASIC_RATE:
1077 */
1078#define HT_BASIC_RATE 0x140c
1079
1080/*
1081 * HT_CTRL_CFG:
1082 */
1083#define HT_CTRL_CFG 0x1410
1084
1085/*
1086 * SIFS_COST_CFG:
1087 */
1088#define SIFS_COST_CFG 0x1414
1089
1090/*
1091 * RX_PARSER_CFG:
1092 * Set NAV for all received frames
1093 */
1094#define RX_PARSER_CFG 0x1418
1095
1096/*
1097 * TX_SEC_CNT0:
1098 */
1099#define TX_SEC_CNT0 0x1500
1100
1101/*
1102 * RX_SEC_CNT0:
1103 */
1104#define RX_SEC_CNT0 0x1504
1105
1106/*
1107 * CCMP_FC_MUTE:
1108 */
1109#define CCMP_FC_MUTE 0x1508
1110
1111/*
1112 * TXOP_HLDR_ADDR0:
1113 */
1114#define TXOP_HLDR_ADDR0 0x1600
1115
1116/*
1117 * TXOP_HLDR_ADDR1:
1118 */
1119#define TXOP_HLDR_ADDR1 0x1604
1120
1121/*
1122 * TXOP_HLDR_ET:
1123 */
1124#define TXOP_HLDR_ET 0x1608
1125
1126/*
1127 * QOS_CFPOLL_RA_DW0:
1128 */
1129#define QOS_CFPOLL_RA_DW0 0x160c
1130
1131/*
1132 * QOS_CFPOLL_RA_DW1:
1133 */
1134#define QOS_CFPOLL_RA_DW1 0x1610
1135
1136/*
1137 * QOS_CFPOLL_QC:
1138 */
1139#define QOS_CFPOLL_QC 0x1614
1140
1141/*
1142 * RX_STA_CNT0: RX PLCP error count & RX CRC error count
1143 */
1144#define RX_STA_CNT0 0x1700
1145#define RX_STA_CNT0_CRC_ERR FIELD32(0x0000ffff)
1146#define RX_STA_CNT0_PHY_ERR FIELD32(0xffff0000)
1147
1148/*
1149 * RX_STA_CNT1: RX False CCA count & RX LONG frame count
1150 */
1151#define RX_STA_CNT1 0x1704
1152#define RX_STA_CNT1_FALSE_CCA FIELD32(0x0000ffff)
1153#define RX_STA_CNT1_PLCP_ERR FIELD32(0xffff0000)
1154
1155/*
1156 * RX_STA_CNT2:
1157 */
1158#define RX_STA_CNT2 0x1708
1159#define RX_STA_CNT2_RX_DUPLI_COUNT FIELD32(0x0000ffff)
1160#define RX_STA_CNT2_RX_FIFO_OVERFLOW FIELD32(0xffff0000)
1161
1162/*
1163 * TX_STA_CNT0: TX Beacon count
1164 */
1165#define TX_STA_CNT0 0x170c
1166#define TX_STA_CNT0_TX_FAIL_COUNT FIELD32(0x0000ffff)
1167#define TX_STA_CNT0_TX_BEACON_COUNT FIELD32(0xffff0000)
1168
1169/*
1170 * TX_STA_CNT1: TX tx count
1171 */
1172#define TX_STA_CNT1 0x1710
1173#define TX_STA_CNT1_TX_SUCCESS FIELD32(0x0000ffff)
1174#define TX_STA_CNT1_TX_RETRANSMIT FIELD32(0xffff0000)
1175
1176/*
1177 * TX_STA_CNT2: TX tx count
1178 */
1179#define TX_STA_CNT2 0x1714
1180#define TX_STA_CNT2_TX_ZERO_LEN_COUNT FIELD32(0x0000ffff)
1181#define TX_STA_CNT2_TX_UNDER_FLOW_COUNT FIELD32(0xffff0000)
1182
1183/*
1184 * TX_STA_FIFO: TX Result for specific PID status fifo register
1185 */
1186#define TX_STA_FIFO 0x1718
1187#define TX_STA_FIFO_VALID FIELD32(0x00000001)
1188#define TX_STA_FIFO_PID_TYPE FIELD32(0x0000001e)
1189#define TX_STA_FIFO_TX_SUCCESS FIELD32(0x00000020)
1190#define TX_STA_FIFO_TX_AGGRE FIELD32(0x00000040)
1191#define TX_STA_FIFO_TX_ACK_REQUIRED FIELD32(0x00000080)
1192#define TX_STA_FIFO_WCID FIELD32(0x0000ff00)
1193#define TX_STA_FIFO_SUCCESS_RATE FIELD32(0xffff0000)
1194#define TX_STA_FIFO_MCS FIELD32(0x007f0000)
1195#define TX_STA_FIFO_PHYMODE FIELD32(0xc0000000)
1196
1197/*
1198 * TX_AGG_CNT: Debug counter
1199 */
1200#define TX_AGG_CNT 0x171c
1201#define TX_AGG_CNT_NON_AGG_TX_COUNT FIELD32(0x0000ffff)
1202#define TX_AGG_CNT_AGG_TX_COUNT FIELD32(0xffff0000)
1203
1204/*
1205 * TX_AGG_CNT0:
1206 */
1207#define TX_AGG_CNT0 0x1720
1208#define TX_AGG_CNT0_AGG_SIZE_1_COUNT FIELD32(0x0000ffff)
1209#define TX_AGG_CNT0_AGG_SIZE_2_COUNT FIELD32(0xffff0000)
1210
1211/*
1212 * TX_AGG_CNT1:
1213 */
1214#define TX_AGG_CNT1 0x1724
1215#define TX_AGG_CNT1_AGG_SIZE_3_COUNT FIELD32(0x0000ffff)
1216#define TX_AGG_CNT1_AGG_SIZE_4_COUNT FIELD32(0xffff0000)
1217
1218/*
1219 * TX_AGG_CNT2:
1220 */
1221#define TX_AGG_CNT2 0x1728
1222#define TX_AGG_CNT2_AGG_SIZE_5_COUNT FIELD32(0x0000ffff)
1223#define TX_AGG_CNT2_AGG_SIZE_6_COUNT FIELD32(0xffff0000)
1224
1225/*
1226 * TX_AGG_CNT3:
1227 */
1228#define TX_AGG_CNT3 0x172c
1229#define TX_AGG_CNT3_AGG_SIZE_7_COUNT FIELD32(0x0000ffff)
1230#define TX_AGG_CNT3_AGG_SIZE_8_COUNT FIELD32(0xffff0000)
1231
1232/*
1233 * TX_AGG_CNT4:
1234 */
1235#define TX_AGG_CNT4 0x1730
1236#define TX_AGG_CNT4_AGG_SIZE_9_COUNT FIELD32(0x0000ffff)
1237#define TX_AGG_CNT4_AGG_SIZE_10_COUNT FIELD32(0xffff0000)
1238
1239/*
1240 * TX_AGG_CNT5:
1241 */
1242#define TX_AGG_CNT5 0x1734
1243#define TX_AGG_CNT5_AGG_SIZE_11_COUNT FIELD32(0x0000ffff)
1244#define TX_AGG_CNT5_AGG_SIZE_12_COUNT FIELD32(0xffff0000)
1245
1246/*
1247 * TX_AGG_CNT6:
1248 */
1249#define TX_AGG_CNT6 0x1738
1250#define TX_AGG_CNT6_AGG_SIZE_13_COUNT FIELD32(0x0000ffff)
1251#define TX_AGG_CNT6_AGG_SIZE_14_COUNT FIELD32(0xffff0000)
1252
1253/*
1254 * TX_AGG_CNT7:
1255 */
1256#define TX_AGG_CNT7 0x173c
1257#define TX_AGG_CNT7_AGG_SIZE_15_COUNT FIELD32(0x0000ffff)
1258#define TX_AGG_CNT7_AGG_SIZE_16_COUNT FIELD32(0xffff0000)
1259
1260/*
1261 * MPDU_DENSITY_CNT:
1262 * TX_ZERO_DEL: TX zero length delimiter count
1263 * RX_ZERO_DEL: RX zero length delimiter count
1264 */
1265#define MPDU_DENSITY_CNT 0x1740
1266#define MPDU_DENSITY_CNT_TX_ZERO_DEL FIELD32(0x0000ffff)
1267#define MPDU_DENSITY_CNT_RX_ZERO_DEL FIELD32(0xffff0000)
1268
1269/*
1270 * Security key table memory.
1271 * MAC_WCID_BASE: 8-bytes (use only 6 bytes) * 256 entry
1272 * PAIRWISE_KEY_TABLE_BASE: 32-byte * 256 entry
1273 * MAC_IVEIV_TABLE_BASE: 8-byte * 256-entry
1274 * MAC_WCID_ATTRIBUTE_BASE: 4-byte * 256-entry
1275 * SHARED_KEY_TABLE_BASE: 32-byte * 16-entry
1276 * SHARED_KEY_MODE_BASE: 4-byte * 16-entry
1277 */
1278#define MAC_WCID_BASE 0x1800
1279#define PAIRWISE_KEY_TABLE_BASE 0x4000
1280#define MAC_IVEIV_TABLE_BASE 0x6000
1281#define MAC_WCID_ATTRIBUTE_BASE 0x6800
1282#define SHARED_KEY_TABLE_BASE 0x6c00
1283#define SHARED_KEY_MODE_BASE 0x7000
1284
/*
 * Offset helpers for the security key table memory regions above.
 * Each takes a table index and yields the absolute offset of the
 * corresponding entry: base + index * entry size.
 */
#define MAC_WCID_ENTRY(__idx) \
	( MAC_WCID_BASE + ((__idx) * sizeof(struct mac_wcid_entry)) )
#define PAIRWISE_KEY_ENTRY(__idx) \
	( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
/*
 * Bug fix: this previously used '&' (bitwise AND) instead of '*', which
 * collapsed every IV/EIV entry offset onto the table base (or base + 8)
 * instead of indexing 8-byte entries like the other tables.
 */
#define MAC_IVEIV_ENTRY(__idx) \
	( MAC_IVEIV_TABLE_BASE + ((__idx) * sizeof(struct mac_iveiv_entry)) )
#define MAC_WCID_ATTR_ENTRY(__idx) \
	( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) )
#define SHARED_KEY_ENTRY(__idx) \
	( SHARED_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
#define SHARED_KEY_MODE_ENTRY(__idx) \
	( SHARED_KEY_MODE_BASE + ((__idx) * sizeof(u32)) )
1297
/* WCID (wireless client ID) table entry: the station MAC address padded
 * to the 8-byte entry size used at MAC_WCID_BASE (only 6 bytes used). */
struct mac_wcid_entry {
	u8 mac[6];
	u8 reserved[2];
} __attribute__ ((packed));

/* Hardware key table entry (32 bytes): the cipher key plus per-direction
 * MIC keys, as stored at PAIRWISE_KEY_TABLE_BASE / SHARED_KEY_TABLE_BASE. */
struct hw_key_entry {
	u8 key[16];
	u8 tx_mic[8];
	u8 rx_mic[8];
} __attribute__ ((packed));

/* IV/EIV table entry (8 bytes per entry) stored at MAC_IVEIV_TABLE_BASE. */
struct mac_iveiv_entry {
	u8 iv[8];
} __attribute__ ((packed));
1312
1313/*
1314 * MAC_WCID_ATTRIBUTE:
1315 */
1316#define MAC_WCID_ATTRIBUTE_KEYTAB FIELD32(0x00000001)
1317#define MAC_WCID_ATTRIBUTE_CIPHER FIELD32(0x0000000e)
1318#define MAC_WCID_ATTRIBUTE_BSS_IDX FIELD32(0x00000070)
1319#define MAC_WCID_ATTRIBUTE_RX_WIUDF FIELD32(0x00000380)
1320
1321/*
1322 * SHARED_KEY_MODE:
1323 */
1324#define SHARED_KEY_MODE_BSS0_KEY0 FIELD32(0x00000007)
1325#define SHARED_KEY_MODE_BSS0_KEY1 FIELD32(0x00000070)
1326#define SHARED_KEY_MODE_BSS0_KEY2 FIELD32(0x00000700)
1327#define SHARED_KEY_MODE_BSS0_KEY3 FIELD32(0x00007000)
1328#define SHARED_KEY_MODE_BSS1_KEY0 FIELD32(0x00070000)
1329#define SHARED_KEY_MODE_BSS1_KEY1 FIELD32(0x00700000)
1330#define SHARED_KEY_MODE_BSS1_KEY2 FIELD32(0x07000000)
1331#define SHARED_KEY_MODE_BSS1_KEY3 FIELD32(0x70000000)
1332
1333/*
1334 * HOST-MCU communication
1335 */
1336
1337/*
1338 * H2M_MAILBOX_CSR: Host-to-MCU Mailbox.
1339 */
1340#define H2M_MAILBOX_CSR 0x7010
1341#define H2M_MAILBOX_CSR_ARG0 FIELD32(0x000000ff)
1342#define H2M_MAILBOX_CSR_ARG1 FIELD32(0x0000ff00)
1343#define H2M_MAILBOX_CSR_CMD_TOKEN FIELD32(0x00ff0000)
1344#define H2M_MAILBOX_CSR_OWNER FIELD32(0xff000000)
1345
1346/*
1347 * H2M_MAILBOX_CID:
1348 */
1349#define H2M_MAILBOX_CID 0x7014
1350#define H2M_MAILBOX_CID_CMD0 FIELD32(0x000000ff)
1351#define H2M_MAILBOX_CID_CMD1 FIELD32(0x0000ff00)
1352#define H2M_MAILBOX_CID_CMD2 FIELD32(0x00ff0000)
1353#define H2M_MAILBOX_CID_CMD3 FIELD32(0xff000000)
1354
1355/*
1356 * H2M_MAILBOX_STATUS:
1357 */
1358#define H2M_MAILBOX_STATUS 0x701c
1359
1360/*
1361 * H2M_INT_SRC:
1362 */
1363#define H2M_INT_SRC 0x7024
1364
1365/*
1366 * H2M_BBP_AGENT:
1367 */
1368#define H2M_BBP_AGENT 0x7028
1369
1370/*
1371 * MCU_LEDCS: LED control for MCU Mailbox.
1372 */
1373#define MCU_LEDCS_LED_MODE FIELD8(0x1f)
1374#define MCU_LEDCS_POLARITY FIELD8(0x01)
1375
1376/*
1377 * HW_CS_CTS_BASE:
1378 * Carrier-sense CTS frame base address.
1379 * It's where mac stores carrier-sense frame for carrier-sense function.
1380 */
1381#define HW_CS_CTS_BASE 0x7700
1382
1383/*
1384 * HW_DFS_CTS_BASE:
1385 * DFS CTS frame base address. It's where mac stores CTS frame for DFS.
1386 */
1387#define HW_DFS_CTS_BASE 0x7780
1388
1389/*
1390 * TXRX control registers - base address 0x3000
1391 */
1392
1393/*
1394 * TXRX_CSR1:
1395 * rt2860b UNKNOWN reg use R/O Reg Addr 0x77d0 first..
1396 */
1397#define TXRX_CSR1 0x77d0
1398
1399/*
1400 * HW_DEBUG_SETTING_BASE:
1401 * since NULL frame won't be that long (256 byte)
1402 * We steal 16 tail bytes to save debugging settings
1403 */
1404#define HW_DEBUG_SETTING_BASE 0x77f0
1405#define HW_DEBUG_SETTING_BASE2 0x7770
1406
1407/*
1408 * HW_BEACON_BASE
1409 * In order to support maximum 8 MBSS and its maximum length
1410 * is 512 bytes for each beacon
1411 * Three section discontinue memory segments will be used.
1412 * 1. The original region for BCN 0~3
1413 * 2. Extract memory from FCE table for BCN 4~5
1414 * 3. Extract memory from Pair-wise key table for BCN 6~7
1415 * It occupied those memory of wcid 238~253 for BCN 6
1416 * and wcid 222~237 for BCN 7
1417 *
1418 * IMPORTANT NOTE: Not sure why legacy driver does this,
1419 * but HW_BEACON_BASE7 is 0x0200 bytes below HW_BEACON_BASE6.
1420 */
#define HW_BEACON_BASE0			0x7800
#define HW_BEACON_BASE1			0x7a00
#define HW_BEACON_BASE2			0x7c00
#define HW_BEACON_BASE3			0x7e00
#define HW_BEACON_BASE4			0x7200
#define HW_BEACON_BASE5			0x7400
#define HW_BEACON_BASE6			0x5dc0
#define HW_BEACON_BASE7			0x5bc0

/*
 * Map a beacon index (0-7) to its base address: BCN 0-3 ascend from
 * BASE0, BCN 4-5 ascend from BASE4, BCN 6-7 descend from BASE6 in
 * 0x0200-byte steps (see the note above about BASE7 sitting below BASE6).
 * Bug fix: __index is now parenthesized inside the arithmetic, so
 * expression arguments (e.g. HW_BEACON_OFFSET(i + 1)) expand correctly;
 * previously "__index * 0x0200" bound '*' before '+' in the argument.
 */
#define HW_BEACON_OFFSET(__index) \
	( ((__index) < 4) ? ( HW_BEACON_BASE0 + ((__index) * 0x0200) ) : \
	  (((__index) < 6) ? ( HW_BEACON_BASE4 + (((__index) - 4) * 0x0200) ) : \
	  (HW_BEACON_BASE6 - (((__index) - 6) * 0x0200))) )
1434
1435/*
1436 * BBP registers.
1437 * The wordsize of the BBP is 8 bits.
1438 */
1439
1440/*
1441 * BBP 1: TX Antenna
1442 */
1443#define BBP1_TX_POWER FIELD8(0x07)
1444#define BBP1_TX_ANTENNA FIELD8(0x18)
1445
1446/*
1447 * BBP 3: RX Antenna
1448 */
1449#define BBP3_RX_ANTENNA FIELD8(0x18)
1450#define BBP3_HT40_PLUS FIELD8(0x20)
1451
1452/*
1453 * BBP 4: Bandwidth
1454 */
1455#define BBP4_TX_BF FIELD8(0x01)
1456#define BBP4_BANDWIDTH FIELD8(0x18)
1457
1458/*
1459 * RFCSR registers
1460 * The wordsize of the RFCSR is 8 bits.
1461 */
1462
1463/*
1464 * RFCSR 6:
1465 */
1466#define RFCSR6_R FIELD8(0x03)
1467
1468/*
1469 * RFCSR 7:
1470 */
1471#define RFCSR7_RF_TUNING FIELD8(0x01)
1472
1473/*
1474 * RFCSR 12:
1475 */
1476#define RFCSR12_TX_POWER FIELD8(0x1f)
1477
1478/*
1479 * RFCSR 22:
1480 */
1481#define RFCSR22_BASEBAND_LOOPBACK FIELD8(0x01)
1482
1483/*
1484 * RFCSR 23:
1485 */
1486#define RFCSR23_FREQ_OFFSET FIELD8(0x7f)
1487
1488/*
1489 * RFCSR 30:
1490 */
1491#define RFCSR30_RF_CALIBRATION FIELD8(0x80)
1492
1493/*
1494 * RF registers
1495 */
1496
1497/*
1498 * RF 2
1499 */
1500#define RF2_ANTENNA_RX2 FIELD32(0x00000040)
1501#define RF2_ANTENNA_TX1 FIELD32(0x00004000)
1502#define RF2_ANTENNA_RX1 FIELD32(0x00020000)
1503
1504/*
1505 * RF 3
1506 */
1507#define RF3_TXPOWER_G FIELD32(0x00003e00)
1508#define RF3_TXPOWER_A_7DBM_BOOST FIELD32(0x00000200)
1509#define RF3_TXPOWER_A FIELD32(0x00003c00)
1510
1511/*
1512 * RF 4
1513 */
1514#define RF4_TXPOWER_G FIELD32(0x000007c0)
1515#define RF4_TXPOWER_A_7DBM_BOOST FIELD32(0x00000040)
1516#define RF4_TXPOWER_A FIELD32(0x00000780)
1517#define RF4_FREQ_OFFSET FIELD32(0x001f8000)
1518#define RF4_HT40 FIELD32(0x00200000)
1519
1520/*
1521 * EEPROM content.
1522 * The wordsize of the EEPROM is 16 bits.
1523 */
1524
1525/*
1526 * EEPROM Version
1527 */
1528#define EEPROM_VERSION 0x0001
1529#define EEPROM_VERSION_FAE FIELD16(0x00ff)
1530#define EEPROM_VERSION_VERSION FIELD16(0xff00)
1531
1532/*
1533 * HW MAC address.
1534 */
1535#define EEPROM_MAC_ADDR_0 0x0002
1536#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff)
1537#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00)
1538#define EEPROM_MAC_ADDR_1 0x0003
1539#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff)
1540#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00)
1541#define EEPROM_MAC_ADDR_2 0x0004
1542#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff)
1543#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00)
1544
1545/*
1546 * EEPROM ANTENNA config
1547 * RXPATH: 1: 1R, 2: 2R, 3: 3R
1548 * TXPATH: 1: 1T, 2: 2T
1549 */
1550#define EEPROM_ANTENNA 0x001a
1551#define EEPROM_ANTENNA_RXPATH FIELD16(0x000f)
1552#define EEPROM_ANTENNA_TXPATH FIELD16(0x00f0)
1553#define EEPROM_ANTENNA_RF_TYPE FIELD16(0x0f00)
1554
1555/*
1556 * EEPROM NIC config
1557 * CARDBUS_ACCEL: 0 - enable, 1 - disable
1558 */
1559#define EEPROM_NIC 0x001b
1560#define EEPROM_NIC_HW_RADIO FIELD16(0x0001)
1561#define EEPROM_NIC_DYNAMIC_TX_AGC FIELD16(0x0002)
1562#define EEPROM_NIC_EXTERNAL_LNA_BG FIELD16(0x0004)
1563#define EEPROM_NIC_EXTERNAL_LNA_A FIELD16(0x0008)
1564#define EEPROM_NIC_CARDBUS_ACCEL FIELD16(0x0010)
1565#define EEPROM_NIC_BW40M_SB_BG FIELD16(0x0020)
1566#define EEPROM_NIC_BW40M_SB_A FIELD16(0x0040)
1567#define EEPROM_NIC_WPS_PBC FIELD16(0x0080)
1568#define EEPROM_NIC_BW40M_BG FIELD16(0x0100)
1569#define EEPROM_NIC_BW40M_A FIELD16(0x0200)
1570
1571/*
1572 * EEPROM frequency
1573 */
1574#define EEPROM_FREQ 0x001d
1575#define EEPROM_FREQ_OFFSET FIELD16(0x00ff)
1576#define EEPROM_FREQ_LED_MODE FIELD16(0x7f00)
1577#define EEPROM_FREQ_LED_POLARITY FIELD16(0x1000)
1578
1579/*
1580 * EEPROM LED
1581 * POLARITY_RDY_G: Polarity RDY_G setting.
1582 * POLARITY_RDY_A: Polarity RDY_A setting.
1583 * POLARITY_ACT: Polarity ACT setting.
1584 * POLARITY_GPIO_0: Polarity GPIO0 setting.
1585 * POLARITY_GPIO_1: Polarity GPIO1 setting.
1586 * POLARITY_GPIO_2: Polarity GPIO2 setting.
1587 * POLARITY_GPIO_3: Polarity GPIO3 setting.
1588 * POLARITY_GPIO_4: Polarity GPIO4 setting.
1589 * LED_MODE: Led mode.
1590 */
1591#define EEPROM_LED1 0x001e
1592#define EEPROM_LED2 0x001f
1593#define EEPROM_LED3 0x0020
1594#define EEPROM_LED_POLARITY_RDY_BG FIELD16(0x0001)
1595#define EEPROM_LED_POLARITY_RDY_A FIELD16(0x0002)
1596#define EEPROM_LED_POLARITY_ACT FIELD16(0x0004)
1597#define EEPROM_LED_POLARITY_GPIO_0 FIELD16(0x0008)
1598#define EEPROM_LED_POLARITY_GPIO_1 FIELD16(0x0010)
1599#define EEPROM_LED_POLARITY_GPIO_2 FIELD16(0x0020)
1600#define EEPROM_LED_POLARITY_GPIO_3 FIELD16(0x0040)
1601#define EEPROM_LED_POLARITY_GPIO_4 FIELD16(0x0080)
1602#define EEPROM_LED_LED_MODE FIELD16(0x1f00)
1603
1604/*
1605 * EEPROM LNA
1606 */
1607#define EEPROM_LNA 0x0022
1608#define EEPROM_LNA_BG FIELD16(0x00ff)
1609#define EEPROM_LNA_A0 FIELD16(0xff00)
1610
1611/*
1612 * EEPROM RSSI BG offset
1613 */
1614#define EEPROM_RSSI_BG 0x0023
1615#define EEPROM_RSSI_BG_OFFSET0 FIELD16(0x00ff)
1616#define EEPROM_RSSI_BG_OFFSET1 FIELD16(0xff00)
1617
1618/*
1619 * EEPROM RSSI BG2 offset
1620 */
1621#define EEPROM_RSSI_BG2 0x0024
1622#define EEPROM_RSSI_BG2_OFFSET2 FIELD16(0x00ff)
1623#define EEPROM_RSSI_BG2_LNA_A1 FIELD16(0xff00)
1624
1625/*
1626 * EEPROM RSSI A offset
1627 */
1628#define EEPROM_RSSI_A 0x0025
1629#define EEPROM_RSSI_A_OFFSET0 FIELD16(0x00ff)
1630#define EEPROM_RSSI_A_OFFSET1 FIELD16(0xff00)
1631
1632/*
1633 * EEPROM RSSI A2 offset
1634 */
1635#define EEPROM_RSSI_A2 0x0026
1636#define EEPROM_RSSI_A2_OFFSET2 FIELD16(0x00ff)
1637#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00)
1638
1639/*
1640 * EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power.
1641 * This is delta in 40MHZ.
 * VALUE: Tx Power delta value (MAX=4)
1643 * TYPE: 1: Plus the delta value, 0: minus the delta value
1644 * TXPOWER: Enable:
1645 */
1646#define EEPROM_TXPOWER_DELTA 0x0028
1647#define EEPROM_TXPOWER_DELTA_VALUE FIELD16(0x003f)
1648#define EEPROM_TXPOWER_DELTA_TYPE FIELD16(0x0040)
1649#define EEPROM_TXPOWER_DELTA_TXPOWER FIELD16(0x0080)
1650
1651/*
1652 * EEPROM TXPOWER 802.11BG
1653 */
1654#define EEPROM_TXPOWER_BG1 0x0029
1655#define EEPROM_TXPOWER_BG2 0x0030
1656#define EEPROM_TXPOWER_BG_SIZE 7
1657#define EEPROM_TXPOWER_BG_1 FIELD16(0x00ff)
1658#define EEPROM_TXPOWER_BG_2 FIELD16(0xff00)
1659
1660/*
1661 * EEPROM TXPOWER 802.11A
1662 */
1663#define EEPROM_TXPOWER_A1 0x003c
1664#define EEPROM_TXPOWER_A2 0x0053
1665#define EEPROM_TXPOWER_A_SIZE 6
1666#define EEPROM_TXPOWER_A_1 FIELD16(0x00ff)
1667#define EEPROM_TXPOWER_A_2 FIELD16(0xff00)
1668
1669/*
1670 * EEPROM TXpower byrate: 20MHZ power
1671 */
1672#define EEPROM_TXPOWER_BYRATE 0x006f
1673
1674/*
1675 * EEPROM BBP.
1676 */
1677#define EEPROM_BBP_START 0x0078
1678#define EEPROM_BBP_SIZE 16
1679#define EEPROM_BBP_VALUE FIELD16(0x00ff)
1680#define EEPROM_BBP_REG_ID FIELD16(0xff00)
1681
1682/*
1683 * MCU mailbox commands.
1684 */
1685#define MCU_SLEEP 0x30
1686#define MCU_WAKEUP 0x31
1687#define MCU_RADIO_OFF 0x35
1688#define MCU_CURRENT 0x36
1689#define MCU_LED 0x50
1690#define MCU_LED_STRENGTH 0x51
1691#define MCU_LED_1 0x52
1692#define MCU_LED_2 0x53
1693#define MCU_LED_3 0x54
1694#define MCU_RADAR 0x60
1695#define MCU_BOOT_SIGNAL 0x72
1696#define MCU_BBP_SIGNAL 0x80
1697#define MCU_POWER_SAVE 0x83
1698
1699/*
1700 * MCU mailbox tokens
1701 */
1702#define TOKEN_WAKUP 3
1703
1704/*
1705 * DMA descriptor defines.
1706 */
1707#define TXWI_DESC_SIZE ( 4 * sizeof(__le32) )
1708#define RXWI_DESC_SIZE ( 4 * sizeof(__le32) )
1709
1710/*
1711 * TX WI structure
1712 */
1713
1714/*
1715 * Word0
1716 * FRAG: 1 To inform TKIP engine this is a fragment.
1717 * MIMO_PS: The remote peer is in dynamic MIMO-PS mode
1718 * TX_OP: 0:HT TXOP rule , 1:PIFS TX ,2:Backoff, 3:sifs
1719 * BW: Channel bandwidth 20MHz or 40 MHz
1720 * STBC: 1: STBC support MCS =0-7, 2,3 : RESERVED
1721 */
1722#define TXWI_W0_FRAG FIELD32(0x00000001)
1723#define TXWI_W0_MIMO_PS FIELD32(0x00000002)
1724#define TXWI_W0_CF_ACK FIELD32(0x00000004)
1725#define TXWI_W0_TS FIELD32(0x00000008)
1726#define TXWI_W0_AMPDU FIELD32(0x00000010)
1727#define TXWI_W0_MPDU_DENSITY FIELD32(0x000000e0)
1728#define TXWI_W0_TX_OP FIELD32(0x00000300)
1729#define TXWI_W0_MCS FIELD32(0x007f0000)
1730#define TXWI_W0_BW FIELD32(0x00800000)
1731#define TXWI_W0_SHORT_GI FIELD32(0x01000000)
1732#define TXWI_W0_STBC FIELD32(0x06000000)
1733#define TXWI_W0_IFS FIELD32(0x08000000)
1734#define TXWI_W0_PHYMODE FIELD32(0xc0000000)
1735
1736/*
1737 * Word1
1738 */
1739#define TXWI_W1_ACK FIELD32(0x00000001)
1740#define TXWI_W1_NSEQ FIELD32(0x00000002)
1741#define TXWI_W1_BW_WIN_SIZE FIELD32(0x000000fc)
1742#define TXWI_W1_WIRELESS_CLI_ID FIELD32(0x0000ff00)
1743#define TXWI_W1_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
1744#define TXWI_W1_PACKETID FIELD32(0xf0000000)
1745
1746/*
1747 * Word2
1748 */
1749#define TXWI_W2_IV FIELD32(0xffffffff)
1750
1751/*
1752 * Word3
1753 */
1754#define TXWI_W3_EIV FIELD32(0xffffffff)
1755
1756/*
1757 * RX WI structure
1758 */
1759
1760/*
1761 * Word0
1762 */
1763#define RXWI_W0_WIRELESS_CLI_ID FIELD32(0x000000ff)
1764#define RXWI_W0_KEY_INDEX FIELD32(0x00000300)
1765#define RXWI_W0_BSSID FIELD32(0x00001c00)
1766#define RXWI_W0_UDF FIELD32(0x0000e000)
1767#define RXWI_W0_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
1768#define RXWI_W0_TID FIELD32(0xf0000000)
1769
1770/*
1771 * Word1
1772 */
1773#define RXWI_W1_FRAG FIELD32(0x0000000f)
1774#define RXWI_W1_SEQUENCE FIELD32(0x0000fff0)
1775#define RXWI_W1_MCS FIELD32(0x007f0000)
1776#define RXWI_W1_BW FIELD32(0x00800000)
1777#define RXWI_W1_SHORT_GI FIELD32(0x01000000)
1778#define RXWI_W1_STBC FIELD32(0x06000000)
1779#define RXWI_W1_PHYMODE FIELD32(0xc0000000)
1780
1781/*
1782 * Word2
1783 */
1784#define RXWI_W2_RSSI0 FIELD32(0x000000ff)
1785#define RXWI_W2_RSSI1 FIELD32(0x0000ff00)
1786#define RXWI_W2_RSSI2 FIELD32(0x00ff0000)
1787
1788/*
1789 * Word3
1790 */
1791#define RXWI_W3_SNR0 FIELD32(0x000000ff)
1792#define RXWI_W3_SNR1 FIELD32(0x0000ff00)
1793
1794/*
1795 * Macros for converting txpower from EEPROM to mac80211 value
1796 * and from mac80211 value to register value.
1797 */
/*
 * Macros for converting txpower from EEPROM to mac80211 value
 * and from mac80211 value to register value.
 * *_FROM_DEV: out-of-range EEPROM values fall back to DEFAULT_TXPOWER.
 * *_TO_DEV: clamp the mac80211 value into the valid register range.
 * Bug fix: the *_FROM_DEV expansions are now fully parenthesized so each
 * behaves as a single expression (previously e.g.
 * "2 * TXPOWER_G_FROM_DEV(x)" multiplied into the ternary condition);
 * MIN_A_TXPOWER is parenthesized for the same reason.
 * Note: __txpower is still evaluated twice — avoid side effects in the
 * argument.
 */
#define MIN_G_TXPOWER	0
#define MIN_A_TXPOWER	(-7)
#define MAX_G_TXPOWER	31
#define MAX_A_TXPOWER	15
#define DEFAULT_TXPOWER	5

#define TXPOWER_G_FROM_DEV(__txpower) \
	(((__txpower) > MAX_G_TXPOWER) ? DEFAULT_TXPOWER : (__txpower))

#define TXPOWER_G_TO_DEV(__txpower) \
	clamp_t(char, __txpower, MIN_G_TXPOWER, MAX_G_TXPOWER)

#define TXPOWER_A_FROM_DEV(__txpower) \
	(((__txpower) > MAX_A_TXPOWER) ? DEFAULT_TXPOWER : (__txpower))

#define TXPOWER_A_TO_DEV(__txpower) \
	clamp_t(char, __txpower, MIN_A_TXPOWER, MAX_A_TXPOWER)
1815
1816#endif /* RT2800_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.c b/drivers/net/wireless/rt2x00/rt2800lib.c
new file mode 100644
index 000000000000..5c7d74a6f16e
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800lib.c
@@ -0,0 +1,1817 @@
1/*
2 Copyright (C) 2009 Bartlomiej Zolnierkiewicz
3
4 Based on the original rt2800pci.c and rt2800usb.c:
5
6 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
7 <http://rt2x00.serialmonkey.com>
8
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License as published by
11 the Free Software Foundation; either version 2 of the License, or
12 (at your option) any later version.
13
14 This program is distributed in the hope that it will be useful,
15 but WITHOUT ANY WARRANTY; without even the implied warranty of
16 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 GNU General Public License for more details.
18
19 You should have received a copy of the GNU General Public License
20 along with this program; if not, write to the
21 Free Software Foundation, Inc.,
22 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
23 */
24
25/*
26 Module: rt2800lib
27 Abstract: rt2800 generic device routines.
28 */
29
30#include <linux/kernel.h>
31#include <linux/module.h>
32
33#include "rt2x00.h"
34#ifdef CONFIG_RT2800USB
35#include "rt2x00usb.h"
36#endif
37#include "rt2800lib.h"
38#include "rt2800.h"
39#include "rt2800usb.h"
40
41MODULE_AUTHOR("Bartlomiej Zolnierkiewicz");
42MODULE_DESCRIPTION("rt2800 library");
43MODULE_LICENSE("GPL");
44
45/*
46 * Register access.
47 * All access to the CSR registers will go through the methods
48 * rt2800_register_read and rt2800_register_write.
49 * BBP and RF register require indirect register access,
50 * and use the CSR registers BBPCSR and RFCSR to achieve this.
51 * These indirect registers work with busy bits,
52 * and we will try maximal REGISTER_BUSY_COUNT times to access
53 * the register while taking a REGISTER_BUSY_DELAY us delay
54 * between each attampt. When the busy bit is still set at that time,
55 * the access attempt is considered to have failed,
56 * and we will print an error.
57 * The _lock versions must be used if you already hold the csr_mutex
58 */
/*
 * Busy-wait helpers for the indirect register interfaces. Each expands
 * to rt2800_regbusy_read(), which polls the given busy bit and returns
 * nonzero once the interface is free (leaving the last CSR value in
 * *__reg), or zero when the bit never cleared in time.
 */
#define WAIT_FOR_BBP(__dev, __reg) \
	rt2800_regbusy_read((__dev), BBP_CSR_CFG, BBP_CSR_CFG_BUSY, (__reg))
#define WAIT_FOR_RFCSR(__dev, __reg) \
	rt2800_regbusy_read((__dev), RF_CSR_CFG, RF_CSR_CFG_BUSY, (__reg))
#define WAIT_FOR_RF(__dev, __reg) \
	rt2800_regbusy_read((__dev), RF_CSR_CFG0, RF_CSR_CFG0_BUSY, (__reg))
#define WAIT_FOR_MCU(__dev, __reg) \
	rt2800_regbusy_read((__dev), H2M_MAILBOX_CSR, \
			    H2M_MAILBOX_CSR_OWNER, (__reg))
68
69static void rt2800_bbp_write(struct rt2x00_dev *rt2x00dev,
70 const unsigned int word, const u8 value)
71{
72 u32 reg;
73
74 mutex_lock(&rt2x00dev->csr_mutex);
75
76 /*
77 * Wait until the BBP becomes available, afterwards we
78 * can safely write the new data into the register.
79 */
80 if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
81 reg = 0;
82 rt2x00_set_field32(&reg, BBP_CSR_CFG_VALUE, value);
83 rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
84 rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
85 rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 0);
86 if (rt2x00_intf_is_pci(rt2x00dev))
87 rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);
88
89 rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
90 }
91
92 mutex_unlock(&rt2x00dev->csr_mutex);
93}
94
/*
 * Read a BBP register through the indirect BBP_CSR_CFG interface.
 * On timeout reg stays 0xffffffff, so the caller receives 0xff.
 */
static void rt2800_bbp_read(struct rt2x00_dev *rt2x00dev,
			    const unsigned int word, u8 *value)
{
	u32 reg;

	mutex_lock(&rt2x00dev->csr_mutex);

	/*
	 * Wait until the BBP becomes available, afterwards we
	 * can safely write the read request into the register.
	 * After the data has been written, we wait until hardware
	 * returns the correct value, if at any time the register
	 * doesn't become available in time, reg will be 0xffffffff
	 * which means we return 0xff to the caller.
	 */
	if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
		reg = 0;
		rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
		rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
		rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 1);
		if (rt2x00_intf_is_pci(rt2x00dev))
			rt2x00_set_field32(&reg, BBP_CSR_CFG_BBP_RW_MODE, 1);

		rt2800_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);

		/* Second poll: hardware fills in the read-back value. */
		WAIT_FOR_BBP(rt2x00dev, &reg);
	}

	*value = rt2x00_get_field32(reg, BBP_CSR_CFG_VALUE);

	mutex_unlock(&rt2x00dev->csr_mutex);
}
127
128static void rt2800_rfcsr_write(struct rt2x00_dev *rt2x00dev,
129 const unsigned int word, const u8 value)
130{
131 u32 reg;
132
133 mutex_lock(&rt2x00dev->csr_mutex);
134
135 /*
136 * Wait until the RFCSR becomes available, afterwards we
137 * can safely write the new data into the register.
138 */
139 if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
140 reg = 0;
141 rt2x00_set_field32(&reg, RF_CSR_CFG_DATA, value);
142 rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
143 rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 1);
144 rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
145
146 rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);
147 }
148
149 mutex_unlock(&rt2x00dev->csr_mutex);
150}
151
/*
 * Read an RFCSR register through the indirect RF_CSR_CFG interface.
 * On timeout reg stays 0xffffffff, so the caller receives 0xff.
 */
static void rt2800_rfcsr_read(struct rt2x00_dev *rt2x00dev,
			      const unsigned int word, u8 *value)
{
	u32 reg;

	mutex_lock(&rt2x00dev->csr_mutex);

	/*
	 * Wait until the RFCSR becomes available, afterwards we
	 * can safely write the read request into the register.
	 * After the data has been written, we wait until hardware
	 * returns the correct value, if at any time the register
	 * doesn't become available in time, reg will be 0xffffffff
	 * which means we return 0xff to the caller.
	 */
	if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
		reg = 0;
		rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
		rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 0);
		rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);

		rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);

		/* Second poll: hardware fills in the read-back value. */
		WAIT_FOR_RFCSR(rt2x00dev, &reg);
	}

	*value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA);

	mutex_unlock(&rt2x00dev->csr_mutex);
}
182
/*
 * Write a (legacy-style) RF register through RF_CSR_CFG0 and mirror
 * the value into the rt2x00 RF register cache via rt2x00_rf_write().
 * If the interface never becomes idle the write is dropped.
 */
static void rt2800_rf_write(struct rt2x00_dev *rt2x00dev,
			    const unsigned int word, const u32 value)
{
	u32 reg;

	mutex_lock(&rt2x00dev->csr_mutex);

	/*
	 * Wait until the RF becomes available, afterwards we
	 * can safely write the new data into the register.
	 */
	if (WAIT_FOR_RF(rt2x00dev, &reg)) {
		reg = 0;
		rt2x00_set_field32(&reg, RF_CSR_CFG0_REG_VALUE_BW, value);
		rt2x00_set_field32(&reg, RF_CSR_CFG0_STANDBYMODE, 0);
		rt2x00_set_field32(&reg, RF_CSR_CFG0_SEL, 0);
		rt2x00_set_field32(&reg, RF_CSR_CFG0_BUSY, 1);

		rt2800_register_write_lock(rt2x00dev, RF_CSR_CFG0, reg);
		/* Keep the software copy of the RF registers in sync. */
		rt2x00_rf_write(rt2x00dev, word, value);
	}

	mutex_unlock(&rt2x00dev->csr_mutex);
}
207
/*
 * Post a command to the on-board MCU through the H2M mailbox:
 * the token/arguments go into H2M_MAILBOX_CSR (together with the
 * OWNER bit handing the mailbox to the MCU), then the command code
 * is written to HOST_CMD_CSR. No-op if the mailbox stays busy.
 */
void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
			const u8 command, const u8 token,
			const u8 arg0, const u8 arg1)
{
	u32 reg;

	if (rt2x00_intf_is_pci(rt2x00dev)) {
		/*
		 * RT2880 and RT3052 don't support MCU requests.
		 */
		if (rt2x00_rt(&rt2x00dev->chip, RT2880) ||
		    rt2x00_rt(&rt2x00dev->chip, RT3052))
			return;
	}

	mutex_lock(&rt2x00dev->csr_mutex);

	/*
	 * Wait until the MCU becomes available, afterwards we
	 * can safely write the new data into the register.
	 * Note: reg keeps the value read back by WAIT_FOR_MCU;
	 * only the fields below are overwritten.
	 */
	if (WAIT_FOR_MCU(rt2x00dev, &reg)) {
		rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_OWNER, 1);
		rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_CMD_TOKEN, token);
		rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG0, arg0);
		rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG1, arg1);
		rt2800_register_write_lock(rt2x00dev, H2M_MAILBOX_CSR, reg);

		reg = 0;
		rt2x00_set_field32(&reg, HOST_CMD_CSR_HOST_COMMAND, command);
		rt2800_register_write_lock(rt2x00dev, HOST_CMD_CSR, reg);
	}

	mutex_unlock(&rt2x00dev->csr_mutex);
}
EXPORT_SYMBOL_GPL(rt2800_mcu_request);
244
245#ifdef CONFIG_RT2X00_LIB_DEBUGFS
/*
 * Debugfs register access description for rt2800 devices: exposes
 * CSR, EEPROM, BBP and RF word access with the appropriate accessors,
 * word sizes and word counts.
 */
const struct rt2x00debug rt2800_rt2x00debug = {
	.owner = THIS_MODULE,
	.csr = {
		.read = rt2800_register_read,
		.write = rt2800_register_write,
		.flags = RT2X00DEBUGFS_OFFSET,
		.word_base = CSR_REG_BASE,
		.word_size = sizeof(u32),
		.word_count = CSR_REG_SIZE / sizeof(u32),
	},
	.eeprom = {
		.read = rt2x00_eeprom_read,
		.write = rt2x00_eeprom_write,
		.word_base = EEPROM_BASE,
		.word_size = sizeof(u16),
		.word_count = EEPROM_SIZE / sizeof(u16),
	},
	.bbp = {
		.read = rt2800_bbp_read,
		.write = rt2800_bbp_write,
		.word_base = BBP_BASE,
		.word_size = sizeof(u8),
		.word_count = BBP_SIZE / sizeof(u8),
	},
	.rf = {
		/* RF reads come from the software cache, writes go to HW. */
		.read = rt2x00_rf_read,
		.write = rt2800_rf_write,
		.word_base = RF_BASE,
		.word_size = sizeof(u32),
		.word_count = RF_SIZE / sizeof(u32),
	},
};
EXPORT_SYMBOL_GPL(rt2800_rt2x00debug);
279#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
280
281int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev)
282{
283 u32 reg;
284
285 rt2800_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
286 return rt2x00_get_field32(reg, GPIO_CTRL_CFG_BIT2);
287}
288EXPORT_SYMBOL_GPL(rt2800_rfkill_poll);
289
290#ifdef CONFIG_RT2X00_LIB_LEDS
291static void rt2800_brightness_set(struct led_classdev *led_cdev,
292 enum led_brightness brightness)
293{
294 struct rt2x00_led *led =
295 container_of(led_cdev, struct rt2x00_led, led_dev);
296 unsigned int enabled = brightness != LED_OFF;
297 unsigned int bg_mode =
298 (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
299 unsigned int polarity =
300 rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
301 EEPROM_FREQ_LED_POLARITY);
302 unsigned int ledmode =
303 rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
304 EEPROM_FREQ_LED_MODE);
305
306 if (led->type == LED_TYPE_RADIO) {
307 rt2800_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
308 enabled ? 0x20 : 0);
309 } else if (led->type == LED_TYPE_ASSOC) {
310 rt2800_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
311 enabled ? (bg_mode ? 0x60 : 0xa0) : 0x20);
312 } else if (led->type == LED_TYPE_QUALITY) {
313 /*
314 * The brightness is divided into 6 levels (0 - 5),
315 * The specs tell us the following levels:
316 * 0, 1 ,3, 7, 15, 31
317 * to determine the level in a simple way we can simply
318 * work with bitshifting:
319 * (1 << level) - 1
320 */
321 rt2800_mcu_request(led->rt2x00dev, MCU_LED_STRENGTH, 0xff,
322 (1 << brightness / (LED_FULL / 6)) - 1,
323 polarity);
324 }
325}
326
327static int rt2800_blink_set(struct led_classdev *led_cdev,
328 unsigned long *delay_on, unsigned long *delay_off)
329{
330 struct rt2x00_led *led =
331 container_of(led_cdev, struct rt2x00_led, led_dev);
332 u32 reg;
333
334 rt2800_register_read(led->rt2x00dev, LED_CFG, &reg);
335 rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, *delay_on);
336 rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off);
337 rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
338 rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
339 rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 12);
340 rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
341 rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
342 rt2800_register_write(led->rt2x00dev, LED_CFG, reg);
343
344 return 0;
345}
346
347void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
348 struct rt2x00_led *led, enum led_type type)
349{
350 led->rt2x00dev = rt2x00dev;
351 led->type = type;
352 led->led_dev.brightness_set = rt2800_brightness_set;
353 led->led_dev.blink_set = rt2800_blink_set;
354 led->flags = LED_INITIALIZED;
355}
356EXPORT_SYMBOL_GPL(rt2800_init_led);
357#endif /* CONFIG_RT2X00_LIB_LEDS */
358
359/*
360 * Configuration handlers.
361 */
/*
 * Update the per-WCID hardware state for a key operation: the WCID
 * attribute register (key table, cipher, BSS index), the IV/EIV
 * template entry and the WCID MAC address entry.
 *
 * Note: "(crypto->cmd == SET_KEY) * x" writes x on SET_KEY and 0 on
 * key removal, clearing the entry without a branch.
 */
static void rt2800_config_wcid_attr(struct rt2x00_dev *rt2x00dev,
				    struct rt2x00lib_crypto *crypto,
				    struct ieee80211_key_conf *key)
{
	struct mac_wcid_entry wcid_entry;
	struct mac_iveiv_entry iveiv_entry;
	u32 offset;
	u32 reg;

	offset = MAC_WCID_ATTR_ENTRY(key->hw_key_idx);

	rt2800_register_read(rt2x00dev, offset, &reg);
	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_KEYTAB,
			   !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER,
			   (crypto->cmd == SET_KEY) * crypto->cipher);
	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX,
			   (crypto->cmd == SET_KEY) * crypto->bssidx);
	rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_RX_WIUDF, crypto->cipher);
	rt2800_register_write(rt2x00dev, offset, reg);

	offset = MAC_IVEIV_ENTRY(key->hw_key_idx);

	/* iv[3] carries the EXT IV flag (for TKIP/AES) and the key index. */
	memset(&iveiv_entry, 0, sizeof(iveiv_entry));
	if ((crypto->cipher == CIPHER_TKIP) ||
	    (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
	    (crypto->cipher == CIPHER_AES))
		iveiv_entry.iv[3] |= 0x20;
	iveiv_entry.iv[3] |= key->keyidx << 6;
	rt2800_register_multiwrite(rt2x00dev, offset,
				   &iveiv_entry, sizeof(iveiv_entry));

	offset = MAC_WCID_ENTRY(key->hw_key_idx);

	/* On key removal the MAC address entry is zeroed. */
	memset(&wcid_entry, 0, sizeof(wcid_entry));
	if (crypto->cmd == SET_KEY)
		memcpy(&wcid_entry, crypto->address, ETH_ALEN);
	rt2800_register_multiwrite(rt2x00dev, offset,
				   &wcid_entry, sizeof(wcid_entry));
}
402
/*
 * Install or remove a shared (group) key. On SET_KEY the key material
 * and TKIP MICs are written to the shared key entry; in both cases the
 * cipher mode word and the WCID state are updated. Returns 0.
 */
int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
			     struct rt2x00lib_crypto *crypto,
			     struct ieee80211_key_conf *key)
{
	struct hw_key_entry key_entry;
	struct rt2x00_field32 field;
	u32 offset;
	u32 reg;

	if (crypto->cmd == SET_KEY) {
		/* 4 shared key slots per BSS index. */
		key->hw_key_idx = (4 * crypto->bssidx) + key->keyidx;

		memcpy(key_entry.key, crypto->key,
		       sizeof(key_entry.key));
		memcpy(key_entry.tx_mic, crypto->tx_mic,
		       sizeof(key_entry.tx_mic));
		memcpy(key_entry.rx_mic, crypto->rx_mic,
		       sizeof(key_entry.rx_mic));

		offset = SHARED_KEY_ENTRY(key->hw_key_idx);
		rt2800_register_multiwrite(rt2x00dev, offset,
					   &key_entry, sizeof(key_entry));
	}

	/*
	 * The cipher types are stored over multiple registers
	 * starting with SHARED_KEY_MODE_BASE each word will have
	 * 32 bits and contains the cipher types for 2 bssidx each.
	 * Using the correct defines correctly will cause overhead,
	 * so just calculate the correct offset.
	 */
	field.bit_offset = 4 * (key->hw_key_idx % 8);
	field.bit_mask = 0x7 << field.bit_offset;

	offset = SHARED_KEY_MODE_ENTRY(key->hw_key_idx / 8);

	rt2800_register_read(rt2x00dev, offset, &reg);
	/* A cleared command writes cipher mode 0 (key removal). */
	rt2x00_set_field32(&reg, field,
			   (crypto->cmd == SET_KEY) * crypto->cipher);
	rt2800_register_write(rt2x00dev, offset, reg);

	/*
	 * Update WCID information
	 */
	rt2800_config_wcid_attr(rt2x00dev, crypto, key);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800_config_shared_key);
452
/*
 * Install or remove a pairwise key. The WCID is derived from the AID,
 * offset past the 32 shared key slots. Returns -ENOSPC when the AID
 * would not fit in the 256-entry WCID table, 0 otherwise.
 */
int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
			       struct rt2x00lib_crypto *crypto,
			       struct ieee80211_key_conf *key)
{
	struct hw_key_entry key_entry;
	u32 offset;

	if (crypto->cmd == SET_KEY) {
		/*
		 * 1 pairwise key is possible per AID, this means that the AID
		 * equals our hw_key_idx. Make sure the WCID starts _after_ the
		 * last possible shared key entry.
		 */
		if (crypto->aid > (256 - 32))
			return -ENOSPC;

		key->hw_key_idx = 32 + crypto->aid;

		memcpy(key_entry.key, crypto->key,
		       sizeof(key_entry.key));
		memcpy(key_entry.tx_mic, crypto->tx_mic,
		       sizeof(key_entry.tx_mic));
		memcpy(key_entry.rx_mic, crypto->rx_mic,
		       sizeof(key_entry.rx_mic));

		offset = PAIRWISE_KEY_ENTRY(key->hw_key_idx);
		rt2800_register_multiwrite(rt2x00dev, offset,
					   &key_entry, sizeof(key_entry));
	}

	/*
	 * Update WCID information
	 */
	rt2800_config_wcid_attr(rt2x00dev, crypto, key);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800_config_pairwise_key);
491
/*
 * Translate mac80211 FIF_* filter flags into the RX_FILTER_CFG drop
 * bits (each bit set means "drop this frame class").
 */
void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
			  const unsigned int filter_flags)
{
	u32 reg;

	/*
	 * Start configuration steps.
	 * Note that the version error will always be dropped
	 * and broadcast frames will always be accepted since
	 * there is no filter for it at this time.
	 */
	rt2800_register_read(rt2x00dev, RX_FILTER_CFG, &reg);
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CRC_ERROR,
			   !(filter_flags & FIF_FCSFAIL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PHY_ERROR,
			   !(filter_flags & FIF_PLCPFAIL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME,
			   !(filter_flags & FIF_PROMISC_IN_BSS));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0);
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_VER_ERROR, 1);
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_MULTICAST,
			   !(filter_flags & FIF_ALLMULTI));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BROADCAST, 0);
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_DUPLICATE, 1);
	/* All control-frame classes follow the single FIF_CONTROL flag. */
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CF_END_ACK,
			   !(filter_flags & FIF_CONTROL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CF_END,
			   !(filter_flags & FIF_CONTROL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_ACK,
			   !(filter_flags & FIF_CONTROL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CTS,
			   !(filter_flags & FIF_CONTROL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_RTS,
			   !(filter_flags & FIF_CONTROL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PSPOLL,
			   !(filter_flags & FIF_PSPOLL));
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA, 1);
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR, 0);
	rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CNTL,
			   !(filter_flags & FIF_CONTROL));
	rt2800_register_write(rt2x00dev, RX_FILTER_CFG, reg);
}
EXPORT_SYMBOL_GPL(rt2800_config_filter);
535
/*
 * Apply interface configuration changes selected by CONFIG_UPDATE_*
 * flags: interface type/synchronisation, MAC address and BSSID.
 */
void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
			struct rt2x00intf_conf *conf, const unsigned int flags)
{
	unsigned int beacon_base;
	u32 reg;

	if (flags & CONFIG_UPDATE_TYPE) {
		/*
		 * Clear current synchronisation setup.
		 * For the Beacon base registers we only need to clear
		 * the first byte since that byte contains the VALID and OWNER
		 * bits which (when set to 0) will invalidate the entire beacon.
		 */
		beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
		rt2800_register_write(rt2x00dev, beacon_base, 0);

		/*
		 * Enable synchronisation.
		 */
		rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync);
		rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
		rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
	}

	if (flags & CONFIG_UPDATE_MAC) {
		/* The mac words are little-endian; patch the mask in word 1. */
		reg = le32_to_cpu(conf->mac[1]);
		rt2x00_set_field32(&reg, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff);
		conf->mac[1] = cpu_to_le32(reg);

		rt2800_register_multiwrite(rt2x00dev, MAC_ADDR_DW0,
					   conf->mac, sizeof(conf->mac));
	}

	if (flags & CONFIG_UPDATE_BSSID) {
		reg = le32_to_cpu(conf->bssid[1]);
		rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 0);
		rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 0);
		conf->bssid[1] = cpu_to_le32(reg);

		rt2800_register_multiwrite(rt2x00dev, MAC_BSSID_DW0,
					   conf->bssid, sizeof(conf->bssid));
	}
}
EXPORT_SYMBOL_GPL(rt2800_config_intf);
582
/*
 * Apply ERP settings from mac80211: preamble, CTS protection, basic
 * rates, slot time, SIFS/EIFS timing and the beacon interval.
 */
void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp)
{
	u32 reg;

	rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
	rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT, 0x20);
	rt2800_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);

	rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
	rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY,
			   !!erp->short_preamble);
	rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE,
			   !!erp->short_preamble);
	rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg);

	rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
	/* 2 enables CTS-to-self protection, 0 disables protection. */
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL,
			   erp->cts_protection ? 2 : 0);
	rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);

	rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE,
			      erp->basic_rates);
	rt2800_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);

	rt2800_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
	rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, erp->slot_time);
	rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
	rt2800_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);

	rt2800_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
	rt2x00_set_field32(&reg, XIFS_TIME_CFG_CCKM_SIFS_TIME, erp->sifs);
	rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_SIFS_TIME, erp->sifs);
	rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_XIFS_TIME, 4);
	rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, erp->eifs);
	rt2x00_set_field32(&reg, XIFS_TIME_CFG_BB_RXEND_ENABLE, 1);
	rt2800_register_write(rt2x00dev, XIFS_TIME_CFG, reg);

	rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
	/* The register counts the beacon interval in 1/16 TU units. */
	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
			   erp->beacon_int * 16);
	rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
}
EXPORT_SYMBOL_GPL(rt2800_config_erp);
626
/*
 * Program the TX/RX antenna selection into BBP registers 1 and 3.
 * Both registers are read first and written back together at the end,
 * since the 1-TX PCI case also touches the RX antenna field in r3.
 */
void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant)
{
	u8 r1;
	u8 r3;

	rt2800_bbp_read(rt2x00dev, 1, &r1);
	rt2800_bbp_read(rt2x00dev, 3, &r3);

	/*
	 * Configure the TX antenna.
	 */
	switch ((int)ant->tx) {
	case 1:
		rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
		if (rt2x00_intf_is_pci(rt2x00dev))
			rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
		break;
	case 2:
		rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
		break;
	case 3:
		/* Do nothing */
		break;
	}

	/*
	 * Configure the RX antenna.
	 */
	switch ((int)ant->rx) {
	case 1:
		rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
		break;
	case 2:
		rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1);
		break;
	case 3:
		rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 2);
		break;
	}

	rt2800_bbp_write(rt2x00dev, 3, r3);
	rt2800_bbp_write(rt2x00dev, 1, r1);
}
EXPORT_SYMBOL_GPL(rt2800_config_ant);
671
672static void rt2800_config_lna_gain(struct rt2x00_dev *rt2x00dev,
673 struct rt2x00lib_conf *libconf)
674{
675 u16 eeprom;
676 short lna_gain;
677
678 if (libconf->rf.channel <= 14) {
679 rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
680 lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_BG);
681 } else if (libconf->rf.channel <= 64) {
682 rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
683 lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_A0);
684 } else if (libconf->rf.channel <= 128) {
685 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
686 lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_LNA_A1);
687 } else {
688 rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
689 lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_LNA_A2);
690 }
691
692 rt2x00dev->lna_gain = lna_gain;
693}
694
/*
 * Program a channel on RT2x-style radios via rt2800_rf_write().
 * The antenna, frequency offset and TX power fields are folded into
 * the rf1-rf4 words, then the whole set is written three times with
 * rf3 bit 2 toggled off/on/off and 200us pauses in between.
 */
static void rt2800_config_channel_rt2x(struct rt2x00_dev *rt2x00dev,
				       struct ieee80211_conf *conf,
				       struct rf_channel *rf,
				       struct channel_info *info)
{
	rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);

	if (rt2x00dev->default_ant.tx == 1)
		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_TX1, 1);

	if (rt2x00dev->default_ant.rx == 1) {
		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX1, 1);
		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
	} else if (rt2x00dev->default_ant.rx == 2)
		rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);

	if (rf->channel > 14) {
		/*
		 * When TX power is below 0, we should increase it by 7 to
		 * make it a positive value (Minumum value is -7).
		 * However this means that values between 0 and 7 have
		 * double meaning, and we should set a 7DBm boost flag.
		 */
		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A_7DBM_BOOST,
				   (info->tx_power1 >= 0));

		if (info->tx_power1 < 0)
			info->tx_power1 += 7;

		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A,
				   TXPOWER_A_TO_DEV(info->tx_power1));

		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A_7DBM_BOOST,
				   (info->tx_power2 >= 0));

		if (info->tx_power2 < 0)
			info->tx_power2 += 7;

		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A,
				   TXPOWER_A_TO_DEV(info->tx_power2));
	} else {
		rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_G,
				   TXPOWER_G_TO_DEV(info->tx_power1));
		rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_G,
				   TXPOWER_G_TO_DEV(info->tx_power2));
	}

	rt2x00_set_field32(&rf->rf4, RF4_HT40, conf_is_ht40(conf));

	/* First pass: rf3 with bit 2 cleared. */
	rt2800_rf_write(rt2x00dev, 1, rf->rf1);
	rt2800_rf_write(rt2x00dev, 2, rf->rf2);
	rt2800_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
	rt2800_rf_write(rt2x00dev, 4, rf->rf4);

	udelay(200);

	/* Second pass: rf3 with bit 2 set. */
	rt2800_rf_write(rt2x00dev, 1, rf->rf1);
	rt2800_rf_write(rt2x00dev, 2, rf->rf2);
	rt2800_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004);
	rt2800_rf_write(rt2x00dev, 4, rf->rf4);

	udelay(200);

	/* Final pass: rf3 with bit 2 cleared again. */
	rt2800_rf_write(rt2x00dev, 1, rf->rf1);
	rt2800_rf_write(rt2x00dev, 2, rf->rf2);
	rt2800_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
	rt2800_rf_write(rt2x00dev, 4, rf->rf4);
}
763
764static void rt2800_config_channel_rt3x(struct rt2x00_dev *rt2x00dev,
765 struct ieee80211_conf *conf,
766 struct rf_channel *rf,
767 struct channel_info *info)
768{
769 u8 rfcsr;
770
771 rt2800_rfcsr_write(rt2x00dev, 2, rf->rf1);
772 rt2800_rfcsr_write(rt2x00dev, 2, rf->rf3);
773
774 rt2800_rfcsr_read(rt2x00dev, 6, &rfcsr);
775 rt2x00_set_field8(&rfcsr, RFCSR6_R, rf->rf2);
776 rt2800_rfcsr_write(rt2x00dev, 6, rfcsr);
777
778 rt2800_rfcsr_read(rt2x00dev, 12, &rfcsr);
779 rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER,
780 TXPOWER_G_TO_DEV(info->tx_power1));
781 rt2800_rfcsr_write(rt2x00dev, 12, rfcsr);
782
783 rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
784 rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
785 rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
786
787 rt2800_rfcsr_write(rt2x00dev, 24,
788 rt2x00dev->calibration[conf_is_ht40(conf)]);
789
790 rt2800_rfcsr_read(rt2x00dev, 23, &rfcsr);
791 rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
792 rt2800_rfcsr_write(rt2x00dev, 23, rfcsr);
793}
794
/*
 * Switch to a new channel: program the radio (RT3070 uses the RFCSR
 * path, everything else the RF path), adjust BBP gain/band settings,
 * select the band and active PA/LNA pins, set the channel bandwidth
 * and apply the RT2860C BBP quirks, then wait for the RF to settle.
 */
static void rt2800_config_channel(struct rt2x00_dev *rt2x00dev,
				  struct ieee80211_conf *conf,
				  struct rf_channel *rf,
				  struct channel_info *info)
{
	u32 reg;
	unsigned int tx_pin;
	u8 bbp;

	if (rt2x00_rev(&rt2x00dev->chip) != RT3070_VERSION)
		rt2800_config_channel_rt2x(rt2x00dev, conf, rf, info);
	else
		rt2800_config_channel_rt3x(rt2x00dev, conf, rf, info);

	/*
	 * Change BBP settings
	 * (RX gain registers 62-64 are offset by the cached LNA gain).
	 */
	rt2800_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
	rt2800_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
	rt2800_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
	rt2800_bbp_write(rt2x00dev, 86, 0);

	/* Band-specific BBP values, depending on an external LNA fit. */
	if (rf->channel <= 14) {
		if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) {
			rt2800_bbp_write(rt2x00dev, 82, 0x62);
			rt2800_bbp_write(rt2x00dev, 75, 0x46);
		} else {
			rt2800_bbp_write(rt2x00dev, 82, 0x84);
			rt2800_bbp_write(rt2x00dev, 75, 0x50);
		}
	} else {
		rt2800_bbp_write(rt2x00dev, 82, 0xf2);

		if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags))
			rt2800_bbp_write(rt2x00dev, 75, 0x46);
		else
			rt2800_bbp_write(rt2x00dev, 75, 0x50);
	}

	rt2800_register_read(rt2x00dev, TX_BAND_CFG, &reg);
	rt2x00_set_field32(&reg, TX_BAND_CFG_HT40_PLUS, conf_is_ht40_plus(conf));
	rt2x00_set_field32(&reg, TX_BAND_CFG_A, rf->channel > 14);
	rt2x00_set_field32(&reg, TX_BAND_CFG_BG, rf->channel <= 14);
	rt2800_register_write(rt2x00dev, TX_BAND_CFG, reg);

	tx_pin = 0;

	/* Turn on unused PA or LNA when not using 1T or 1R */
	if (rt2x00dev->default_ant.tx != 1) {
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1);
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1);
	}

	/* Turn on unused PA or LNA when not using 1T or 1R */
	if (rt2x00dev->default_ant.rx != 1) {
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
		rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
	}

	/* Primary chain pins; PA selection follows the band. */
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFTR_EN, 1);
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_TRSW_EN, 1);
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, rf->channel <= 14);
	rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN, rf->channel > 14);

	rt2800_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);

	/* BBP 4 bandwidth: 0 for 20MHz, 2 for 40MHz. */
	rt2800_bbp_read(rt2x00dev, 4, &bbp);
	rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * conf_is_ht40(conf));
	rt2800_bbp_write(rt2x00dev, 4, bbp);

	rt2800_bbp_read(rt2x00dev, 3, &bbp);
	rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf));
	rt2800_bbp_write(rt2x00dev, 3, bbp);

	/* RT2860C needs different BBP AGC values per bandwidth. */
	if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
		if (conf_is_ht40(conf)) {
			rt2800_bbp_write(rt2x00dev, 69, 0x1a);
			rt2800_bbp_write(rt2x00dev, 70, 0x0a);
			rt2800_bbp_write(rt2x00dev, 73, 0x16);
		} else {
			rt2800_bbp_write(rt2x00dev, 69, 0x16);
			rt2800_bbp_write(rt2x00dev, 70, 0x08);
			rt2800_bbp_write(rt2x00dev, 73, 0x11);
		}
	}

	msleep(1);
}
885
886static void rt2800_config_txpower(struct rt2x00_dev *rt2x00dev,
887 const int txpower)
888{
889 u32 reg;
890 u32 value = TXPOWER_G_TO_DEV(txpower);
891 u8 r1;
892
893 rt2800_bbp_read(rt2x00dev, 1, &r1);
894 rt2x00_set_field8(&reg, BBP1_TX_POWER, 0);
895 rt2800_bbp_write(rt2x00dev, 1, r1);
896
897 rt2800_register_read(rt2x00dev, TX_PWR_CFG_0, &reg);
898 rt2x00_set_field32(&reg, TX_PWR_CFG_0_1MBS, value);
899 rt2x00_set_field32(&reg, TX_PWR_CFG_0_2MBS, value);
900 rt2x00_set_field32(&reg, TX_PWR_CFG_0_55MBS, value);
901 rt2x00_set_field32(&reg, TX_PWR_CFG_0_11MBS, value);
902 rt2x00_set_field32(&reg, TX_PWR_CFG_0_6MBS, value);
903 rt2x00_set_field32(&reg, TX_PWR_CFG_0_9MBS, value);
904 rt2x00_set_field32(&reg, TX_PWR_CFG_0_12MBS, value);
905 rt2x00_set_field32(&reg, TX_PWR_CFG_0_18MBS, value);
906 rt2800_register_write(rt2x00dev, TX_PWR_CFG_0, reg);
907
908 rt2800_register_read(rt2x00dev, TX_PWR_CFG_1, &reg);
909 rt2x00_set_field32(&reg, TX_PWR_CFG_1_24MBS, value);
910 rt2x00_set_field32(&reg, TX_PWR_CFG_1_36MBS, value);
911 rt2x00_set_field32(&reg, TX_PWR_CFG_1_48MBS, value);
912 rt2x00_set_field32(&reg, TX_PWR_CFG_1_54MBS, value);
913 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS0, value);
914 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS1, value);
915 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS2, value);
916 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS3, value);
917 rt2800_register_write(rt2x00dev, TX_PWR_CFG_1, reg);
918
919 rt2800_register_read(rt2x00dev, TX_PWR_CFG_2, &reg);
920 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS4, value);
921 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS5, value);
922 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS6, value);
923 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS7, value);
924 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS8, value);
925 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS9, value);
926 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS10, value);
927 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS11, value);
928 rt2800_register_write(rt2x00dev, TX_PWR_CFG_2, reg);
929
930 rt2800_register_read(rt2x00dev, TX_PWR_CFG_3, &reg);
931 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS12, value);
932 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS13, value);
933 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS14, value);
934 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS15, value);
935 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN1, value);
936 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN2, value);
937 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN3, value);
938 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN4, value);
939 rt2800_register_write(rt2x00dev, TX_PWR_CFG_3, reg);
940
941 rt2800_register_read(rt2x00dev, TX_PWR_CFG_4, &reg);
942 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN5, value);
943 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN6, value);
944 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN7, value);
945 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN8, value);
946 rt2800_register_write(rt2x00dev, TX_PWR_CFG_4, reg);
947}
948
949static void rt2800_config_retry_limit(struct rt2x00_dev *rt2x00dev,
950 struct rt2x00lib_conf *libconf)
951{
952 u32 reg;
953
954 rt2800_register_read(rt2x00dev, TX_RTY_CFG, &reg);
955 rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT,
956 libconf->conf->short_frame_max_tx_count);
957 rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT,
958 libconf->conf->long_frame_max_tx_count);
959 rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000);
960 rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0);
961 rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0);
962 rt2x00_set_field32(&reg, TX_RTY_CFG_TX_AUTO_FB_ENABLE, 1);
963 rt2800_register_write(rt2x00dev, TX_RTY_CFG, reg);
964}
965
/*
 * Configure powersave state based on the IEEE80211_CONF_PS flag.
 *
 * Note the ordering: when entering sleep the autowakeup timer is
 * programmed while the device is still awake, and only then is the
 * device put to sleep; when leaving sleep the device is woken first
 * so the subsequent register writes reach awake hardware.
 */
static void rt2800_config_ps(struct rt2x00_dev *rt2x00dev,
			     struct rt2x00lib_conf *libconf)
{
	enum dev_state state =
	    (libconf->conf->flags & IEEE80211_CONF_PS) ?
		STATE_SLEEP : STATE_AWAKE;
	u32 reg;

	if (state == STATE_SLEEP) {
		/* Clear first, then program the autowakeup timer. */
		rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0);

		rt2800_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 5);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE,
				   libconf->conf->listen_interval - 1);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 1);
		rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);

		rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
	} else {
		/* Wake the device before disabling the autowakeup timer. */
		rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);

		rt2800_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 0);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE, 0);
		rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 0);
		rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);
	}
}
995
/*
 * Main configuration dispatcher called by rt2x00lib.  Applies only the
 * configuration items indicated by @flags (IEEE80211_CONF_CHANGE_* bits).
 */
void rt2800_config(struct rt2x00_dev *rt2x00dev,
		   struct rt2x00lib_conf *libconf,
		   const unsigned int flags)
{
	/* Always recalculate LNA gain before changing configuration */
	rt2800_config_lna_gain(rt2x00dev, libconf);

	if (flags & IEEE80211_CONF_CHANGE_CHANNEL)
		rt2800_config_channel(rt2x00dev, libconf->conf,
				      &libconf->rf, &libconf->channel);
	if (flags & IEEE80211_CONF_CHANGE_POWER)
		rt2800_config_txpower(rt2x00dev, libconf->conf->power_level);
	if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
		rt2800_config_retry_limit(rt2x00dev, libconf);
	if (flags & IEEE80211_CONF_CHANGE_PS)
		rt2800_config_ps(rt2x00dev, libconf);
}
EXPORT_SYMBOL_GPL(rt2800_config);
1014
1015/*
1016 * Link tuning
1017 */
/*
 * Collect link quality statistics for the link tuner.
 * Only the FCS/CRC error counter is read here; RX_STA_CNT0 is a
 * clear-on-read register, so reading it also resets the counter.
 */
void rt2800_link_stats(struct rt2x00_dev *rt2x00dev, struct link_qual *qual)
{
	u32 reg;

	/*
	 * Update FCS error count from register.
	 */
	rt2800_register_read(rt2x00dev, RX_STA_CNT0, &reg);
	qual->rx_failed = rt2x00_get_field32(reg, RX_STA_CNT0_CRC_ERR);
}
EXPORT_SYMBOL_GPL(rt2800_link_stats);
1029
1030static u8 rt2800_get_default_vgc(struct rt2x00_dev *rt2x00dev)
1031{
1032 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
1033 if (rt2x00_intf_is_usb(rt2x00dev) &&
1034 rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION)
1035 return 0x1c + (2 * rt2x00dev->lna_gain);
1036 else
1037 return 0x2e + rt2x00dev->lna_gain;
1038 }
1039
1040 if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
1041 return 0x32 + (rt2x00dev->lna_gain * 5) / 3;
1042 else
1043 return 0x3a + (rt2x00dev->lna_gain * 5) / 3;
1044}
1045
1046static inline void rt2800_set_vgc(struct rt2x00_dev *rt2x00dev,
1047 struct link_qual *qual, u8 vgc_level)
1048{
1049 if (qual->vgc_level != vgc_level) {
1050 rt2800_bbp_write(rt2x00dev, 66, vgc_level);
1051 qual->vgc_level = vgc_level;
1052 qual->vgc_level_reg = vgc_level;
1053 }
1054}
1055
/*
 * Reset the link tuner: restore the band/width-dependent default
 * VGC level.
 */
void rt2800_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual)
{
	rt2800_set_vgc(rt2x00dev, qual, rt2800_get_default_vgc(rt2x00dev));
}
EXPORT_SYMBOL_GPL(rt2800_reset_tuner);
1061
1062void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
1063 const u32 count)
1064{
1065 if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION)
1066 return;
1067
1068 /*
1069 * When RSSI is better then -80 increase VGC level with 0x10
1070 */
1071 rt2800_set_vgc(rt2x00dev, qual,
1072 rt2800_get_default_vgc(rt2x00dev) +
1073 ((qual->rssi > -80) * 0x10));
1074}
1075EXPORT_SYMBOL_GPL(rt2800_link_tuner);
1076
1077/*
1078 * Initialization functions.
1079 */
/*
 * Bring the MAC register set into its initial post-reset state:
 * reset MAC/BBP, program beacon queue offsets, rate/protection and
 * fallback tables, clear encryption keys, WCID entries and beacons,
 * and flush the clear-on-read statistics counters.
 *
 * Returns 0 on success or a negative errno (-EBUSY when USB hardware
 * never becomes ready).
 */
int rt2800_init_registers(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;
	unsigned int i;

	if (rt2x00_intf_is_usb(rt2x00dev)) {
		/*
		 * Wait until BBP and RF are ready.
		 */
		for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
			rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
			/* 0x00000000 / 0xffffffff indicate a dead bus. */
			if (reg && reg != ~0)
				break;
			msleep(1);
		}

		if (i == REGISTER_BUSY_COUNT) {
			ERROR(rt2x00dev, "Unstable hardware.\n");
			return -EBUSY;
		}

		rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
		rt2800_register_write(rt2x00dev, PBF_SYS_CTRL,
				      reg & ~0x00002000);
	} else if (rt2x00_intf_is_pci(rt2x00dev))
		rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);

	/* Reset both the MAC CSR state and the BBP. */
	rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);

	if (rt2x00_intf_is_usb(rt2x00dev)) {
		rt2800_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);
#ifdef CONFIG_RT2800USB
		rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
					    USB_MODE_RESET, REGISTER_TIMEOUT);
#endif
	}

	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);

	/* Beacon queue base offsets (comments give the byte addresses). */
	rt2800_register_read(rt2x00dev, BCN_OFFSET0, &reg);
	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN0, 0xe0); /* 0x3800 */
	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN1, 0xe8); /* 0x3a00 */
	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN2, 0xf0); /* 0x3c00 */
	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN3, 0xf8); /* 0x3e00 */
	rt2800_register_write(rt2x00dev, BCN_OFFSET0, reg);

	rt2800_register_read(rt2x00dev, BCN_OFFSET1, &reg);
	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN4, 0xc8); /* 0x3200 */
	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN5, 0xd0); /* 0x3400 */
	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN6, 0x77); /* 0x1dc0 */
	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN7, 0x6f); /* 0x1bc0 */
	rt2800_register_write(rt2x00dev, BCN_OFFSET1, reg);

	rt2800_register_write(rt2x00dev, LEGACY_BASIC_RATE, 0x0000013f);
	rt2800_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);

	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);

	/* Disable beaconing and TSF until an interface is configured. */
	rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL, 0);
	rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
	rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, 0);
	rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
	rt2x00_set_field32(&reg, BCN_TIME_CFG_TX_TIME_COMPENSATE, 0);
	rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);

	/* TX switching timing: RT3070 USB devices use different values. */
	if (rt2x00_intf_is_usb(rt2x00dev) &&
	    rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) {
		rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
		rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
		rt2800_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
	} else {
		rt2800_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
		rt2800_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
	}

	rt2800_register_read(rt2x00dev, TX_LINK_CFG, &reg);
	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFB_LIFETIME, 32);
	rt2x00_set_field32(&reg, TX_LINK_CFG_MFB_ENABLE, 0);
	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_UMFS_ENABLE, 0);
	rt2x00_set_field32(&reg, TX_LINK_CFG_TX_MRQ_EN, 0);
	rt2x00_set_field32(&reg, TX_LINK_CFG_TX_RDG_EN, 0);
	rt2x00_set_field32(&reg, TX_LINK_CFG_TX_CF_ACK_EN, 1);
	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFB, 0);
	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFS, 0);
	rt2800_register_write(rt2x00dev, TX_LINK_CFG, reg);

	rt2800_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
	rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_MPDU_LIFETIME, 9);
	rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_TX_OP_TIMEOUT, 10);
	rt2800_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);

	/* Maximum PSDU length depends on the chip revision. */
	rt2800_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
	rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
	if (rt2x00_rev(&rt2x00dev->chip) >= RT2880E_VERSION &&
	    rt2x00_rev(&rt2x00dev->chip) < RT3070_VERSION)
		rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
	else
		rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
	rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 0);
	rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 0);
	rt2800_register_write(rt2x00dev, MAX_LEN_CFG, reg);

	rt2800_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f);

	rt2800_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
	rt2x00_set_field32(&reg, AUTO_RSP_CFG_AUTORESPONDER, 1);
	rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MMODE, 0);
	rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MREF, 0);
	rt2x00_set_field32(&reg, AUTO_RSP_CFG_DUAL_CTS_EN, 0);
	rt2x00_set_field32(&reg, AUTO_RSP_CFG_ACK_CTS_PSM_BIT, 0);
	rt2800_register_write(rt2x00dev, AUTO_RSP_CFG, reg);

	/* Protection configuration per PHY mode (CCK/OFDM/MM/GF). */
	rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 8);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV, 1);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM40, 1);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF20, 1);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF40, 1);
	rt2800_register_write(rt2x00dev, CCK_PROT_CFG, reg);

	rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 8);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV, 1);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM40, 1);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF20, 1);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF40, 1);
	rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);

	rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_RATE, 0x4004);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, 0);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV, 1);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
	rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg);

	rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
	rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg);

	rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_RATE, 0x4004);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, 0);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV, 1);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
	rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg);

	rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_RATE, 0x4084);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, 0);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV, 1);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
	rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg);

	if (rt2x00_intf_is_usb(rt2x00dev)) {
		rt2800_register_write(rt2x00dev, PBF_CFG, 0xf40006);

		/* DMA stays disabled here; it is enabled later on start. */
		rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
		rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
		rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
		rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
		rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
		rt2x00_set_field32(&reg, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 3);
		rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 0);
		rt2x00_set_field32(&reg, WPDMA_GLO_CFG_BIG_ENDIAN, 0);
		rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_HDR_SCATTER, 0);
		rt2x00_set_field32(&reg, WPDMA_GLO_CFG_HDR_SEG_LEN, 0);
		rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
	}

	rt2800_register_write(rt2x00dev, TXOP_CTRL_CFG, 0x0000583f);
	rt2800_register_write(rt2x00dev, TXOP_HLDR_ET, 0x00000002);

	/* Effectively disable hardware RTS by using the maximum threshold. */
	rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg);
	rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 32);
	rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES,
			   IEEE80211_MAX_RTS_THRESHOLD);
	rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_FBK_EN, 0);
	rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg);

	rt2800_register_write(rt2x00dev, EXP_ACK_TIME, 0x002400ca);
	rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);

	/*
	 * ASIC will keep garbage value after boot, clear encryption keys.
	 */
	for (i = 0; i < 4; i++)
		rt2800_register_write(rt2x00dev,
					 SHARED_KEY_MODE_ENTRY(i), 0);

	for (i = 0; i < 256; i++) {
		u32 wcid[2] = { 0xffffffff, 0x00ffffff };
		rt2800_register_multiwrite(rt2x00dev, MAC_WCID_ENTRY(i),
					      wcid, sizeof(wcid));

		rt2800_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(i), 1);
		rt2800_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
	}

	/*
	 * Clear all beacons
	 * For the Beacon base registers we only need to clear
	 * the first byte since that byte contains the VALID and OWNER
	 * bits which (when set to 0) will invalidate the entire beacon.
	 */
	rt2800_register_write(rt2x00dev, HW_BEACON_BASE0, 0);
	rt2800_register_write(rt2x00dev, HW_BEACON_BASE1, 0);
	rt2800_register_write(rt2x00dev, HW_BEACON_BASE2, 0);
	rt2800_register_write(rt2x00dev, HW_BEACON_BASE3, 0);
	rt2800_register_write(rt2x00dev, HW_BEACON_BASE4, 0);
	rt2800_register_write(rt2x00dev, HW_BEACON_BASE5, 0);
	rt2800_register_write(rt2x00dev, HW_BEACON_BASE6, 0);
	rt2800_register_write(rt2x00dev, HW_BEACON_BASE7, 0);

	if (rt2x00_intf_is_usb(rt2x00dev)) {
		rt2800_register_read(rt2x00dev, USB_CYC_CFG, &reg);
		rt2x00_set_field32(&reg, USB_CYC_CFG_CLOCK_CYCLE, 30);
		rt2800_register_write(rt2x00dev, USB_CYC_CFG, reg);
	}

	/* HT MCS fallback table. */
	rt2800_register_read(rt2x00dev, HT_FBK_CFG0, &reg);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS0FBK, 0);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS1FBK, 0);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS2FBK, 1);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS3FBK, 2);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS4FBK, 3);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS5FBK, 4);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS6FBK, 5);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS7FBK, 6);
	rt2800_register_write(rt2x00dev, HT_FBK_CFG0, reg);

	rt2800_register_read(rt2x00dev, HT_FBK_CFG1, &reg);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS8FBK, 8);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS9FBK, 8);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS10FBK, 9);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS11FBK, 10);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS12FBK, 11);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS13FBK, 12);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS14FBK, 13);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS15FBK, 14);
	rt2800_register_write(rt2x00dev, HT_FBK_CFG1, reg);

	/* Legacy (OFDM/CCK) fallback tables. */
	rt2800_register_read(rt2x00dev, LG_FBK_CFG0, &reg);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS0FBK, 8);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS1FBK, 8);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS2FBK, 9);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS3FBK, 10);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS4FBK, 11);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS5FBK, 12);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS6FBK, 13);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS7FBK, 14);
	rt2800_register_write(rt2x00dev, LG_FBK_CFG0, reg);

	rt2800_register_read(rt2x00dev, LG_FBK_CFG1, &reg);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS0FBK, 0);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS1FBK, 0);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS2FBK, 1);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS3FBK, 2);
	rt2800_register_write(rt2x00dev, LG_FBK_CFG1, reg);

	/*
	 * We must clear the error counters.
	 * These registers are cleared on read,
	 * so we may pass a useless variable to store the value.
	 */
	rt2800_register_read(rt2x00dev, RX_STA_CNT0, &reg);
	rt2800_register_read(rt2x00dev, RX_STA_CNT1, &reg);
	rt2800_register_read(rt2x00dev, RX_STA_CNT2, &reg);
	rt2800_register_read(rt2x00dev, TX_STA_CNT0, &reg);
	rt2800_register_read(rt2x00dev, TX_STA_CNT1, &reg);
	rt2800_register_read(rt2x00dev, TX_STA_CNT2, &reg);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800_init_registers);
1390
1391static int rt2800_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev)
1392{
1393 unsigned int i;
1394 u32 reg;
1395
1396 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1397 rt2800_register_read(rt2x00dev, MAC_STATUS_CFG, &reg);
1398 if (!rt2x00_get_field32(reg, MAC_STATUS_CFG_BBP_RF_BUSY))
1399 return 0;
1400
1401 udelay(REGISTER_BUSY_DELAY);
1402 }
1403
1404 ERROR(rt2x00dev, "BBP/RF register access failed, aborting.\n");
1405 return -EACCES;
1406}
1407
1408static int rt2800_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
1409{
1410 unsigned int i;
1411 u8 value;
1412
1413 /*
1414 * BBP was enabled after firmware was loaded,
1415 * but we need to reactivate it now.
1416 */
1417 rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
1418 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
1419 msleep(1);
1420
1421 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1422 rt2800_bbp_read(rt2x00dev, 0, &value);
1423 if ((value != 0xff) && (value != 0x00))
1424 return 0;
1425 udelay(REGISTER_BUSY_DELAY);
1426 }
1427
1428 ERROR(rt2x00dev, "BBP register access failed, aborting.\n");
1429 return -EACCES;
1430}
1431
/*
 * Initialize the BBP: wait for it to become ready, write the default
 * register values, apply chip-revision-specific overrides and finally
 * apply any per-device overrides stored in the EEPROM.
 * Returns 0 on success or -EACCES when the BBP never becomes ready.
 */
int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev)
{
	unsigned int i;
	u16 eeprom;
	u8 reg_id;
	u8 value;

	if (unlikely(rt2800_wait_bbp_rf_ready(rt2x00dev) ||
		     rt2800_wait_bbp_ready(rt2x00dev)))
		return -EACCES;

	/* Default BBP register values. */
	rt2800_bbp_write(rt2x00dev, 65, 0x2c);
	rt2800_bbp_write(rt2x00dev, 66, 0x38);
	rt2800_bbp_write(rt2x00dev, 69, 0x12);
	rt2800_bbp_write(rt2x00dev, 70, 0x0a);
	rt2800_bbp_write(rt2x00dev, 73, 0x10);
	rt2800_bbp_write(rt2x00dev, 81, 0x37);
	rt2800_bbp_write(rt2x00dev, 82, 0x62);
	rt2800_bbp_write(rt2x00dev, 83, 0x6a);
	rt2800_bbp_write(rt2x00dev, 84, 0x99);
	rt2800_bbp_write(rt2x00dev, 86, 0x00);
	rt2800_bbp_write(rt2x00dev, 91, 0x04);
	rt2800_bbp_write(rt2x00dev, 92, 0x00);
	rt2800_bbp_write(rt2x00dev, 103, 0x00);
	rt2800_bbp_write(rt2x00dev, 105, 0x05);

	/* Chip-revision-specific overrides. */
	if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
		rt2800_bbp_write(rt2x00dev, 69, 0x16);
		rt2800_bbp_write(rt2x00dev, 73, 0x12);
	}

	if (rt2x00_rev(&rt2x00dev->chip) > RT2860D_VERSION)
		rt2800_bbp_write(rt2x00dev, 84, 0x19);

	if (rt2x00_intf_is_usb(rt2x00dev) &&
	    rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) {
		rt2800_bbp_write(rt2x00dev, 70, 0x0a);
		rt2800_bbp_write(rt2x00dev, 84, 0x99);
		rt2800_bbp_write(rt2x00dev, 105, 0x05);
	}

	if (rt2x00_intf_is_pci(rt2x00dev) &&
	    rt2x00_rt(&rt2x00dev->chip, RT3052)) {
		rt2800_bbp_write(rt2x00dev, 31, 0x08);
		rt2800_bbp_write(rt2x00dev, 78, 0x0e);
		rt2800_bbp_write(rt2x00dev, 80, 0x08);
	}

	/* Apply per-device BBP overrides from EEPROM (if present). */
	for (i = 0; i < EEPROM_BBP_SIZE; i++) {
		rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);

		/* 0xffff / 0x0000 mark unused EEPROM entries. */
		if (eeprom != 0xffff && eeprom != 0x0000) {
			reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
			value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE);
			rt2800_bbp_write(rt2x00dev, reg_id, value);
		}
	}

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800_init_bbp);
1493
/*
 * Calibrate the RX filter (RF CSR 24) for the given bandwidth by
 * injecting BBP test tones in baseband-loopback mode and tuning
 * until the passband/stopband power difference exceeds
 * @filter_target.  Returns the calibrated rfcsr24 value.
 *
 * NOTE(review): this routine leaves baseband loopback and the BBP
 * bandwidth bits modified; the caller (rt2800_init_rfcsr) restores
 * them afterwards.
 */
static u8 rt2800_init_rx_filter(struct rt2x00_dev *rt2x00dev,
				bool bw40, u8 rfcsr24, u8 filter_target)
{
	unsigned int i;
	u8 bbp;
	u8 rfcsr;
	u8 passband;
	u8 stopband;
	u8 overtuned = 0;

	rt2800_rfcsr_write(rt2x00dev, 24, rfcsr24);

	/* BBP4 bandwidth: 0 = BW20, 2 = BW40. */
	rt2800_bbp_read(rt2x00dev, 4, &bbp);
	rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40);
	rt2800_bbp_write(rt2x00dev, 4, bbp);

	/* Enable baseband loopback so the test tone is measurable. */
	rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1);
	rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);

	/*
	 * Set power & frequency of passband test tone
	 */
	rt2800_bbp_write(rt2x00dev, 24, 0);

	for (i = 0; i < 100; i++) {
		rt2800_bbp_write(rt2x00dev, 25, 0x90);
		msleep(1);

		rt2800_bbp_read(rt2x00dev, 55, &passband);
		if (passband)
			break;
	}

	/*
	 * Set power & frequency of stopband test tone
	 */
	rt2800_bbp_write(rt2x00dev, 24, 0x06);

	for (i = 0; i < 100; i++) {
		rt2800_bbp_write(rt2x00dev, 25, 0x90);
		msleep(1);

		rt2800_bbp_read(rt2x00dev, 55, &stopband);

		/* Widen the filter until the attenuation target is met. */
		if ((passband - stopband) <= filter_target) {
			rfcsr24++;
			overtuned += ((passband - stopband) == filter_target);
		} else
			break;

		rt2800_rfcsr_write(rt2x00dev, 24, rfcsr24);
	}

	/* Back off one step if we hit the target exactly at least once. */
	rfcsr24 -= !!overtuned;

	rt2800_rfcsr_write(rt2x00dev, 24, rfcsr24);
	return rfcsr24;
}
1553
/*
 * Initialize the RF CSR registers for chips with the RF3xxx-style
 * programmable RF (RT3070 USB, or PCI devices with RF3020/21/22),
 * run the RX filter calibration for 20MHz and 40MHz, and restore the
 * BBP/RF state afterwards.  Returns 0 (also for chips that need no
 * RF CSR initialization).
 */
int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev)
{
	u8 rfcsr;
	u8 bbp;

	/* Only RT3070-based USB devices need this. */
	if (rt2x00_intf_is_usb(rt2x00dev) &&
	    rt2x00_rev(&rt2x00dev->chip) != RT3070_VERSION)
		return 0;

	/* On PCI, only the RF302x chips need this. */
	if (rt2x00_intf_is_pci(rt2x00dev)) {
		if (!rt2x00_rf(&rt2x00dev->chip, RF3020) &&
		    !rt2x00_rf(&rt2x00dev->chip, RF3021) &&
		    !rt2x00_rf(&rt2x00dev->chip, RF3022))
			return 0;
	}

	/*
	 * Init RF calibration.
	 */
	rt2800_rfcsr_read(rt2x00dev, 30, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
	rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);
	msleep(1);
	rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
	rt2800_rfcsr_write(rt2x00dev, 30, rfcsr);

	/* Bus-specific RF CSR default tables. */
	if (rt2x00_intf_is_usb(rt2x00dev)) {
		rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
		rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
		rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
		rt2800_rfcsr_write(rt2x00dev, 7, 0x70);
		rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
		rt2800_rfcsr_write(rt2x00dev, 10, 0x71);
		rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
		rt2800_rfcsr_write(rt2x00dev, 12, 0x7b);
		rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
		rt2800_rfcsr_write(rt2x00dev, 15, 0x58);
		rt2800_rfcsr_write(rt2x00dev, 16, 0xb3);
		rt2800_rfcsr_write(rt2x00dev, 17, 0x92);
		rt2800_rfcsr_write(rt2x00dev, 18, 0x2c);
		rt2800_rfcsr_write(rt2x00dev, 19, 0x02);
		rt2800_rfcsr_write(rt2x00dev, 20, 0xba);
		rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
		rt2800_rfcsr_write(rt2x00dev, 24, 0x16);
		rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
		rt2800_rfcsr_write(rt2x00dev, 27, 0x03);
		rt2800_rfcsr_write(rt2x00dev, 29, 0x1f);
	} else if (rt2x00_intf_is_pci(rt2x00dev)) {
		rt2800_rfcsr_write(rt2x00dev, 0, 0x50);
		rt2800_rfcsr_write(rt2x00dev, 1, 0x01);
		rt2800_rfcsr_write(rt2x00dev, 2, 0xf7);
		rt2800_rfcsr_write(rt2x00dev, 3, 0x75);
		rt2800_rfcsr_write(rt2x00dev, 4, 0x40);
		rt2800_rfcsr_write(rt2x00dev, 5, 0x03);
		rt2800_rfcsr_write(rt2x00dev, 6, 0x02);
		rt2800_rfcsr_write(rt2x00dev, 7, 0x50);
		rt2800_rfcsr_write(rt2x00dev, 8, 0x39);
		rt2800_rfcsr_write(rt2x00dev, 9, 0x0f);
		rt2800_rfcsr_write(rt2x00dev, 10, 0x60);
		rt2800_rfcsr_write(rt2x00dev, 11, 0x21);
		rt2800_rfcsr_write(rt2x00dev, 12, 0x75);
		rt2800_rfcsr_write(rt2x00dev, 13, 0x75);
		rt2800_rfcsr_write(rt2x00dev, 14, 0x90);
		rt2800_rfcsr_write(rt2x00dev, 15, 0x58);
		rt2800_rfcsr_write(rt2x00dev, 16, 0xb3);
		rt2800_rfcsr_write(rt2x00dev, 17, 0x92);
		rt2800_rfcsr_write(rt2x00dev, 18, 0x2c);
		rt2800_rfcsr_write(rt2x00dev, 19, 0x02);
		rt2800_rfcsr_write(rt2x00dev, 20, 0xba);
		rt2800_rfcsr_write(rt2x00dev, 21, 0xdb);
		rt2800_rfcsr_write(rt2x00dev, 22, 0x00);
		rt2800_rfcsr_write(rt2x00dev, 23, 0x31);
		rt2800_rfcsr_write(rt2x00dev, 24, 0x08);
		rt2800_rfcsr_write(rt2x00dev, 25, 0x01);
		rt2800_rfcsr_write(rt2x00dev, 26, 0x25);
		rt2800_rfcsr_write(rt2x00dev, 27, 0x23);
		rt2800_rfcsr_write(rt2x00dev, 28, 0x13);
		rt2800_rfcsr_write(rt2x00dev, 29, 0x83);
	}

	/*
	 * Set RX Filter calibration for 20MHz and 40MHz
	 */
	rt2x00dev->calibration[0] =
		rt2800_init_rx_filter(rt2x00dev, false, 0x07, 0x16);
	rt2x00dev->calibration[1] =
		rt2800_init_rx_filter(rt2x00dev, true, 0x27, 0x19);

	/*
	 * Set back to initial state
	 */
	rt2800_bbp_write(rt2x00dev, 24, 0);

	rt2800_rfcsr_read(rt2x00dev, 22, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
	rt2800_rfcsr_write(rt2x00dev, 22, rfcsr);

	/*
	 * set BBP back to BW20
	 */
	rt2800_bbp_read(rt2x00dev, 4, &bbp);
	rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
	rt2800_bbp_write(rt2x00dev, 4, bbp);

	return 0;
}
EXPORT_SYMBOL_GPL(rt2800_init_rfcsr);
1661
1662/*
1663 * IEEE80211 stack callback functions.
1664 */
1665static void rt2800_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx,
1666 u32 *iv32, u16 *iv16)
1667{
1668 struct rt2x00_dev *rt2x00dev = hw->priv;
1669 struct mac_iveiv_entry iveiv_entry;
1670 u32 offset;
1671
1672 offset = MAC_IVEIV_ENTRY(hw_key_idx);
1673 rt2800_register_multiread(rt2x00dev, offset,
1674 &iveiv_entry, sizeof(iveiv_entry));
1675
1676 memcpy(&iveiv_entry.iv[0], iv16, sizeof(iv16));
1677 memcpy(&iveiv_entry.iv[4], iv32, sizeof(iv32));
1678}
1679
/*
 * mac80211 set_rts_threshold callback: program the RTS threshold and
 * enable/disable RTS-based protection in every per-PHY-mode protection
 * register.  A threshold of IEEE80211_MAX_RTS_THRESHOLD (or above)
 * means RTS is disabled.  Always returns 0.
 */
static int rt2800_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	u32 reg;
	bool enabled = (value < IEEE80211_MAX_RTS_THRESHOLD);

	rt2800_register_read(rt2x00dev, TX_RTS_CFG, &reg);
	rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES, value);
	rt2800_register_write(rt2x00dev, TX_RTS_CFG, reg);

	/* Toggle the RTS-threshold enable bit for each PHY mode. */
	rt2800_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_RTS_TH_EN, enabled);
	rt2800_register_write(rt2x00dev, CCK_PROT_CFG, reg);

	rt2800_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_RTS_TH_EN, enabled);
	rt2800_register_write(rt2x00dev, OFDM_PROT_CFG, reg);

	rt2800_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_RTS_TH_EN, enabled);
	rt2800_register_write(rt2x00dev, MM20_PROT_CFG, reg);

	rt2800_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_RTS_TH_EN, enabled);
	rt2800_register_write(rt2x00dev, MM40_PROT_CFG, reg);

	rt2800_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_RTS_TH_EN, enabled);
	rt2800_register_write(rt2x00dev, GF20_PROT_CFG, reg);

	rt2800_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_RTS_TH_EN, enabled);
	rt2800_register_write(rt2x00dev, GF40_PROT_CFG, reg);

	return 0;
}
1716
/*
 * mac80211 conf_tx callback: validate the queue parameters through
 * rt2x00lib, then mirror them into the WMM (TXOP/AIFSN/CWMIN/CWMAX)
 * and EDCA hardware registers for the four WMM queues.
 */
static int rt2800_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
			  const struct ieee80211_tx_queue_params *params)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	struct data_queue *queue;
	struct rt2x00_field32 field;
	int retval;
	u32 reg;
	u32 offset;

	/*
	 * First pass the configuration through rt2x00lib, that will
	 * update the queue settings and validate the input. After that
	 * we are free to update the registers based on the value
	 * in the queue parameter.
	 */
	retval = rt2x00mac_conf_tx(hw, queue_idx, params);
	if (retval)
		return retval;

	/*
	 * We only need to perform additional register initialization
	 * for WMM queues.
	 */
	if (queue_idx >= 4)
		return 0;

	queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);

	/* Update WMM TXOP register */
	/* Two queues share each 32-bit register, 16 bits per queue. */
	offset = WMM_TXOP0_CFG + (sizeof(u32) * (!!(queue_idx & 2)));
	field.bit_offset = (queue_idx & 1) * 16;
	field.bit_mask = 0xffff << field.bit_offset;

	rt2800_register_read(rt2x00dev, offset, &reg);
	rt2x00_set_field32(&reg, field, queue->txop);
	rt2800_register_write(rt2x00dev, offset, reg);

	/* Update WMM registers */
	/* AIFSN/CWMIN/CWMAX pack all four queues as 4-bit nibbles. */
	field.bit_offset = queue_idx * 4;
	field.bit_mask = 0xf << field.bit_offset;

	rt2800_register_read(rt2x00dev, WMM_AIFSN_CFG, &reg);
	rt2x00_set_field32(&reg, field, queue->aifs);
	rt2800_register_write(rt2x00dev, WMM_AIFSN_CFG, reg);

	rt2800_register_read(rt2x00dev, WMM_CWMIN_CFG, &reg);
	rt2x00_set_field32(&reg, field, queue->cw_min);
	rt2800_register_write(rt2x00dev, WMM_CWMIN_CFG, reg);

	rt2800_register_read(rt2x00dev, WMM_CWMAX_CFG, &reg);
	rt2x00_set_field32(&reg, field, queue->cw_max);
	rt2800_register_write(rt2x00dev, WMM_CWMAX_CFG, reg);

	/* Update EDCA registers */
	offset = EDCA_AC0_CFG + (sizeof(u32) * queue_idx);

	rt2800_register_read(rt2x00dev, offset, &reg);
	rt2x00_set_field32(&reg, EDCA_AC0_CFG_TX_OP, queue->txop);
	rt2x00_set_field32(&reg, EDCA_AC0_CFG_AIFSN, queue->aifs);
	rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMIN, queue->cw_min);
	rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMAX, queue->cw_max);
	rt2800_register_write(rt2x00dev, offset, reg);

	return 0;
}
1783
/*
 * mac80211 get_tsf callback: assemble the 64-bit TSF from the two
 * 32-bit timer registers.
 *
 * NOTE(review): the two halves are read non-atomically (high word
 * first); if the low word wraps between the reads the result can be
 * off by one high-word increment — presumably acceptable here, but
 * worth confirming against the hardware latching behaviour.
 */
static u64 rt2800_get_tsf(struct ieee80211_hw *hw)
{
	struct rt2x00_dev *rt2x00dev = hw->priv;
	u64 tsf;
	u32 reg;

	rt2800_register_read(rt2x00dev, TSF_TIMER_DW1, &reg);
	tsf = (u64) rt2x00_get_field32(reg, TSF_TIMER_DW1_HIGH_WORD) << 32;
	rt2800_register_read(rt2x00dev, TSF_TIMER_DW0, &reg);
	tsf |= rt2x00_get_field32(reg, TSF_TIMER_DW0_LOW_WORD);

	return tsf;
}
1797
/*
 * mac80211 callback table shared by the rt2800 PCI and USB drivers.
 * Generic handlers come from rt2x00mac; only the rt2800-specific
 * callbacks (TKIP sequence, RTS threshold, TX queue config, TSF)
 * are implemented in this file.
 */
const struct ieee80211_ops rt2800_mac80211_ops = {
	.tx			= rt2x00mac_tx,
	.start			= rt2x00mac_start,
	.stop			= rt2x00mac_stop,
	.add_interface		= rt2x00mac_add_interface,
	.remove_interface	= rt2x00mac_remove_interface,
	.config			= rt2x00mac_config,
	.configure_filter	= rt2x00mac_configure_filter,
	.set_tim		= rt2x00mac_set_tim,
	.set_key		= rt2x00mac_set_key,
	.get_stats		= rt2x00mac_get_stats,
	.get_tkip_seq		= rt2800_get_tkip_seq,
	.set_rts_threshold	= rt2800_set_rts_threshold,
	.bss_info_changed	= rt2x00mac_bss_info_changed,
	.conf_tx		= rt2800_conf_tx,
	.get_tx_stats		= rt2x00mac_get_tx_stats,
	.get_tsf		= rt2800_get_tsf,
	.rfkill_poll		= rt2x00mac_rfkill_poll,
};
EXPORT_SYMBOL_GPL(rt2800_mac80211_ops);
diff --git a/drivers/net/wireless/rt2x00/rt2800lib.h b/drivers/net/wireless/rt2x00/rt2800lib.h
new file mode 100644
index 000000000000..5eea8fcba6cc
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800lib.h
@@ -0,0 +1,134 @@
1/*
2 Copyright (C) 2009 Bartlomiej Zolnierkiewicz
3
4 This program is free software; you can redistribute it and/or modify
5 it under the terms of the GNU General Public License as published by
6 the Free Software Foundation; either version 2 of the License, or
7 (at your option) any later version.
8
9 This program is distributed in the hope that it will be useful,
10 but WITHOUT ANY WARRANTY; without even the implied warranty of
11 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12 GNU General Public License for more details.
13
14 You should have received a copy of the GNU General Public License
15 along with this program; if not, write to the
16 Free Software Foundation, Inc.,
17 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
18 */
19
20#ifndef RT2800LIB_H
21#define RT2800LIB_H
22
/*
 * Bus-agnostic register access operations.  The bus driver stores an
 * implementation of this table in rt2x00dev->priv; the rt2800_* inline
 * wrappers below dispatch through it.
 */
struct rt2800_ops {
	/* Single 32-bit register read/write at byte offset 'offset'. */
	void (*register_read)(struct rt2x00_dev *rt2x00dev,
			      const unsigned int offset, u32 *value);
	void (*register_write)(struct rt2x00_dev *rt2x00dev,
			       const unsigned int offset, u32 value);
	/* Write variant with locking; semantics are bus-specific. */
	void (*register_write_lock)(struct rt2x00_dev *rt2x00dev,
				    const unsigned int offset, u32 value);

	/* Bulk transfer of 'length' bytes starting at 'offset'. */
	void (*register_multiread)(struct rt2x00_dev *rt2x00dev,
				   const unsigned int offset,
				   void *value, const u32 length);
	void (*register_multiwrite)(struct rt2x00_dev *rt2x00dev,
				    const unsigned int offset,
				    const void *value, const u32 length);

	/* Poll 'field' in register 'offset' (busy/kick-bit wait); the
	 * final register value is returned through 'reg'.  Return-value
	 * semantics are defined by the bus implementation. */
	int (*regbusy_read)(struct rt2x00_dev *rt2x00dev,
			    const unsigned int offset,
			    const struct rt2x00_field32 field, u32 *reg);
};
42
43static inline void rt2800_register_read(struct rt2x00_dev *rt2x00dev,
44 const unsigned int offset,
45 u32 *value)
46{
47 const struct rt2800_ops *rt2800ops = rt2x00dev->priv;
48
49 rt2800ops->register_read(rt2x00dev, offset, value);
50}
51
52static inline void rt2800_register_write(struct rt2x00_dev *rt2x00dev,
53 const unsigned int offset,
54 u32 value)
55{
56 const struct rt2800_ops *rt2800ops = rt2x00dev->priv;
57
58 rt2800ops->register_write(rt2x00dev, offset, value);
59}
60
61static inline void rt2800_register_write_lock(struct rt2x00_dev *rt2x00dev,
62 const unsigned int offset,
63 u32 value)
64{
65 const struct rt2800_ops *rt2800ops = rt2x00dev->priv;
66
67 rt2800ops->register_write_lock(rt2x00dev, offset, value);
68}
69
70static inline void rt2800_register_multiread(struct rt2x00_dev *rt2x00dev,
71 const unsigned int offset,
72 void *value, const u32 length)
73{
74 const struct rt2800_ops *rt2800ops = rt2x00dev->priv;
75
76 rt2800ops->register_multiread(rt2x00dev, offset, value, length);
77}
78
79static inline void rt2800_register_multiwrite(struct rt2x00_dev *rt2x00dev,
80 const unsigned int offset,
81 const void *value,
82 const u32 length)
83{
84 const struct rt2800_ops *rt2800ops = rt2x00dev->priv;
85
86 rt2800ops->register_multiwrite(rt2x00dev, offset, value, length);
87}
88
89static inline int rt2800_regbusy_read(struct rt2x00_dev *rt2x00dev,
90 const unsigned int offset,
91 const struct rt2x00_field32 field,
92 u32 *reg)
93{
94 const struct rt2800_ops *rt2800ops = rt2x00dev->priv;
95
96 return rt2800ops->regbusy_read(rt2x00dev, offset, field, reg);
97}
98
/* MCU command interface. */
void rt2800_mcu_request(struct rt2x00_dev *rt2x00dev,
			const u8 command, const u8 token,
			const u8 arg0, const u8 arg1);

/* debugfs register dump description. */
extern const struct rt2x00debug rt2800_rt2x00debug;

/* rfkill and LED support. */
int rt2800_rfkill_poll(struct rt2x00_dev *rt2x00dev);
void rt2800_init_led(struct rt2x00_dev *rt2x00dev,
		     struct rt2x00_led *led, enum led_type type);
/* Hardware crypto key programming. */
int rt2800_config_shared_key(struct rt2x00_dev *rt2x00dev,
			     struct rt2x00lib_crypto *crypto,
			     struct ieee80211_key_conf *key);
int rt2800_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
			       struct rt2x00lib_crypto *crypto,
			       struct ieee80211_key_conf *key);
/* Configuration handlers shared by all rt2800 bus drivers. */
void rt2800_config_filter(struct rt2x00_dev *rt2x00dev,
			  const unsigned int filter_flags);
void rt2800_config_intf(struct rt2x00_dev *rt2x00dev, struct rt2x00_intf *intf,
			struct rt2x00intf_conf *conf, const unsigned int flags);
void rt2800_config_erp(struct rt2x00_dev *rt2x00dev, struct rt2x00lib_erp *erp);
void rt2800_config_ant(struct rt2x00_dev *rt2x00dev, struct antenna_setup *ant);
void rt2800_config(struct rt2x00_dev *rt2x00dev,
		   struct rt2x00lib_conf *libconf,
		   const unsigned int flags);
/* Link quality statistics and tuning. */
void rt2800_link_stats(struct rt2x00_dev *rt2x00dev, struct link_qual *qual);
void rt2800_reset_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual);
void rt2800_link_tuner(struct rt2x00_dev *rt2x00dev, struct link_qual *qual,
		       const u32 count);

/* Device initialization (MAC registers, baseband, RF). */
int rt2800_init_registers(struct rt2x00_dev *rt2x00dev);
int rt2800_init_bbp(struct rt2x00_dev *rt2x00dev);
int rt2800_init_rfcsr(struct rt2x00_dev *rt2x00dev);

/* Shared mac80211 callback table (see rt2800lib.c). */
extern const struct ieee80211_ops rt2800_mac80211_ops;
133
134#endif /* RT2800LIB_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.c b/drivers/net/wireless/rt2x00/rt2800pci.c
new file mode 100644
index 000000000000..3c5b875cdee8
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800pci.c
@@ -0,0 +1,1685 @@
1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2800pci
23 Abstract: rt2800pci device specific routines.
24 Supported chipsets: RT2800E & RT2800ED.
25 */
26
27#include <linux/crc-ccitt.h>
28#include <linux/delay.h>
29#include <linux/etherdevice.h>
30#include <linux/init.h>
31#include <linux/kernel.h>
32#include <linux/module.h>
33#include <linux/pci.h>
34#include <linux/platform_device.h>
35#include <linux/eeprom_93cx6.h>
36
37#include "rt2x00.h"
38#include "rt2x00pci.h"
39#include "rt2x00soc.h"
40#include "rt2800lib.h"
41#include "rt2800.h"
42#include "rt2800pci.h"
43
/*
 * When this driver is built as a module, Kconfig defines the *_MODULE
 * variant of these symbols; map them back to the plain names so the
 * #ifdef sections below cover both built-in and modular builds.
 */
#ifdef CONFIG_RT2800PCI_PCI_MODULE
#define CONFIG_RT2800PCI_PCI
#endif

#ifdef CONFIG_RT2800PCI_WISOC_MODULE
#define CONFIG_RT2800PCI_WISOC
#endif
51
52/*
53 * Allow hardware encryption to be disabled.
54 */
55static int modparam_nohwcrypt = 1;
56module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
57MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
58
59static void rt2800pci_mcu_status(struct rt2x00_dev *rt2x00dev, const u8 token)
60{
61 unsigned int i;
62 u32 reg;
63
64 for (i = 0; i < 200; i++) {
65 rt2800_register_read(rt2x00dev, H2M_MAILBOX_CID, &reg);
66
67 if ((rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD0) == token) ||
68 (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD1) == token) ||
69 (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD2) == token) ||
70 (rt2x00_get_field32(reg, H2M_MAILBOX_CID_CMD3) == token))
71 break;
72
73 udelay(REGISTER_BUSY_DELAY);
74 }
75
76 if (i == 200)
77 ERROR(rt2x00dev, "MCU request failed, no response from hardware\n");
78
79 rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
80 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
81}
82
#ifdef CONFIG_RT2800PCI_WISOC
/*
 * On wireless-SoC platforms the EEPROM contents live at a fixed,
 * uncached physical address rather than behind a serial bus; copy them
 * straight into the driver's eeprom buffer.
 */
static void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
{
	u32 *base_addr = (u32 *) KSEG1ADDR(0x1F040000); /* XXX for RT3052 */

	memcpy_fromio(rt2x00dev->eeprom, base_addr, EEPROM_SIZE);
}
#else
/* Stub: SoC EEPROM support not compiled in. */
static inline void rt2800pci_read_eeprom_soc(struct rt2x00_dev *rt2x00dev)
{
}
#endif /* CONFIG_RT2800PCI_WISOC */
95
96#ifdef CONFIG_RT2800PCI_PCI
97static void rt2800pci_eepromregister_read(struct eeprom_93cx6 *eeprom)
98{
99 struct rt2x00_dev *rt2x00dev = eeprom->data;
100 u32 reg;
101
102 rt2800_register_read(rt2x00dev, E2PROM_CSR, &reg);
103
104 eeprom->reg_data_in = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_IN);
105 eeprom->reg_data_out = !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_OUT);
106 eeprom->reg_data_clock =
107 !!rt2x00_get_field32(reg, E2PROM_CSR_DATA_CLOCK);
108 eeprom->reg_chip_select =
109 !!rt2x00_get_field32(reg, E2PROM_CSR_CHIP_SELECT);
110}
111
112static void rt2800pci_eepromregister_write(struct eeprom_93cx6 *eeprom)
113{
114 struct rt2x00_dev *rt2x00dev = eeprom->data;
115 u32 reg = 0;
116
117 rt2x00_set_field32(&reg, E2PROM_CSR_DATA_IN, !!eeprom->reg_data_in);
118 rt2x00_set_field32(&reg, E2PROM_CSR_DATA_OUT, !!eeprom->reg_data_out);
119 rt2x00_set_field32(&reg, E2PROM_CSR_DATA_CLOCK,
120 !!eeprom->reg_data_clock);
121 rt2x00_set_field32(&reg, E2PROM_CSR_CHIP_SELECT,
122 !!eeprom->reg_chip_select);
123
124 rt2800_register_write(rt2x00dev, E2PROM_CSR, reg);
125}
126
127static void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
128{
129 struct eeprom_93cx6 eeprom;
130 u32 reg;
131
132 rt2800_register_read(rt2x00dev, E2PROM_CSR, &reg);
133
134 eeprom.data = rt2x00dev;
135 eeprom.register_read = rt2800pci_eepromregister_read;
136 eeprom.register_write = rt2800pci_eepromregister_write;
137 eeprom.width = !rt2x00_get_field32(reg, E2PROM_CSR_TYPE) ?
138 PCI_EEPROM_WIDTH_93C46 : PCI_EEPROM_WIDTH_93C66;
139 eeprom.reg_data_in = 0;
140 eeprom.reg_data_out = 0;
141 eeprom.reg_data_clock = 0;
142 eeprom.reg_chip_select = 0;
143
144 eeprom_93cx6_multiread(&eeprom, EEPROM_BASE, rt2x00dev->eeprom,
145 EEPROM_SIZE / sizeof(u16));
146}
147
/*
 * Read one 16-byte efuse block into the eeprom buffer, starting at
 * 16-bit word index 'i'.  Kicks an efuse read cycle, waits for the
 * KICK bit, then copies the four 32-bit data registers.
 */
static void rt2800pci_efuse_read(struct rt2x00_dev *rt2x00dev,
				 unsigned int i)
{
	u32 reg;

	/* Program the block address and start the read (KICK = 1). */
	rt2800_register_read(rt2x00dev, EFUSE_CTRL, &reg);
	rt2x00_set_field32(&reg, EFUSE_CTRL_ADDRESS_IN, i);
	rt2x00_set_field32(&reg, EFUSE_CTRL_MODE, 0);
	rt2x00_set_field32(&reg, EFUSE_CTRL_KICK, 1);
	rt2800_register_write(rt2x00dev, EFUSE_CTRL, reg);

	/* Wait until the EEPROM has been loaded */
	rt2800_regbusy_read(rt2x00dev, EFUSE_CTRL, EFUSE_CTRL_KICK, &reg);

	/* Apparently the data is read from end to start */
	/* NOTE(review): each u32 register is stored directly over two
	 * consecutive u16 eeprom words — assumes the register byte order
	 * matches the eeprom's in-memory layout; TODO confirm on
	 * big-endian hosts. */
	rt2800_register_read(rt2x00dev, EFUSE_DATA3,
			     (u32 *)&rt2x00dev->eeprom[i]);
	rt2800_register_read(rt2x00dev, EFUSE_DATA2,
			     (u32 *)&rt2x00dev->eeprom[i + 2]);
	rt2800_register_read(rt2x00dev, EFUSE_DATA1,
			     (u32 *)&rt2x00dev->eeprom[i + 4]);
	rt2800_register_read(rt2x00dev, EFUSE_DATA0,
			     (u32 *)&rt2x00dev->eeprom[i + 6]);
}
172
173static void rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
174{
175 unsigned int i;
176
177 for (i = 0; i < EEPROM_SIZE / sizeof(u16); i += 8)
178 rt2800pci_efuse_read(rt2x00dev, i);
179}
180#else
/* Stub: PCI EEPROM access not compiled in (CONFIG_RT2800PCI_PCI off). */
static inline void rt2800pci_read_eeprom_pci(struct rt2x00_dev *rt2x00dev)
{
}

/* Stub: efuse EEPROM access not compiled in. */
static inline void rt2800pci_read_eeprom_efuse(struct rt2x00_dev *rt2x00dev)
{
}
188#endif /* CONFIG_RT2800PCI_PCI */
189
190/*
191 * Firmware functions
192 */
/*
 * All rt2800pci devices load the same firmware image; the device
 * argument is unused.
 */
static char *rt2800pci_get_firmware_name(struct rt2x00_dev *rt2x00dev)
{
	return FIRMWARE_RT2860;
}
197
/*
 * Validate a firmware image before upload: length must be exactly 8kB
 * and the CRC stored in the last two bytes must match.
 */
static int rt2800pci_check_firmware(struct rt2x00_dev *rt2x00dev,
				    const u8 *data, const size_t len)
{
	u16 fw_crc;
	u16 crc;

	/*
	 * Only support 8kb firmware files.
	 */
	if (len != 8192)
		return FW_BAD_LENGTH;

	/*
	 * The last 2 bytes in the firmware array are the crc checksum itself,
	 * this means that we should never pass those 2 bytes to the crc
	 * algorithm.
	 */
	fw_crc = (data[len - 2] << 8 | data[len - 1]);

	/*
	 * Use the crc ccitt algorithm.
	 * This will return the same value as the legacy driver, which
	 * used bit-order reversal on both the firmware bytes before
	 * input as well as on the final output.
	 * Obviously using crc ccitt directly is much more efficient.
	 */
	crc = crc_ccitt(~0, data, len - 2);

	/*
	 * There is a small difference between the crc-itu-t + bitrev and
	 * the crc-ccitt crc calculation. In the latter method the 2 bytes
	 * will be swapped, use swab16 to convert the crc to the correct
	 * value.
	 */
	crc = swab16(crc);

	return (fw_crc == crc) ? FW_OK : FW_BAD_CRC;
}
236
/*
 * Upload the firmware image to the device.
 *
 * Sequence: wait for a sane MAC_CSR0 value, quiesce DMA, enable host
 * writes to the program RAM, copy the image, reset/start the PBF
 * system, then wait for its READY bit.  Returns 0 on success or -EBUSY
 * when the hardware never becomes ready.
 */
static int rt2800pci_load_firmware(struct rt2x00_dev *rt2x00dev,
				   const u8 *data, const size_t len)
{
	unsigned int i;
	u32 reg;

	/*
	 * Wait for stable hardware: MAC_CSR0 reads as 0 or all-ones
	 * while the device is not yet accessible.
	 */
	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
		if (reg && reg != ~0)
			break;
		msleep(1);
	}

	if (i == REGISTER_BUSY_COUNT) {
		ERROR(rt2x00dev, "Unstable hardware.\n");
		return -EBUSY;
	}

	rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000002);
	rt2800_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0x00000000);

	/*
	 * Disable DMA, will be reenabled later when enabling
	 * the radio.
	 */
	rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
	rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);

	/*
	 * enable Host program ram write selection
	 */
	reg = 0;
	rt2x00_set_field32(&reg, PBF_SYS_CTRL_HOST_RAM_WRITE, 1);
	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, reg);

	/*
	 * Write firmware to device.
	 */
	rt2800_register_multiwrite(rt2x00dev, FIRMWARE_IMAGE_BASE,
				   data, len);

	/* Pulse PBF_SYS_CTRL low/high to start the freshly loaded code. */
	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000);
	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001);

	/*
	 * Wait for device to stabilize.
	 */
	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
		if (rt2x00_get_field32(reg, PBF_SYS_CTRL_READY))
			break;
		msleep(1);
	}

	if (i == REGISTER_BUSY_COUNT) {
		ERROR(rt2x00dev, "PBF system register not ready.\n");
		return -EBUSY;
	}

	/*
	 * Disable interrupts
	 */
	rt2x00dev->ops->lib->set_device_state(rt2x00dev, STATE_RADIO_IRQ_OFF);

	/*
	 * Initialize BBP R/W access agent
	 */
	rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
	rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);

	return 0;
}
317
318/*
319 * Initialization functions.
320 */
321static bool rt2800pci_get_entry_state(struct queue_entry *entry)
322{
323 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
324 u32 word;
325
326 if (entry->queue->qid == QID_RX) {
327 rt2x00_desc_read(entry_priv->desc, 1, &word);
328
329 return (!rt2x00_get_field32(word, RXD_W1_DMA_DONE));
330 } else {
331 rt2x00_desc_read(entry_priv->desc, 1, &word);
332
333 return (!rt2x00_get_field32(word, TXD_W1_DMA_DONE));
334 }
335}
336
337static void rt2800pci_clear_entry(struct queue_entry *entry)
338{
339 struct queue_entry_priv_pci *entry_priv = entry->priv_data;
340 struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
341 u32 word;
342
343 if (entry->queue->qid == QID_RX) {
344 rt2x00_desc_read(entry_priv->desc, 0, &word);
345 rt2x00_set_field32(&word, RXD_W0_SDP0, skbdesc->skb_dma);
346 rt2x00_desc_write(entry_priv->desc, 0, word);
347
348 rt2x00_desc_read(entry_priv->desc, 1, &word);
349 rt2x00_set_field32(&word, RXD_W1_DMA_DONE, 0);
350 rt2x00_desc_write(entry_priv->desc, 1, word);
351 } else {
352 rt2x00_desc_read(entry_priv->desc, 1, &word);
353 rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 1);
354 rt2x00_desc_write(entry_priv->desc, 1, word);
355 }
356}
357
/*
 * Program the DMA queue registers: reset all TX/RX DMA indices, then
 * hand the hardware the descriptor ring base addresses, sizes and
 * initial indices for the four TX queues and the RX queue.
 */
static int rt2800pci_init_queues(struct rt2x00_dev *rt2x00dev)
{
	struct queue_entry_priv_pci *entry_priv;
	u32 reg;

	/* Reset every DMA TX index and the RX index. */
	rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
	rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);

	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);

	/*
	 * Initialize registers: ring base / size / indices per TX queue.
	 */
	entry_priv = rt2x00dev->tx[0].entries[0].priv_data;
	rt2800_register_write(rt2x00dev, TX_BASE_PTR0, entry_priv->desc_dma);
	rt2800_register_write(rt2x00dev, TX_MAX_CNT0, rt2x00dev->tx[0].limit);
	rt2800_register_write(rt2x00dev, TX_CTX_IDX0, 0);
	rt2800_register_write(rt2x00dev, TX_DTX_IDX0, 0);

	entry_priv = rt2x00dev->tx[1].entries[0].priv_data;
	rt2800_register_write(rt2x00dev, TX_BASE_PTR1, entry_priv->desc_dma);
	rt2800_register_write(rt2x00dev, TX_MAX_CNT1, rt2x00dev->tx[1].limit);
	rt2800_register_write(rt2x00dev, TX_CTX_IDX1, 0);
	rt2800_register_write(rt2x00dev, TX_DTX_IDX1, 0);

	entry_priv = rt2x00dev->tx[2].entries[0].priv_data;
	rt2800_register_write(rt2x00dev, TX_BASE_PTR2, entry_priv->desc_dma);
	rt2800_register_write(rt2x00dev, TX_MAX_CNT2, rt2x00dev->tx[2].limit);
	rt2800_register_write(rt2x00dev, TX_CTX_IDX2, 0);
	rt2800_register_write(rt2x00dev, TX_DTX_IDX2, 0);

	entry_priv = rt2x00dev->tx[3].entries[0].priv_data;
	rt2800_register_write(rt2x00dev, TX_BASE_PTR3, entry_priv->desc_dma);
	rt2800_register_write(rt2x00dev, TX_MAX_CNT3, rt2x00dev->tx[3].limit);
	rt2800_register_write(rt2x00dev, TX_CTX_IDX3, 0);
	rt2800_register_write(rt2x00dev, TX_DTX_IDX3, 0);

	/* RX ring: the CPU index starts at the last entry. */
	entry_priv = rt2x00dev->rx->entries[0].priv_data;
	rt2800_register_write(rt2x00dev, RX_BASE_PTR, entry_priv->desc_dma);
	rt2800_register_write(rt2x00dev, RX_MAX_CNT, rt2x00dev->rx[0].limit);
	rt2800_register_write(rt2x00dev, RX_CRX_IDX, rt2x00dev->rx[0].limit - 1);
	rt2800_register_write(rt2x00dev, RX_DRX_IDX, 0);

	/*
	 * Apply the global DMA configuration.  Note: TX/RX DMA remain
	 * disabled (0) here -- they are switched on later by
	 * rt2800pci_enable_radio().
	 */
	rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
	rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);

	rt2800_register_write(rt2x00dev, DELAY_INT_CFG, 0);

	return 0;
}
422
423/*
424 * Device state switch handlers.
425 */
426static void rt2800pci_toggle_rx(struct rt2x00_dev *rt2x00dev,
427 enum dev_state state)
428{
429 u32 reg;
430
431 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
432 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX,
433 (state == STATE_RADIO_RX_ON) ||
434 (state == STATE_RADIO_RX_ON_LINK));
435 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
436}
437
/*
 * Mask or unmask every interrupt source this driver handles.  When
 * enabling, pending sources are acknowledged first (INT_SOURCE_CSR is
 * write-back-to-clear) so no stale interrupt fires immediately.
 */
static void rt2800pci_toggle_irq(struct rt2x00_dev *rt2x00dev,
				 enum dev_state state)
{
	int mask = (state == STATE_RADIO_IRQ_ON);
	u32 reg;

	/*
	 * When interrupts are being enabled, the interrupt registers
	 * should clear the register to assure a clean state.
	 */
	if (state == STATE_RADIO_IRQ_ON) {
		rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
		rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
	}

	/* Apply the same mask bit to every interrupt source. */
	rt2800_register_read(rt2x00dev, INT_MASK_CSR, &reg);
	rt2x00_set_field32(&reg, INT_MASK_CSR_RXDELAYINT, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_TXDELAYINT, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_RX_DONE, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_AC0_DMA_DONE, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_AC1_DMA_DONE, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_AC2_DMA_DONE, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_AC3_DMA_DONE, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_HCCA_DMA_DONE, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_MGMT_DMA_DONE, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_MCU_COMMAND, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_RXTX_COHERENT, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_TBTT, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_PRE_TBTT, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_TX_FIFO_STATUS, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_AUTO_WAKEUP, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_GPTIMER, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_RX_COHERENT, mask);
	rt2x00_set_field32(&reg, INT_MASK_CSR_TX_COHERENT, mask);
	rt2800_register_write(rt2x00dev, INT_MASK_CSR, reg);
}
474
475static int rt2800pci_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
476{
477 unsigned int i;
478 u32 reg;
479
480 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
481 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
482 if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
483 !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
484 return 0;
485
486 msleep(1);
487 }
488
489 ERROR(rt2x00dev, "WPDMA TX/RX busy, aborting.\n");
490 return -EACCES;
491}
492
/*
 * Bring the radio up: initialize registers/queues/BBP/RF, signal the
 * firmware, then enable TX, DMA and finally RX in that order, and
 * program the LED configuration from the EEPROM.  Returns 0 on
 * success, -EIO if any initialization step fails.
 */
static int rt2800pci_enable_radio(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;
	u16 word;

	/*
	 * Initialize all registers.
	 */
	if (unlikely(rt2800pci_wait_wpdma_ready(rt2x00dev) ||
		     rt2800pci_init_queues(rt2x00dev) ||
		     rt2800_init_registers(rt2x00dev) ||
		     rt2800pci_wait_wpdma_ready(rt2x00dev) ||
		     rt2800_init_bbp(rt2x00dev) ||
		     rt2800_init_rfcsr(rt2x00dev)))
		return -EIO;

	/*
	 * Send signal to firmware during boot time.
	 */
	rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0);

	/*
	 * Enable RX.
	 * Done in three steps: TX only first, then DMA, then TX + RX.
	 */
	rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 0);
	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);

	rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 2);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
	rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);

	rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);

	/*
	 * Initialize LED control: each EEPROM word carries two bytes of
	 * LED configuration passed to the MCU.
	 */
	rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word);
	rt2800_mcu_request(rt2x00dev, MCU_LED_1, 0xff,
			   word & 0xff, (word >> 8) & 0xff);

	rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word);
	rt2800_mcu_request(rt2x00dev, MCU_LED_2, 0xff,
			   word & 0xff, (word >> 8) & 0xff);

	rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word);
	rt2800_mcu_request(rt2x00dev, MCU_LED_3, 0xff,
			   word & 0xff, (word >> 8) & 0xff);

	return 0;
}
551
/*
 * Shut the radio down: stop DMA, disable the MAC/power/TX pins, reset
 * all DMA indices, and finally wait for the DMA engines to go idle
 * (errors here are deliberately ignored -- we are turning off anyway).
 */
static void rt2800pci_disable_radio(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;

	rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
	rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);

	rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0);
	rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
	rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);

	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00001280);

	/* Reset every DMA TX index and the RX index. */
	rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX4, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX5, 1);
	rt2x00_set_field32(&reg, WPDMA_RST_IDX_DRX_IDX0, 1);
	rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);

	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e1f);
	rt2800_register_write(rt2x00dev, PBF_SYS_CTRL, 0x00000e00);

	/* Wait for DMA, ignore error */
	rt2800pci_wait_wpdma_ready(rt2x00dev);
}
586
587static int rt2800pci_set_state(struct rt2x00_dev *rt2x00dev,
588 enum dev_state state)
589{
590 /*
591 * Always put the device to sleep (even when we intend to wakeup!)
592 * if the device is booting and wasn't asleep it will return
593 * failure when attempting to wakeup.
594 */
595 rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 2);
596
597 if (state == STATE_AWAKE) {
598 rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, TOKEN_WAKUP, 0, 0);
599 rt2800pci_mcu_status(rt2x00dev, TOKEN_WAKUP);
600 }
601
602 return 0;
603}
604
/*
 * rt2x00lib device-state dispatcher: routes the requested state to the
 * matching radio / RX / IRQ / power-state handler.  Returns 0 on
 * success, a negative error code otherwise (and logs the failure).
 */
static int rt2800pci_set_device_state(struct rt2x00_dev *rt2x00dev,
				      enum dev_state state)
{
	int retval = 0;

	switch (state) {
	case STATE_RADIO_ON:
		/*
		 * Before the radio can be enabled, the device first has
		 * to be woken up. After that it needs a bit of time
		 * to be fully awake and then the radio can be enabled.
		 */
		rt2800pci_set_state(rt2x00dev, STATE_AWAKE);
		msleep(1);
		retval = rt2800pci_enable_radio(rt2x00dev);
		break;
	case STATE_RADIO_OFF:
		/*
		 * After the radio has been disabled, the device should
		 * be put to sleep for powersaving.
		 */
		rt2800pci_disable_radio(rt2x00dev);
		rt2800pci_set_state(rt2x00dev, STATE_SLEEP);
		break;
	case STATE_RADIO_RX_ON:
	case STATE_RADIO_RX_ON_LINK:
	case STATE_RADIO_RX_OFF:
	case STATE_RADIO_RX_OFF_LINK:
		rt2800pci_toggle_rx(rt2x00dev, state);
		break;
	case STATE_RADIO_IRQ_ON:
	case STATE_RADIO_IRQ_OFF:
		rt2800pci_toggle_irq(rt2x00dev, state);
		break;
	case STATE_DEEP_SLEEP:
	case STATE_SLEEP:
	case STATE_STANDBY:
	case STATE_AWAKE:
		retval = rt2800pci_set_state(rt2x00dev, state);
		break;
	default:
		retval = -ENOTSUPP;
		break;
	}

	if (unlikely(retval))
		ERROR(rt2x00dev, "Device failed to enter state %d (%d).\n",
		      state, retval);

	return retval;
}
656
657/*
658 * TX descriptor initialization
659 */
/*
 * Fill in both the TX Wireless Information (TXWI, prepended to the
 * frame in the extra TX headroom) and the DMA TX descriptor (TXD) for
 * a frame about to be queued.
 */
static void rt2800pci_write_tx_desc(struct rt2x00_dev *rt2x00dev,
				    struct sk_buff *skb,
				    struct txentry_desc *txdesc)
{
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(skb);
	__le32 *txd = skbdesc->desc;
	/* The TXWI sits immediately before skb->data, in the headroom. */
	__le32 *txwi = (__le32 *)(skb->data - rt2x00dev->hw->extra_tx_headroom);
	u32 word;

	/*
	 * Initialize TX Info descriptor
	 */
	rt2x00_desc_read(txwi, 0, &word);
	rt2x00_set_field32(&word, TXWI_W0_FRAG,
			   test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W0_MIMO_PS, 0);
	rt2x00_set_field32(&word, TXWI_W0_CF_ACK, 0);
	rt2x00_set_field32(&word, TXWI_W0_TS,
			   test_bit(ENTRY_TXD_REQ_TIMESTAMP, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W0_AMPDU,
			   test_bit(ENTRY_TXD_HT_AMPDU, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W0_MPDU_DENSITY, txdesc->mpdu_density);
	rt2x00_set_field32(&word, TXWI_W0_TX_OP, txdesc->ifs);
	rt2x00_set_field32(&word, TXWI_W0_MCS, txdesc->mcs);
	rt2x00_set_field32(&word, TXWI_W0_BW,
			   test_bit(ENTRY_TXD_HT_BW_40, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W0_SHORT_GI,
			   test_bit(ENTRY_TXD_HT_SHORT_GI, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W0_STBC, txdesc->stbc);
	rt2x00_set_field32(&word, TXWI_W0_PHYMODE, txdesc->rate_mode);
	rt2x00_desc_write(txwi, 0, word);

	rt2x00_desc_read(txwi, 1, &word);
	rt2x00_set_field32(&word, TXWI_W1_ACK,
			   test_bit(ENTRY_TXD_ACK, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W1_NSEQ,
			   test_bit(ENTRY_TXD_GENERATE_SEQ, &txdesc->flags));
	rt2x00_set_field32(&word, TXWI_W1_BW_WIN_SIZE, txdesc->ba_size);
	/* 0xff means "no crypto entry" when the frame is unencrypted. */
	rt2x00_set_field32(&word, TXWI_W1_WIRELESS_CLI_ID,
			   test_bit(ENTRY_TXD_ENCRYPT, &txdesc->flags) ?
			   txdesc->key_idx : 0xff);
	rt2x00_set_field32(&word, TXWI_W1_MPDU_TOTAL_BYTE_COUNT,
			   skb->len - txdesc->l2pad);
	rt2x00_set_field32(&word, TXWI_W1_PACKETID,
			   skbdesc->entry->queue->qid + 1);
	rt2x00_desc_write(txwi, 1, word);

	/*
	 * Always write 0 to IV/EIV fields, hardware will insert the IV
	 * from the IVEIV register when TXD_W3_WIV is set to 0.
	 * When TXD_W3_WIV is set to 1 it will use the IV data
	 * from the descriptor. The TXWI_W1_WIRELESS_CLI_ID indicates which
	 * crypto entry in the registers should be used to encrypt the frame.
	 */
	_rt2x00_desc_write(txwi, 2, 0 /* skbdesc->iv[0] */);
	_rt2x00_desc_write(txwi, 3, 0 /* skbdesc->iv[1] */);

	/*
	 * The buffers pointed by SD_PTR0/SD_LEN0 and SD_PTR1/SD_LEN1
	 * must contains a TXWI structure + 802.11 header + padding + 802.11
	 * data. We choose to have SD_PTR0/SD_LEN0 only contains TXWI and
	 * SD_PTR1/SD_LEN1 contains 802.11 header + padding + 802.11
	 * data. It means that LAST_SEC0 is always 0.
	 */

	/*
	 * Initialize TX descriptor
	 */
	rt2x00_desc_read(txd, 0, &word);
	rt2x00_set_field32(&word, TXD_W0_SD_PTR0, skbdesc->skb_dma);
	rt2x00_desc_write(txd, 0, word);

	rt2x00_desc_read(txd, 1, &word);
	rt2x00_set_field32(&word, TXD_W1_SD_LEN1, skb->len);
	rt2x00_set_field32(&word, TXD_W1_LAST_SEC1,
			   !test_bit(ENTRY_TXD_MORE_FRAG, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W1_BURST,
			   test_bit(ENTRY_TXD_BURST, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W1_SD_LEN0,
			   rt2x00dev->hw->extra_tx_headroom);
	rt2x00_set_field32(&word, TXD_W1_LAST_SEC0, 0);
	rt2x00_set_field32(&word, TXD_W1_DMA_DONE, 0);
	rt2x00_desc_write(txd, 1, word);

	rt2x00_desc_read(txd, 2, &word);
	rt2x00_set_field32(&word, TXD_W2_SD_PTR1,
			   skbdesc->skb_dma + rt2x00dev->hw->extra_tx_headroom);
	rt2x00_desc_write(txd, 2, word);

	rt2x00_desc_read(txd, 3, &word);
	rt2x00_set_field32(&word, TXD_W3_WIV,
			   !test_bit(ENTRY_TXD_ENCRYPT_IV, &txdesc->flags));
	rt2x00_set_field32(&word, TXD_W3_QSEL, 2);
	rt2x00_desc_write(txd, 3, word);
}
755
756/*
757 * TX data initialization
758 */
/*
 * Copy a fully prepared beacon (descriptor + frame data) into the
 * hardware beacon memory.  Beacon generation is disabled first so the
 * hardware never transmits a half-written beacon; it is re-enabled by
 * the QID_BEACON path in rt2800pci_kick_tx_queue().
 */
static void rt2800pci_write_beacon(struct queue_entry *entry)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	unsigned int beacon_base;
	u32 reg;

	/*
	 * Disable beaconing while we are reloading the beacon data,
	 * otherwise we might be sending out invalid data.
	 */
	rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
	rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);

	/*
	 * Write entire beacon with descriptor to register.
	 */
	beacon_base = HW_BEACON_OFFSET(entry->entry_idx);
	rt2800_register_multiwrite(rt2x00dev,
				   beacon_base,
				   skbdesc->desc, skbdesc->desc_len);
	rt2800_register_multiwrite(rt2x00dev,
				   beacon_base + skbdesc->desc_len,
				   entry->skb->data, entry->skb->len);

	/*
	 * Clean up beacon skb: the data now lives in device memory.
	 */
	dev_kfree_skb_any(entry->skb);
	entry->skb = NULL;
}
791
792static void rt2800pci_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
793 const enum data_queue_qid queue_idx)
794{
795 struct data_queue *queue;
796 unsigned int idx, qidx = 0;
797 u32 reg;
798
799 if (queue_idx == QID_BEACON) {
800 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
801 if (!rt2x00_get_field32(reg, BCN_TIME_CFG_BEACON_GEN)) {
802 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
803 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
804 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
805 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
806 }
807 return;
808 }
809
810 if (queue_idx > QID_HCCA && queue_idx != QID_MGMT)
811 return;
812
813 queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
814 idx = queue->index[Q_INDEX];
815
816 if (queue_idx == QID_MGMT)
817 qidx = 5;
818 else
819 qidx = queue_idx;
820
821 rt2800_register_write(rt2x00dev, TX_CTX_IDX(qidx), idx);
822}
823
824static void rt2800pci_kill_tx_queue(struct rt2x00_dev *rt2x00dev,
825 const enum data_queue_qid qid)
826{
827 u32 reg;
828
829 if (qid == QID_BEACON) {
830 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, 0);
831 return;
832 }
833
834 rt2800_register_read(rt2x00dev, WPDMA_RST_IDX, &reg);
835 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX0, (qid == QID_AC_BE));
836 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX1, (qid == QID_AC_BK));
837 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX2, (qid == QID_AC_VI));
838 rt2x00_set_field32(&reg, WPDMA_RST_IDX_DTX_IDX3, (qid == QID_AC_VO));
839 rt2800_register_write(rt2x00dev, WPDMA_RST_IDX, reg);
840}
841
/*
 * RX control handlers
 */
static void rt2800pci_fill_rxdone(struct queue_entry *entry,
				  struct rxdone_entry_desc *rxdesc)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct skb_frame_desc *skbdesc = get_skb_frame_desc(entry->skb);
	struct queue_entry_priv_pci *entry_priv = entry->priv_data;
	__le32 *rxd = entry_priv->desc;
	/* The RXWI descriptor is prepended to the frame data by hardware. */
	__le32 *rxwi = (__le32 *)entry->skb->data;
	u32 rxd3;
	u32 rxwi0;
	u32 rxwi1;
	u32 rxwi2;
	u32 rxwi3;

	rt2x00_desc_read(rxd, 3, &rxd3);
	rt2x00_desc_read(rxwi, 0, &rxwi0);
	rt2x00_desc_read(rxwi, 1, &rxwi1);
	rt2x00_desc_read(rxwi, 2, &rxwi2);
	rt2x00_desc_read(rxwi, 3, &rxwi3);

	if (rt2x00_get_field32(rxd3, RXD_W3_CRC_ERROR))
		rxdesc->flags |= RX_FLAG_FAILED_FCS_CRC;

	if (test_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags)) {
		/*
		 * Unfortunately we don't know the cipher type used during
		 * decryption. This prevents us from providing correct
		 * statistics through debugfs.
		 */
		rxdesc->cipher = rt2x00_get_field32(rxwi0, RXWI_W0_UDF);
		rxdesc->cipher_status =
		    rt2x00_get_field32(rxd3, RXD_W3_CIPHER_ERROR);
	}

	if (rt2x00_get_field32(rxd3, RXD_W3_DECRYPTED)) {
		/*
		 * Hardware has stripped IV/EIV data from 802.11 frame during
		 * decryption. Unfortunately the descriptor doesn't contain
		 * any fields with the EIV/IV data either, so they can't
		 * be restored by rt2x00lib.
		 */
		rxdesc->flags |= RX_FLAG_IV_STRIPPED;

		if (rxdesc->cipher_status == RX_CRYPTO_SUCCESS)
			rxdesc->flags |= RX_FLAG_DECRYPTED;
		else if (rxdesc->cipher_status == RX_CRYPTO_FAIL_MIC)
			rxdesc->flags |= RX_FLAG_MMIC_ERROR;
	}

	if (rt2x00_get_field32(rxd3, RXD_W3_MY_BSS))
		rxdesc->dev_flags |= RXDONE_MY_BSS;

	/* L2 padding between header and payload was inserted by hardware. */
	if (rt2x00_get_field32(rxd3, RXD_W3_L2PAD)) {
		rxdesc->dev_flags |= RXDONE_L2PAD;
		skbdesc->flags |= SKBDESC_L2_PADDED;
	}

	if (rt2x00_get_field32(rxwi1, RXWI_W1_SHORT_GI))
		rxdesc->flags |= RX_FLAG_SHORT_GI;

	if (rt2x00_get_field32(rxwi1, RXWI_W1_BW))
		rxdesc->flags |= RX_FLAG_40MHZ;

	/*
	 * Detect RX rate, always use MCS as signal type.
	 */
	rxdesc->dev_flags |= RXDONE_SIGNAL_MCS;
	rxdesc->rate_mode = rt2x00_get_field32(rxwi1, RXWI_W1_PHYMODE);
	rxdesc->signal = rt2x00_get_field32(rxwi1, RXWI_W1_MCS);

	/*
	 * Mask of 0x8 bit to remove the short preamble flag.
	 */
	if (rxdesc->rate_mode == RATE_MODE_CCK)
		rxdesc->signal &= ~0x8;

	/* Average the two RSSI/SNR chains into single values. */
	rxdesc->rssi =
	    (rt2x00_get_field32(rxwi2, RXWI_W2_RSSI0) +
	     rt2x00_get_field32(rxwi2, RXWI_W2_RSSI1)) / 2;

	rxdesc->noise =
	    (rt2x00_get_field32(rxwi3, RXWI_W3_SNR0) +
	     rt2x00_get_field32(rxwi3, RXWI_W3_SNR1)) / 2;

	rxdesc->size = rt2x00_get_field32(rxwi0, RXWI_W0_MPDU_TOTAL_BYTE_COUNT);

	/*
	 * Set RX IDX in register to inform hardware that we have handled
	 * this entry and it is available for reuse again.
	 */
	rt2800_register_write(rt2x00dev, RX_CRX_IDX, entry->entry_idx);

	/*
	 * Remove TXWI descriptor from start of buffer.
	 */
	skb_pull(entry->skb, RXWI_DESC_SIZE);
	skb_trim(entry->skb, rxdesc->size);
}
943
/*
 * Interrupt functions.
 */
/*
 * Drain the TX status FIFO and report completion status for every
 * transmitted frame to rt2x00lib. Entries the hardware skipped are
 * reported as TXDONE_UNKNOWN so the queue indices stay consistent.
 */
static void rt2800pci_txdone(struct rt2x00_dev *rt2x00dev)
{
	struct data_queue *queue;
	struct queue_entry *entry;
	struct queue_entry *entry_done;
	struct queue_entry_priv_pci *entry_priv;
	struct txdone_entry_desc txdesc;
	u32 word;
	u32 reg;
	u32 old_reg;
	unsigned int type;
	unsigned int index;
	u16 mcs, real_mcs;

	/*
	 * During each loop we will compare the freshly read
	 * TX_STA_FIFO register value with the value read from
	 * the previous loop. If the 2 values are equal then
	 * we should stop processing because the chance is
	 * quite big that the device has been unplugged and
	 * we risk going into an endless loop.
	 */
	old_reg = 0;

	while (1) {
		rt2800_register_read(rt2x00dev, TX_STA_FIFO, &reg);
		if (!rt2x00_get_field32(reg, TX_STA_FIFO_VALID))
			break;

		if (old_reg == reg)
			break;
		old_reg = reg;

		/*
		 * Skip this entry when it contains an invalid
		 * queue identification number.
		 */
		type = rt2x00_get_field32(reg, TX_STA_FIFO_PID_TYPE) - 1;
		if (type >= QID_RX)
			continue;

		queue = rt2x00queue_get_queue(rt2x00dev, type);
		if (unlikely(!queue))
			continue;

		/*
		 * Skip this entry when it contains an invalid
		 * index number.
		 */
		index = rt2x00_get_field32(reg, TX_STA_FIFO_WCID) - 1;
		if (unlikely(index >= queue->limit))
			continue;

		entry = &queue->entries[index];
		entry_priv = entry->priv_data;
		/* Word 0 of the TXWI still sits at the head of the skb. */
		rt2x00_desc_read((__le32 *)entry->skb->data, 0, &word);

		entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
		while (entry != entry_done) {
			/*
			 * Catch up.
			 * Just report any entries we missed as failed.
			 */
			WARNING(rt2x00dev,
				"TX status report missed for entry %d\n",
				entry_done->entry_idx);

			txdesc.flags = 0;
			__set_bit(TXDONE_UNKNOWN, &txdesc.flags);
			txdesc.retry = 0;

			rt2x00lib_txdone(entry_done, &txdesc);
			entry_done = rt2x00queue_get_entry(queue, Q_INDEX_DONE);
		}

		/*
		 * Obtain the status about this packet.
		 */
		txdesc.flags = 0;
		if (rt2x00_get_field32(reg, TX_STA_FIFO_TX_SUCCESS))
			__set_bit(TXDONE_SUCCESS, &txdesc.flags);
		else
			__set_bit(TXDONE_FAILURE, &txdesc.flags);

		/*
		 * Ralink has a retry mechanism using a global fallback
		 * table. We setup this fallback table to try the immediate
		 * lower rate for all rates. In the TX_STA_FIFO,
		 * the MCS field contains the MCS used for the successful
		 * transmission. If the first transmission succeeded,
		 * we have mcs == tx_mcs. On the second transmission,
		 * we have mcs = tx_mcs - 1. So the number of
		 * retries is (tx_mcs - mcs).
		 */
		mcs = rt2x00_get_field32(word, TXWI_W0_MCS);
		real_mcs = rt2x00_get_field32(reg, TX_STA_FIFO_MCS);
		__set_bit(TXDONE_FALLBACK, &txdesc.flags);
		/* min() guards against real_mcs > mcs (would underflow). */
		txdesc.retry = mcs - min(mcs, real_mcs);

		rt2x00lib_txdone(entry, &txdesc);
	}
}
1049
1050static irqreturn_t rt2800pci_interrupt(int irq, void *dev_instance)
1051{
1052 struct rt2x00_dev *rt2x00dev = dev_instance;
1053 u32 reg;
1054
1055 /* Read status and ACK all interrupts */
1056 rt2800_register_read(rt2x00dev, INT_SOURCE_CSR, &reg);
1057 rt2800_register_write(rt2x00dev, INT_SOURCE_CSR, reg);
1058
1059 if (!reg)
1060 return IRQ_NONE;
1061
1062 if (!test_bit(DEVICE_STATE_ENABLED_RADIO, &rt2x00dev->flags))
1063 return IRQ_HANDLED;
1064
1065 /*
1066 * 1 - Rx ring done interrupt.
1067 */
1068 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_RX_DONE))
1069 rt2x00pci_rxdone(rt2x00dev);
1070
1071 if (rt2x00_get_field32(reg, INT_SOURCE_CSR_TX_FIFO_STATUS))
1072 rt2800pci_txdone(rt2x00dev);
1073
1074 return IRQ_HANDLED;
1075}
1076
/*
 * Device probe functions.
 */
/*
 * Read the EEPROM (via the interface appropriate for the detected chip)
 * and sanitize its contents: replace obviously invalid fields with sane
 * defaults so the rest of the driver can trust the cached EEPROM image.
 */
static int rt2800pci_validate_eeprom(struct rt2x00_dev *rt2x00dev)
{
	u16 word;
	u8 *mac;
	u8 default_lna_gain;

	/*
	 * Read EEPROM into buffer
	 */
	switch(rt2x00dev->chip.rt) {
	case RT2880:
	case RT3052:
		rt2800pci_read_eeprom_soc(rt2x00dev);
		break;
	case RT3090:
		rt2800pci_read_eeprom_efuse(rt2x00dev);
		break;
	default:
		rt2800pci_read_eeprom_pci(rt2x00dev);
		break;
	}

	/*
	 * Start validation of the data that has been read.
	 * Invalid MAC addresses are replaced with a random one.
	 */
	mac = rt2x00_eeprom_addr(rt2x00dev, EEPROM_MAC_ADDR_0);
	if (!is_valid_ether_addr(mac)) {
		random_ether_addr(mac);
		EEPROM(rt2x00dev, "MAC: %pM\n", mac);
	}

	/* Antenna word: 0xffff means unprogrammed EEPROM. */
	rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &word);
	if (word == 0xffff) {
		rt2x00_set_field16(&word, EEPROM_ANTENNA_RXPATH, 2);
		rt2x00_set_field16(&word, EEPROM_ANTENNA_TXPATH, 1);
		rt2x00_set_field16(&word, EEPROM_ANTENNA_RF_TYPE, RF2820);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
		EEPROM(rt2x00dev, "Antenna: 0x%04x\n", word);
	} else if (rt2x00_rev(&rt2x00dev->chip) < RT2883_VERSION) {
		/*
		 * There is a max of 2 RX streams for RT2860 series
		 */
		if (rt2x00_get_field16(word, EEPROM_ANTENNA_RXPATH) > 2)
			rt2x00_set_field16(&word, EEPROM_ANTENNA_RXPATH, 2);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_ANTENNA, word);
	}

	/* NIC configuration word: default everything off if unprogrammed. */
	rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &word);
	if (word == 0xffff) {
		rt2x00_set_field16(&word, EEPROM_NIC_HW_RADIO, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_DYNAMIC_TX_AGC, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_BG, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_EXTERNAL_LNA_A, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_CARDBUS_ACCEL, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_BW40M_SB_BG, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_BW40M_SB_A, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_WPS_PBC, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_BW40M_BG, 0);
		rt2x00_set_field16(&word, EEPROM_NIC_BW40M_A, 0);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_NIC, word);
		EEPROM(rt2x00dev, "NIC: 0x%04x\n", word);
	}

	/* Frequency/LED word: only the low byte marks "unprogrammed". */
	rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &word);
	if ((word & 0x00ff) == 0x00ff) {
		rt2x00_set_field16(&word, EEPROM_FREQ_OFFSET, 0);
		rt2x00_set_field16(&word, EEPROM_FREQ_LED_MODE,
				   LED_MODE_TXRX_ACTIVITY);
		rt2x00_set_field16(&word, EEPROM_FREQ_LED_POLARITY, 0);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_FREQ, word);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_LED1, 0x5555);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_LED2, 0x2221);
		rt2x00_eeprom_write(rt2x00dev, EEPROM_LED3, 0xa9f8);
		EEPROM(rt2x00dev, "Freq: 0x%04x\n", word);
	}

	/*
	 * During the LNA validation we are going to use
	 * lna0 as correct value. Note that EEPROM_LNA
	 * is never validated.
	 */
	rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &word);
	default_lna_gain = rt2x00_get_field16(word, EEPROM_LNA_A0);

	/* RSSI offsets outside +/-10 are considered bogus and cleared. */
	rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG, &word);
	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET0)) > 10)
		rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET0, 0);
	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG_OFFSET1)) > 10)
		rt2x00_set_field16(&word, EEPROM_RSSI_BG_OFFSET1, 0);
	rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG, word);

	rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &word);
	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_BG2_OFFSET2)) > 10)
		rt2x00_set_field16(&word, EEPROM_RSSI_BG2_OFFSET2, 0);
	if (rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0x00 ||
	    rt2x00_get_field16(word, EEPROM_RSSI_BG2_LNA_A1) == 0xff)
		rt2x00_set_field16(&word, EEPROM_RSSI_BG2_LNA_A1,
				   default_lna_gain);
	rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_BG2, word);

	rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A, &word);
	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET0)) > 10)
		rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET0, 0);
	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A_OFFSET1)) > 10)
		rt2x00_set_field16(&word, EEPROM_RSSI_A_OFFSET1, 0);
	rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A, word);

	rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &word);
	if (abs(rt2x00_get_field16(word, EEPROM_RSSI_A2_OFFSET2)) > 10)
		rt2x00_set_field16(&word, EEPROM_RSSI_A2_OFFSET2, 0);
	if (rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0x00 ||
	    rt2x00_get_field16(word, EEPROM_RSSI_A2_LNA_A2) == 0xff)
		rt2x00_set_field16(&word, EEPROM_RSSI_A2_LNA_A2,
				   default_lna_gain);
	rt2x00_eeprom_write(rt2x00dev, EEPROM_RSSI_A2, word);

	return 0;
}
1198
/*
 * Derive driver state from the (already validated) EEPROM image:
 * RF chipset identification, default antenna configuration, frequency
 * offset, external LNA flags, hardware radio button and LED settings.
 * Returns -ENODEV when the RF chipset is not one we support.
 */
static int rt2800pci_init_eeprom(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;
	u16 value;
	u16 eeprom;

	/*
	 * Read EEPROM word for configuration.
	 */
	rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);

	/*
	 * Identify RF chipset.
	 */
	value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
	rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
	rt2x00_set_chip_rf(rt2x00dev, value, reg);

	/* Reject any RF chipset this driver does not know how to drive. */
	if (!rt2x00_rf(&rt2x00dev->chip, RF2820) &&
	    !rt2x00_rf(&rt2x00dev->chip, RF2850) &&
	    !rt2x00_rf(&rt2x00dev->chip, RF2720) &&
	    !rt2x00_rf(&rt2x00dev->chip, RF2750) &&
	    !rt2x00_rf(&rt2x00dev->chip, RF3020) &&
	    !rt2x00_rf(&rt2x00dev->chip, RF2020) &&
	    !rt2x00_rf(&rt2x00dev->chip, RF3021) &&
	    !rt2x00_rf(&rt2x00dev->chip, RF3022)) {
		ERROR(rt2x00dev, "Invalid RF chipset detected.\n");
		return -ENODEV;
	}

	/*
	 * Identify default antenna configuration.
	 */
	rt2x00dev->default_ant.tx =
	    rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH);
	rt2x00dev->default_ant.rx =
	    rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH);

	/*
	 * Read frequency offset and RF programming sequence.
	 */
	rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &eeprom);
	rt2x00dev->freq_offset = rt2x00_get_field16(eeprom, EEPROM_FREQ_OFFSET);

	/*
	 * Read external LNA information.
	 */
	rt2x00_eeprom_read(rt2x00dev, EEPROM_NIC, &eeprom);

	if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_A))
		__set_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags);
	if (rt2x00_get_field16(eeprom, EEPROM_NIC_EXTERNAL_LNA_BG))
		__set_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags);

	/*
	 * Detect if this device has a hardware controlled radio.
	 */
	if (rt2x00_get_field16(eeprom, EEPROM_NIC_HW_RADIO))
		__set_bit(CONFIG_SUPPORT_HW_BUTTON, &rt2x00dev->flags);

	/*
	 * Store led settings, for correct led behaviour.
	 */
#ifdef CONFIG_RT2X00_LIB_LEDS
	rt2800_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
	rt2800_init_led(rt2x00dev, &rt2x00dev->led_assoc, LED_TYPE_ASSOC);
	rt2800_init_led(rt2x00dev, &rt2x00dev->led_qual, LED_TYPE_QUALITY);

	rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, &rt2x00dev->led_mcu_reg);
#endif /* CONFIG_RT2X00_LIB_LEDS */

	return 0;
}
1272
/*
 * RF value list for rt2860
 * Supports: 2.4 GHz (all) & 5.2 GHz (RF2850 & RF2750)
 * Each entry holds the channel number followed by four RF register
 * programming values (see struct rf_channel). The first 14 entries
 * are the 2.4 GHz channels; the remainder cover the 5 GHz bands.
 */
static const struct rf_channel rf_vals[] = {
	{ 1,  0x18402ecc, 0x184c0786, 0x1816b455, 0x1800510b },
	{ 2,  0x18402ecc, 0x184c0786, 0x18168a55, 0x1800519f },
	{ 3,  0x18402ecc, 0x184c078a, 0x18168a55, 0x1800518b },
	{ 4,  0x18402ecc, 0x184c078a, 0x18168a55, 0x1800519f },
	{ 5,  0x18402ecc, 0x184c078e, 0x18168a55, 0x1800518b },
	{ 6,  0x18402ecc, 0x184c078e, 0x18168a55, 0x1800519f },
	{ 7,  0x18402ecc, 0x184c0792, 0x18168a55, 0x1800518b },
	{ 8,  0x18402ecc, 0x184c0792, 0x18168a55, 0x1800519f },
	{ 9,  0x18402ecc, 0x184c0796, 0x18168a55, 0x1800518b },
	{ 10, 0x18402ecc, 0x184c0796, 0x18168a55, 0x1800519f },
	{ 11, 0x18402ecc, 0x184c079a, 0x18168a55, 0x1800518b },
	{ 12, 0x18402ecc, 0x184c079a, 0x18168a55, 0x1800519f },
	{ 13, 0x18402ecc, 0x184c079e, 0x18168a55, 0x1800518b },
	{ 14, 0x18402ecc, 0x184c07a2, 0x18168a55, 0x18005193 },

	/* 802.11 UNI / HyperLan 2 */
	{ 36, 0x18402ecc, 0x184c099a, 0x18158a55, 0x180ed1a3 },
	{ 38, 0x18402ecc, 0x184c099e, 0x18158a55, 0x180ed193 },
	{ 40, 0x18402ec8, 0x184c0682, 0x18158a55, 0x180ed183 },
	{ 44, 0x18402ec8, 0x184c0682, 0x18158a55, 0x180ed1a3 },
	{ 46, 0x18402ec8, 0x184c0686, 0x18158a55, 0x180ed18b },
	{ 48, 0x18402ec8, 0x184c0686, 0x18158a55, 0x180ed19b },
	{ 52, 0x18402ec8, 0x184c068a, 0x18158a55, 0x180ed193 },
	{ 54, 0x18402ec8, 0x184c068a, 0x18158a55, 0x180ed1a3 },
	{ 56, 0x18402ec8, 0x184c068e, 0x18158a55, 0x180ed18b },
	{ 60, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed183 },
	{ 62, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed193 },
	{ 64, 0x18402ec8, 0x184c0692, 0x18158a55, 0x180ed1a3 },

	/* 802.11 HyperLan 2 */
	{ 100, 0x18402ec8, 0x184c06b2, 0x18178a55, 0x180ed783 },
	{ 102, 0x18402ec8, 0x184c06b2, 0x18578a55, 0x180ed793 },
	{ 104, 0x18402ec8, 0x185c06b2, 0x18578a55, 0x180ed1a3 },
	{ 108, 0x18402ecc, 0x185c0a32, 0x18578a55, 0x180ed193 },
	{ 110, 0x18402ecc, 0x184c0a36, 0x18178a55, 0x180ed183 },
	{ 112, 0x18402ecc, 0x184c0a36, 0x18178a55, 0x180ed19b },
	{ 116, 0x18402ecc, 0x184c0a3a, 0x18178a55, 0x180ed1a3 },
	{ 118, 0x18402ecc, 0x184c0a3e, 0x18178a55, 0x180ed193 },
	{ 120, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed183 },
	{ 124, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed193 },
	{ 126, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed15b },
	{ 128, 0x18402ec4, 0x184c0382, 0x18178a55, 0x180ed1a3 },
	{ 132, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed18b },
	{ 134, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed193 },
	{ 136, 0x18402ec4, 0x184c0386, 0x18178a55, 0x180ed19b },
	{ 140, 0x18402ec4, 0x184c038a, 0x18178a55, 0x180ed183 },

	/* 802.11 UNII */
	{ 149, 0x18402ec4, 0x184c038a, 0x18178a55, 0x180ed1a7 },
	{ 151, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed187 },
	{ 153, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed18f },
	{ 157, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed19f },
	{ 159, 0x18402ec4, 0x184c038e, 0x18178a55, 0x180ed1a7 },
	{ 161, 0x18402ec4, 0x184c0392, 0x18178a55, 0x180ed187 },
	{ 165, 0x18402ec4, 0x184c0392, 0x18178a55, 0x180ed197 },

	/* 802.11 Japan */
	{ 184, 0x15002ccc, 0x1500491e, 0x1509be55, 0x150c0a0b },
	{ 188, 0x15002ccc, 0x15004922, 0x1509be55, 0x150c0a13 },
	{ 192, 0x15002ccc, 0x15004926, 0x1509be55, 0x150c0a1b },
	{ 196, 0x15002ccc, 0x1500492a, 0x1509be55, 0x150c0a23 },
	{ 208, 0x15002ccc, 0x1500493a, 0x1509be55, 0x150c0a13 },
	{ 212, 0x15002ccc, 0x1500493e, 0x1509be55, 0x150c0a1b },
	{ 216, 0x15002ccc, 0x15004982, 0x1509be55, 0x150c0a23 },
};
1343
1344static int rt2800pci_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
1345{
1346 struct hw_mode_spec *spec = &rt2x00dev->spec;
1347 struct channel_info *info;
1348 char *tx_power1;
1349 char *tx_power2;
1350 unsigned int i;
1351 u16 eeprom;
1352
1353 /*
1354 * Initialize all hw fields.
1355 */
1356 rt2x00dev->hw->flags =
1357 IEEE80211_HW_HOST_BROADCAST_PS_BUFFERING |
1358 IEEE80211_HW_SIGNAL_DBM |
1359 IEEE80211_HW_SUPPORTS_PS |
1360 IEEE80211_HW_PS_NULLFUNC_STACK;
1361 rt2x00dev->hw->extra_tx_headroom = TXWI_DESC_SIZE;
1362
1363 SET_IEEE80211_DEV(rt2x00dev->hw, rt2x00dev->dev);
1364 SET_IEEE80211_PERM_ADDR(rt2x00dev->hw,
1365 rt2x00_eeprom_addr(rt2x00dev,
1366 EEPROM_MAC_ADDR_0));
1367
1368 rt2x00_eeprom_read(rt2x00dev, EEPROM_ANTENNA, &eeprom);
1369
1370 /*
1371 * Initialize hw_mode information.
1372 */
1373 spec->supported_bands = SUPPORT_BAND_2GHZ;
1374 spec->supported_rates = SUPPORT_RATE_CCK | SUPPORT_RATE_OFDM;
1375
1376 if (rt2x00_rf(&rt2x00dev->chip, RF2820) ||
1377 rt2x00_rf(&rt2x00dev->chip, RF2720) ||
1378 rt2x00_rf(&rt2x00dev->chip, RF3020) ||
1379 rt2x00_rf(&rt2x00dev->chip, RF3021) ||
1380 rt2x00_rf(&rt2x00dev->chip, RF3022) ||
1381 rt2x00_rf(&rt2x00dev->chip, RF2020) ||
1382 rt2x00_rf(&rt2x00dev->chip, RF3052)) {
1383 spec->num_channels = 14;
1384 spec->channels = rf_vals;
1385 } else if (rt2x00_rf(&rt2x00dev->chip, RF2850) ||
1386 rt2x00_rf(&rt2x00dev->chip, RF2750)) {
1387 spec->supported_bands |= SUPPORT_BAND_5GHZ;
1388 spec->num_channels = ARRAY_SIZE(rf_vals);
1389 spec->channels = rf_vals;
1390 }
1391
1392 /*
1393 * Initialize HT information.
1394 */
1395 spec->ht.ht_supported = true;
1396 spec->ht.cap =
1397 IEEE80211_HT_CAP_SUP_WIDTH_20_40 |
1398 IEEE80211_HT_CAP_GRN_FLD |
1399 IEEE80211_HT_CAP_SGI_20 |
1400 IEEE80211_HT_CAP_SGI_40 |
1401 IEEE80211_HT_CAP_TX_STBC |
1402 IEEE80211_HT_CAP_RX_STBC |
1403 IEEE80211_HT_CAP_PSMP_SUPPORT;
1404 spec->ht.ampdu_factor = 3;
1405 spec->ht.ampdu_density = 4;
1406 spec->ht.mcs.tx_params =
1407 IEEE80211_HT_MCS_TX_DEFINED |
1408 IEEE80211_HT_MCS_TX_RX_DIFF |
1409 ((rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TXPATH) - 1) <<
1410 IEEE80211_HT_MCS_TX_MAX_STREAMS_SHIFT);
1411
1412 switch (rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RXPATH)) {
1413 case 3:
1414 spec->ht.mcs.rx_mask[2] = 0xff;
1415 case 2:
1416 spec->ht.mcs.rx_mask[1] = 0xff;
1417 case 1:
1418 spec->ht.mcs.rx_mask[0] = 0xff;
1419 spec->ht.mcs.rx_mask[4] = 0x1; /* MCS32 */
1420 break;
1421 }
1422
1423 /*
1424 * Create channel information array
1425 */
1426 info = kzalloc(spec->num_channels * sizeof(*info), GFP_KERNEL);
1427 if (!info)
1428 return -ENOMEM;
1429
1430 spec->channels_info = info;
1431
1432 tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG1);
1433 tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_BG2);
1434
1435 for (i = 0; i < 14; i++) {
1436 info[i].tx_power1 = TXPOWER_G_FROM_DEV(tx_power1[i]);
1437 info[i].tx_power2 = TXPOWER_G_FROM_DEV(tx_power2[i]);
1438 }
1439
1440 if (spec->num_channels > 14) {
1441 tx_power1 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A1);
1442 tx_power2 = rt2x00_eeprom_addr(rt2x00dev, EEPROM_TXPOWER_A2);
1443
1444 for (i = 14; i < spec->num_channels; i++) {
1445 info[i].tx_power1 = TXPOWER_A_FROM_DEV(tx_power1[i]);
1446 info[i].tx_power2 = TXPOWER_A_FROM_DEV(tx_power2[i]);
1447 }
1448 }
1449
1450 return 0;
1451}
1452
/*
 * Register access callbacks used by the shared rt2800 library code.
 * PCI register access is never asynchronous, so the locked write
 * variant maps to the plain write.
 */
static const struct rt2800_ops rt2800pci_rt2800_ops = {
	.register_read		= rt2x00pci_register_read,
	.register_write		= rt2x00pci_register_write,
	.register_write_lock	= rt2x00pci_register_write, /* same for PCI */

	.register_multiread	= rt2x00pci_register_multiread,
	.register_multiwrite	= rt2x00pci_register_multiwrite,

	.regbusy_read		= rt2x00pci_regbusy_read,
};
1463
/*
 * Top-level probe entry: validate the EEPROM, derive driver state from
 * it, set up the hw mode specification and mark the driver capability
 * flags. Returns 0 on success, or the first failing step's error code.
 */
static int rt2800pci_probe_hw(struct rt2x00_dev *rt2x00dev)
{
	int retval;

	rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_PCI);

	/* Hand the rt2800 library its PCI register access callbacks. */
	rt2x00dev->priv = (void *)&rt2800pci_rt2800_ops;

	/*
	 * Allocate eeprom data.
	 */
	retval = rt2800pci_validate_eeprom(rt2x00dev);
	if (retval)
		return retval;

	retval = rt2800pci_init_eeprom(rt2x00dev);
	if (retval)
		return retval;

	/*
	 * Initialize hw specifications.
	 */
	retval = rt2800pci_probe_hw_mode(rt2x00dev);
	if (retval)
		return retval;

	/*
	 * This device has multiple filters for control frames
	 * and has a separate filter for PS Poll frames.
	 */
	__set_bit(DRIVER_SUPPORT_CONTROL_FILTERS, &rt2x00dev->flags);
	__set_bit(DRIVER_SUPPORT_CONTROL_FILTER_PSPOLL, &rt2x00dev->flags);

	/*
	 * This device requires firmware, except for the SoC variants
	 * (RT2880/RT3052) which have the WMAC built into the chipset.
	 */
	if (!rt2x00_rt(&rt2x00dev->chip, RT2880) &&
	    !rt2x00_rt(&rt2x00dev->chip, RT3052))
		__set_bit(DRIVER_REQUIRE_FIRMWARE, &rt2x00dev->flags);
	__set_bit(DRIVER_REQUIRE_DMA, &rt2x00dev->flags);
	__set_bit(DRIVER_REQUIRE_L2PAD, &rt2x00dev->flags);
	if (!modparam_nohwcrypt)
		__set_bit(CONFIG_SUPPORT_HW_CRYPTO, &rt2x00dev->flags);

	/*
	 * Set the rssi offset.
	 */
	rt2x00dev->rssi_offset = DEFAULT_RSSI_OFFSET;

	return 0;
}
1515
/*
 * rt2x00 library callback table: local PCI-specific handlers plus the
 * configuration/tuning routines shared with rt2800usb via rt2800lib.
 */
static const struct rt2x00lib_ops rt2800pci_rt2x00_ops = {
	.irq_handler		= rt2800pci_interrupt,
	.probe_hw		= rt2800pci_probe_hw,
	.get_firmware_name	= rt2800pci_get_firmware_name,
	.check_firmware		= rt2800pci_check_firmware,
	.load_firmware		= rt2800pci_load_firmware,
	.initialize		= rt2x00pci_initialize,
	.uninitialize		= rt2x00pci_uninitialize,
	.get_entry_state	= rt2800pci_get_entry_state,
	.clear_entry		= rt2800pci_clear_entry,
	.set_device_state	= rt2800pci_set_device_state,
	.rfkill_poll		= rt2800_rfkill_poll,
	.link_stats		= rt2800_link_stats,
	.reset_tuner		= rt2800_reset_tuner,
	.link_tuner		= rt2800_link_tuner,
	.write_tx_desc		= rt2800pci_write_tx_desc,
	.write_tx_data		= rt2x00pci_write_tx_data,
	.write_beacon		= rt2800pci_write_beacon,
	.kick_tx_queue		= rt2800pci_kick_tx_queue,
	.kill_tx_queue		= rt2800pci_kill_tx_queue,
	.fill_rxdone		= rt2800pci_fill_rxdone,
	.config_shared_key	= rt2800_config_shared_key,
	.config_pairwise_key	= rt2800_config_pairwise_key,
	.config_filter		= rt2800_config_filter,
	.config_intf		= rt2800_config_intf,
	.config_erp		= rt2800_config_erp,
	.config_ant		= rt2800_config_ant,
	.config			= rt2800_config,
};
1545
/* RX queue layout: DMA-backed entries sized for aggregated frames. */
static const struct data_queue_desc rt2800pci_queue_rx = {
	.entry_num		= RX_ENTRIES,
	.data_size		= AGGREGATION_SIZE,
	.desc_size		= RXD_DESC_SIZE,
	.priv_size		= sizeof(struct queue_entry_priv_pci),
};
1552
/* TX queue layout: DMA-backed entries sized for aggregated frames. */
static const struct data_queue_desc rt2800pci_queue_tx = {
	.entry_num		= TX_ENTRIES,
	.data_size		= AGGREGATION_SIZE,
	.desc_size		= TXD_DESC_SIZE,
	.priv_size		= sizeof(struct queue_entry_priv_pci),
};
1559
/* Beacon queue layout: written via registers, so no DMA data space. */
static const struct data_queue_desc rt2800pci_queue_bcn = {
	.entry_num		= 8 * BEACON_ENTRIES,
	.data_size		= 0, /* No DMA required for beacons */
	.desc_size		= TXWI_DESC_SIZE,
	.priv_size		= sizeof(struct queue_entry_priv_pci),
};
1566
/*
 * Master device description handed to the rt2x00 core:
 * queue layouts, interface limits and all callback tables.
 */
static const struct rt2x00_ops rt2800pci_ops = {
	.name		= KBUILD_MODNAME,
	.max_sta_intf	= 1,
	.max_ap_intf	= 8,
	.eeprom_size	= EEPROM_SIZE,
	.rf_size	= RF_SIZE,
	.tx_queues	= NUM_TX_QUEUES,
	.rx		= &rt2800pci_queue_rx,
	.tx		= &rt2800pci_queue_tx,
	.bcn		= &rt2800pci_queue_bcn,
	.lib		= &rt2800pci_rt2x00_ops,
	.hw		= &rt2800_mac80211_ops,
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
	.debugfs	= &rt2800_rt2x00debug,
#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
};
1583
/*
 * RT2800pci module information.
 * PCI IDs of all supported boards (MSI, Edimax/AMIT, Ralink, AWT);
 * all entries map to the same rt2800pci_ops description.
 */
static struct pci_device_id rt2800pci_device_table[] = {
	{ PCI_DEVICE(0x1462, 0x891a), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1432, 0x7708), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1432, 0x7727), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1432, 0x7728), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1432, 0x7738), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1432, 0x7748), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1432, 0x7758), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1432, 0x7768), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1814, 0x0601), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1814, 0x0681), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1814, 0x0701), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1814, 0x0781), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1814, 0x3060), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1814, 0x3062), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1814, 0x3090), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1814, 0x3091), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1814, 0x3092), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1814, 0x3562), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1814, 0x3592), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ PCI_DEVICE(0x1a3b, 0x1059), PCI_DEVICE_DATA(&rt2800pci_ops) },
	{ 0, }
};
1610
/*
 * Module metadata. Firmware and the PCI device table only apply
 * to the PCI build; the SoC build needs neither.
 */
MODULE_AUTHOR(DRV_PROJECT);
MODULE_VERSION(DRV_VERSION);
MODULE_DESCRIPTION("Ralink RT2800 PCI & PCMCIA Wireless LAN driver.");
MODULE_SUPPORTED_DEVICE("Ralink RT2860 PCI & PCMCIA chipset based cards");
#ifdef CONFIG_RT2800PCI_PCI
MODULE_FIRMWARE(FIRMWARE_RT2860);
MODULE_DEVICE_TABLE(pci, rt2800pci_device_table);
#endif /* CONFIG_RT2800PCI_PCI */
MODULE_LICENSE("GPL");
1620
#ifdef CONFIG_RT2800PCI_WISOC
/*
 * Instantiate the SoC probe for the wireless MAC built into the
 * Ralink SoC this kernel is configured for (RT288x or RT305x).
 */
#if defined(CONFIG_RALINK_RT288X)
__rt2x00soc_probe(RT2880, &rt2800pci_ops);
#elif defined(CONFIG_RALINK_RT305X)
__rt2x00soc_probe(RT3052, &rt2800pci_ops);
#endif

static struct platform_driver rt2800soc_driver = {
	.driver = {
		.name = "rt2800_wmac",
		.owner = THIS_MODULE,
		.mod_name = KBUILD_MODNAME,
	},
	/*
	 * NOTE(review): .probe is assigned the macro name itself;
	 * presumably __rt2x00soc_probe() expands to a probe function
	 * bearing this exact identifier -- confirm against rt2x00soc.h.
	 */
	.probe = __rt2x00soc_probe,
	.remove = __devexit_p(rt2x00soc_remove),
	.suspend = rt2x00soc_suspend,
	.resume = rt2x00soc_resume,
};
#endif /* CONFIG_RT2800PCI_WISOC */
1640
#ifdef CONFIG_RT2800PCI_PCI
/* PCI driver glue: probe/remove and power management via rt2x00pci. */
static struct pci_driver rt2800pci_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= rt2800pci_device_table,
	.probe		= rt2x00pci_probe,
	.remove		= __devexit_p(rt2x00pci_remove),
	.suspend	= rt2x00pci_suspend,
	.resume		= rt2x00pci_resume,
};
#endif /* CONFIG_RT2800PCI_PCI */
1651
/*
 * Register the SoC and/or PCI driver, depending on configuration.
 * If PCI registration fails after the platform driver was already
 * registered, the platform driver is unregistered again so the
 * module load fails cleanly without leaking a registration.
 */
static int __init rt2800pci_init(void)
{
	int ret = 0;

#ifdef CONFIG_RT2800PCI_WISOC
	ret = platform_driver_register(&rt2800soc_driver);
	if (ret)
		return ret;
#endif
#ifdef CONFIG_RT2800PCI_PCI
	ret = pci_register_driver(&rt2800pci_driver);
	if (ret) {
#ifdef CONFIG_RT2800PCI_WISOC
		platform_driver_unregister(&rt2800soc_driver);
#endif
		return ret;
	}
#endif

	return ret;
}
1673
/* Unregister whichever drivers were registered at init time. */
static void __exit rt2800pci_exit(void)
{
#ifdef CONFIG_RT2800PCI_PCI
	pci_unregister_driver(&rt2800pci_driver);
#endif
#ifdef CONFIG_RT2800PCI_WISOC
	platform_driver_unregister(&rt2800soc_driver);
#endif
}

module_init(rt2800pci_init);
module_exit(rt2800pci_exit);
diff --git a/drivers/net/wireless/rt2x00/rt2800pci.h b/drivers/net/wireless/rt2x00/rt2800pci.h
new file mode 100644
index 000000000000..1dbf13270cda
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2800pci.h
@@ -0,0 +1,180 @@
/*
	Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
	<http://rt2x00.serialmonkey.com>

	This program is free software; you can redistribute it and/or modify
	it under the terms of the GNU General Public License as published by
	the Free Software Foundation; either version 2 of the License, or
	(at your option) any later version.

	This program is distributed in the hope that it will be useful,
	but WITHOUT ANY WARRANTY; without even the implied warranty of
	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
	GNU General Public License for more details.

	You should have received a copy of the GNU General Public License
	along with this program; if not, write to the
	Free Software Foundation, Inc.,
	59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 */

/*
	Module: rt2800pci
	Abstract: Data structures and registers for the rt2800pci module.
	Supported chipsets: RT2800E & RT2800ED.
 */

#ifndef RT2800PCI_H
#define RT2800PCI_H

/*
 * PCI registers.
 */

/*
 * E2PROM_CSR: EEPROM control register.
 * RELOAD: Write 1 to reload eeprom content.
 * TYPE: 0: 93c46, 1:93c66.
 * LOAD_STATUS: 1:loading, 0:done.
 */
#define E2PROM_CSR			0x0004
#define E2PROM_CSR_DATA_CLOCK		FIELD32(0x00000001)
#define E2PROM_CSR_CHIP_SELECT		FIELD32(0x00000002)
#define E2PROM_CSR_DATA_IN		FIELD32(0x00000004)
#define E2PROM_CSR_DATA_OUT		FIELD32(0x00000008)
#define E2PROM_CSR_TYPE			FIELD32(0x00000030)
#define E2PROM_CSR_LOAD_STATUS		FIELD32(0x00000040)
#define E2PROM_CSR_RELOAD		FIELD32(0x00000080)

/*
 * Queue register offset macros.
 * BUGFIX: the expansions are now fully parenthesized; the original
 * `TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET)` form is an operator
 * precedence hazard at any expansion site that applies a higher
 * precedence operator around the macro.
 */
#define TX_QUEUE_REG_OFFSET	0x10
#define TX_BASE_PTR(__x)	(TX_BASE_PTR0 + ((__x) * TX_QUEUE_REG_OFFSET))
#define TX_MAX_CNT(__x)		(TX_MAX_CNT0 + ((__x) * TX_QUEUE_REG_OFFSET))
#define TX_CTX_IDX(__x)		(TX_CTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))
#define TX_DTX_IDX(__x)		(TX_DTX_IDX0 + ((__x) * TX_QUEUE_REG_OFFSET))

/*
 * EFUSE_CSR: RT3090 EEPROM (efuse) access control.
 */
#define EFUSE_CTRL			0x0580
#define EFUSE_CTRL_ADDRESS_IN		FIELD32(0x03fe0000)
#define EFUSE_CTRL_MODE			FIELD32(0x000000c0)
#define EFUSE_CTRL_KICK			FIELD32(0x40000000)

/*
 * EFUSE_DATA0
 */
#define EFUSE_DATA0			0x0590

/*
 * EFUSE_DATA1
 */
#define EFUSE_DATA1			0x0594

/*
 * EFUSE_DATA2
 */
#define EFUSE_DATA2			0x0598

/*
 * EFUSE_DATA3
 */
#define EFUSE_DATA3			0x059c

/*
 * 8051 firmware image.
 */
#define FIRMWARE_RT2860			"rt2860.bin"
#define FIRMWARE_IMAGE_BASE		0x2000

/*
 * DMA descriptor defines: both TX and RX descriptors are
 * four little-endian 32-bit words.
 */
#define TXD_DESC_SIZE			(4 * sizeof(__le32))
#define RXD_DESC_SIZE			(4 * sizeof(__le32))

/*
 * TX descriptor format for TX, PRIO and Beacon Ring.
 */

/*
 * Word0
 */
#define TXD_W0_SD_PTR0			FIELD32(0xffffffff)

/*
 * Word1
 */
#define TXD_W1_SD_LEN1			FIELD32(0x00003fff)
#define TXD_W1_LAST_SEC1		FIELD32(0x00004000)
#define TXD_W1_BURST			FIELD32(0x00008000)
#define TXD_W1_SD_LEN0			FIELD32(0x3fff0000)
#define TXD_W1_LAST_SEC0		FIELD32(0x40000000)
#define TXD_W1_DMA_DONE			FIELD32(0x80000000)

/*
 * Word2
 */
#define TXD_W2_SD_PTR1			FIELD32(0xffffffff)

/*
 * Word3
 * WIV: Wireless Info Valid. 1: Driver filled WI, 0: DMA needs to copy WI
 * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler.
 *       0:MGMT, 1:HCCA 2:EDCA
 */
#define TXD_W3_WIV			FIELD32(0x01000000)
#define TXD_W3_QSEL			FIELD32(0x06000000)
#define TXD_W3_TCO			FIELD32(0x20000000)
#define TXD_W3_UCO			FIELD32(0x40000000)
#define TXD_W3_ICO			FIELD32(0x80000000)

/*
 * RX descriptor format for RX Ring.
 */

/*
 * Word0
 */
#define RXD_W0_SDP0			FIELD32(0xffffffff)

/*
 * Word1
 */
#define RXD_W1_SDL1			FIELD32(0x00003fff)
#define RXD_W1_SDL0			FIELD32(0x3fff0000)
#define RXD_W1_LS0			FIELD32(0x40000000)
#define RXD_W1_DMA_DONE			FIELD32(0x80000000)

/*
 * Word2
 */
#define RXD_W2_SDP1			FIELD32(0xffffffff)

/*
 * Word3
 * AMSDU: RX with 802.3 header, not 802.11 header.
 * DECRYPTED: This frame is being decrypted.
 */
#define RXD_W3_BA			FIELD32(0x00000001)
#define RXD_W3_DATA			FIELD32(0x00000002)
#define RXD_W3_NULLDATA			FIELD32(0x00000004)
#define RXD_W3_FRAG			FIELD32(0x00000008)
#define RXD_W3_UNICAST_TO_ME		FIELD32(0x00000010)
#define RXD_W3_MULTICAST		FIELD32(0x00000020)
#define RXD_W3_BROADCAST		FIELD32(0x00000040)
#define RXD_W3_MY_BSS			FIELD32(0x00000080)
#define RXD_W3_CRC_ERROR		FIELD32(0x00000100)
#define RXD_W3_CIPHER_ERROR		FIELD32(0x00000600)
#define RXD_W3_AMSDU			FIELD32(0x00000800)
#define RXD_W3_HTC			FIELD32(0x00001000)
#define RXD_W3_RSSI			FIELD32(0x00002000)
#define RXD_W3_L2PAD			FIELD32(0x00004000)
#define RXD_W3_AMPDU			FIELD32(0x00008000)
#define RXD_W3_DECRYPTED		FIELD32(0x00010000)
#define RXD_W3_PLCP_SIGNAL		FIELD32(0x00020000)
#define RXD_W3_PLCP_RSSI		FIELD32(0x00040000)

#endif /* RT2800PCI_H */
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.c b/drivers/net/wireless/rt2x00/rt2800usb.c
index 9fe770f7d7bb..ce2e893856c1 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.c
+++ b/drivers/net/wireless/rt2x00/rt2800usb.c
@@ -34,6 +34,8 @@
34 34
35#include "rt2x00.h" 35#include "rt2x00.h"
36#include "rt2x00usb.h" 36#include "rt2x00usb.h"
37#include "rt2800lib.h"
38#include "rt2800.h"
37#include "rt2800usb.h" 39#include "rt2800usb.h"
38 40
39/* 41/*
@@ -44,1027 +46,6 @@ module_param_named(nohwcrypt, modparam_nohwcrypt, bool, S_IRUGO);
44MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption."); 46MODULE_PARM_DESC(nohwcrypt, "Disable hardware encryption.");
45 47
/*
 * Register access.
 * All access to the CSR registers will go through the methods
 * rt2x00usb_register_read and rt2x00usb_register_write.
 * BBP and RF register require indirect register access,
 * and use the CSR registers BBPCSR and RFCSR to achieve this.
 * These indirect registers work with busy bits,
 * and we will try maximal REGISTER_BUSY_COUNT times to access
 * the register while taking a REGISTER_BUSY_DELAY us delay
 * between each attempt. When the busy bit is still set at that time,
 * the access attempt is considered to have failed,
 * and we will print an error.
 * The _lock versions must be used if you already hold the csr_mutex
 */
#define WAIT_FOR_BBP(__dev, __reg) \
        rt2x00usb_regbusy_read((__dev), BBP_CSR_CFG, BBP_CSR_CFG_BUSY, (__reg))
#define WAIT_FOR_RFCSR(__dev, __reg) \
        rt2x00usb_regbusy_read((__dev), RF_CSR_CFG, RF_CSR_CFG_BUSY, (__reg))
#define WAIT_FOR_RF(__dev, __reg) \
        rt2x00usb_regbusy_read((__dev), RF_CSR_CFG0, RF_CSR_CFG0_BUSY, (__reg))
#define WAIT_FOR_MCU(__dev, __reg) \
        rt2x00usb_regbusy_read((__dev), H2M_MAILBOX_CSR, \
                               H2M_MAILBOX_CSR_OWNER, (__reg))
/*
 * Write @value into BBP register @word through the indirect
 * BBP_CSR_CFG interface.  Serialized by csr_mutex; if the BBP never
 * clears its busy bit the write is silently skipped (WAIT_FOR_BBP
 * reports the timeout).
 */
static void rt2800usb_bbp_write(struct rt2x00_dev *rt2x00dev,
                                const unsigned int word, const u8 value)
{
        u32 reg;

        mutex_lock(&rt2x00dev->csr_mutex);

        /*
         * Wait until the BBP becomes available, afterwards we
         * can safely write the new data into the register.
         */
        if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
                reg = 0;
                rt2x00_set_field32(&reg, BBP_CSR_CFG_VALUE, value);
                rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
                rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
                rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 0);

                rt2x00usb_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
        }

        mutex_unlock(&rt2x00dev->csr_mutex);
}
93
/*
 * Read BBP register @word into *@value through the indirect
 * BBP_CSR_CFG interface.  On busy-timeout, reg is 0xffffffff and the
 * caller receives 0xff.  Serialized by csr_mutex.
 */
static void rt2800usb_bbp_read(struct rt2x00_dev *rt2x00dev,
                               const unsigned int word, u8 *value)
{
        u32 reg;

        mutex_lock(&rt2x00dev->csr_mutex);

        /*
         * Wait until the BBP becomes available, afterwards we
         * can safely write the read request into the register.
         * After the data has been written, we wait until hardware
         * returns the correct value, if at any time the register
         * doesn't become available in time, reg will be 0xffffffff
         * which means we return 0xff to the caller.
         */
        if (WAIT_FOR_BBP(rt2x00dev, &reg)) {
                reg = 0;
                rt2x00_set_field32(&reg, BBP_CSR_CFG_REGNUM, word);
                rt2x00_set_field32(&reg, BBP_CSR_CFG_BUSY, 1);
                rt2x00_set_field32(&reg, BBP_CSR_CFG_READ_CONTROL, 1);

                rt2x00usb_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);

                WAIT_FOR_BBP(rt2x00dev, &reg);
        }

        *value = rt2x00_get_field32(reg, BBP_CSR_CFG_VALUE);

        mutex_unlock(&rt2x00dev->csr_mutex);
}
124
/*
 * Write @value into RF CSR register @word through the indirect
 * RF_CSR_CFG interface (RT30xx style RF access).  Serialized by
 * csr_mutex; skipped on busy-timeout.
 */
static void rt2800usb_rfcsr_write(struct rt2x00_dev *rt2x00dev,
                                  const unsigned int word, const u8 value)
{
        u32 reg;

        mutex_lock(&rt2x00dev->csr_mutex);

        /*
         * Wait until the RFCSR becomes available, afterwards we
         * can safely write the new data into the register.
         */
        if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
                reg = 0;
                rt2x00_set_field32(&reg, RF_CSR_CFG_DATA, value);
                rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
                rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 1);
                rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);

                rt2x00usb_register_write_lock(rt2x00dev, RF_CSR_CFG, reg);
        }

        mutex_unlock(&rt2x00dev->csr_mutex);
}
148
149static void rt2800usb_rfcsr_read(struct rt2x00_dev *rt2x00dev,
150 const unsigned int word, u8 *value)
151{
152 u32 reg;
153
154 mutex_lock(&rt2x00dev->csr_mutex);
155
156 /*
157 * Wait until the RFCSR becomes available, afterwards we
158 * can safely write the read request into the register.
159 * After the data has been written, we wait until hardware
160 * returns the correct value, if at any time the register
161 * doesn't become available in time, reg will be 0xffffffff
162 * which means we return 0xff to the caller.
163 */
164 if (WAIT_FOR_RFCSR(rt2x00dev, &reg)) {
165 reg = 0;
166 rt2x00_set_field32(&reg, RF_CSR_CFG_REGNUM, word);
167 rt2x00_set_field32(&reg, RF_CSR_CFG_WRITE, 0);
168 rt2x00_set_field32(&reg, RF_CSR_CFG_BUSY, 1);
169
170 rt2x00usb_register_write_lock(rt2x00dev, BBP_CSR_CFG, reg);
171
172 WAIT_FOR_RFCSR(rt2x00dev, &reg);
173 }
174
175 *value = rt2x00_get_field32(reg, RF_CSR_CFG_DATA);
176
177 mutex_unlock(&rt2x00dev->csr_mutex);
178}
179
/*
 * Write @value into RF register @word through the RF_CSR_CFG0
 * interface (legacy RT28xx style RF programming).  The extra
 * rt2x00_rf_write() call mirrors the value into the driver-side RF
 * register cache (presumably for debugfs readback -- the RF chip
 * itself is write-only here).  Serialized by csr_mutex.
 */
static void rt2800usb_rf_write(struct rt2x00_dev *rt2x00dev,
                               const unsigned int word, const u32 value)
{
        u32 reg;

        mutex_lock(&rt2x00dev->csr_mutex);

        /*
         * Wait until the RF becomes available, afterwards we
         * can safely write the new data into the register.
         */
        if (WAIT_FOR_RF(rt2x00dev, &reg)) {
                reg = 0;
                rt2x00_set_field32(&reg, RF_CSR_CFG0_REG_VALUE_BW, value);
                rt2x00_set_field32(&reg, RF_CSR_CFG0_STANDBYMODE, 0);
                rt2x00_set_field32(&reg, RF_CSR_CFG0_SEL, 0);
                rt2x00_set_field32(&reg, RF_CSR_CFG0_BUSY, 1);

                rt2x00usb_register_write_lock(rt2x00dev, RF_CSR_CFG0, reg);
                rt2x00_rf_write(rt2x00dev, word, value);
        }

        mutex_unlock(&rt2x00dev->csr_mutex);
}
204
/*
 * Send a command to the on-chip 8051 MCU: the arguments go into the
 * H2M mailbox (with OWNER set so the MCU knows the host filled it),
 * then the command id is written to HOST_CMD_CSR to trigger it.
 * Serialized by csr_mutex; dropped if the MCU never releases the
 * mailbox.
 */
static void rt2800usb_mcu_request(struct rt2x00_dev *rt2x00dev,
                                  const u8 command, const u8 token,
                                  const u8 arg0, const u8 arg1)
{
        u32 reg;

        mutex_lock(&rt2x00dev->csr_mutex);

        /*
         * Wait until the MCU becomes available, afterwards we
         * can safely write the new data into the register.
         */
        if (WAIT_FOR_MCU(rt2x00dev, &reg)) {
                rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_OWNER, 1);
                rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_CMD_TOKEN, token);
                rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG0, arg0);
                rt2x00_set_field32(&reg, H2M_MAILBOX_CSR_ARG1, arg1);
                rt2x00usb_register_write_lock(rt2x00dev, H2M_MAILBOX_CSR, reg);

                reg = 0;
                rt2x00_set_field32(&reg, HOST_CMD_CSR_HOST_COMMAND, command);
                rt2x00usb_register_write_lock(rt2x00dev, HOST_CMD_CSR, reg);
        }

        mutex_unlock(&rt2x00dev->csr_mutex);
}
231
#ifdef CONFIG_RT2X00_LIB_DEBUGFS
/*
 * debugfs register-access descriptor: exposes the CSR, EEPROM, BBP
 * and RF register spaces through the rt2x00 debugfs interface using
 * the access helpers defined above.
 */
static const struct rt2x00debug rt2800usb_rt2x00debug = {
        .owner  = THIS_MODULE,
        .csr    = {
                .read           = rt2x00usb_register_read,
                .write          = rt2x00usb_register_write,
                .flags          = RT2X00DEBUGFS_OFFSET,
                .word_base      = CSR_REG_BASE,
                .word_size      = sizeof(u32),
                .word_count     = CSR_REG_SIZE / sizeof(u32),
        },
        .eeprom = {
                .read           = rt2x00_eeprom_read,
                .write          = rt2x00_eeprom_write,
                .word_base      = EEPROM_BASE,
                .word_size      = sizeof(u16),
                .word_count     = EEPROM_SIZE / sizeof(u16),
        },
        .bbp    = {
                .read           = rt2800usb_bbp_read,
                .write          = rt2800usb_bbp_write,
                .word_base      = BBP_BASE,
                .word_size      = sizeof(u8),
                .word_count     = BBP_SIZE / sizeof(u8),
        },
        .rf     = {
                .read           = rt2x00_rf_read,
                .write          = rt2800usb_rf_write,
                .word_base      = RF_BASE,
                .word_size      = sizeof(u32),
                .word_count     = RF_SIZE / sizeof(u32),
        },
};
#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
266
267static int rt2800usb_rfkill_poll(struct rt2x00_dev *rt2x00dev)
268{
269 u32 reg;
270
271 rt2x00usb_register_read(rt2x00dev, GPIO_CTRL_CFG, &reg);
272 return rt2x00_get_field32(reg, GPIO_CTRL_CFG_BIT2);
273}
274
#ifdef CONFIG_RT2X00_LIB_LEDS
/*
 * LED class brightness callback: translates mac80211 LED state into
 * MCU_LED / MCU_LED_STRENGTH commands.  The magic 0x20/0x60/0xa0
 * payload values come from the vendor MCU protocol -- their exact
 * semantics are not documented here, TODO confirm against the Ralink
 * reference driver.
 */
static void rt2800usb_brightness_set(struct led_classdev *led_cdev,
                                     enum led_brightness brightness)
{
        struct rt2x00_led *led =
            container_of(led_cdev, struct rt2x00_led, led_dev);
        unsigned int enabled = brightness != LED_OFF;
        unsigned int bg_mode =
            (enabled && led->rt2x00dev->curr_band == IEEE80211_BAND_2GHZ);
        unsigned int polarity =
            rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
                               EEPROM_FREQ_LED_POLARITY);
        unsigned int ledmode =
            rt2x00_get_field16(led->rt2x00dev->led_mcu_reg,
                               EEPROM_FREQ_LED_MODE);

        if (led->type == LED_TYPE_RADIO) {
                rt2800usb_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
                                      enabled ? 0x20 : 0);
        } else if (led->type == LED_TYPE_ASSOC) {
                rt2800usb_mcu_request(led->rt2x00dev, MCU_LED, 0xff, ledmode,
                                      enabled ? (bg_mode ? 0x60 : 0xa0) : 0x20);
        } else if (led->type == LED_TYPE_QUALITY) {
                /*
                 * The brightness is divided into 6 levels (0 - 5),
                 * The specs tell us the following levels:
                 *      0, 1 ,3, 7, 15, 31
                 * to determine the level in a simple way we can simply
                 * work with bitshifting:
                 *      (1 << level) - 1
                 * (note: / binds tighter than <<, so this shifts by the
                 * scaled brightness level as intended)
                 */
                rt2800usb_mcu_request(led->rt2x00dev, MCU_LED_STRENGTH, 0xff,
                                      (1 << brightness / (LED_FULL / 6)) - 1,
                                      polarity);
        }
}

/*
 * LED class blink callback: programs the requested on/off periods and
 * fixed mode/polarity bits into the LED_CFG register.  Always reports
 * success (returns 0).
 */
static int rt2800usb_blink_set(struct led_classdev *led_cdev,
                               unsigned long *delay_on,
                               unsigned long *delay_off)
{
        struct rt2x00_led *led =
            container_of(led_cdev, struct rt2x00_led, led_dev);
        u32 reg;

        rt2x00usb_register_read(led->rt2x00dev, LED_CFG, &reg);
        rt2x00_set_field32(&reg, LED_CFG_ON_PERIOD, *delay_on);
        rt2x00_set_field32(&reg, LED_CFG_OFF_PERIOD, *delay_off);
        rt2x00_set_field32(&reg, LED_CFG_SLOW_BLINK_PERIOD, 3);
        rt2x00_set_field32(&reg, LED_CFG_R_LED_MODE, 3);
        rt2x00_set_field32(&reg, LED_CFG_G_LED_MODE, 12);
        rt2x00_set_field32(&reg, LED_CFG_Y_LED_MODE, 3);
        rt2x00_set_field32(&reg, LED_CFG_LED_POLAR, 1);
        rt2x00usb_register_write(led->rt2x00dev, LED_CFG, reg);

        return 0;
}

/* Fill in one rt2x00_led structure with the callbacks above. */
static void rt2800usb_init_led(struct rt2x00_dev *rt2x00dev,
                               struct rt2x00_led *led,
                               enum led_type type)
{
        led->rt2x00dev = rt2x00dev;
        led->type = type;
        led->led_dev.brightness_set = rt2800usb_brightness_set;
        led->led_dev.blink_set = rt2800usb_blink_set;
        led->flags = LED_INITIALIZED;
}
#endif /* CONFIG_RT2X00_LIB_LEDS */
344
/*
 * Configuration handlers.
 */

/*
 * Program the WCID (wireless client id) attribute, IV/EIV and MAC
 * address entries for a crypto key slot.  On key deletion
 * (crypto->cmd != SET_KEY) the cipher/bssidx fields are written as 0
 * and the MAC entry is cleared.
 */
static void rt2800usb_config_wcid_attr(struct rt2x00_dev *rt2x00dev,
                                       struct rt2x00lib_crypto *crypto,
                                       struct ieee80211_key_conf *key)
{
        struct mac_wcid_entry wcid_entry;
        struct mac_iveiv_entry iveiv_entry;
        u32 offset;
        u32 reg;

        offset = MAC_WCID_ATTR_ENTRY(key->hw_key_idx);

        rt2x00usb_register_read(rt2x00dev, offset, &reg);
        rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_KEYTAB,
                           !!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE));
        /* (cmd == SET_KEY) * x yields x on set, 0 on delete. */
        rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_CIPHER,
                           (crypto->cmd == SET_KEY) * crypto->cipher);
        rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_BSS_IDX,
                           (crypto->cmd == SET_KEY) * crypto->bssidx);
        rt2x00_set_field32(&reg, MAC_WCID_ATTRIBUTE_RX_WIUDF, crypto->cipher);
        rt2x00usb_register_write(rt2x00dev, offset, reg);

        offset = MAC_IVEIV_ENTRY(key->hw_key_idx);

        /*
         * IV byte 3: bit 5 flags extended-IV ciphers (TKIP/AES),
         * bits 6-7 carry the key index.
         */
        memset(&iveiv_entry, 0, sizeof(iveiv_entry));
        if ((crypto->cipher == CIPHER_TKIP) ||
            (crypto->cipher == CIPHER_TKIP_NO_MIC) ||
            (crypto->cipher == CIPHER_AES))
                iveiv_entry.iv[3] |= 0x20;
        iveiv_entry.iv[3] |= key->keyidx << 6;
        rt2x00usb_register_multiwrite(rt2x00dev, offset,
                                      &iveiv_entry, sizeof(iveiv_entry));

        offset = MAC_WCID_ENTRY(key->hw_key_idx);

        memset(&wcid_entry, 0, sizeof(wcid_entry));
        if (crypto->cmd == SET_KEY)
                memcpy(&wcid_entry, crypto->address, ETH_ALEN);
        rt2x00usb_register_multiwrite(rt2x00dev, offset,
                                      &wcid_entry, sizeof(wcid_entry));
}
388
/*
 * Install or remove a shared (group) key.  Shared keys occupy slots
 * 0..31 (4 per BSS index).  The key material is uploaded with a bulk
 * vendor request; the per-slot cipher type is then packed, 4 bits per
 * slot, into the SHARED_KEY_MODE register bank.  Always returns 0.
 */
static int rt2800usb_config_shared_key(struct rt2x00_dev *rt2x00dev,
                                       struct rt2x00lib_crypto *crypto,
                                       struct ieee80211_key_conf *key)
{
        struct hw_key_entry key_entry;
        struct rt2x00_field32 field;
        int timeout;
        u32 offset;
        u32 reg;

        if (crypto->cmd == SET_KEY) {
                key->hw_key_idx = (4 * crypto->bssidx) + key->keyidx;

                memcpy(key_entry.key, crypto->key,
                       sizeof(key_entry.key));
                memcpy(key_entry.tx_mic, crypto->tx_mic,
                       sizeof(key_entry.tx_mic));
                memcpy(key_entry.rx_mic, crypto->rx_mic,
                       sizeof(key_entry.rx_mic));

                offset = SHARED_KEY_ENTRY(key->hw_key_idx);
                timeout = REGISTER_TIMEOUT32(sizeof(key_entry));
                rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
                                                    USB_VENDOR_REQUEST_OUT,
                                                    offset, &key_entry,
                                                    sizeof(key_entry),
                                                    timeout);
        }

        /*
         * The cipher types are stored over multiple registers
         * starting with SHARED_KEY_MODE_BASE each word will have
         * 32 bits and contains the cipher types for 2 bssidx each.
         * Using the correct defines correctly will cause overhead,
         * so just calculate the correct offset.
         */
        field.bit_offset = 4 * (key->hw_key_idx % 8);
        field.bit_mask = 0x7 << field.bit_offset;

        offset = SHARED_KEY_MODE_ENTRY(key->hw_key_idx / 8);

        rt2x00usb_register_read(rt2x00dev, offset, &reg);
        rt2x00_set_field32(&reg, field,
                           (crypto->cmd == SET_KEY) * crypto->cipher);
        rt2x00usb_register_write(rt2x00dev, offset, reg);

        /*
         * Update WCID information
         */
        rt2800usb_config_wcid_attr(rt2x00dev, crypto, key);

        return 0;
}
442
/*
 * Install or remove a pairwise key.  One pairwise key per AID; the
 * WCID slot is 32 + AID so it lands after the 32 shared-key slots.
 * Returns -ENOSPC when the AID would overflow the 256-entry WCID
 * table, 0 otherwise.
 */
static int rt2800usb_config_pairwise_key(struct rt2x00_dev *rt2x00dev,
                                         struct rt2x00lib_crypto *crypto,
                                         struct ieee80211_key_conf *key)
{
        struct hw_key_entry key_entry;
        int timeout;
        u32 offset;

        if (crypto->cmd == SET_KEY) {
                /*
                 * 1 pairwise key is possible per AID, this means that the AID
                 * equals our hw_key_idx. Make sure the WCID starts _after_ the
                 * last possible shared key entry.
                 */
                if (crypto->aid > (256 - 32))
                        return -ENOSPC;

                key->hw_key_idx = 32 + crypto->aid;

                memcpy(key_entry.key, crypto->key,
                       sizeof(key_entry.key));
                memcpy(key_entry.tx_mic, crypto->tx_mic,
                       sizeof(key_entry.tx_mic));
                memcpy(key_entry.rx_mic, crypto->rx_mic,
                       sizeof(key_entry.rx_mic));

                offset = PAIRWISE_KEY_ENTRY(key->hw_key_idx);
                timeout = REGISTER_TIMEOUT32(sizeof(key_entry));
                rt2x00usb_vendor_request_large_buff(rt2x00dev, USB_MULTI_WRITE,
                                                    USB_VENDOR_REQUEST_OUT,
                                                    offset, &key_entry,
                                                    sizeof(key_entry),
                                                    timeout);
        }

        /*
         * Update WCID information
         */
        rt2800usb_config_wcid_attr(rt2x00dev, crypto, key);

        return 0;
}
485
/*
 * Translate mac80211 FIF_* filter flags into the RX_FILTER_CFG
 * hardware drop bits (each bit set means "drop that frame class").
 */
static void rt2800usb_config_filter(struct rt2x00_dev *rt2x00dev,
                                    const unsigned int filter_flags)
{
        u32 reg;

        /*
         * Start configuration steps.
         * Note that the version error will always be dropped
         * and broadcast frames will always be accepted since
         * there is no filter for it at this time.
         */
        rt2x00usb_register_read(rt2x00dev, RX_FILTER_CFG, &reg);
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CRC_ERROR,
                           !(filter_flags & FIF_FCSFAIL));
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PHY_ERROR,
                           !(filter_flags & FIF_PLCPFAIL));
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_TO_ME,
                           !(filter_flags & FIF_PROMISC_IN_BSS));
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_NOT_MY_BSSD, 0);
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_VER_ERROR, 1);
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_MULTICAST,
                           !(filter_flags & FIF_ALLMULTI));
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BROADCAST, 0);
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_DUPLICATE, 1);
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CF_END_ACK,
                           !(filter_flags & FIF_CONTROL));
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CF_END,
                           !(filter_flags & FIF_CONTROL));
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_ACK,
                           !(filter_flags & FIF_CONTROL));
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CTS,
                           !(filter_flags & FIF_CONTROL));
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_RTS,
                           !(filter_flags & FIF_CONTROL));
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_PSPOLL,
                           !(filter_flags & FIF_PSPOLL));
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BA, 1);
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_BAR, 0);
        rt2x00_set_field32(&reg, RX_FILTER_CFG_DROP_CNTL,
                           !(filter_flags & FIF_CONTROL));
        rt2x00usb_register_write(rt2x00dev, RX_FILTER_CFG, reg);
}
528
/*
 * Apply interface configuration: interface type (beacon/TSF sync
 * mode), MAC address and BSSID, as selected by the CONFIG_UPDATE_*
 * bits in @flags.
 */
static void rt2800usb_config_intf(struct rt2x00_dev *rt2x00dev,
                                  struct rt2x00_intf *intf,
                                  struct rt2x00intf_conf *conf,
                                  const unsigned int flags)
{
        unsigned int beacon_base;
        u32 reg;

        if (flags & CONFIG_UPDATE_TYPE) {
                /*
                 * Clear current synchronisation setup.
                 * For the Beacon base registers we only need to clear
                 * the first byte since that byte contains the VALID and OWNER
                 * bits which (when set to 0) will invalidate the entire beacon.
                 */
                beacon_base = HW_BEACON_OFFSET(intf->beacon->entry_idx);
                rt2x00usb_register_write(rt2x00dev, beacon_base, 0);

                /*
                 * Enable synchronisation.
                 */
                rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, conf->sync);
                rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
                rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg);
        }

        if (flags & CONFIG_UPDATE_MAC) {
                /* Byte-4..5 word also carries the unicast-to-me mask. */
                reg = le32_to_cpu(conf->mac[1]);
                rt2x00_set_field32(&reg, MAC_ADDR_DW1_UNICAST_TO_ME_MASK, 0xff);
                conf->mac[1] = cpu_to_le32(reg);

                rt2x00usb_register_multiwrite(rt2x00dev, MAC_ADDR_DW0,
                                              conf->mac, sizeof(conf->mac));
        }

        if (flags & CONFIG_UPDATE_BSSID) {
                reg = le32_to_cpu(conf->bssid[1]);
                rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_ID_MASK, 0);
                rt2x00_set_field32(&reg, MAC_BSSID_DW1_BSS_BCN_NUM, 0);
                conf->bssid[1] = cpu_to_le32(reg);

                rt2x00usb_register_multiwrite(rt2x00dev, MAC_BSSID_DW0,
                                              conf->bssid, sizeof(conf->bssid));
        }
}
576
/*
 * Apply ERP (802.11g protection / timing) settings: ACK timeout,
 * preamble auto-response, CTS protection, basic rates, slot/SIFS/EIFS
 * timing and the beacon interval (in units of 1/16 TU).
 */
static void rt2800usb_config_erp(struct rt2x00_dev *rt2x00dev,
                                 struct rt2x00lib_erp *erp)
{
        u32 reg;

        rt2x00usb_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
        rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_RX_ACK_TIMEOUT, 0x20);
        rt2x00usb_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);

        rt2x00usb_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
        /*
         * NOTE(review): BAC_ACK_POLICY is driven by short_preamble
         * here, same as AR_PREAMBLE -- looks suspicious, confirm
         * against the RT2800 register documentation.
         */
        rt2x00_set_field32(&reg, AUTO_RSP_CFG_BAC_ACK_POLICY,
                           !!erp->short_preamble);
        rt2x00_set_field32(&reg, AUTO_RSP_CFG_AR_PREAMBLE,
                           !!erp->short_preamble);
        rt2x00usb_register_write(rt2x00dev, AUTO_RSP_CFG, reg);

        rt2x00usb_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
        rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL,
                           erp->cts_protection ? 2 : 0);
        rt2x00usb_register_write(rt2x00dev, OFDM_PROT_CFG, reg);

        rt2x00usb_register_write(rt2x00dev, LEGACY_BASIC_RATE,
                                 erp->basic_rates);
        rt2x00usb_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);

        rt2x00usb_register_read(rt2x00dev, BKOFF_SLOT_CFG, &reg);
        rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_SLOT_TIME, erp->slot_time);
        rt2x00_set_field32(&reg, BKOFF_SLOT_CFG_CC_DELAY_TIME, 2);
        rt2x00usb_register_write(rt2x00dev, BKOFF_SLOT_CFG, reg);

        rt2x00usb_register_read(rt2x00dev, XIFS_TIME_CFG, &reg);
        rt2x00_set_field32(&reg, XIFS_TIME_CFG_CCKM_SIFS_TIME, erp->sifs);
        rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_SIFS_TIME, erp->sifs);
        rt2x00_set_field32(&reg, XIFS_TIME_CFG_OFDM_XIFS_TIME, 4);
        rt2x00_set_field32(&reg, XIFS_TIME_CFG_EIFS, erp->eifs);
        rt2x00_set_field32(&reg, XIFS_TIME_CFG_BB_RXEND_ENABLE, 1);
        rt2x00usb_register_write(rt2x00dev, XIFS_TIME_CFG, reg);

        rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
        rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL,
                           erp->beacon_int * 16);
        rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg);
}
620
/*
 * Program the TX/RX antenna selection into BBP registers 1 and 3.
 * ant->tx/rx encode 1 = antenna A, 2 = antenna B, 3 = diversity/both
 * (for TX, 3 leaves the register untouched).
 */
static void rt2800usb_config_ant(struct rt2x00_dev *rt2x00dev,
                                 struct antenna_setup *ant)
{
        u8 r1;
        u8 r3;

        rt2800usb_bbp_read(rt2x00dev, 1, &r1);
        rt2800usb_bbp_read(rt2x00dev, 3, &r3);

        /*
         * Configure the TX antenna.
         */
        switch ((int)ant->tx) {
        case 1:
                rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 0);
                break;
        case 2:
                rt2x00_set_field8(&r1, BBP1_TX_ANTENNA, 2);
                break;
        case 3:
                /* Do nothing */
                break;
        }

        /*
         * Configure the RX antenna.
         */
        switch ((int)ant->rx) {
        case 1:
                rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 0);
                break;
        case 2:
                rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 1);
                break;
        case 3:
                rt2x00_set_field8(&r3, BBP3_RX_ANTENNA, 2);
                break;
        }

        rt2800usb_bbp_write(rt2x00dev, 3, r3);
        rt2800usb_bbp_write(rt2x00dev, 1, r1);
}
663
/*
 * Cache the EEPROM-calibrated LNA gain for the current channel band
 * (2.4 GHz, low/mid/high 5 GHz) in rt2x00dev->lna_gain; used by the
 * BBP 62-64 programming in the channel-switch path.
 */
static void rt2800usb_config_lna_gain(struct rt2x00_dev *rt2x00dev,
                                      struct rt2x00lib_conf *libconf)
{
        u16 eeprom;
        short lna_gain;

        if (libconf->rf.channel <= 14) {
                rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
                lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_BG);
        } else if (libconf->rf.channel <= 64) {
                rt2x00_eeprom_read(rt2x00dev, EEPROM_LNA, &eeprom);
                lna_gain = rt2x00_get_field16(eeprom, EEPROM_LNA_A0);
        } else if (libconf->rf.channel <= 128) {
                rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_BG2, &eeprom);
                lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_BG2_LNA_A1);
        } else {
                rt2x00_eeprom_read(rt2x00dev, EEPROM_RSSI_A2, &eeprom);
                lna_gain = rt2x00_get_field16(eeprom, EEPROM_RSSI_A2_LNA_A2);
        }

        rt2x00dev->lna_gain = lna_gain;
}
686
/*
 * Channel programming for legacy RT28xx RF chipsets: fill in the
 * antenna, tx-power and frequency-offset fields of the precomputed
 * rf1-rf4 words, then write them three times toggling rf3 bit 2
 * (the RF tuning strobe) with 200 us settle delays.
 */
static void rt2800usb_config_channel_rt2x(struct rt2x00_dev *rt2x00dev,
                                          struct ieee80211_conf *conf,
                                          struct rf_channel *rf,
                                          struct channel_info *info)
{
        rt2x00_set_field32(&rf->rf4, RF4_FREQ_OFFSET, rt2x00dev->freq_offset);

        if (rt2x00dev->default_ant.tx == 1)
                rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_TX1, 1);

        if (rt2x00dev->default_ant.rx == 1) {
                rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX1, 1);
                rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);
        } else if (rt2x00dev->default_ant.rx == 2)
                rt2x00_set_field32(&rf->rf2, RF2_ANTENNA_RX2, 1);

        if (rf->channel > 14) {
                /*
                 * When TX power is below 0, we should increase it by 7 to
                 * make it a positive value (Minimum value is -7).
                 * However this means that values between 0 and 7 have
                 * double meaning, and we should set a 7DBm boost flag.
                 */
                rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A_7DBM_BOOST,
                                   (info->tx_power1 >= 0));

                if (info->tx_power1 < 0)
                        info->tx_power1 += 7;

                rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_A,
                                   TXPOWER_A_TO_DEV(info->tx_power1));

                rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A_7DBM_BOOST,
                                   (info->tx_power2 >= 0));

                if (info->tx_power2 < 0)
                        info->tx_power2 += 7;

                rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_A,
                                   TXPOWER_A_TO_DEV(info->tx_power2));
        } else {
                rt2x00_set_field32(&rf->rf3, RF3_TXPOWER_G,
                                   TXPOWER_G_TO_DEV(info->tx_power1));
                rt2x00_set_field32(&rf->rf4, RF4_TXPOWER_G,
                                   TXPOWER_G_TO_DEV(info->tx_power2));
        }

        rt2x00_set_field32(&rf->rf4, RF4_HT40, conf_is_ht40(conf));

        rt2800usb_rf_write(rt2x00dev, 1, rf->rf1);
        rt2800usb_rf_write(rt2x00dev, 2, rf->rf2);
        rt2800usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
        rt2800usb_rf_write(rt2x00dev, 4, rf->rf4);

        udelay(200);

        rt2800usb_rf_write(rt2x00dev, 1, rf->rf1);
        rt2800usb_rf_write(rt2x00dev, 2, rf->rf2);
        rt2800usb_rf_write(rt2x00dev, 3, rf->rf3 | 0x00000004);
        rt2800usb_rf_write(rt2x00dev, 4, rf->rf4);

        udelay(200);

        rt2800usb_rf_write(rt2x00dev, 1, rf->rf1);
        rt2800usb_rf_write(rt2x00dev, 2, rf->rf2);
        rt2800usb_rf_write(rt2x00dev, 3, rf->rf3 & ~0x00000004);
        rt2800usb_rf_write(rt2x00dev, 4, rf->rf4);
}
755
756static void rt2800usb_config_channel_rt3x(struct rt2x00_dev *rt2x00dev,
757 struct ieee80211_conf *conf,
758 struct rf_channel *rf,
759 struct channel_info *info)
760{
761 u8 rfcsr;
762
763 rt2800usb_rfcsr_write(rt2x00dev, 2, rf->rf1);
764 rt2800usb_rfcsr_write(rt2x00dev, 2, rf->rf3);
765
766 rt2800usb_rfcsr_read(rt2x00dev, 6, &rfcsr);
767 rt2x00_set_field8(&rfcsr, RFCSR6_R, rf->rf2);
768 rt2800usb_rfcsr_write(rt2x00dev, 6, rfcsr);
769
770 rt2800usb_rfcsr_read(rt2x00dev, 12, &rfcsr);
771 rt2x00_set_field8(&rfcsr, RFCSR12_TX_POWER,
772 TXPOWER_G_TO_DEV(info->tx_power1));
773 rt2800usb_rfcsr_write(rt2x00dev, 12, rfcsr);
774
775 rt2800usb_rfcsr_read(rt2x00dev, 23, &rfcsr);
776 rt2x00_set_field8(&rfcsr, RFCSR23_FREQ_OFFSET, rt2x00dev->freq_offset);
777 rt2800usb_rfcsr_write(rt2x00dev, 23, rfcsr);
778
779 rt2800usb_rfcsr_write(rt2x00dev, 24,
780 rt2x00dev->calibration[conf_is_ht40(conf)]);
781
782 rt2800usb_rfcsr_read(rt2x00dev, 23, &rfcsr);
783 rt2x00_set_field8(&rfcsr, RFCSR7_RF_TUNING, 1);
784 rt2800usb_rfcsr_write(rt2x00dev, 23, rfcsr);
785}
786
/*
 * Top-level channel switch: dispatch to the RT2x or RT3x RF
 * programming routine, then update the band-dependent BBP registers,
 * TX band/pin configuration and HT40 bandwidth settings.
 */
static void rt2800usb_config_channel(struct rt2x00_dev *rt2x00dev,
                                     struct ieee80211_conf *conf,
                                     struct rf_channel *rf,
                                     struct channel_info *info)
{
        u32 reg;
        unsigned int tx_pin;
        u8 bbp;

        if (rt2x00_rev(&rt2x00dev->chip) != RT3070_VERSION)
                rt2800usb_config_channel_rt2x(rt2x00dev, conf, rf, info);
        else
                rt2800usb_config_channel_rt3x(rt2x00dev, conf, rf, info);

        /*
         * Change BBP settings (RX AGC gains derived from the cached
         * EEPROM LNA gain; see rt2800usb_config_lna_gain).
         */
        rt2800usb_bbp_write(rt2x00dev, 62, 0x37 - rt2x00dev->lna_gain);
        rt2800usb_bbp_write(rt2x00dev, 63, 0x37 - rt2x00dev->lna_gain);
        rt2800usb_bbp_write(rt2x00dev, 64, 0x37 - rt2x00dev->lna_gain);
        rt2800usb_bbp_write(rt2x00dev, 86, 0);

        if (rf->channel <= 14) {
                if (test_bit(CONFIG_EXTERNAL_LNA_BG, &rt2x00dev->flags)) {
                        rt2800usb_bbp_write(rt2x00dev, 82, 0x62);
                        rt2800usb_bbp_write(rt2x00dev, 75, 0x46);
                } else {
                        rt2800usb_bbp_write(rt2x00dev, 82, 0x84);
                        rt2800usb_bbp_write(rt2x00dev, 75, 0x50);
                }
        } else {
                rt2800usb_bbp_write(rt2x00dev, 82, 0xf2);

                if (test_bit(CONFIG_EXTERNAL_LNA_A, &rt2x00dev->flags))
                        rt2800usb_bbp_write(rt2x00dev, 75, 0x46);
                else
                        rt2800usb_bbp_write(rt2x00dev, 75, 0x50);
        }

        rt2x00usb_register_read(rt2x00dev, TX_BAND_CFG, &reg);
        rt2x00_set_field32(&reg, TX_BAND_CFG_HT40_PLUS, conf_is_ht40_plus(conf));
        rt2x00_set_field32(&reg, TX_BAND_CFG_A, rf->channel > 14);
        rt2x00_set_field32(&reg, TX_BAND_CFG_BG, rf->channel <= 14);
        rt2x00usb_register_write(rt2x00dev, TX_BAND_CFG, reg);

        tx_pin = 0;

        /* Turn on unused PA or LNA when not using 1T or 1R */
        if (rt2x00dev->default_ant.tx != 1) {
                rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A1_EN, 1);
                rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G1_EN, 1);
        }

        /* Turn on unused PA or LNA when not using 1T or 1R */
        if (rt2x00dev->default_ant.rx != 1) {
                rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A1_EN, 1);
                rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G1_EN, 1);
        }

        rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_A0_EN, 1);
        rt2x00_set_field32(&tx_pin, TX_PIN_CFG_LNA_PE_G0_EN, 1);
        rt2x00_set_field32(&tx_pin, TX_PIN_CFG_RFTR_EN, 1);
        rt2x00_set_field32(&tx_pin, TX_PIN_CFG_TRSW_EN, 1);
        rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_G0_EN, rf->channel <= 14);
        rt2x00_set_field32(&tx_pin, TX_PIN_CFG_PA_PE_A0_EN, rf->channel > 14);

        rt2x00usb_register_write(rt2x00dev, TX_PIN_CFG, tx_pin);

        /* BBP4 bandwidth: 0 = 20 MHz, 2 = 40 MHz. */
        rt2800usb_bbp_read(rt2x00dev, 4, &bbp);
        rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * conf_is_ht40(conf));
        rt2800usb_bbp_write(rt2x00dev, 4, bbp);

        rt2800usb_bbp_read(rt2x00dev, 3, &bbp);
        rt2x00_set_field8(&bbp, BBP3_HT40_PLUS, conf_is_ht40_plus(conf));
        rt2800usb_bbp_write(rt2x00dev, 3, bbp);

        /* RT2860C needs bandwidth-specific AGC/noise tweaks. */
        if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
                if (conf_is_ht40(conf)) {
                        rt2800usb_bbp_write(rt2x00dev, 69, 0x1a);
                        rt2800usb_bbp_write(rt2x00dev, 70, 0x0a);
                        rt2800usb_bbp_write(rt2x00dev, 73, 0x16);
                } else {
                        rt2800usb_bbp_write(rt2x00dev, 69, 0x16);
                        rt2800usb_bbp_write(rt2x00dev, 70, 0x08);
                        rt2800usb_bbp_write(rt2x00dev, 73, 0x11);
                }
        }

        /* Let the RF/BBP settle before traffic resumes. */
        msleep(1);
}
877
878static void rt2800usb_config_txpower(struct rt2x00_dev *rt2x00dev,
879 const int txpower)
880{
881 u32 reg;
882 u32 value = TXPOWER_G_TO_DEV(txpower);
883 u8 r1;
884
885 rt2800usb_bbp_read(rt2x00dev, 1, &r1);
886 rt2x00_set_field8(&reg, BBP1_TX_POWER, 0);
887 rt2800usb_bbp_write(rt2x00dev, 1, r1);
888
889 rt2x00usb_register_read(rt2x00dev, TX_PWR_CFG_0, &reg);
890 rt2x00_set_field32(&reg, TX_PWR_CFG_0_1MBS, value);
891 rt2x00_set_field32(&reg, TX_PWR_CFG_0_2MBS, value);
892 rt2x00_set_field32(&reg, TX_PWR_CFG_0_55MBS, value);
893 rt2x00_set_field32(&reg, TX_PWR_CFG_0_11MBS, value);
894 rt2x00_set_field32(&reg, TX_PWR_CFG_0_6MBS, value);
895 rt2x00_set_field32(&reg, TX_PWR_CFG_0_9MBS, value);
896 rt2x00_set_field32(&reg, TX_PWR_CFG_0_12MBS, value);
897 rt2x00_set_field32(&reg, TX_PWR_CFG_0_18MBS, value);
898 rt2x00usb_register_write(rt2x00dev, TX_PWR_CFG_0, reg);
899
900 rt2x00usb_register_read(rt2x00dev, TX_PWR_CFG_1, &reg);
901 rt2x00_set_field32(&reg, TX_PWR_CFG_1_24MBS, value);
902 rt2x00_set_field32(&reg, TX_PWR_CFG_1_36MBS, value);
903 rt2x00_set_field32(&reg, TX_PWR_CFG_1_48MBS, value);
904 rt2x00_set_field32(&reg, TX_PWR_CFG_1_54MBS, value);
905 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS0, value);
906 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS1, value);
907 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS2, value);
908 rt2x00_set_field32(&reg, TX_PWR_CFG_1_MCS3, value);
909 rt2x00usb_register_write(rt2x00dev, TX_PWR_CFG_1, reg);
910
911 rt2x00usb_register_read(rt2x00dev, TX_PWR_CFG_2, &reg);
912 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS4, value);
913 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS5, value);
914 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS6, value);
915 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS7, value);
916 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS8, value);
917 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS9, value);
918 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS10, value);
919 rt2x00_set_field32(&reg, TX_PWR_CFG_2_MCS11, value);
920 rt2x00usb_register_write(rt2x00dev, TX_PWR_CFG_2, reg);
921
922 rt2x00usb_register_read(rt2x00dev, TX_PWR_CFG_3, &reg);
923 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS12, value);
924 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS13, value);
925 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS14, value);
926 rt2x00_set_field32(&reg, TX_PWR_CFG_3_MCS15, value);
927 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN1, value);
928 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN2, value);
929 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN3, value);
930 rt2x00_set_field32(&reg, TX_PWR_CFG_3_UKNOWN4, value);
931 rt2x00usb_register_write(rt2x00dev, TX_PWR_CFG_3, reg);
932
933 rt2x00usb_register_read(rt2x00dev, TX_PWR_CFG_4, &reg);
934 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN5, value);
935 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN6, value);
936 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN7, value);
937 rt2x00_set_field32(&reg, TX_PWR_CFG_4_UKNOWN8, value);
938 rt2x00usb_register_write(rt2x00dev, TX_PWR_CFG_4, reg);
939}
940
941static void rt2800usb_config_retry_limit(struct rt2x00_dev *rt2x00dev,
942 struct rt2x00lib_conf *libconf)
943{
944 u32 reg;
945
946 rt2x00usb_register_read(rt2x00dev, TX_RTY_CFG, &reg);
947 rt2x00_set_field32(&reg, TX_RTY_CFG_SHORT_RTY_LIMIT,
948 libconf->conf->short_frame_max_tx_count);
949 rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_LIMIT,
950 libconf->conf->long_frame_max_tx_count);
951 rt2x00_set_field32(&reg, TX_RTY_CFG_LONG_RTY_THRE, 2000);
952 rt2x00_set_field32(&reg, TX_RTY_CFG_NON_AGG_RTY_MODE, 0);
953 rt2x00_set_field32(&reg, TX_RTY_CFG_AGG_RTY_MODE, 0);
954 rt2x00_set_field32(&reg, TX_RTY_CFG_TX_AUTO_FB_ENABLE, 1);
955 rt2x00usb_register_write(rt2x00dev, TX_RTY_CFG, reg);
956}
957
958static void rt2800usb_config_ps(struct rt2x00_dev *rt2x00dev,
959 struct rt2x00lib_conf *libconf)
960{
961 enum dev_state state =
962 (libconf->conf->flags & IEEE80211_CONF_PS) ?
963 STATE_SLEEP : STATE_AWAKE;
964 u32 reg;
965
966 if (state == STATE_SLEEP) {
967 rt2x00usb_register_write(rt2x00dev, AUTOWAKEUP_CFG, 0);
968
969 rt2x00usb_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
970 rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 5);
971 rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE,
972 libconf->conf->listen_interval - 1);
973 rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 1);
974 rt2x00usb_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);
975
976 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
977 } else {
978 rt2x00dev->ops->lib->set_device_state(rt2x00dev, state);
979
980 rt2x00usb_register_read(rt2x00dev, AUTOWAKEUP_CFG, &reg);
981 rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTO_LEAD_TIME, 0);
982 rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE, 0);
983 rt2x00_set_field32(&reg, AUTOWAKEUP_CFG_AUTOWAKE, 0);
984 rt2x00usb_register_write(rt2x00dev, AUTOWAKEUP_CFG, reg);
985 }
986}
987
988static void rt2800usb_config(struct rt2x00_dev *rt2x00dev,
989 struct rt2x00lib_conf *libconf,
990 const unsigned int flags)
991{
992 /* Always recalculate LNA gain before changing configuration */
993 rt2800usb_config_lna_gain(rt2x00dev, libconf);
994
995 if (flags & IEEE80211_CONF_CHANGE_CHANNEL)
996 rt2800usb_config_channel(rt2x00dev, libconf->conf,
997 &libconf->rf, &libconf->channel);
998 if (flags & IEEE80211_CONF_CHANGE_POWER)
999 rt2800usb_config_txpower(rt2x00dev, libconf->conf->power_level);
1000 if (flags & IEEE80211_CONF_CHANGE_RETRY_LIMITS)
1001 rt2800usb_config_retry_limit(rt2x00dev, libconf);
1002 if (flags & IEEE80211_CONF_CHANGE_PS)
1003 rt2800usb_config_ps(rt2x00dev, libconf);
1004}
1005
1006/*
1007 * Link tuning
1008 */
1009static void rt2800usb_link_stats(struct rt2x00_dev *rt2x00dev,
1010 struct link_qual *qual)
1011{
1012 u32 reg;
1013
1014 /*
1015 * Update FCS error count from register.
1016 */
1017 rt2x00usb_register_read(rt2x00dev, RX_STA_CNT0, &reg);
1018 qual->rx_failed = rt2x00_get_field32(reg, RX_STA_CNT0_CRC_ERR);
1019}
1020
1021static u8 rt2800usb_get_default_vgc(struct rt2x00_dev *rt2x00dev)
1022{
1023 if (rt2x00dev->curr_band == IEEE80211_BAND_2GHZ) {
1024 if (rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION)
1025 return 0x1c + (2 * rt2x00dev->lna_gain);
1026 else
1027 return 0x2e + rt2x00dev->lna_gain;
1028 }
1029
1030 if (!test_bit(CONFIG_CHANNEL_HT40, &rt2x00dev->flags))
1031 return 0x32 + (rt2x00dev->lna_gain * 5) / 3;
1032 else
1033 return 0x3a + (rt2x00dev->lna_gain * 5) / 3;
1034}
1035
1036static inline void rt2800usb_set_vgc(struct rt2x00_dev *rt2x00dev,
1037 struct link_qual *qual, u8 vgc_level)
1038{
1039 if (qual->vgc_level != vgc_level) {
1040 rt2800usb_bbp_write(rt2x00dev, 66, vgc_level);
1041 qual->vgc_level = vgc_level;
1042 qual->vgc_level_reg = vgc_level;
1043 }
1044}
1045
1046static void rt2800usb_reset_tuner(struct rt2x00_dev *rt2x00dev,
1047 struct link_qual *qual)
1048{
1049 rt2800usb_set_vgc(rt2x00dev, qual,
1050 rt2800usb_get_default_vgc(rt2x00dev));
1051}
1052
1053static void rt2800usb_link_tuner(struct rt2x00_dev *rt2x00dev,
1054 struct link_qual *qual, const u32 count)
1055{
1056 if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION)
1057 return;
1058
1059 /*
1060 * When RSSI is better then -80 increase VGC level with 0x10
1061 */
1062 rt2800usb_set_vgc(rt2x00dev, qual,
1063 rt2800usb_get_default_vgc(rt2x00dev) +
1064 ((qual->rssi > -80) * 0x10));
1065}
1066
1067/*
1068 * Firmware functions 49 * Firmware functions
1069 */ 50 */
1070static char *rt2800usb_get_firmware_name(struct rt2x00_dev *rt2x00dev) 51static char *rt2800usb_get_firmware_name(struct rt2x00_dev *rt2x00dev)
@@ -1172,7 +153,7 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
1172 * Wait for stable hardware. 153 * Wait for stable hardware.
1173 */ 154 */
1174 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 155 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1175 rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg); 156 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
1176 if (reg && reg != ~0) 157 if (reg && reg != ~0)
1177 break; 158 break;
1178 msleep(1); 159 msleep(1);
@@ -1192,8 +173,8 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
1192 data + offset, length, 173 data + offset, length,
1193 REGISTER_TIMEOUT32(length)); 174 REGISTER_TIMEOUT32(length));
1194 175
1195 rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0); 176 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CID, ~0);
1196 rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0); 177 rt2800_register_write(rt2x00dev, H2M_MAILBOX_STATUS, ~0);
1197 178
1198 /* 179 /*
1199 * Send firmware request to device to load firmware, 180 * Send firmware request to device to load firmware,
@@ -1208,18 +189,18 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
1208 } 189 }
1209 190
1210 msleep(10); 191 msleep(10);
1211 rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); 192 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
1212 193
1213 /* 194 /*
1214 * Send signal to firmware during boot time. 195 * Send signal to firmware during boot time.
1215 */ 196 */
1216 rt2800usb_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0); 197 rt2800_mcu_request(rt2x00dev, MCU_BOOT_SIGNAL, 0xff, 0, 0);
1217 198
1218 if ((chipset == 0x3070) || 199 if ((chipset == 0x3070) ||
1219 (chipset == 0x3071) || 200 (chipset == 0x3071) ||
1220 (chipset == 0x3572)) { 201 (chipset == 0x3572)) {
1221 udelay(200); 202 udelay(200);
1222 rt2800usb_mcu_request(rt2x00dev, MCU_CURRENT, 0, 0, 0); 203 rt2800_mcu_request(rt2x00dev, MCU_CURRENT, 0, 0, 0);
1223 udelay(10); 204 udelay(10);
1224 } 205 }
1225 206
@@ -1227,7 +208,7 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
1227 * Wait for device to stabilize. 208 * Wait for device to stabilize.
1228 */ 209 */
1229 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 210 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1230 rt2x00usb_register_read(rt2x00dev, PBF_SYS_CTRL, &reg); 211 rt2800_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
1231 if (rt2x00_get_field32(reg, PBF_SYS_CTRL_READY)) 212 if (rt2x00_get_field32(reg, PBF_SYS_CTRL_READY))
1232 break; 213 break;
1233 msleep(1); 214 msleep(1);
@@ -1241,536 +222,14 @@ static int rt2800usb_load_firmware(struct rt2x00_dev *rt2x00dev,
1241 /* 222 /*
1242 * Initialize firmware. 223 * Initialize firmware.
1243 */ 224 */
1244 rt2x00usb_register_write(rt2x00dev, H2M_BBP_AGENT, 0); 225 rt2800_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
1245 rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0); 226 rt2800_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
1246 msleep(1); 227 msleep(1);
1247 228
1248 return 0; 229 return 0;
1249} 230}
1250 231
1251/* 232/*
1252 * Initialization functions.
1253 */
/*
 * One-time MAC register initialization performed when the radio is
 * enabled: resets the MAC/BBP, programs beacon offsets, rate, protection
 * and DMA configuration, clears key/WCID tables and beacons, and drains
 * the clear-on-read statistics counters.
 *
 * Returns 0 on success or -EBUSY when the hardware never becomes ready.
 * NOTE(review): the register write order follows the vendor init
 * sequence and should not be rearranged.
 */
static int rt2800usb_init_registers(struct rt2x00_dev *rt2x00dev)
{
	u32 reg;
	unsigned int i;

	/*
	 * Wait until BBP and RF are ready.
	 * MAC_CSR0 reads as 0 or ~0 while the ASIC is not yet up.
	 */
	for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
		rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg);
		if (reg && reg != ~0)
			break;
		msleep(1);
	}

	if (i == REGISTER_BUSY_COUNT) {
		ERROR(rt2x00dev, "Unstable hardware.\n");
		return -EBUSY;
	}

	/* Clear bit 13 of PBF_SYS_CTRL (magic value from vendor init). */
	rt2x00usb_register_read(rt2x00dev, PBF_SYS_CTRL, &reg);
	rt2x00usb_register_write(rt2x00dev, PBF_SYS_CTRL, reg & ~0x00002000);

	/* Reset MAC and BBP. */
	rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_CSR, 1);
	rt2x00_set_field32(&reg, MAC_SYS_CTRL_RESET_BBP, 1);
	rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg);

	rt2x00usb_register_write(rt2x00dev, USB_DMA_CFG, 0x00000000);

	/* Put the USB device mode logic through a reset cycle. */
	rt2x00usb_vendor_request_sw(rt2x00dev, USB_DEVICE_MODE, 0,
				    USB_MODE_RESET, REGISTER_TIMEOUT);

	rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);

	/* Beacon base offsets (in units of 64 bytes, see inline values). */
	rt2x00usb_register_read(rt2x00dev, BCN_OFFSET0, &reg);
	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN0, 0xe0); /* 0x3800 */
	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN1, 0xe8); /* 0x3a00 */
	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN2, 0xf0); /* 0x3c00 */
	rt2x00_set_field32(&reg, BCN_OFFSET0_BCN3, 0xf8); /* 0x3e00 */
	rt2x00usb_register_write(rt2x00dev, BCN_OFFSET0, reg);

	rt2x00usb_register_read(rt2x00dev, BCN_OFFSET1, &reg);
	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN4, 0xc8); /* 0x3200 */
	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN5, 0xd0); /* 0x3400 */
	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN6, 0x77); /* 0x1dc0 */
	rt2x00_set_field32(&reg, BCN_OFFSET1_BCN7, 0x6f); /* 0x1bc0 */
	rt2x00usb_register_write(rt2x00dev, BCN_OFFSET1, reg);

	/* Basic rate bitmaps for legacy and HT responses. */
	rt2x00usb_register_write(rt2x00dev, LEGACY_BASIC_RATE, 0x0000013f);
	rt2x00usb_register_write(rt2x00dev, HT_BASIC_RATE, 0x00008003);

	rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, 0x00000000);

	/* Disable all beacon/TSF machinery until an interface enables it. */
	rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_INTERVAL, 0);
	rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 0);
	rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_SYNC, 0);
	rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 0);
	rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
	rt2x00_set_field32(&reg, BCN_TIME_CFG_TX_TIME_COMPENSATE, 0);
	rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg);

	/* TX switching config differs between RT3070 and older chips. */
	if (rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) {
		rt2x00usb_register_write(rt2x00dev, TX_SW_CFG0, 0x00000400);
		rt2x00usb_register_write(rt2x00dev, TX_SW_CFG1, 0x00000000);
		rt2x00usb_register_write(rt2x00dev, TX_SW_CFG2, 0x00000000);
	} else {
		rt2x00usb_register_write(rt2x00dev, TX_SW_CFG0, 0x00000000);
		rt2x00usb_register_write(rt2x00dev, TX_SW_CFG1, 0x00080606);
	}

	rt2x00usb_register_read(rt2x00dev, TX_LINK_CFG, &reg);
	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFB_LIFETIME, 32);
	rt2x00_set_field32(&reg, TX_LINK_CFG_MFB_ENABLE, 0);
	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_UMFS_ENABLE, 0);
	rt2x00_set_field32(&reg, TX_LINK_CFG_TX_MRQ_EN, 0);
	rt2x00_set_field32(&reg, TX_LINK_CFG_TX_RDG_EN, 0);
	rt2x00_set_field32(&reg, TX_LINK_CFG_TX_CF_ACK_EN, 1);
	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFB, 0);
	rt2x00_set_field32(&reg, TX_LINK_CFG_REMOTE_MFS, 0);
	rt2x00usb_register_write(rt2x00dev, TX_LINK_CFG, reg);

	rt2x00usb_register_read(rt2x00dev, TX_TIMEOUT_CFG, &reg);
	rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_MPDU_LIFETIME, 9);
	rt2x00_set_field32(&reg, TX_TIMEOUT_CFG_TX_OP_TIMEOUT, 10);
	rt2x00usb_register_write(rt2x00dev, TX_TIMEOUT_CFG, reg);

	/*
	 * Maximum frame lengths; PSDU limit depends on the chip revision
	 * (RT2880E up to, but excluding, RT3070 supports the larger value).
	 */
	rt2x00usb_register_read(rt2x00dev, MAX_LEN_CFG, &reg);
	rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_MPDU, AGGREGATION_SIZE);
	if (rt2x00_rev(&rt2x00dev->chip) >= RT2880E_VERSION &&
	    rt2x00_rev(&rt2x00dev->chip) < RT3070_VERSION)
		rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 2);
	else
		rt2x00_set_field32(&reg, MAX_LEN_CFG_MAX_PSDU, 1);
	rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_PSDU, 0);
	rt2x00_set_field32(&reg, MAX_LEN_CFG_MIN_MPDU, 0);
	rt2x00usb_register_write(rt2x00dev, MAX_LEN_CFG, reg);

	rt2x00usb_register_write(rt2x00dev, PBF_MAX_PCNT, 0x1f3fbf9f);

	/* Automatic response (ACK/CTS) configuration. */
	rt2x00usb_register_read(rt2x00dev, AUTO_RSP_CFG, &reg);
	rt2x00_set_field32(&reg, AUTO_RSP_CFG_AUTORESPONDER, 1);
	rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MMODE, 0);
	rt2x00_set_field32(&reg, AUTO_RSP_CFG_CTS_40_MREF, 0);
	rt2x00_set_field32(&reg, AUTO_RSP_CFG_DUAL_CTS_EN, 0);
	rt2x00_set_field32(&reg, AUTO_RSP_CFG_ACK_CTS_PSM_BIT, 0);
	rt2x00usb_register_write(rt2x00dev, AUTO_RSP_CFG, reg);

	/* Per-mode protection config: CCK, OFDM, MM20/40, GF20/40. */
	rt2x00usb_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_RATE, 8);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_CTRL, 0);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_PROTECT_NAV, 1);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_CCK, 1);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM20, 1);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_MM40, 1);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF20, 1);
	rt2x00_set_field32(&reg, CCK_PROT_CFG_TX_OP_ALLOW_GF40, 1);
	rt2x00usb_register_write(rt2x00dev, CCK_PROT_CFG, reg);

	rt2x00usb_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_RATE, 8);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_CTRL, 0);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_PROTECT_NAV, 1);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_CCK, 1);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM20, 1);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_MM40, 1);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF20, 1);
	rt2x00_set_field32(&reg, OFDM_PROT_CFG_TX_OP_ALLOW_GF40, 1);
	rt2x00usb_register_write(rt2x00dev, OFDM_PROT_CFG, reg);

	rt2x00usb_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_RATE, 0x4004);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_CTRL, 0);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_PROTECT_NAV, 1);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
	rt2x00_set_field32(&reg, MM20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
	rt2x00usb_register_write(rt2x00dev, MM20_PROT_CFG, reg);

	rt2x00usb_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_RATE, 0x4084);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_CTRL, 0);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_PROTECT_NAV, 1);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
	rt2x00_set_field32(&reg, MM40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
	rt2x00usb_register_write(rt2x00dev, MM40_PROT_CFG, reg);

	rt2x00usb_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_RATE, 0x4004);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_CTRL, 0);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_PROTECT_NAV, 1);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_CCK, 1);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM20, 1);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_MM40, 0);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF20, 1);
	rt2x00_set_field32(&reg, GF20_PROT_CFG_TX_OP_ALLOW_GF40, 0);
	rt2x00usb_register_write(rt2x00dev, GF20_PROT_CFG, reg);

	rt2x00usb_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_RATE, 0x4084);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_CTRL, 0);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_PROTECT_NAV, 1);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_CCK, 1);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_OFDM, 1);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM20, 1);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_MM40, 1);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF20, 1);
	rt2x00_set_field32(&reg, GF40_PROT_CFG_TX_OP_ALLOW_GF40, 1);
	rt2x00usb_register_write(rt2x00dev, GF40_PROT_CFG, reg);

	rt2x00usb_register_write(rt2x00dev, PBF_CFG, 0xf40006);

	/* WPDMA disabled for now; burst size pre-programmed. */
	rt2x00usb_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_DMA_BUSY, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_DMA_BUSY, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_WP_DMA_BURST_SIZE, 3);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_BIG_ENDIAN, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_RX_HDR_SCATTER, 0);
	rt2x00_set_field32(&reg, WPDMA_GLO_CFG_HDR_SEG_LEN, 0);
	rt2x00usb_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);

	rt2x00usb_register_write(rt2x00dev, TXOP_CTRL_CFG, 0x0000583f);
	rt2x00usb_register_write(rt2x00dev, TXOP_HLDR_ET, 0x00000002);

	/* Effectively disable RTS by pinning the threshold to the maximum. */
	rt2x00usb_register_read(rt2x00dev, TX_RTS_CFG, &reg);
	rt2x00_set_field32(&reg, TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT, 32);
	rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES,
			   IEEE80211_MAX_RTS_THRESHOLD);
	rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_FBK_EN, 0);
	rt2x00usb_register_write(rt2x00dev, TX_RTS_CFG, reg);

	rt2x00usb_register_write(rt2x00dev, EXP_ACK_TIME, 0x002400ca);
	rt2x00usb_register_write(rt2x00dev, PWR_PIN_CFG, 0x00000003);

	/*
	 * ASIC will keep garbage value after boot, clear encryption keys.
	 */
	for (i = 0; i < 4; i++)
		rt2x00usb_register_write(rt2x00dev,
					 SHARED_KEY_MODE_ENTRY(i), 0);

	/* Clear the WCID table and associated attribute/IV entries. */
	for (i = 0; i < 256; i++) {
		u32 wcid[2] = { 0xffffffff, 0x00ffffff };
		rt2x00usb_register_multiwrite(rt2x00dev, MAC_WCID_ENTRY(i),
					      wcid, sizeof(wcid));

		rt2x00usb_register_write(rt2x00dev, MAC_WCID_ATTR_ENTRY(i), 1);
		rt2x00usb_register_write(rt2x00dev, MAC_IVEIV_ENTRY(i), 0);
	}

	/*
	 * Clear all beacons
	 * For the Beacon base registers we only need to clear
	 * the first byte since that byte contains the VALID and OWNER
	 * bits which (when set to 0) will invalidate the entire beacon.
	 */
	rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE0, 0);
	rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE1, 0);
	rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE2, 0);
	rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE3, 0);
	rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE4, 0);
	rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE5, 0);
	rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE6, 0);
	rt2x00usb_register_write(rt2x00dev, HW_BEACON_BASE7, 0);

	rt2x00usb_register_read(rt2x00dev, USB_CYC_CFG, &reg);
	rt2x00_set_field32(&reg, USB_CYC_CFG_CLOCK_CYCLE, 30);
	rt2x00usb_register_write(rt2x00dev, USB_CYC_CFG, reg);

	/* HT and legacy rate fallback tables. */
	rt2x00usb_register_read(rt2x00dev, HT_FBK_CFG0, &reg);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS0FBK, 0);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS1FBK, 0);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS2FBK, 1);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS3FBK, 2);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS4FBK, 3);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS5FBK, 4);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS6FBK, 5);
	rt2x00_set_field32(&reg, HT_FBK_CFG0_HTMCS7FBK, 6);
	rt2x00usb_register_write(rt2x00dev, HT_FBK_CFG0, reg);

	rt2x00usb_register_read(rt2x00dev, HT_FBK_CFG1, &reg);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS8FBK, 8);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS9FBK, 8);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS10FBK, 9);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS11FBK, 10);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS12FBK, 11);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS13FBK, 12);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS14FBK, 13);
	rt2x00_set_field32(&reg, HT_FBK_CFG1_HTMCS15FBK, 14);
	rt2x00usb_register_write(rt2x00dev, HT_FBK_CFG1, reg);

	rt2x00usb_register_read(rt2x00dev, LG_FBK_CFG0, &reg);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS0FBK, 8);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS1FBK, 8);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS2FBK, 9);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS3FBK, 10);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS4FBK, 11);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS5FBK, 12);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS6FBK, 13);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_OFDMMCS7FBK, 14);
	rt2x00usb_register_write(rt2x00dev, LG_FBK_CFG0, reg);

	/*
	 * NOTE(review): LG_FBK_CFG1 is written using the CFG0 CCK field
	 * definitions — presumably the field layout is shared; confirm
	 * against the register header.
	 */
	rt2x00usb_register_read(rt2x00dev, LG_FBK_CFG1, &reg);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS0FBK, 0);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS1FBK, 0);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS2FBK, 1);
	rt2x00_set_field32(&reg, LG_FBK_CFG0_CCKMCS3FBK, 2);
	rt2x00usb_register_write(rt2x00dev, LG_FBK_CFG1, reg);

	/*
	 * We must clear the error counters.
	 * These registers are cleared on read,
	 * so we may pass a useless variable to store the value.
	 */
	rt2x00usb_register_read(rt2x00dev, RX_STA_CNT0, &reg);
	rt2x00usb_register_read(rt2x00dev, RX_STA_CNT1, &reg);
	rt2x00usb_register_read(rt2x00dev, RX_STA_CNT2, &reg);
	rt2x00usb_register_read(rt2x00dev, TX_STA_CNT0, &reg);
	rt2x00usb_register_read(rt2x00dev, TX_STA_CNT1, &reg);
	rt2x00usb_register_read(rt2x00dev, TX_STA_CNT2, &reg);

	return 0;
}
1551
1552static int rt2800usb_wait_bbp_rf_ready(struct rt2x00_dev *rt2x00dev)
1553{
1554 unsigned int i;
1555 u32 reg;
1556
1557 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1558 rt2x00usb_register_read(rt2x00dev, MAC_STATUS_CFG, &reg);
1559 if (!rt2x00_get_field32(reg, MAC_STATUS_CFG_BBP_RF_BUSY))
1560 return 0;
1561
1562 udelay(REGISTER_BUSY_DELAY);
1563 }
1564
1565 ERROR(rt2x00dev, "BBP/RF register access failed, aborting.\n");
1566 return -EACCES;
1567}
1568
1569static int rt2800usb_wait_bbp_ready(struct rt2x00_dev *rt2x00dev)
1570{
1571 unsigned int i;
1572 u8 value;
1573
1574 /*
1575 * BBP was enabled after firmware was loaded,
1576 * but we need to reactivate it now.
1577 */
1578 rt2x00usb_register_write(rt2x00dev, H2M_BBP_AGENT, 0);
1579 rt2x00usb_register_write(rt2x00dev, H2M_MAILBOX_CSR, 0);
1580 msleep(1);
1581
1582 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1583 rt2800usb_bbp_read(rt2x00dev, 0, &value);
1584 if ((value != 0xff) && (value != 0x00))
1585 return 0;
1586 udelay(REGISTER_BUSY_DELAY);
1587 }
1588
1589 ERROR(rt2x00dev, "BBP register access failed, aborting.\n");
1590 return -EACCES;
1591}
1592
/*
 * Load the default BBP register values, apply chip-revision specific
 * overrides, and finally apply any per-device overrides stored in the
 * EEPROM BBP table.
 *
 * Returns 0 on success or -EACCES when the BBP never becomes ready.
 */
static int rt2800usb_init_bbp(struct rt2x00_dev *rt2x00dev)
{
	unsigned int i;
	u16 eeprom;
	u8 reg_id;
	u8 value;

	if (unlikely(rt2800usb_wait_bbp_rf_ready(rt2x00dev) ||
		     rt2800usb_wait_bbp_ready(rt2x00dev)))
		return -EACCES;

	/* Vendor default BBP values. */
	rt2800usb_bbp_write(rt2x00dev, 65, 0x2c);
	rt2800usb_bbp_write(rt2x00dev, 66, 0x38);
	rt2800usb_bbp_write(rt2x00dev, 69, 0x12);
	rt2800usb_bbp_write(rt2x00dev, 70, 0x0a);
	rt2800usb_bbp_write(rt2x00dev, 73, 0x10);
	rt2800usb_bbp_write(rt2x00dev, 81, 0x37);
	rt2800usb_bbp_write(rt2x00dev, 82, 0x62);
	rt2800usb_bbp_write(rt2x00dev, 83, 0x6a);
	rt2800usb_bbp_write(rt2x00dev, 84, 0x99);
	rt2800usb_bbp_write(rt2x00dev, 86, 0x00);
	rt2800usb_bbp_write(rt2x00dev, 91, 0x04);
	rt2800usb_bbp_write(rt2x00dev, 92, 0x00);
	rt2800usb_bbp_write(rt2x00dev, 103, 0x00);
	rt2800usb_bbp_write(rt2x00dev, 105, 0x05);

	/* Chip-revision specific overrides of the defaults above. */
	if (rt2x00_rev(&rt2x00dev->chip) == RT2860C_VERSION) {
		rt2800usb_bbp_write(rt2x00dev, 69, 0x16);
		rt2800usb_bbp_write(rt2x00dev, 73, 0x12);
	}

	if (rt2x00_rev(&rt2x00dev->chip) > RT2860D_VERSION) {
		rt2800usb_bbp_write(rt2x00dev, 84, 0x19);
	}

	if (rt2x00_rev(&rt2x00dev->chip) == RT3070_VERSION) {
		rt2800usb_bbp_write(rt2x00dev, 70, 0x0a);
		rt2800usb_bbp_write(rt2x00dev, 84, 0x99);
		rt2800usb_bbp_write(rt2x00dev, 105, 0x05);
	}

	/*
	 * Apply per-device BBP overrides from the EEPROM; entries of
	 * 0x0000/0xffff are unprogrammed and skipped.
	 */
	for (i = 0; i < EEPROM_BBP_SIZE; i++) {
		rt2x00_eeprom_read(rt2x00dev, EEPROM_BBP_START + i, &eeprom);

		if (eeprom != 0xffff && eeprom != 0x0000) {
			reg_id = rt2x00_get_field16(eeprom, EEPROM_BBP_REG_ID);
			value = rt2x00_get_field16(eeprom, EEPROM_BBP_VALUE);
			rt2800usb_bbp_write(rt2x00dev, reg_id, value);
		}
	}

	return 0;
}
1646
/*
 * Calibrate the RX filter (RFCSR 24) for the given bandwidth.
 *
 * Puts the RF into baseband loopback, injects passband and stopband test
 * tones via the BBP, and increments the filter tuning value until the
 * passband/stopband difference drops to @filter_target, backing off one
 * step if the target was hit exactly (overtuned).
 *
 * @bw40:          calibrate for 40MHz (true) or 20MHz (false) bandwidth
 * @rfcsr24:       starting RFCSR 24 tuning value
 * @filter_target: desired passband - stopband difference
 *
 * Returns the calibrated RFCSR 24 value (also left programmed).
 */
static u8 rt2800usb_init_rx_filter(struct rt2x00_dev *rt2x00dev,
				   bool bw40, u8 rfcsr24, u8 filter_target)
{
	unsigned int i;
	u8 bbp;
	u8 rfcsr;
	u8 passband;
	u8 stopband;
	u8 overtuned = 0;

	rt2800usb_rfcsr_write(rt2x00dev, 24, rfcsr24);

	/* Select the bandwidth under calibration (BBP4: 0 = BW20, 2 = BW40). */
	rt2800usb_bbp_read(rt2x00dev, 4, &bbp);
	rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 2 * bw40);
	rt2800usb_bbp_write(rt2x00dev, 4, bbp);

	/* Route the test tone through baseband loopback. */
	rt2800usb_rfcsr_read(rt2x00dev, 22, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 1);
	rt2800usb_rfcsr_write(rt2x00dev, 22, rfcsr);

	/*
	 * Set power & frequency of passband test tone
	 */
	rt2800usb_bbp_write(rt2x00dev, 24, 0);

	/* Wait (max ~100ms) for a non-zero passband measurement on BBP 55. */
	for (i = 0; i < 100; i++) {
		rt2800usb_bbp_write(rt2x00dev, 25, 0x90);
		msleep(1);

		rt2800usb_bbp_read(rt2x00dev, 55, &passband);
		if (passband)
			break;
	}

	/*
	 * Set power & frequency of stopband test tone
	 */
	rt2800usb_bbp_write(rt2x00dev, 24, 0x06);

	/*
	 * Step the tuning value up until the attenuation (passband minus
	 * stopband, u8 arithmetic) exceeds the target; remember whether
	 * the target was ever matched exactly (overtuned).
	 */
	for (i = 0; i < 100; i++) {
		rt2800usb_bbp_write(rt2x00dev, 25, 0x90);
		msleep(1);

		rt2800usb_bbp_read(rt2x00dev, 55, &stopband);

		if ((passband - stopband) <= filter_target) {
			rfcsr24++;
			overtuned += ((passband - stopband) == filter_target);
		} else
			break;

		rt2800usb_rfcsr_write(rt2x00dev, 24, rfcsr24);
	}

	/* Back off one step if the loop ever matched the target exactly. */
	rfcsr24 -= !!overtuned;

	rt2800usb_rfcsr_write(rt2x00dev, 24, rfcsr24);
	return rfcsr24;
}
1706
/*
 * RT3070-only RF (RFCSR) initialization: run the RF calibration pulse,
 * load the default RFCSR values, calibrate the 20MHz and 40MHz RX
 * filters, and restore the RF/BBP to their normal (BW20, no loopback)
 * state. A no-op returning 0 on all other chip revisions.
 */
static int rt2800usb_init_rfcsr(struct rt2x00_dev *rt2x00dev)
{
	u8 rfcsr;
	u8 bbp;

	if (rt2x00_rev(&rt2x00dev->chip) != RT3070_VERSION)
		return 0;

	/*
	 * Init RF calibration.
	 * Pulse the calibration bit in RFCSR 30: set, wait, clear.
	 */
	rt2800usb_rfcsr_read(rt2x00dev, 30, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 1);
	rt2800usb_rfcsr_write(rt2x00dev, 30, rfcsr);
	msleep(1);
	rt2x00_set_field8(&rfcsr, RFCSR30_RF_CALIBRATION, 0);
	rt2800usb_rfcsr_write(rt2x00dev, 30, rfcsr);

	/* Vendor default RFCSR values. */
	rt2800usb_rfcsr_write(rt2x00dev, 4, 0x40);
	rt2800usb_rfcsr_write(rt2x00dev, 5, 0x03);
	rt2800usb_rfcsr_write(rt2x00dev, 6, 0x02);
	rt2800usb_rfcsr_write(rt2x00dev, 7, 0x70);
	rt2800usb_rfcsr_write(rt2x00dev, 9, 0x0f);
	rt2800usb_rfcsr_write(rt2x00dev, 10, 0x71);
	rt2800usb_rfcsr_write(rt2x00dev, 11, 0x21);
	rt2800usb_rfcsr_write(rt2x00dev, 12, 0x7b);
	rt2800usb_rfcsr_write(rt2x00dev, 14, 0x90);
	rt2800usb_rfcsr_write(rt2x00dev, 15, 0x58);
	rt2800usb_rfcsr_write(rt2x00dev, 16, 0xb3);
	rt2800usb_rfcsr_write(rt2x00dev, 17, 0x92);
	rt2800usb_rfcsr_write(rt2x00dev, 18, 0x2c);
	rt2800usb_rfcsr_write(rt2x00dev, 19, 0x02);
	rt2800usb_rfcsr_write(rt2x00dev, 20, 0xba);
	rt2800usb_rfcsr_write(rt2x00dev, 21, 0xdb);
	rt2800usb_rfcsr_write(rt2x00dev, 24, 0x16);
	rt2800usb_rfcsr_write(rt2x00dev, 25, 0x01);
	rt2800usb_rfcsr_write(rt2x00dev, 27, 0x03);
	rt2800usb_rfcsr_write(rt2x00dev, 29, 0x1f);

	/*
	 * Set RX Filter calibration for 20MHz and 40MHz
	 */
	rt2x00dev->calibration[0] =
		rt2800usb_init_rx_filter(rt2x00dev, false, 0x07, 0x16);
	rt2x00dev->calibration[1] =
		rt2800usb_init_rx_filter(rt2x00dev, true, 0x27, 0x19);

	/*
	 * Set back to initial state
	 */
	rt2800usb_bbp_write(rt2x00dev, 24, 0);

	/* Disable the baseband loopback used during calibration. */
	rt2800usb_rfcsr_read(rt2x00dev, 22, &rfcsr);
	rt2x00_set_field8(&rfcsr, RFCSR22_BASEBAND_LOOPBACK, 0);
	rt2800usb_rfcsr_write(rt2x00dev, 22, rfcsr);

	/*
	 * set BBP back to BW20
	 */
	rt2800usb_bbp_read(rt2x00dev, 4, &bbp);
	rt2x00_set_field8(&bbp, BBP4_BANDWIDTH, 0);
	rt2800usb_bbp_write(rt2x00dev, 4, bbp);

	return 0;
}
1772
1773/*
1774 * Device state switch handlers. 233 * Device state switch handlers.
1775 */ 234 */
1776static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev, 235static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
@@ -1778,11 +237,11 @@ static void rt2800usb_toggle_rx(struct rt2x00_dev *rt2x00dev,
1778{ 237{
1779 u32 reg; 238 u32 reg;
1780 239
1781 rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 240 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
1782 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 241 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX,
1783 (state == STATE_RADIO_RX_ON) || 242 (state == STATE_RADIO_RX_ON) ||
1784 (state == STATE_RADIO_RX_ON_LINK)); 243 (state == STATE_RADIO_RX_ON_LINK));
1785 rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 244 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
1786} 245}
1787 246
1788static int rt2800usb_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev) 247static int rt2800usb_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
@@ -1791,7 +250,7 @@ static int rt2800usb_wait_wpdma_ready(struct rt2x00_dev *rt2x00dev)
1791 u32 reg; 250 u32 reg;
1792 251
1793 for (i = 0; i < REGISTER_BUSY_COUNT; i++) { 252 for (i = 0; i < REGISTER_BUSY_COUNT; i++) {
1794 rt2x00usb_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg); 253 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
1795 if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) && 254 if (!rt2x00_get_field32(reg, WPDMA_GLO_CFG_TX_DMA_BUSY) &&
1796 !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY)) 255 !rt2x00_get_field32(reg, WPDMA_GLO_CFG_RX_DMA_BUSY))
1797 return 0; 256 return 0;
@@ -1812,25 +271,25 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
1812 * Initialize all registers. 271 * Initialize all registers.
1813 */ 272 */
1814 if (unlikely(rt2800usb_wait_wpdma_ready(rt2x00dev) || 273 if (unlikely(rt2800usb_wait_wpdma_ready(rt2x00dev) ||
1815 rt2800usb_init_registers(rt2x00dev) || 274 rt2800_init_registers(rt2x00dev) ||
1816 rt2800usb_init_bbp(rt2x00dev) || 275 rt2800_init_bbp(rt2x00dev) ||
1817 rt2800usb_init_rfcsr(rt2x00dev))) 276 rt2800_init_rfcsr(rt2x00dev)))
1818 return -EIO; 277 return -EIO;
1819 278
1820 rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 279 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
1821 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1); 280 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
1822 rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 281 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
1823 282
1824 udelay(50); 283 udelay(50);
1825 284
1826 rt2x00usb_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg); 285 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
1827 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1); 286 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_TX_WRITEBACK_DONE, 1);
1828 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1); 287 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 1);
1829 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1); 288 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 1);
1830 rt2x00usb_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); 289 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
1831 290
1832 291
1833 rt2x00usb_register_read(rt2x00dev, USB_DMA_CFG, &reg); 292 rt2800_register_read(rt2x00dev, USB_DMA_CFG, &reg);
1834 rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0); 293 rt2x00_set_field32(&reg, USB_DMA_CFG_PHY_CLEAR, 0);
1835 /* Don't use bulk in aggregation when working with USB 1.1 */ 294 /* Don't use bulk in aggregation when working with USB 1.1 */
1836 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN, 295 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_AGG_EN,
@@ -1844,26 +303,26 @@ static int rt2800usb_enable_radio(struct rt2x00_dev *rt2x00dev)
1844 ((RX_ENTRIES * DATA_FRAME_SIZE) / 1024) - 3); 303 ((RX_ENTRIES * DATA_FRAME_SIZE) / 1024) - 3);
1845 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_EN, 1); 304 rt2x00_set_field32(&reg, USB_DMA_CFG_RX_BULK_EN, 1);
1846 rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1); 305 rt2x00_set_field32(&reg, USB_DMA_CFG_TX_BULK_EN, 1);
1847 rt2x00usb_register_write(rt2x00dev, USB_DMA_CFG, reg); 306 rt2800_register_write(rt2x00dev, USB_DMA_CFG, reg);
1848 307
1849 rt2x00usb_register_read(rt2x00dev, MAC_SYS_CTRL, &reg); 308 rt2800_register_read(rt2x00dev, MAC_SYS_CTRL, &reg);
1850 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1); 309 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_TX, 1);
1851 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1); 310 rt2x00_set_field32(&reg, MAC_SYS_CTRL_ENABLE_RX, 1);
1852 rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, reg); 311 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, reg);
1853 312
1854 /* 313 /*
1855 * Initialize LED control 314 * Initialize LED control
1856 */ 315 */
1857 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word); 316 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED1, &word);
1858 rt2800usb_mcu_request(rt2x00dev, MCU_LED_1, 0xff, 317 rt2800_mcu_request(rt2x00dev, MCU_LED_1, 0xff,
1859 word & 0xff, (word >> 8) & 0xff); 318 word & 0xff, (word >> 8) & 0xff);
1860 319
1861 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word); 320 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED2, &word);
1862 rt2800usb_mcu_request(rt2x00dev, MCU_LED_2, 0xff, 321 rt2800_mcu_request(rt2x00dev, MCU_LED_2, 0xff,
1863 word & 0xff, (word >> 8) & 0xff); 322 word & 0xff, (word >> 8) & 0xff);
1864 323
1865 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word); 324 rt2x00_eeprom_read(rt2x00dev, EEPROM_LED3, &word);
1866 rt2800usb_mcu_request(rt2x00dev, MCU_LED_3, 0xff, 325 rt2800_mcu_request(rt2x00dev, MCU_LED_3, 0xff,
1867 word & 0xff, (word >> 8) & 0xff); 326 word & 0xff, (word >> 8) & 0xff);
1868 327
1869 return 0; 328 return 0;
@@ -1873,14 +332,14 @@ static void rt2800usb_disable_radio(struct rt2x00_dev *rt2x00dev)
1873{ 332{
1874 u32 reg; 333 u32 reg;
1875 334
1876 rt2x00usb_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg); 335 rt2800_register_read(rt2x00dev, WPDMA_GLO_CFG, &reg);
1877 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0); 336 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_TX_DMA, 0);
1878 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0); 337 rt2x00_set_field32(&reg, WPDMA_GLO_CFG_ENABLE_RX_DMA, 0);
1879 rt2x00usb_register_write(rt2x00dev, WPDMA_GLO_CFG, reg); 338 rt2800_register_write(rt2x00dev, WPDMA_GLO_CFG, reg);
1880 339
1881 rt2x00usb_register_write(rt2x00dev, MAC_SYS_CTRL, 0); 340 rt2800_register_write(rt2x00dev, MAC_SYS_CTRL, 0);
1882 rt2x00usb_register_write(rt2x00dev, PWR_PIN_CFG, 0); 341 rt2800_register_write(rt2x00dev, PWR_PIN_CFG, 0);
1883 rt2x00usb_register_write(rt2x00dev, TX_PIN_CFG, 0); 342 rt2800_register_write(rt2x00dev, TX_PIN_CFG, 0);
1884 343
1885 /* Wait for DMA, ignore error */ 344 /* Wait for DMA, ignore error */
1886 rt2800usb_wait_wpdma_ready(rt2x00dev); 345 rt2800usb_wait_wpdma_ready(rt2x00dev);
@@ -1892,9 +351,9 @@ static int rt2800usb_set_state(struct rt2x00_dev *rt2x00dev,
1892 enum dev_state state) 351 enum dev_state state)
1893{ 352{
1894 if (state == STATE_AWAKE) 353 if (state == STATE_AWAKE)
1895 rt2800usb_mcu_request(rt2x00dev, MCU_WAKEUP, 0xff, 0, 0); 354 rt2800_mcu_request(rt2x00dev, MCU_WAKEUP, 0xff, 0, 0);
1896 else 355 else
1897 rt2800usb_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 2); 356 rt2800_mcu_request(rt2x00dev, MCU_SLEEP, 0xff, 0, 2);
1898 357
1899 return 0; 358 return 0;
1900} 359}
@@ -2048,9 +507,9 @@ static void rt2800usb_write_beacon(struct queue_entry *entry)
2048 * Disable beaconing while we are reloading the beacon data, 507 * Disable beaconing while we are reloading the beacon data,
2049 * otherwise we might be sending out invalid data. 508 * otherwise we might be sending out invalid data.
2050 */ 509 */
2051 rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 510 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
2052 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0); 511 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 0);
2053 rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg); 512 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
2054 513
2055 /* 514 /*
2056 * Write entire beacon with descriptor to register. 515 * Write entire beacon with descriptor to register.
@@ -2093,12 +552,12 @@ static void rt2800usb_kick_tx_queue(struct rt2x00_dev *rt2x00dev,
2093 return; 552 return;
2094 } 553 }
2095 554
2096 rt2x00usb_register_read(rt2x00dev, BCN_TIME_CFG, &reg); 555 rt2800_register_read(rt2x00dev, BCN_TIME_CFG, &reg);
2097 if (!rt2x00_get_field32(reg, BCN_TIME_CFG_BEACON_GEN)) { 556 if (!rt2x00_get_field32(reg, BCN_TIME_CFG_BEACON_GEN)) {
2098 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1); 557 rt2x00_set_field32(&reg, BCN_TIME_CFG_TSF_TICKING, 1);
2099 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1); 558 rt2x00_set_field32(&reg, BCN_TIME_CFG_TBTT_ENABLE, 1);
2100 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1); 559 rt2x00_set_field32(&reg, BCN_TIME_CFG_BEACON_GEN, 1);
2101 rt2x00usb_register_write(rt2x00dev, BCN_TIME_CFG, reg); 560 rt2800_register_write(rt2x00dev, BCN_TIME_CFG, reg);
2102 } 561 }
2103} 562}
2104 563
@@ -2124,7 +583,7 @@ static void rt2800usb_fill_rxdone(struct queue_entry *entry,
2124 */ 583 */
2125 memcpy(skbdesc->desc, rxd, skbdesc->desc_len); 584 memcpy(skbdesc->desc, rxd, skbdesc->desc_len);
2126 rxd = (__le32 *)skbdesc->desc; 585 rxd = (__le32 *)skbdesc->desc;
2127 rxwi = &rxd[RXD_DESC_SIZE / sizeof(__le32)]; 586 rxwi = &rxd[RXINFO_DESC_SIZE / sizeof(__le32)];
2128 587
2129 /* 588 /*
2130 * It is now safe to read the descriptor on all architectures. 589 * It is now safe to read the descriptor on all architectures.
@@ -2326,7 +785,7 @@ static int rt2800usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
2326 * Identify RF chipset. 785 * Identify RF chipset.
2327 */ 786 */
2328 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE); 787 value = rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE);
2329 rt2x00usb_register_read(rt2x00dev, MAC_CSR0, &reg); 788 rt2800_register_read(rt2x00dev, MAC_CSR0, &reg);
2330 rt2x00_set_chip(rt2x00dev, RT2870, value, reg); 789 rt2x00_set_chip(rt2x00dev, RT2870, value, reg);
2331 790
2332 /* 791 /*
@@ -2385,9 +844,9 @@ static int rt2800usb_init_eeprom(struct rt2x00_dev *rt2x00dev)
2385 * Store led settings, for correct led behaviour. 844 * Store led settings, for correct led behaviour.
2386 */ 845 */
2387#ifdef CONFIG_RT2X00_LIB_LEDS 846#ifdef CONFIG_RT2X00_LIB_LEDS
2388 rt2800usb_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO); 847 rt2800_init_led(rt2x00dev, &rt2x00dev->led_radio, LED_TYPE_RADIO);
2389 rt2800usb_init_led(rt2x00dev, &rt2x00dev->led_assoc, LED_TYPE_ASSOC); 848 rt2800_init_led(rt2x00dev, &rt2x00dev->led_assoc, LED_TYPE_ASSOC);
2390 rt2800usb_init_led(rt2x00dev, &rt2x00dev->led_qual, LED_TYPE_QUALITY); 849 rt2800_init_led(rt2x00dev, &rt2x00dev->led_qual, LED_TYPE_QUALITY);
2391 850
2392 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ, 851 rt2x00_eeprom_read(rt2x00dev, EEPROM_FREQ,
2393 &rt2x00dev->led_mcu_reg); 852 &rt2x00dev->led_mcu_reg);
@@ -2600,10 +1059,25 @@ static int rt2800usb_probe_hw_mode(struct rt2x00_dev *rt2x00dev)
2600 return 0; 1059 return 0;
2601} 1060}
2602 1061
1062static const struct rt2800_ops rt2800usb_rt2800_ops = {
1063 .register_read = rt2x00usb_register_read,
1064 .register_write = rt2x00usb_register_write,
1065 .register_write_lock = rt2x00usb_register_write_lock,
1066
1067 .register_multiread = rt2x00usb_register_multiread,
1068 .register_multiwrite = rt2x00usb_register_multiwrite,
1069
1070 .regbusy_read = rt2x00usb_regbusy_read,
1071};
1072
2603static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev) 1073static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
2604{ 1074{
2605 int retval; 1075 int retval;
2606 1076
1077 rt2x00_set_chip_intf(rt2x00dev, RT2X00_CHIP_INTF_USB);
1078
1079 rt2x00dev->priv = (void *)&rt2800usb_rt2800_ops;
1080
2607 /* 1081 /*
2608 * Allocate eeprom data. 1082 * Allocate eeprom data.
2609 */ 1083 */
@@ -2645,162 +1119,6 @@ static int rt2800usb_probe_hw(struct rt2x00_dev *rt2x00dev)
2645 return 0; 1119 return 0;
2646} 1120}
2647 1121
2648/*
2649 * IEEE80211 stack callback functions.
2650 */
2651static void rt2800usb_get_tkip_seq(struct ieee80211_hw *hw, u8 hw_key_idx,
2652 u32 *iv32, u16 *iv16)
2653{
2654 struct rt2x00_dev *rt2x00dev = hw->priv;
2655 struct mac_iveiv_entry iveiv_entry;
2656 u32 offset;
2657
2658 offset = MAC_IVEIV_ENTRY(hw_key_idx);
2659 rt2x00usb_register_multiread(rt2x00dev, offset,
2660 &iveiv_entry, sizeof(iveiv_entry));
2661
2662 memcpy(&iveiv_entry.iv[0], iv16, sizeof(iv16));
2663 memcpy(&iveiv_entry.iv[4], iv32, sizeof(iv32));
2664}
2665
2666static int rt2800usb_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
2667{
2668 struct rt2x00_dev *rt2x00dev = hw->priv;
2669 u32 reg;
2670 bool enabled = (value < IEEE80211_MAX_RTS_THRESHOLD);
2671
2672 rt2x00usb_register_read(rt2x00dev, TX_RTS_CFG, &reg);
2673 rt2x00_set_field32(&reg, TX_RTS_CFG_RTS_THRES, value);
2674 rt2x00usb_register_write(rt2x00dev, TX_RTS_CFG, reg);
2675
2676 rt2x00usb_register_read(rt2x00dev, CCK_PROT_CFG, &reg);
2677 rt2x00_set_field32(&reg, CCK_PROT_CFG_RTS_TH_EN, enabled);
2678 rt2x00usb_register_write(rt2x00dev, CCK_PROT_CFG, reg);
2679
2680 rt2x00usb_register_read(rt2x00dev, OFDM_PROT_CFG, &reg);
2681 rt2x00_set_field32(&reg, OFDM_PROT_CFG_RTS_TH_EN, enabled);
2682 rt2x00usb_register_write(rt2x00dev, OFDM_PROT_CFG, reg);
2683
2684 rt2x00usb_register_read(rt2x00dev, MM20_PROT_CFG, &reg);
2685 rt2x00_set_field32(&reg, MM20_PROT_CFG_RTS_TH_EN, enabled);
2686 rt2x00usb_register_write(rt2x00dev, MM20_PROT_CFG, reg);
2687
2688 rt2x00usb_register_read(rt2x00dev, MM40_PROT_CFG, &reg);
2689 rt2x00_set_field32(&reg, MM40_PROT_CFG_RTS_TH_EN, enabled);
2690 rt2x00usb_register_write(rt2x00dev, MM40_PROT_CFG, reg);
2691
2692 rt2x00usb_register_read(rt2x00dev, GF20_PROT_CFG, &reg);
2693 rt2x00_set_field32(&reg, GF20_PROT_CFG_RTS_TH_EN, enabled);
2694 rt2x00usb_register_write(rt2x00dev, GF20_PROT_CFG, reg);
2695
2696 rt2x00usb_register_read(rt2x00dev, GF40_PROT_CFG, &reg);
2697 rt2x00_set_field32(&reg, GF40_PROT_CFG_RTS_TH_EN, enabled);
2698 rt2x00usb_register_write(rt2x00dev, GF40_PROT_CFG, reg);
2699
2700 return 0;
2701}
2702
2703static int rt2800usb_conf_tx(struct ieee80211_hw *hw, u16 queue_idx,
2704 const struct ieee80211_tx_queue_params *params)
2705{
2706 struct rt2x00_dev *rt2x00dev = hw->priv;
2707 struct data_queue *queue;
2708 struct rt2x00_field32 field;
2709 int retval;
2710 u32 reg;
2711 u32 offset;
2712
2713 /*
2714 * First pass the configuration through rt2x00lib, that will
2715 * update the queue settings and validate the input. After that
2716 * we are free to update the registers based on the value
2717 * in the queue parameter.
2718 */
2719 retval = rt2x00mac_conf_tx(hw, queue_idx, params);
2720 if (retval)
2721 return retval;
2722
2723 /*
2724 * We only need to perform additional register initialization
2725 * for WMM queues/
2726 */
2727 if (queue_idx >= 4)
2728 return 0;
2729
2730 queue = rt2x00queue_get_queue(rt2x00dev, queue_idx);
2731
2732 /* Update WMM TXOP register */
2733 offset = WMM_TXOP0_CFG + (sizeof(u32) * (!!(queue_idx & 2)));
2734 field.bit_offset = (queue_idx & 1) * 16;
2735 field.bit_mask = 0xffff << field.bit_offset;
2736
2737 rt2x00usb_register_read(rt2x00dev, offset, &reg);
2738 rt2x00_set_field32(&reg, field, queue->txop);
2739 rt2x00usb_register_write(rt2x00dev, offset, reg);
2740
2741 /* Update WMM registers */
2742 field.bit_offset = queue_idx * 4;
2743 field.bit_mask = 0xf << field.bit_offset;
2744
2745 rt2x00usb_register_read(rt2x00dev, WMM_AIFSN_CFG, &reg);
2746 rt2x00_set_field32(&reg, field, queue->aifs);
2747 rt2x00usb_register_write(rt2x00dev, WMM_AIFSN_CFG, reg);
2748
2749 rt2x00usb_register_read(rt2x00dev, WMM_CWMIN_CFG, &reg);
2750 rt2x00_set_field32(&reg, field, queue->cw_min);
2751 rt2x00usb_register_write(rt2x00dev, WMM_CWMIN_CFG, reg);
2752
2753 rt2x00usb_register_read(rt2x00dev, WMM_CWMAX_CFG, &reg);
2754 rt2x00_set_field32(&reg, field, queue->cw_max);
2755 rt2x00usb_register_write(rt2x00dev, WMM_CWMAX_CFG, reg);
2756
2757 /* Update EDCA registers */
2758 offset = EDCA_AC0_CFG + (sizeof(u32) * queue_idx);
2759
2760 rt2x00usb_register_read(rt2x00dev, offset, &reg);
2761 rt2x00_set_field32(&reg, EDCA_AC0_CFG_TX_OP, queue->txop);
2762 rt2x00_set_field32(&reg, EDCA_AC0_CFG_AIFSN, queue->aifs);
2763 rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMIN, queue->cw_min);
2764 rt2x00_set_field32(&reg, EDCA_AC0_CFG_CWMAX, queue->cw_max);
2765 rt2x00usb_register_write(rt2x00dev, offset, reg);
2766
2767 return 0;
2768}
2769
2770static u64 rt2800usb_get_tsf(struct ieee80211_hw *hw)
2771{
2772 struct rt2x00_dev *rt2x00dev = hw->priv;
2773 u64 tsf;
2774 u32 reg;
2775
2776 rt2x00usb_register_read(rt2x00dev, TSF_TIMER_DW1, &reg);
2777 tsf = (u64) rt2x00_get_field32(reg, TSF_TIMER_DW1_HIGH_WORD) << 32;
2778 rt2x00usb_register_read(rt2x00dev, TSF_TIMER_DW0, &reg);
2779 tsf |= rt2x00_get_field32(reg, TSF_TIMER_DW0_LOW_WORD);
2780
2781 return tsf;
2782}
2783
2784static const struct ieee80211_ops rt2800usb_mac80211_ops = {
2785 .tx = rt2x00mac_tx,
2786 .start = rt2x00mac_start,
2787 .stop = rt2x00mac_stop,
2788 .add_interface = rt2x00mac_add_interface,
2789 .remove_interface = rt2x00mac_remove_interface,
2790 .config = rt2x00mac_config,
2791 .configure_filter = rt2x00mac_configure_filter,
2792 .set_tim = rt2x00mac_set_tim,
2793 .set_key = rt2x00mac_set_key,
2794 .get_stats = rt2x00mac_get_stats,
2795 .get_tkip_seq = rt2800usb_get_tkip_seq,
2796 .set_rts_threshold = rt2800usb_set_rts_threshold,
2797 .bss_info_changed = rt2x00mac_bss_info_changed,
2798 .conf_tx = rt2800usb_conf_tx,
2799 .get_tx_stats = rt2x00mac_get_tx_stats,
2800 .get_tsf = rt2800usb_get_tsf,
2801 .rfkill_poll = rt2x00mac_rfkill_poll,
2802};
2803
2804static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = { 1122static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
2805 .probe_hw = rt2800usb_probe_hw, 1123 .probe_hw = rt2800usb_probe_hw,
2806 .get_firmware_name = rt2800usb_get_firmware_name, 1124 .get_firmware_name = rt2800usb_get_firmware_name,
@@ -2810,10 +1128,10 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
2810 .uninitialize = rt2x00usb_uninitialize, 1128 .uninitialize = rt2x00usb_uninitialize,
2811 .clear_entry = rt2x00usb_clear_entry, 1129 .clear_entry = rt2x00usb_clear_entry,
2812 .set_device_state = rt2800usb_set_device_state, 1130 .set_device_state = rt2800usb_set_device_state,
2813 .rfkill_poll = rt2800usb_rfkill_poll, 1131 .rfkill_poll = rt2800_rfkill_poll,
2814 .link_stats = rt2800usb_link_stats, 1132 .link_stats = rt2800_link_stats,
2815 .reset_tuner = rt2800usb_reset_tuner, 1133 .reset_tuner = rt2800_reset_tuner,
2816 .link_tuner = rt2800usb_link_tuner, 1134 .link_tuner = rt2800_link_tuner,
2817 .write_tx_desc = rt2800usb_write_tx_desc, 1135 .write_tx_desc = rt2800usb_write_tx_desc,
2818 .write_tx_data = rt2x00usb_write_tx_data, 1136 .write_tx_data = rt2x00usb_write_tx_data,
2819 .write_beacon = rt2800usb_write_beacon, 1137 .write_beacon = rt2800usb_write_beacon,
@@ -2821,19 +1139,19 @@ static const struct rt2x00lib_ops rt2800usb_rt2x00_ops = {
2821 .kick_tx_queue = rt2800usb_kick_tx_queue, 1139 .kick_tx_queue = rt2800usb_kick_tx_queue,
2822 .kill_tx_queue = rt2x00usb_kill_tx_queue, 1140 .kill_tx_queue = rt2x00usb_kill_tx_queue,
2823 .fill_rxdone = rt2800usb_fill_rxdone, 1141 .fill_rxdone = rt2800usb_fill_rxdone,
2824 .config_shared_key = rt2800usb_config_shared_key, 1142 .config_shared_key = rt2800_config_shared_key,
2825 .config_pairwise_key = rt2800usb_config_pairwise_key, 1143 .config_pairwise_key = rt2800_config_pairwise_key,
2826 .config_filter = rt2800usb_config_filter, 1144 .config_filter = rt2800_config_filter,
2827 .config_intf = rt2800usb_config_intf, 1145 .config_intf = rt2800_config_intf,
2828 .config_erp = rt2800usb_config_erp, 1146 .config_erp = rt2800_config_erp,
2829 .config_ant = rt2800usb_config_ant, 1147 .config_ant = rt2800_config_ant,
2830 .config = rt2800usb_config, 1148 .config = rt2800_config,
2831}; 1149};
2832 1150
2833static const struct data_queue_desc rt2800usb_queue_rx = { 1151static const struct data_queue_desc rt2800usb_queue_rx = {
2834 .entry_num = RX_ENTRIES, 1152 .entry_num = RX_ENTRIES,
2835 .data_size = AGGREGATION_SIZE, 1153 .data_size = AGGREGATION_SIZE,
2836 .desc_size = RXD_DESC_SIZE + RXWI_DESC_SIZE, 1154 .desc_size = RXINFO_DESC_SIZE + RXWI_DESC_SIZE,
2837 .priv_size = sizeof(struct queue_entry_priv_usb), 1155 .priv_size = sizeof(struct queue_entry_priv_usb),
2838}; 1156};
2839 1157
@@ -2862,9 +1180,9 @@ static const struct rt2x00_ops rt2800usb_ops = {
2862 .tx = &rt2800usb_queue_tx, 1180 .tx = &rt2800usb_queue_tx,
2863 .bcn = &rt2800usb_queue_bcn, 1181 .bcn = &rt2800usb_queue_bcn,
2864 .lib = &rt2800usb_rt2x00_ops, 1182 .lib = &rt2800usb_rt2x00_ops,
2865 .hw = &rt2800usb_mac80211_ops, 1183 .hw = &rt2800_mac80211_ops,
2866#ifdef CONFIG_RT2X00_LIB_DEBUGFS 1184#ifdef CONFIG_RT2X00_LIB_DEBUGFS
2867 .debugfs = &rt2800usb_rt2x00debug, 1185 .debugfs = &rt2800_rt2x00debug,
2868#endif /* CONFIG_RT2X00_LIB_DEBUGFS */ 1186#endif /* CONFIG_RT2X00_LIB_DEBUGFS */
2869}; 1187};
2870 1188
diff --git a/drivers/net/wireless/rt2x00/rt2800usb.h b/drivers/net/wireless/rt2x00/rt2800usb.h
index 4d9991c9a51c..c9d7d40ee5fb 100644
--- a/drivers/net/wireless/rt2x00/rt2800usb.h
+++ b/drivers/net/wireless/rt2x00/rt2800usb.h
@@ -28,288 +28,10 @@
28#define RT2800USB_H 28#define RT2800USB_H
29 29
30/* 30/*
31 * RF chip defines.
32 *
33 * RF2820 2.4G 2T3R
34 * RF2850 2.4G/5G 2T3R
35 * RF2720 2.4G 1T2R
36 * RF2750 2.4G/5G 1T2R
37 * RF3020 2.4G 1T1R
38 * RF2020 2.4G B/G
39 * RF3021 2.4G 1T2R
40 * RF3022 2.4G 2T2R
41 * RF3052 2.4G 2T2R
42 */
43#define RF2820 0x0001
44#define RF2850 0x0002
45#define RF2720 0x0003
46#define RF2750 0x0004
47#define RF3020 0x0005
48#define RF2020 0x0006
49#define RF3021 0x0007
50#define RF3022 0x0008
51#define RF3052 0x0009
52
53/*
54 * RT2870 version
55 */
56#define RT2860C_VERSION 0x28600100
57#define RT2860D_VERSION 0x28600101
58#define RT2880E_VERSION 0x28720200
59#define RT2883_VERSION 0x28830300
60#define RT3070_VERSION 0x30700200
61
62/*
63 * Signal information.
64 * Defaul offset is required for RSSI <-> dBm conversion.
65 */
66#define DEFAULT_RSSI_OFFSET 120 /* FIXME */
67
68/*
69 * Register layout information.
70 */
71#define CSR_REG_BASE 0x1000
72#define CSR_REG_SIZE 0x0800
73#define EEPROM_BASE 0x0000
74#define EEPROM_SIZE 0x0110
75#define BBP_BASE 0x0000
76#define BBP_SIZE 0x0080
77#define RF_BASE 0x0004
78#define RF_SIZE 0x0010
79
80/*
81 * Number of TX queues.
82 */
83#define NUM_TX_QUEUES 4
84
85/*
86 * USB registers. 31 * USB registers.
87 */ 32 */
88 33
89/* 34/*
90 * HOST-MCU shared memory
91 */
92#define HOST_CMD_CSR 0x0404
93#define HOST_CMD_CSR_HOST_COMMAND FIELD32(0x000000ff)
94
95/*
96 * INT_SOURCE_CSR: Interrupt source register.
97 * Write one to clear corresponding bit.
98 * TX_FIFO_STATUS: FIFO Statistics is full, sw should read 0x171c
99 */
100#define INT_SOURCE_CSR 0x0200
101#define INT_SOURCE_CSR_RXDELAYINT FIELD32(0x00000001)
102#define INT_SOURCE_CSR_TXDELAYINT FIELD32(0x00000002)
103#define INT_SOURCE_CSR_RX_DONE FIELD32(0x00000004)
104#define INT_SOURCE_CSR_AC0_DMA_DONE FIELD32(0x00000008)
105#define INT_SOURCE_CSR_AC1_DMA_DONE FIELD32(0x00000010)
106#define INT_SOURCE_CSR_AC2_DMA_DONE FIELD32(0x00000020)
107#define INT_SOURCE_CSR_AC3_DMA_DONE FIELD32(0x00000040)
108#define INT_SOURCE_CSR_HCCA_DMA_DONE FIELD32(0x00000080)
109#define INT_SOURCE_CSR_MGMT_DMA_DONE FIELD32(0x00000100)
110#define INT_SOURCE_CSR_MCU_COMMAND FIELD32(0x00000200)
111#define INT_SOURCE_CSR_RXTX_COHERENT FIELD32(0x00000400)
112#define INT_SOURCE_CSR_TBTT FIELD32(0x00000800)
113#define INT_SOURCE_CSR_PRE_TBTT FIELD32(0x00001000)
114#define INT_SOURCE_CSR_TX_FIFO_STATUS FIELD32(0x00002000)
115#define INT_SOURCE_CSR_AUTO_WAKEUP FIELD32(0x00004000)
116#define INT_SOURCE_CSR_GPTIMER FIELD32(0x00008000)
117#define INT_SOURCE_CSR_RX_COHERENT FIELD32(0x00010000)
118#define INT_SOURCE_CSR_TX_COHERENT FIELD32(0x00020000)
119
120/*
121 * INT_MASK_CSR: Interrupt MASK register. 1: the interrupt is mask OFF.
122 */
123#define INT_MASK_CSR 0x0204
124#define INT_MASK_CSR_RXDELAYINT FIELD32(0x00000001)
125#define INT_MASK_CSR_TXDELAYINT FIELD32(0x00000002)
126#define INT_MASK_CSR_RX_DONE FIELD32(0x00000004)
127#define INT_MASK_CSR_AC0_DMA_DONE FIELD32(0x00000008)
128#define INT_MASK_CSR_AC1_DMA_DONE FIELD32(0x00000010)
129#define INT_MASK_CSR_AC2_DMA_DONE FIELD32(0x00000020)
130#define INT_MASK_CSR_AC3_DMA_DONE FIELD32(0x00000040)
131#define INT_MASK_CSR_HCCA_DMA_DONE FIELD32(0x00000080)
132#define INT_MASK_CSR_MGMT_DMA_DONE FIELD32(0x00000100)
133#define INT_MASK_CSR_MCU_COMMAND FIELD32(0x00000200)
134#define INT_MASK_CSR_RXTX_COHERENT FIELD32(0x00000400)
135#define INT_MASK_CSR_TBTT FIELD32(0x00000800)
136#define INT_MASK_CSR_PRE_TBTT FIELD32(0x00001000)
137#define INT_MASK_CSR_TX_FIFO_STATUS FIELD32(0x00002000)
138#define INT_MASK_CSR_AUTO_WAKEUP FIELD32(0x00004000)
139#define INT_MASK_CSR_GPTIMER FIELD32(0x00008000)
140#define INT_MASK_CSR_RX_COHERENT FIELD32(0x00010000)
141#define INT_MASK_CSR_TX_COHERENT FIELD32(0x00020000)
142
143/*
144 * WPDMA_GLO_CFG
145 */
146#define WPDMA_GLO_CFG 0x0208
147#define WPDMA_GLO_CFG_ENABLE_TX_DMA FIELD32(0x00000001)
148#define WPDMA_GLO_CFG_TX_DMA_BUSY FIELD32(0x00000002)
149#define WPDMA_GLO_CFG_ENABLE_RX_DMA FIELD32(0x00000004)
150#define WPDMA_GLO_CFG_RX_DMA_BUSY FIELD32(0x00000008)
151#define WPDMA_GLO_CFG_WP_DMA_BURST_SIZE FIELD32(0x00000030)
152#define WPDMA_GLO_CFG_TX_WRITEBACK_DONE FIELD32(0x00000040)
153#define WPDMA_GLO_CFG_BIG_ENDIAN FIELD32(0x00000080)
154#define WPDMA_GLO_CFG_RX_HDR_SCATTER FIELD32(0x0000ff00)
155#define WPDMA_GLO_CFG_HDR_SEG_LEN FIELD32(0xffff0000)
156
157/*
158 * WPDMA_RST_IDX
159 */
160#define WPDMA_RST_IDX 0x020c
161#define WPDMA_RST_IDX_DTX_IDX0 FIELD32(0x00000001)
162#define WPDMA_RST_IDX_DTX_IDX1 FIELD32(0x00000002)
163#define WPDMA_RST_IDX_DTX_IDX2 FIELD32(0x00000004)
164#define WPDMA_RST_IDX_DTX_IDX3 FIELD32(0x00000008)
165#define WPDMA_RST_IDX_DTX_IDX4 FIELD32(0x00000010)
166#define WPDMA_RST_IDX_DTX_IDX5 FIELD32(0x00000020)
167#define WPDMA_RST_IDX_DRX_IDX0 FIELD32(0x00010000)
168
169/*
170 * DELAY_INT_CFG
171 */
172#define DELAY_INT_CFG 0x0210
173#define DELAY_INT_CFG_RXMAX_PTIME FIELD32(0x000000ff)
174#define DELAY_INT_CFG_RXMAX_PINT FIELD32(0x00007f00)
175#define DELAY_INT_CFG_RXDLY_INT_EN FIELD32(0x00008000)
176#define DELAY_INT_CFG_TXMAX_PTIME FIELD32(0x00ff0000)
177#define DELAY_INT_CFG_TXMAX_PINT FIELD32(0x7f000000)
178#define DELAY_INT_CFG_TXDLY_INT_EN FIELD32(0x80000000)
179
180/*
181 * WMM_AIFSN_CFG: Aifsn for each EDCA AC
182 * AIFSN0: AC_BE
183 * AIFSN1: AC_BK
184 * AIFSN1: AC_VI
185 * AIFSN1: AC_VO
186 */
187#define WMM_AIFSN_CFG 0x0214
188#define WMM_AIFSN_CFG_AIFSN0 FIELD32(0x0000000f)
189#define WMM_AIFSN_CFG_AIFSN1 FIELD32(0x000000f0)
190#define WMM_AIFSN_CFG_AIFSN2 FIELD32(0x00000f00)
191#define WMM_AIFSN_CFG_AIFSN3 FIELD32(0x0000f000)
192
193/*
194 * WMM_CWMIN_CSR: CWmin for each EDCA AC
195 * CWMIN0: AC_BE
196 * CWMIN1: AC_BK
197 * CWMIN1: AC_VI
198 * CWMIN1: AC_VO
199 */
200#define WMM_CWMIN_CFG 0x0218
201#define WMM_CWMIN_CFG_CWMIN0 FIELD32(0x0000000f)
202#define WMM_CWMIN_CFG_CWMIN1 FIELD32(0x000000f0)
203#define WMM_CWMIN_CFG_CWMIN2 FIELD32(0x00000f00)
204#define WMM_CWMIN_CFG_CWMIN3 FIELD32(0x0000f000)
205
206/*
207 * WMM_CWMAX_CSR: CWmax for each EDCA AC
208 * CWMAX0: AC_BE
209 * CWMAX1: AC_BK
210 * CWMAX1: AC_VI
211 * CWMAX1: AC_VO
212 */
213#define WMM_CWMAX_CFG 0x021c
214#define WMM_CWMAX_CFG_CWMAX0 FIELD32(0x0000000f)
215#define WMM_CWMAX_CFG_CWMAX1 FIELD32(0x000000f0)
216#define WMM_CWMAX_CFG_CWMAX2 FIELD32(0x00000f00)
217#define WMM_CWMAX_CFG_CWMAX3 FIELD32(0x0000f000)
218
219/*
220 * AC_TXOP0: AC_BK/AC_BE TXOP register
221 * AC0TXOP: AC_BK in unit of 32us
222 * AC1TXOP: AC_BE in unit of 32us
223 */
224#define WMM_TXOP0_CFG 0x0220
225#define WMM_TXOP0_CFG_AC0TXOP FIELD32(0x0000ffff)
226#define WMM_TXOP0_CFG_AC1TXOP FIELD32(0xffff0000)
227
228/*
229 * AC_TXOP1: AC_VO/AC_VI TXOP register
230 * AC2TXOP: AC_VI in unit of 32us
231 * AC3TXOP: AC_VO in unit of 32us
232 */
233#define WMM_TXOP1_CFG 0x0224
234#define WMM_TXOP1_CFG_AC2TXOP FIELD32(0x0000ffff)
235#define WMM_TXOP1_CFG_AC3TXOP FIELD32(0xffff0000)
236
237/*
238 * GPIO_CTRL_CFG:
239 */
240#define GPIO_CTRL_CFG 0x0228
241#define GPIO_CTRL_CFG_BIT0 FIELD32(0x00000001)
242#define GPIO_CTRL_CFG_BIT1 FIELD32(0x00000002)
243#define GPIO_CTRL_CFG_BIT2 FIELD32(0x00000004)
244#define GPIO_CTRL_CFG_BIT3 FIELD32(0x00000008)
245#define GPIO_CTRL_CFG_BIT4 FIELD32(0x00000010)
246#define GPIO_CTRL_CFG_BIT5 FIELD32(0x00000020)
247#define GPIO_CTRL_CFG_BIT6 FIELD32(0x00000040)
248#define GPIO_CTRL_CFG_BIT7 FIELD32(0x00000080)
249#define GPIO_CTRL_CFG_BIT8 FIELD32(0x00000100)
250
251/*
252 * MCU_CMD_CFG
253 */
254#define MCU_CMD_CFG 0x022c
255
256/*
257 * AC_BK register offsets
258 */
259#define TX_BASE_PTR0 0x0230
260#define TX_MAX_CNT0 0x0234
261#define TX_CTX_IDX0 0x0238
262#define TX_DTX_IDX0 0x023c
263
264/*
265 * AC_BE register offsets
266 */
267#define TX_BASE_PTR1 0x0240
268#define TX_MAX_CNT1 0x0244
269#define TX_CTX_IDX1 0x0248
270#define TX_DTX_IDX1 0x024c
271
272/*
273 * AC_VI register offsets
274 */
275#define TX_BASE_PTR2 0x0250
276#define TX_MAX_CNT2 0x0254
277#define TX_CTX_IDX2 0x0258
278#define TX_DTX_IDX2 0x025c
279
280/*
281 * AC_VO register offsets
282 */
283#define TX_BASE_PTR3 0x0260
284#define TX_MAX_CNT3 0x0264
285#define TX_CTX_IDX3 0x0268
286#define TX_DTX_IDX3 0x026c
287
288/*
289 * HCCA register offsets
290 */
291#define TX_BASE_PTR4 0x0270
292#define TX_MAX_CNT4 0x0274
293#define TX_CTX_IDX4 0x0278
294#define TX_DTX_IDX4 0x027c
295
296/*
297 * MGMT register offsets
298 */
299#define TX_BASE_PTR5 0x0280
300#define TX_MAX_CNT5 0x0284
301#define TX_CTX_IDX5 0x0288
302#define TX_DTX_IDX5 0x028c
303
304/*
305 * RX register offsets
306 */
307#define RX_BASE_PTR 0x0290
308#define RX_MAX_CNT 0x0294
309#define RX_CRX_IDX 0x0298
310#define RX_DRX_IDX 0x029c
311
/*
 * USB_DMA_CFG
 * RX_BULK_AGG_TIMEOUT: Rx Bulk Aggregation TimeOut in unit of 33ns.
 * RX_BULK_AGG_LIMIT: Rx Bulk Aggregation Limit in unit of 256 bytes.
 */
#define USB_CYC_CFG_CLOCK_CYCLE	FIELD32(0x000000ff)

/*
346 * PBF_SYS_CTRL
347 * HOST_RAM_WRITE: enable Host program ram write selection
348 */
349#define PBF_SYS_CTRL 0x0400
350#define PBF_SYS_CTRL_READY FIELD32(0x00000080)
351#define PBF_SYS_CTRL_HOST_RAM_WRITE FIELD32(0x00010000)
352
353/*
354 * PBF registers
355 * Most are for debug. Driver doesn't touch PBF register.
356 */
357#define PBF_CFG 0x0408
358#define PBF_MAX_PCNT 0x040c
359#define PBF_CTRL 0x0410
360#define PBF_INT_STA 0x0414
361#define PBF_INT_ENA 0x0418
362
363/*
364 * BCN_OFFSET0:
365 */
366#define BCN_OFFSET0 0x042c
367#define BCN_OFFSET0_BCN0 FIELD32(0x000000ff)
368#define BCN_OFFSET0_BCN1 FIELD32(0x0000ff00)
369#define BCN_OFFSET0_BCN2 FIELD32(0x00ff0000)
370#define BCN_OFFSET0_BCN3 FIELD32(0xff000000)
371
372/*
373 * BCN_OFFSET1:
374 */
375#define BCN_OFFSET1 0x0430
376#define BCN_OFFSET1_BCN4 FIELD32(0x000000ff)
377#define BCN_OFFSET1_BCN5 FIELD32(0x0000ff00)
378#define BCN_OFFSET1_BCN6 FIELD32(0x00ff0000)
379#define BCN_OFFSET1_BCN7 FIELD32(0xff000000)
380
381/*
382 * PBF registers
383 * Most are for debug. Driver doesn't touch PBF register.
384 */
385#define TXRXQ_PCNT 0x0438
386#define PBF_DBG 0x043c
387
388/*
389 * RF registers
390 */
391#define RF_CSR_CFG 0x0500
392#define RF_CSR_CFG_DATA FIELD32(0x000000ff)
393#define RF_CSR_CFG_REGNUM FIELD32(0x00001f00)
394#define RF_CSR_CFG_WRITE FIELD32(0x00010000)
395#define RF_CSR_CFG_BUSY FIELD32(0x00020000)
396
397/*
398 * MAC Control/Status Registers(CSR).
399 * Some values are set in TU, whereas 1 TU == 1024 us.
400 */
401
402/*
403 * MAC_CSR0: ASIC revision number.
404 * ASIC_REV: 0
405 * ASIC_VER: 2870
406 */
407#define MAC_CSR0 0x1000
408#define MAC_CSR0_ASIC_REV FIELD32(0x0000ffff)
409#define MAC_CSR0_ASIC_VER FIELD32(0xffff0000)
410
411/*
412 * MAC_SYS_CTRL:
413 */
414#define MAC_SYS_CTRL 0x1004
415#define MAC_SYS_CTRL_RESET_CSR FIELD32(0x00000001)
416#define MAC_SYS_CTRL_RESET_BBP FIELD32(0x00000002)
417#define MAC_SYS_CTRL_ENABLE_TX FIELD32(0x00000004)
418#define MAC_SYS_CTRL_ENABLE_RX FIELD32(0x00000008)
419#define MAC_SYS_CTRL_CONTINUOUS_TX FIELD32(0x00000010)
420#define MAC_SYS_CTRL_LOOPBACK FIELD32(0x00000020)
421#define MAC_SYS_CTRL_WLAN_HALT FIELD32(0x00000040)
422#define MAC_SYS_CTRL_RX_TIMESTAMP FIELD32(0x00000080)
423
424/*
425 * MAC_ADDR_DW0: STA MAC register 0
426 */
427#define MAC_ADDR_DW0 0x1008
428#define MAC_ADDR_DW0_BYTE0 FIELD32(0x000000ff)
429#define MAC_ADDR_DW0_BYTE1 FIELD32(0x0000ff00)
430#define MAC_ADDR_DW0_BYTE2 FIELD32(0x00ff0000)
431#define MAC_ADDR_DW0_BYTE3 FIELD32(0xff000000)
432
433/*
434 * MAC_ADDR_DW1: STA MAC register 1
435 * UNICAST_TO_ME_MASK:
436 * Used to mask off bits from byte 5 of the MAC address
437 * to determine the UNICAST_TO_ME bit for RX frames.
438 * The full mask is complemented by BSS_ID_MASK:
439 * MASK = BSS_ID_MASK & UNICAST_TO_ME_MASK
440 */
441#define MAC_ADDR_DW1 0x100c
442#define MAC_ADDR_DW1_BYTE4 FIELD32(0x000000ff)
443#define MAC_ADDR_DW1_BYTE5 FIELD32(0x0000ff00)
444#define MAC_ADDR_DW1_UNICAST_TO_ME_MASK FIELD32(0x00ff0000)
445
446/*
447 * MAC_BSSID_DW0: BSSID register 0
448 */
449#define MAC_BSSID_DW0 0x1010
450#define MAC_BSSID_DW0_BYTE0 FIELD32(0x000000ff)
451#define MAC_BSSID_DW0_BYTE1 FIELD32(0x0000ff00)
452#define MAC_BSSID_DW0_BYTE2 FIELD32(0x00ff0000)
453#define MAC_BSSID_DW0_BYTE3 FIELD32(0xff000000)
454
455/*
456 * MAC_BSSID_DW1: BSSID register 1
457 * BSS_ID_MASK:
458 * 0: 1-BSSID mode (BSS index = 0)
459 * 1: 2-BSSID mode (BSS index: Byte5, bit 0)
460 * 2: 4-BSSID mode (BSS index: byte5, bit 0 - 1)
461 * 3: 8-BSSID mode (BSS index: byte5, bit 0 - 2)
462 * This mask is used to mask off bits 0, 1 and 2 of byte 5 of the
463 * BSSID. This will make sure that those bits will be ignored
464 * when determining the MY_BSS of RX frames.
465 */
466#define MAC_BSSID_DW1 0x1014
467#define MAC_BSSID_DW1_BYTE4 FIELD32(0x000000ff)
468#define MAC_BSSID_DW1_BYTE5 FIELD32(0x0000ff00)
469#define MAC_BSSID_DW1_BSS_ID_MASK FIELD32(0x00030000)
470#define MAC_BSSID_DW1_BSS_BCN_NUM FIELD32(0x001c0000)
471
472/*
473 * MAX_LEN_CFG: Maximum frame length register.
474 * MAX_MPDU: rt2860b max 16k bytes
475 * MAX_PSDU: Maximum PSDU length
476 * (power factor) 0:2^13, 1:2^14, 2:2^15, 3:2^16
477 */
478#define MAX_LEN_CFG 0x1018
479#define MAX_LEN_CFG_MAX_MPDU FIELD32(0x00000fff)
480#define MAX_LEN_CFG_MAX_PSDU FIELD32(0x00003000)
481#define MAX_LEN_CFG_MIN_PSDU FIELD32(0x0000c000)
482#define MAX_LEN_CFG_MIN_MPDU FIELD32(0x000f0000)
483
484/*
485 * BBP_CSR_CFG: BBP serial control register
486 * VALUE: Register value to program into BBP
487 * REG_NUM: Selected BBP register
488 * READ_CONTROL: 0 write BBP, 1 read BBP
489 * BUSY: ASIC is busy executing BBP commands
490 * BBP_PAR_DUR: 0 4 MAC clocks, 1 8 MAC clocks
491 * BBP_RW_MODE: 0 serial, 1 paralell
492 */
493#define BBP_CSR_CFG 0x101c
494#define BBP_CSR_CFG_VALUE FIELD32(0x000000ff)
495#define BBP_CSR_CFG_REGNUM FIELD32(0x0000ff00)
496#define BBP_CSR_CFG_READ_CONTROL FIELD32(0x00010000)
497#define BBP_CSR_CFG_BUSY FIELD32(0x00020000)
498#define BBP_CSR_CFG_BBP_PAR_DUR FIELD32(0x00040000)
499#define BBP_CSR_CFG_BBP_RW_MODE FIELD32(0x00080000)
500
501/*
502 * RF_CSR_CFG0: RF control register
503 * REGID_AND_VALUE: Register value to program into RF
504 * BITWIDTH: Selected RF register
505 * STANDBYMODE: 0 high when standby, 1 low when standby
506 * SEL: 0 RF_LE0 activate, 1 RF_LE1 activate
507 * BUSY: ASIC is busy executing RF commands
508 */
509#define RF_CSR_CFG0 0x1020
510#define RF_CSR_CFG0_REGID_AND_VALUE FIELD32(0x00ffffff)
511#define RF_CSR_CFG0_BITWIDTH FIELD32(0x1f000000)
512#define RF_CSR_CFG0_REG_VALUE_BW FIELD32(0x1fffffff)
513#define RF_CSR_CFG0_STANDBYMODE FIELD32(0x20000000)
514#define RF_CSR_CFG0_SEL FIELD32(0x40000000)
515#define RF_CSR_CFG0_BUSY FIELD32(0x80000000)
516
517/*
518 * RF_CSR_CFG1: RF control register
519 * REGID_AND_VALUE: Register value to program into RF
520 * RFGAP: Gap between BB_CONTROL_RF and RF_LE
521 * 0: 3 system clock cycle (37.5usec)
522 * 1: 5 system clock cycle (62.5usec)
523 */
524#define RF_CSR_CFG1 0x1024
525#define RF_CSR_CFG1_REGID_AND_VALUE FIELD32(0x00ffffff)
526#define RF_CSR_CFG1_RFGAP FIELD32(0x1f000000)
527
528/*
529 * RF_CSR_CFG2: RF control register
530 * VALUE: Register value to program into RF
531 * RFGAP: Gap between BB_CONTROL_RF and RF_LE
532 * 0: 3 system clock cycle (37.5usec)
533 * 1: 5 system clock cycle (62.5usec)
534 */
535#define RF_CSR_CFG2 0x1028
536#define RF_CSR_CFG2_VALUE FIELD32(0x00ffffff)
537
538/*
539 * LED_CFG: LED control
540 * color LED's:
541 * 0: off
542 * 1: blinking upon TX2
543 * 2: periodic slow blinking
544 * 3: always on
545 * LED polarity:
546 * 0: active low
547 * 1: active high
548 */
549#define LED_CFG 0x102c
550#define LED_CFG_ON_PERIOD FIELD32(0x000000ff)
551#define LED_CFG_OFF_PERIOD FIELD32(0x0000ff00)
552#define LED_CFG_SLOW_BLINK_PERIOD FIELD32(0x003f0000)
553#define LED_CFG_R_LED_MODE FIELD32(0x03000000)
554#define LED_CFG_G_LED_MODE FIELD32(0x0c000000)
555#define LED_CFG_Y_LED_MODE FIELD32(0x30000000)
556#define LED_CFG_LED_POLAR FIELD32(0x40000000)
557
558/*
559 * XIFS_TIME_CFG: MAC timing
560 * CCKM_SIFS_TIME: unit 1us. Applied after CCK RX/TX
561 * OFDM_SIFS_TIME: unit 1us. Applied after OFDM RX/TX
562 * OFDM_XIFS_TIME: unit 1us. Applied after OFDM RX
563 * when MAC doesn't reference BBP signal BBRXEND
564 * EIFS: unit 1us
565 * BB_RXEND_ENABLE: reference RXEND signal to begin XIFS defer
566 *
567 */
568#define XIFS_TIME_CFG 0x1100
569#define XIFS_TIME_CFG_CCKM_SIFS_TIME FIELD32(0x000000ff)
570#define XIFS_TIME_CFG_OFDM_SIFS_TIME FIELD32(0x0000ff00)
571#define XIFS_TIME_CFG_OFDM_XIFS_TIME FIELD32(0x000f0000)
572#define XIFS_TIME_CFG_EIFS FIELD32(0x1ff00000)
573#define XIFS_TIME_CFG_BB_RXEND_ENABLE FIELD32(0x20000000)
574
575/*
576 * BKOFF_SLOT_CFG:
577 */
578#define BKOFF_SLOT_CFG 0x1104
579#define BKOFF_SLOT_CFG_SLOT_TIME FIELD32(0x000000ff)
580#define BKOFF_SLOT_CFG_CC_DELAY_TIME FIELD32(0x0000ff00)
581
582/*
583 * NAV_TIME_CFG:
584 */
585#define NAV_TIME_CFG 0x1108
586#define NAV_TIME_CFG_SIFS FIELD32(0x000000ff)
587#define NAV_TIME_CFG_SLOT_TIME FIELD32(0x0000ff00)
588#define NAV_TIME_CFG_EIFS FIELD32(0x01ff0000)
589#define NAV_TIME_ZERO_SIFS FIELD32(0x02000000)
590
591/*
592 * CH_TIME_CFG: count as channel busy
593 */
594#define CH_TIME_CFG 0x110c
595
596/*
597 * PBF_LIFE_TIMER: TX/RX MPDU timestamp timer (free run) Unit: 1us
598 */
599#define PBF_LIFE_TIMER 0x1110
600
601/*
602 * BCN_TIME_CFG:
603 * BEACON_INTERVAL: in unit of 1/16 TU
604 * TSF_TICKING: Enable TSF auto counting
605 * TSF_SYNC: Enable TSF sync, 00: disable, 01: infra mode, 10: ad-hoc mode
606 * BEACON_GEN: Enable beacon generator
607 */
608#define BCN_TIME_CFG 0x1114
609#define BCN_TIME_CFG_BEACON_INTERVAL FIELD32(0x0000ffff)
610#define BCN_TIME_CFG_TSF_TICKING FIELD32(0x00010000)
611#define BCN_TIME_CFG_TSF_SYNC FIELD32(0x00060000)
612#define BCN_TIME_CFG_TBTT_ENABLE FIELD32(0x00080000)
613#define BCN_TIME_CFG_BEACON_GEN FIELD32(0x00100000)
614#define BCN_TIME_CFG_TX_TIME_COMPENSATE FIELD32(0xf0000000)
615
616/*
617 * TBTT_SYNC_CFG:
618 */
619#define TBTT_SYNC_CFG 0x1118
620
621/*
622 * TSF_TIMER_DW0: Local lsb TSF timer, read-only
623 */
624#define TSF_TIMER_DW0 0x111c
625#define TSF_TIMER_DW0_LOW_WORD FIELD32(0xffffffff)
626
627/*
628 * TSF_TIMER_DW1: Local msb TSF timer, read-only
629 */
630#define TSF_TIMER_DW1 0x1120
631#define TSF_TIMER_DW1_HIGH_WORD FIELD32(0xffffffff)
632
633/*
634 * TBTT_TIMER: TImer remains till next TBTT, read-only
635 */
636#define TBTT_TIMER 0x1124
637
638/*
639 * INT_TIMER_CFG:
640 */
641#define INT_TIMER_CFG 0x1128
642
643/*
644 * INT_TIMER_EN: GP-timer and pre-tbtt Int enable
645 */
646#define INT_TIMER_EN 0x112c
647
648/*
649 * CH_IDLE_STA: channel idle time
650 */
651#define CH_IDLE_STA 0x1130
652
653/*
654 * CH_BUSY_STA: channel busy time
655 */
656#define CH_BUSY_STA 0x1134
657
658/*
659 * MAC_STATUS_CFG:
660 * BBP_RF_BUSY: When set to 0, BBP and RF are stable.
661 * if 1 or higher one of the 2 registers is busy.
662 */
663#define MAC_STATUS_CFG 0x1200
664#define MAC_STATUS_CFG_BBP_RF_BUSY FIELD32(0x00000003)
665
666/*
667 * PWR_PIN_CFG:
668 */
669#define PWR_PIN_CFG 0x1204
670
671/*
672 * AUTOWAKEUP_CFG: Manual power control / status register
673 * TBCN_BEFORE_WAKE: ForceWake has high privilege than PutToSleep when both set
674 * AUTOWAKE: 0:sleep, 1:awake
675 */
676#define AUTOWAKEUP_CFG 0x1208
677#define AUTOWAKEUP_CFG_AUTO_LEAD_TIME FIELD32(0x000000ff)
678#define AUTOWAKEUP_CFG_TBCN_BEFORE_WAKE FIELD32(0x00007f00)
679#define AUTOWAKEUP_CFG_AUTOWAKE FIELD32(0x00008000)
680
681/*
682 * EDCA_AC0_CFG:
683 */
684#define EDCA_AC0_CFG 0x1300
685#define EDCA_AC0_CFG_TX_OP FIELD32(0x000000ff)
686#define EDCA_AC0_CFG_AIFSN FIELD32(0x00000f00)
687#define EDCA_AC0_CFG_CWMIN FIELD32(0x0000f000)
688#define EDCA_AC0_CFG_CWMAX FIELD32(0x000f0000)
689
690/*
691 * EDCA_AC1_CFG:
692 */
693#define EDCA_AC1_CFG 0x1304
694#define EDCA_AC1_CFG_TX_OP FIELD32(0x000000ff)
695#define EDCA_AC1_CFG_AIFSN FIELD32(0x00000f00)
696#define EDCA_AC1_CFG_CWMIN FIELD32(0x0000f000)
697#define EDCA_AC1_CFG_CWMAX FIELD32(0x000f0000)
698
699/*
700 * EDCA_AC2_CFG:
701 */
702#define EDCA_AC2_CFG 0x1308
703#define EDCA_AC2_CFG_TX_OP FIELD32(0x000000ff)
704#define EDCA_AC2_CFG_AIFSN FIELD32(0x00000f00)
705#define EDCA_AC2_CFG_CWMIN FIELD32(0x0000f000)
706#define EDCA_AC2_CFG_CWMAX FIELD32(0x000f0000)
707
708/*
709 * EDCA_AC3_CFG:
710 */
711#define EDCA_AC3_CFG 0x130c
712#define EDCA_AC3_CFG_TX_OP FIELD32(0x000000ff)
713#define EDCA_AC3_CFG_AIFSN FIELD32(0x00000f00)
714#define EDCA_AC3_CFG_CWMIN FIELD32(0x0000f000)
715#define EDCA_AC3_CFG_CWMAX FIELD32(0x000f0000)
716
717/*
718 * EDCA_TID_AC_MAP:
719 */
720#define EDCA_TID_AC_MAP 0x1310
721
722/*
723 * TX_PWR_CFG_0:
724 */
725#define TX_PWR_CFG_0 0x1314
726#define TX_PWR_CFG_0_1MBS FIELD32(0x0000000f)
727#define TX_PWR_CFG_0_2MBS FIELD32(0x000000f0)
728#define TX_PWR_CFG_0_55MBS FIELD32(0x00000f00)
729#define TX_PWR_CFG_0_11MBS FIELD32(0x0000f000)
730#define TX_PWR_CFG_0_6MBS FIELD32(0x000f0000)
731#define TX_PWR_CFG_0_9MBS FIELD32(0x00f00000)
732#define TX_PWR_CFG_0_12MBS FIELD32(0x0f000000)
733#define TX_PWR_CFG_0_18MBS FIELD32(0xf0000000)
734
735/*
736 * TX_PWR_CFG_1:
737 */
738#define TX_PWR_CFG_1 0x1318
739#define TX_PWR_CFG_1_24MBS FIELD32(0x0000000f)
740#define TX_PWR_CFG_1_36MBS FIELD32(0x000000f0)
741#define TX_PWR_CFG_1_48MBS FIELD32(0x00000f00)
742#define TX_PWR_CFG_1_54MBS FIELD32(0x0000f000)
743#define TX_PWR_CFG_1_MCS0 FIELD32(0x000f0000)
744#define TX_PWR_CFG_1_MCS1 FIELD32(0x00f00000)
745#define TX_PWR_CFG_1_MCS2 FIELD32(0x0f000000)
746#define TX_PWR_CFG_1_MCS3 FIELD32(0xf0000000)
747
748/*
749 * TX_PWR_CFG_2:
750 */
751#define TX_PWR_CFG_2 0x131c
752#define TX_PWR_CFG_2_MCS4 FIELD32(0x0000000f)
753#define TX_PWR_CFG_2_MCS5 FIELD32(0x000000f0)
754#define TX_PWR_CFG_2_MCS6 FIELD32(0x00000f00)
755#define TX_PWR_CFG_2_MCS7 FIELD32(0x0000f000)
756#define TX_PWR_CFG_2_MCS8 FIELD32(0x000f0000)
757#define TX_PWR_CFG_2_MCS9 FIELD32(0x00f00000)
758#define TX_PWR_CFG_2_MCS10 FIELD32(0x0f000000)
759#define TX_PWR_CFG_2_MCS11 FIELD32(0xf0000000)
760
761/*
762 * TX_PWR_CFG_3:
763 */
764#define TX_PWR_CFG_3 0x1320
765#define TX_PWR_CFG_3_MCS12 FIELD32(0x0000000f)
766#define TX_PWR_CFG_3_MCS13 FIELD32(0x000000f0)
767#define TX_PWR_CFG_3_MCS14 FIELD32(0x00000f00)
768#define TX_PWR_CFG_3_MCS15 FIELD32(0x0000f000)
769#define TX_PWR_CFG_3_UKNOWN1 FIELD32(0x000f0000)
770#define TX_PWR_CFG_3_UKNOWN2 FIELD32(0x00f00000)
771#define TX_PWR_CFG_3_UKNOWN3 FIELD32(0x0f000000)
772#define TX_PWR_CFG_3_UKNOWN4 FIELD32(0xf0000000)
773
774/*
775 * TX_PWR_CFG_4:
776 */
777#define TX_PWR_CFG_4 0x1324
778#define TX_PWR_CFG_4_UKNOWN5 FIELD32(0x0000000f)
779#define TX_PWR_CFG_4_UKNOWN6 FIELD32(0x000000f0)
780#define TX_PWR_CFG_4_UKNOWN7 FIELD32(0x00000f00)
781#define TX_PWR_CFG_4_UKNOWN8 FIELD32(0x0000f000)
782
783/*
784 * TX_PIN_CFG:
785 */
786#define TX_PIN_CFG 0x1328
787#define TX_PIN_CFG_PA_PE_A0_EN FIELD32(0x00000001)
788#define TX_PIN_CFG_PA_PE_G0_EN FIELD32(0x00000002)
789#define TX_PIN_CFG_PA_PE_A1_EN FIELD32(0x00000004)
790#define TX_PIN_CFG_PA_PE_G1_EN FIELD32(0x00000008)
791#define TX_PIN_CFG_PA_PE_A0_POL FIELD32(0x00000010)
792#define TX_PIN_CFG_PA_PE_G0_POL FIELD32(0x00000020)
793#define TX_PIN_CFG_PA_PE_A1_POL FIELD32(0x00000040)
794#define TX_PIN_CFG_PA_PE_G1_POL FIELD32(0x00000080)
795#define TX_PIN_CFG_LNA_PE_A0_EN FIELD32(0x00000100)
796#define TX_PIN_CFG_LNA_PE_G0_EN FIELD32(0x00000200)
797#define TX_PIN_CFG_LNA_PE_A1_EN FIELD32(0x00000400)
798#define TX_PIN_CFG_LNA_PE_G1_EN FIELD32(0x00000800)
799#define TX_PIN_CFG_LNA_PE_A0_POL FIELD32(0x00001000)
800#define TX_PIN_CFG_LNA_PE_G0_POL FIELD32(0x00002000)
801#define TX_PIN_CFG_LNA_PE_A1_POL FIELD32(0x00004000)
802#define TX_PIN_CFG_LNA_PE_G1_POL FIELD32(0x00008000)
803#define TX_PIN_CFG_RFTR_EN FIELD32(0x00010000)
804#define TX_PIN_CFG_RFTR_POL FIELD32(0x00020000)
805#define TX_PIN_CFG_TRSW_EN FIELD32(0x00040000)
806#define TX_PIN_CFG_TRSW_POL FIELD32(0x00080000)
807
808/*
809 * TX_BAND_CFG: 0x1 use upper 20MHz, 0x0 use lower 20MHz
810 */
811#define TX_BAND_CFG 0x132c
812#define TX_BAND_CFG_HT40_PLUS FIELD32(0x00000001)
813#define TX_BAND_CFG_A FIELD32(0x00000002)
814#define TX_BAND_CFG_BG FIELD32(0x00000004)
815
816/*
817 * TX_SW_CFG0:
818 */
819#define TX_SW_CFG0 0x1330
820
821/*
822 * TX_SW_CFG1:
823 */
824#define TX_SW_CFG1 0x1334
825
826/*
827 * TX_SW_CFG2:
828 */
829#define TX_SW_CFG2 0x1338
830
831/*
832 * TXOP_THRES_CFG:
833 */
834#define TXOP_THRES_CFG 0x133c
835
836/*
837 * TXOP_CTRL_CFG:
838 */
839#define TXOP_CTRL_CFG 0x1340
840
841/*
842 * TX_RTS_CFG:
843 * RTS_THRES: unit:byte
844 * RTS_FBK_EN: enable rts rate fallback
845 */
846#define TX_RTS_CFG 0x1344
847#define TX_RTS_CFG_AUTO_RTS_RETRY_LIMIT FIELD32(0x000000ff)
848#define TX_RTS_CFG_RTS_THRES FIELD32(0x00ffff00)
849#define TX_RTS_CFG_RTS_FBK_EN FIELD32(0x01000000)
850
851/*
852 * TX_TIMEOUT_CFG:
853 * MPDU_LIFETIME: expiration time = 2^(9+MPDU LIFE TIME) us
854 * RX_ACK_TIMEOUT: unit:slot. Used for TX procedure
855 * TX_OP_TIMEOUT: TXOP timeout value for TXOP truncation.
856 * it is recommended that:
857 * (SLOT_TIME) > (TX_OP_TIMEOUT) > (RX_ACK_TIMEOUT)
858 */
859#define TX_TIMEOUT_CFG 0x1348
860#define TX_TIMEOUT_CFG_MPDU_LIFETIME FIELD32(0x000000f0)
861#define TX_TIMEOUT_CFG_RX_ACK_TIMEOUT FIELD32(0x0000ff00)
862#define TX_TIMEOUT_CFG_TX_OP_TIMEOUT FIELD32(0x00ff0000)
863
864/*
865 * TX_RTY_CFG:
866 * SHORT_RTY_LIMIT: short retry limit
867 * LONG_RTY_LIMIT: long retry limit
868 * LONG_RTY_THRE: Long retry threshoold
869 * NON_AGG_RTY_MODE: Non-Aggregate MPDU retry mode
870 * 0:expired by retry limit, 1: expired by mpdu life timer
871 * AGG_RTY_MODE: Aggregate MPDU retry mode
872 * 0:expired by retry limit, 1: expired by mpdu life timer
873 * TX_AUTO_FB_ENABLE: Tx retry PHY rate auto fallback enable
874 */
875#define TX_RTY_CFG 0x134c
876#define TX_RTY_CFG_SHORT_RTY_LIMIT FIELD32(0x000000ff)
877#define TX_RTY_CFG_LONG_RTY_LIMIT FIELD32(0x0000ff00)
878#define TX_RTY_CFG_LONG_RTY_THRE FIELD32(0x0fff0000)
879#define TX_RTY_CFG_NON_AGG_RTY_MODE FIELD32(0x10000000)
880#define TX_RTY_CFG_AGG_RTY_MODE FIELD32(0x20000000)
881#define TX_RTY_CFG_TX_AUTO_FB_ENABLE FIELD32(0x40000000)
882
883/*
884 * TX_LINK_CFG:
885 * REMOTE_MFB_LIFETIME: remote MFB life time. unit: 32us
886 * MFB_ENABLE: TX apply remote MFB 1:enable
887 * REMOTE_UMFS_ENABLE: remote unsolicit MFB enable
888 * 0: not apply remote remote unsolicit (MFS=7)
889 * TX_MRQ_EN: MCS request TX enable
890 * TX_RDG_EN: RDG TX enable
891 * TX_CF_ACK_EN: Piggyback CF-ACK enable
892 * REMOTE_MFB: remote MCS feedback
893 * REMOTE_MFS: remote MCS feedback sequence number
894 */
895#define TX_LINK_CFG 0x1350
896#define TX_LINK_CFG_REMOTE_MFB_LIFETIME FIELD32(0x000000ff)
897#define TX_LINK_CFG_MFB_ENABLE FIELD32(0x00000100)
898#define TX_LINK_CFG_REMOTE_UMFS_ENABLE FIELD32(0x00000200)
899#define TX_LINK_CFG_TX_MRQ_EN FIELD32(0x00000400)
900#define TX_LINK_CFG_TX_RDG_EN FIELD32(0x00000800)
901#define TX_LINK_CFG_TX_CF_ACK_EN FIELD32(0x00001000)
902#define TX_LINK_CFG_REMOTE_MFB FIELD32(0x00ff0000)
903#define TX_LINK_CFG_REMOTE_MFS FIELD32(0xff000000)
904
905/*
906 * HT_FBK_CFG0:
907 */
908#define HT_FBK_CFG0 0x1354
909#define HT_FBK_CFG0_HTMCS0FBK FIELD32(0x0000000f)
910#define HT_FBK_CFG0_HTMCS1FBK FIELD32(0x000000f0)
911#define HT_FBK_CFG0_HTMCS2FBK FIELD32(0x00000f00)
912#define HT_FBK_CFG0_HTMCS3FBK FIELD32(0x0000f000)
913#define HT_FBK_CFG0_HTMCS4FBK FIELD32(0x000f0000)
914#define HT_FBK_CFG0_HTMCS5FBK FIELD32(0x00f00000)
915#define HT_FBK_CFG0_HTMCS6FBK FIELD32(0x0f000000)
916#define HT_FBK_CFG0_HTMCS7FBK FIELD32(0xf0000000)
917
918/*
919 * HT_FBK_CFG1:
920 */
921#define HT_FBK_CFG1 0x1358
922#define HT_FBK_CFG1_HTMCS8FBK FIELD32(0x0000000f)
923#define HT_FBK_CFG1_HTMCS9FBK FIELD32(0x000000f0)
924#define HT_FBK_CFG1_HTMCS10FBK FIELD32(0x00000f00)
925#define HT_FBK_CFG1_HTMCS11FBK FIELD32(0x0000f000)
926#define HT_FBK_CFG1_HTMCS12FBK FIELD32(0x000f0000)
927#define HT_FBK_CFG1_HTMCS13FBK FIELD32(0x00f00000)
928#define HT_FBK_CFG1_HTMCS14FBK FIELD32(0x0f000000)
929#define HT_FBK_CFG1_HTMCS15FBK FIELD32(0xf0000000)
930
931/*
932 * LG_FBK_CFG0:
933 */
934#define LG_FBK_CFG0 0x135c
935#define LG_FBK_CFG0_OFDMMCS0FBK FIELD32(0x0000000f)
936#define LG_FBK_CFG0_OFDMMCS1FBK FIELD32(0x000000f0)
937#define LG_FBK_CFG0_OFDMMCS2FBK FIELD32(0x00000f00)
938#define LG_FBK_CFG0_OFDMMCS3FBK FIELD32(0x0000f000)
939#define LG_FBK_CFG0_OFDMMCS4FBK FIELD32(0x000f0000)
940#define LG_FBK_CFG0_OFDMMCS5FBK FIELD32(0x00f00000)
941#define LG_FBK_CFG0_OFDMMCS6FBK FIELD32(0x0f000000)
942#define LG_FBK_CFG0_OFDMMCS7FBK FIELD32(0xf0000000)
943
944/*
945 * LG_FBK_CFG1:
946 */
947#define LG_FBK_CFG1 0x1360
948#define LG_FBK_CFG0_CCKMCS0FBK FIELD32(0x0000000f)
949#define LG_FBK_CFG0_CCKMCS1FBK FIELD32(0x000000f0)
950#define LG_FBK_CFG0_CCKMCS2FBK FIELD32(0x00000f00)
951#define LG_FBK_CFG0_CCKMCS3FBK FIELD32(0x0000f000)
952
953/*
954 * CCK_PROT_CFG: CCK Protection
955 * PROTECT_RATE: Protection control frame rate for CCK TX(RTS/CTS/CFEnd)
956 * PROTECT_CTRL: Protection control frame type for CCK TX
957 * 0:none, 1:RTS/CTS, 2:CTS-to-self
958 * PROTECT_NAV: TXOP protection type for CCK TX
959 * 0:none, 1:ShortNAVprotect, 2:LongNAVProtect
960 * TX_OP_ALLOW_CCK: CCK TXOP allowance, 0:disallow
961 * TX_OP_ALLOW_OFDM: CCK TXOP allowance, 0:disallow
962 * TX_OP_ALLOW_MM20: CCK TXOP allowance, 0:disallow
963 * TX_OP_ALLOW_MM40: CCK TXOP allowance, 0:disallow
964 * TX_OP_ALLOW_GF20: CCK TXOP allowance, 0:disallow
965 * TX_OP_ALLOW_GF40: CCK TXOP allowance, 0:disallow
966 * RTS_TH_EN: RTS threshold enable on CCK TX
967 */
968#define CCK_PROT_CFG 0x1364
969#define CCK_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
970#define CCK_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
971#define CCK_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
972#define CCK_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
973#define CCK_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
974#define CCK_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
975#define CCK_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
976#define CCK_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
977#define CCK_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
978#define CCK_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
979
980/*
981 * OFDM_PROT_CFG: OFDM Protection
982 */
983#define OFDM_PROT_CFG 0x1368
984#define OFDM_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
985#define OFDM_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
986#define OFDM_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
987#define OFDM_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
988#define OFDM_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
989#define OFDM_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
990#define OFDM_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
991#define OFDM_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
992#define OFDM_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
993#define OFDM_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
994
995/*
996 * MM20_PROT_CFG: MM20 Protection
997 */
998#define MM20_PROT_CFG 0x136c
999#define MM20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1000#define MM20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1001#define MM20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
1002#define MM20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1003#define MM20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1004#define MM20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
1005#define MM20_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
1006#define MM20_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
1007#define MM20_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
1008#define MM20_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
1009
1010/*
1011 * MM40_PROT_CFG: MM40 Protection
1012 */
1013#define MM40_PROT_CFG 0x1370
1014#define MM40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1015#define MM40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1016#define MM40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
1017#define MM40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1018#define MM40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1019#define MM40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
1020#define MM40_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
1021#define MM40_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
1022#define MM40_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
1023#define MM40_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
1024
1025/*
1026 * GF20_PROT_CFG: GF20 Protection
1027 */
1028#define GF20_PROT_CFG 0x1374
1029#define GF20_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1030#define GF20_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1031#define GF20_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
1032#define GF20_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1033#define GF20_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1034#define GF20_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
1035#define GF20_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
1036#define GF20_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
1037#define GF20_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
1038#define GF20_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
1039
1040/*
1041 * GF40_PROT_CFG: GF40 Protection
1042 */
1043#define GF40_PROT_CFG 0x1378
1044#define GF40_PROT_CFG_PROTECT_RATE FIELD32(0x0000ffff)
1045#define GF40_PROT_CFG_PROTECT_CTRL FIELD32(0x00030000)
1046#define GF40_PROT_CFG_PROTECT_NAV FIELD32(0x000c0000)
1047#define GF40_PROT_CFG_TX_OP_ALLOW_CCK FIELD32(0x00100000)
1048#define GF40_PROT_CFG_TX_OP_ALLOW_OFDM FIELD32(0x00200000)
1049#define GF40_PROT_CFG_TX_OP_ALLOW_MM20 FIELD32(0x00400000)
1050#define GF40_PROT_CFG_TX_OP_ALLOW_MM40 FIELD32(0x00800000)
1051#define GF40_PROT_CFG_TX_OP_ALLOW_GF20 FIELD32(0x01000000)
1052#define GF40_PROT_CFG_TX_OP_ALLOW_GF40 FIELD32(0x02000000)
1053#define GF40_PROT_CFG_RTS_TH_EN FIELD32(0x04000000)
1054
1055/*
1056 * EXP_CTS_TIME:
1057 */
1058#define EXP_CTS_TIME 0x137c
1059
1060/*
1061 * EXP_ACK_TIME:
1062 */
1063#define EXP_ACK_TIME 0x1380
1064
1065/*
1066 * RX_FILTER_CFG: RX configuration register.
1067 */
1068#define RX_FILTER_CFG 0x1400
1069#define RX_FILTER_CFG_DROP_CRC_ERROR FIELD32(0x00000001)
1070#define RX_FILTER_CFG_DROP_PHY_ERROR FIELD32(0x00000002)
1071#define RX_FILTER_CFG_DROP_NOT_TO_ME FIELD32(0x00000004)
1072#define RX_FILTER_CFG_DROP_NOT_MY_BSSD FIELD32(0x00000008)
1073#define RX_FILTER_CFG_DROP_VER_ERROR FIELD32(0x00000010)
1074#define RX_FILTER_CFG_DROP_MULTICAST FIELD32(0x00000020)
1075#define RX_FILTER_CFG_DROP_BROADCAST FIELD32(0x00000040)
1076#define RX_FILTER_CFG_DROP_DUPLICATE FIELD32(0x00000080)
1077#define RX_FILTER_CFG_DROP_CF_END_ACK FIELD32(0x00000100)
1078#define RX_FILTER_CFG_DROP_CF_END FIELD32(0x00000200)
1079#define RX_FILTER_CFG_DROP_ACK FIELD32(0x00000400)
1080#define RX_FILTER_CFG_DROP_CTS FIELD32(0x00000800)
1081#define RX_FILTER_CFG_DROP_RTS FIELD32(0x00001000)
1082#define RX_FILTER_CFG_DROP_PSPOLL FIELD32(0x00002000)
1083#define RX_FILTER_CFG_DROP_BA FIELD32(0x00004000)
1084#define RX_FILTER_CFG_DROP_BAR FIELD32(0x00008000)
1085#define RX_FILTER_CFG_DROP_CNTL FIELD32(0x00010000)
1086
1087/*
1088 * AUTO_RSP_CFG:
1089 * AUTORESPONDER: 0: disable, 1: enable
1090 * BAC_ACK_POLICY: 0:long, 1:short preamble
1091 * CTS_40_MMODE: Response CTS 40MHz duplicate mode
1092 * CTS_40_MREF: Response CTS 40MHz duplicate mode
1093 * AR_PREAMBLE: Auto responder preamble 0:long, 1:short preamble
1094 * DUAL_CTS_EN: Power bit value in control frame
1095 * ACK_CTS_PSM_BIT:Power bit value in control frame
1096 */
1097#define AUTO_RSP_CFG 0x1404
1098#define AUTO_RSP_CFG_AUTORESPONDER FIELD32(0x00000001)
1099#define AUTO_RSP_CFG_BAC_ACK_POLICY FIELD32(0x00000002)
1100#define AUTO_RSP_CFG_CTS_40_MMODE FIELD32(0x00000004)
1101#define AUTO_RSP_CFG_CTS_40_MREF FIELD32(0x00000008)
1102#define AUTO_RSP_CFG_AR_PREAMBLE FIELD32(0x00000010)
1103#define AUTO_RSP_CFG_DUAL_CTS_EN FIELD32(0x00000040)
1104#define AUTO_RSP_CFG_ACK_CTS_PSM_BIT FIELD32(0x00000080)
1105
1106/*
1107 * LEGACY_BASIC_RATE:
1108 */
1109#define LEGACY_BASIC_RATE 0x1408
1110
1111/*
1112 * HT_BASIC_RATE:
1113 */
1114#define HT_BASIC_RATE 0x140c
1115
1116/*
1117 * HT_CTRL_CFG:
1118 */
1119#define HT_CTRL_CFG 0x1410
1120
1121/*
1122 * SIFS_COST_CFG:
1123 */
1124#define SIFS_COST_CFG 0x1414
1125
1126/*
1127 * RX_PARSER_CFG:
1128 * Set NAV for all received frames
1129 */
1130#define RX_PARSER_CFG 0x1418
1131
1132/*
1133 * TX_SEC_CNT0:
1134 */
1135#define TX_SEC_CNT0 0x1500
1136
1137/*
1138 * RX_SEC_CNT0:
1139 */
1140#define RX_SEC_CNT0 0x1504
1141
1142/*
1143 * CCMP_FC_MUTE:
1144 */
1145#define CCMP_FC_MUTE 0x1508
1146
1147/*
1148 * TXOP_HLDR_ADDR0:
1149 */
1150#define TXOP_HLDR_ADDR0 0x1600
1151
1152/*
1153 * TXOP_HLDR_ADDR1:
1154 */
1155#define TXOP_HLDR_ADDR1 0x1604
1156
1157/*
1158 * TXOP_HLDR_ET:
1159 */
1160#define TXOP_HLDR_ET 0x1608
1161
1162/*
1163 * QOS_CFPOLL_RA_DW0:
1164 */
1165#define QOS_CFPOLL_RA_DW0 0x160c
1166
1167/*
1168 * QOS_CFPOLL_RA_DW1:
1169 */
1170#define QOS_CFPOLL_RA_DW1 0x1610
1171
1172/*
1173 * QOS_CFPOLL_QC:
1174 */
1175#define QOS_CFPOLL_QC 0x1614
1176
1177/*
1178 * RX_STA_CNT0: RX PLCP error count & RX CRC error count
1179 */
1180#define RX_STA_CNT0 0x1700
1181#define RX_STA_CNT0_CRC_ERR FIELD32(0x0000ffff)
1182#define RX_STA_CNT0_PHY_ERR FIELD32(0xffff0000)
1183
1184/*
1185 * RX_STA_CNT1: RX False CCA count & RX LONG frame count
1186 */
1187#define RX_STA_CNT1 0x1704
1188#define RX_STA_CNT1_FALSE_CCA FIELD32(0x0000ffff)
1189#define RX_STA_CNT1_PLCP_ERR FIELD32(0xffff0000)
1190
1191/*
1192 * RX_STA_CNT2:
1193 */
1194#define RX_STA_CNT2 0x1708
1195#define RX_STA_CNT2_RX_DUPLI_COUNT FIELD32(0x0000ffff)
1196#define RX_STA_CNT2_RX_FIFO_OVERFLOW FIELD32(0xffff0000)
1197
1198/*
1199 * TX_STA_CNT0: TX Beacon count
1200 */
1201#define TX_STA_CNT0 0x170c
1202#define TX_STA_CNT0_TX_FAIL_COUNT FIELD32(0x0000ffff)
1203#define TX_STA_CNT0_TX_BEACON_COUNT FIELD32(0xffff0000)
1204
1205/*
1206 * TX_STA_CNT1: TX tx count
1207 */
1208#define TX_STA_CNT1 0x1710
1209#define TX_STA_CNT1_TX_SUCCESS FIELD32(0x0000ffff)
1210#define TX_STA_CNT1_TX_RETRANSMIT FIELD32(0xffff0000)
1211
1212/*
1213 * TX_STA_CNT2: TX tx count
1214 */
1215#define TX_STA_CNT2 0x1714
1216#define TX_STA_CNT2_TX_ZERO_LEN_COUNT FIELD32(0x0000ffff)
1217#define TX_STA_CNT2_TX_UNDER_FLOW_COUNT FIELD32(0xffff0000)
1218
1219/*
1220 * TX_STA_FIFO: TX Result for specific PID status fifo register
1221 */
1222#define TX_STA_FIFO 0x1718
1223#define TX_STA_FIFO_VALID FIELD32(0x00000001)
1224#define TX_STA_FIFO_PID_TYPE FIELD32(0x0000001e)
1225#define TX_STA_FIFO_TX_SUCCESS FIELD32(0x00000020)
1226#define TX_STA_FIFO_TX_AGGRE FIELD32(0x00000040)
1227#define TX_STA_FIFO_TX_ACK_REQUIRED FIELD32(0x00000080)
1228#define TX_STA_FIFO_WCID FIELD32(0x0000ff00)
1229#define TX_STA_FIFO_SUCCESS_RATE FIELD32(0xffff0000)
1230
1231/*
1232 * TX_AGG_CNT: Debug counter
1233 */
1234#define TX_AGG_CNT 0x171c
1235#define TX_AGG_CNT_NON_AGG_TX_COUNT FIELD32(0x0000ffff)
1236#define TX_AGG_CNT_AGG_TX_COUNT FIELD32(0xffff0000)
1237
1238/*
1239 * TX_AGG_CNT0:
1240 */
1241#define TX_AGG_CNT0 0x1720
1242#define TX_AGG_CNT0_AGG_SIZE_1_COUNT FIELD32(0x0000ffff)
1243#define TX_AGG_CNT0_AGG_SIZE_2_COUNT FIELD32(0xffff0000)
1244
1245/*
1246 * TX_AGG_CNT1:
1247 */
1248#define TX_AGG_CNT1 0x1724
1249#define TX_AGG_CNT1_AGG_SIZE_3_COUNT FIELD32(0x0000ffff)
1250#define TX_AGG_CNT1_AGG_SIZE_4_COUNT FIELD32(0xffff0000)
1251
1252/*
1253 * TX_AGG_CNT2:
1254 */
1255#define TX_AGG_CNT2 0x1728
1256#define TX_AGG_CNT2_AGG_SIZE_5_COUNT FIELD32(0x0000ffff)
1257#define TX_AGG_CNT2_AGG_SIZE_6_COUNT FIELD32(0xffff0000)
1258
1259/*
1260 * TX_AGG_CNT3:
1261 */
1262#define TX_AGG_CNT3 0x172c
1263#define TX_AGG_CNT3_AGG_SIZE_7_COUNT FIELD32(0x0000ffff)
1264#define TX_AGG_CNT3_AGG_SIZE_8_COUNT FIELD32(0xffff0000)
1265
1266/*
1267 * TX_AGG_CNT4:
1268 */
1269#define TX_AGG_CNT4 0x1730
1270#define TX_AGG_CNT4_AGG_SIZE_9_COUNT FIELD32(0x0000ffff)
1271#define TX_AGG_CNT4_AGG_SIZE_10_COUNT FIELD32(0xffff0000)
1272
1273/*
1274 * TX_AGG_CNT5:
1275 */
1276#define TX_AGG_CNT5 0x1734
1277#define TX_AGG_CNT5_AGG_SIZE_11_COUNT FIELD32(0x0000ffff)
1278#define TX_AGG_CNT5_AGG_SIZE_12_COUNT FIELD32(0xffff0000)
1279
1280/*
1281 * TX_AGG_CNT6:
1282 */
1283#define TX_AGG_CNT6 0x1738
1284#define TX_AGG_CNT6_AGG_SIZE_13_COUNT FIELD32(0x0000ffff)
1285#define TX_AGG_CNT6_AGG_SIZE_14_COUNT FIELD32(0xffff0000)
1286
1287/*
1288 * TX_AGG_CNT7:
1289 */
1290#define TX_AGG_CNT7 0x173c
1291#define TX_AGG_CNT7_AGG_SIZE_15_COUNT FIELD32(0x0000ffff)
1292#define TX_AGG_CNT7_AGG_SIZE_16_COUNT FIELD32(0xffff0000)
1293
1294/*
1295 * MPDU_DENSITY_CNT:
1296 * TX_ZERO_DEL: TX zero length delimiter count
1297 * RX_ZERO_DEL: RX zero length delimiter count
1298 */
1299#define MPDU_DENSITY_CNT 0x1740
1300#define MPDU_DENSITY_CNT_TX_ZERO_DEL FIELD32(0x0000ffff)
1301#define MPDU_DENSITY_CNT_RX_ZERO_DEL FIELD32(0xffff0000)
1302
1303/*
1304 * Security key table memory.
1305 * MAC_WCID_BASE: 8-bytes (use only 6 bytes) * 256 entry
1306 * PAIRWISE_KEY_TABLE_BASE: 32-byte * 256 entry
1307 * MAC_IVEIV_TABLE_BASE: 8-byte * 256-entry
1308 * MAC_WCID_ATTRIBUTE_BASE: 4-byte * 256-entry
1309 * SHARED_KEY_TABLE_BASE: 32 bytes * 32-entry
1310 * SHARED_KEY_MODE_BASE: 4 bits * 32-entry
1311 */
1312#define MAC_WCID_BASE 0x1800
1313#define PAIRWISE_KEY_TABLE_BASE 0x4000
1314#define MAC_IVEIV_TABLE_BASE 0x6000
1315#define MAC_WCID_ATTRIBUTE_BASE 0x6800
1316#define SHARED_KEY_TABLE_BASE 0x6c00
1317#define SHARED_KEY_MODE_BASE 0x7000
1318
1319#define MAC_WCID_ENTRY(__idx) \
1320 ( MAC_WCID_BASE + ((__idx) * sizeof(struct mac_wcid_entry)) )
1321#define PAIRWISE_KEY_ENTRY(__idx) \
1322 ( PAIRWISE_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
1323#define MAC_IVEIV_ENTRY(__idx) \
1324 ( MAC_IVEIV_TABLE_BASE + ((__idx) & sizeof(struct mac_iveiv_entry)) )
1325#define MAC_WCID_ATTR_ENTRY(__idx) \
1326 ( MAC_WCID_ATTRIBUTE_BASE + ((__idx) * sizeof(u32)) )
1327#define SHARED_KEY_ENTRY(__idx) \
1328 ( SHARED_KEY_TABLE_BASE + ((__idx) * sizeof(struct hw_key_entry)) )
1329#define SHARED_KEY_MODE_ENTRY(__idx) \
1330 ( SHARED_KEY_MODE_BASE + ((__idx) * sizeof(u32)) )
1331
1332struct mac_wcid_entry {
1333 u8 mac[6];
1334 u8 reserved[2];
1335} __attribute__ ((packed));
1336
1337struct hw_key_entry {
1338 u8 key[16];
1339 u8 tx_mic[8];
1340 u8 rx_mic[8];
1341} __attribute__ ((packed));
1342
1343struct mac_iveiv_entry {
1344 u8 iv[8];
1345} __attribute__ ((packed));
1346
/*
 * MAC_WCID_ATTRIBUTE: per-WCID attribute word.
 * Bit layout per the masks: KEYTAB selector (bit 0), CIPHER (bits 1-3),
 * BSS_IDX (bits 4-6), RX_WIUDF user-defined field (bits 7-9).
 */
#define MAC_WCID_ATTRIBUTE_KEYTAB FIELD32(0x00000001)
#define MAC_WCID_ATTRIBUTE_CIPHER FIELD32(0x0000000e)
#define MAC_WCID_ATTRIBUTE_BSS_IDX FIELD32(0x00000070)
#define MAC_WCID_ATTRIBUTE_RX_WIUDF FIELD32(0x00000380)

/*
 * SHARED_KEY_MODE: one 3-bit mode field per (BSS, key index) pair,
 * nibble-aligned: BSS0 keys in the low 16 bits, BSS1 keys in the high.
 */
#define SHARED_KEY_MODE_BSS0_KEY0 FIELD32(0x00000007)
#define SHARED_KEY_MODE_BSS0_KEY1 FIELD32(0x00000070)
#define SHARED_KEY_MODE_BSS0_KEY2 FIELD32(0x00000700)
#define SHARED_KEY_MODE_BSS0_KEY3 FIELD32(0x00007000)
#define SHARED_KEY_MODE_BSS1_KEY0 FIELD32(0x00070000)
#define SHARED_KEY_MODE_BSS1_KEY1 FIELD32(0x00700000)
#define SHARED_KEY_MODE_BSS1_KEY2 FIELD32(0x07000000)
#define SHARED_KEY_MODE_BSS1_KEY3 FIELD32(0x70000000)
1366
/*
 * HOST-MCU communication
 */

/*
 * H2M_MAILBOX_CSR: Host-to-MCU Mailbox.
 * Byte lanes, low to high: ARG0, ARG1, CMD_TOKEN, OWNER.
 */
#define H2M_MAILBOX_CSR 0x7010
#define H2M_MAILBOX_CSR_ARG0 FIELD32(0x000000ff)
#define H2M_MAILBOX_CSR_ARG1 FIELD32(0x0000ff00)
#define H2M_MAILBOX_CSR_CMD_TOKEN FIELD32(0x00ff0000)
#define H2M_MAILBOX_CSR_OWNER FIELD32(0xff000000)

/*
 * H2M_MAILBOX_CID: one command ID per byte lane.
 */
#define H2M_MAILBOX_CID 0x7014
#define H2M_MAILBOX_CID_CMD0 FIELD32(0x000000ff)
#define H2M_MAILBOX_CID_CMD1 FIELD32(0x0000ff00)
#define H2M_MAILBOX_CID_CMD2 FIELD32(0x00ff0000)
#define H2M_MAILBOX_CID_CMD3 FIELD32(0xff000000)

/*
 * H2M_MAILBOX_STATUS:
 */
#define H2M_MAILBOX_STATUS 0x701c

/*
 * H2M_INT_SRC:
 */
#define H2M_INT_SRC 0x7024

/*
 * H2M_BBP_AGENT:
 */
#define H2M_BBP_AGENT 0x7028

/*
 * MCU_LEDCS: LED control for MCU Mailbox.
 * NOTE(review): LED_MODE (0x1f) and POLARITY (0x01) overlap at bit 0,
 * as inherited from the legacy driver — verify against vendor docs.
 */
#define MCU_LEDCS_LED_MODE FIELD8(0x1f)
#define MCU_LEDCS_POLARITY FIELD8(0x01)

/*
 * HW_CS_CTS_BASE:
 * Carrier-sense CTS frame base address.
 * It's where mac stores carrier-sense frame for carrier-sense function.
 */
#define HW_CS_CTS_BASE 0x7700

/*
 * HW_DFS_CTS_BASE:
 * FS CTS frame base address. It's where mac stores CTS frame for DFS.
 */
#define HW_DFS_CTS_BASE 0x7780
1422
/*
 * TXRX control registers - base address 0x3000
 * NOTE(review): the actual offsets below (0x77d0, 0x77f0) do not match
 * the stated 0x3000 base — comment inherited from the legacy driver.
 */

/*
 * TXRX_CSR1:
 * rt2860b UNKNOWN reg use R/O Reg Addr 0x77d0 first..
 */
#define TXRX_CSR1 0x77d0

/*
 * HW_DEBUG_SETTING_BASE:
 * since NULL frame won't be that long (256 byte)
 * We steal 16 tail bytes to save debugging settings
 */
#define HW_DEBUG_SETTING_BASE 0x77f0
#define HW_DEBUG_SETTING_BASE2 0x7770
1440
/*
 * HW_BEACON_BASE
 * In order to support maximum 8 MBSS and its maximum length
 * is 512 bytes for each beacon
 * Three section discontinue memory segments will be used.
 * 1. The original region for BCN 0~3
 * 2. Extract memory from FCE table for BCN 4~5
 * 3. Extract memory from Pair-wise key table for BCN 6~7
 * It occupied those memory of wcid 238~253 for BCN 6
 * and wcid 222~237 for BCN 7
 *
 * IMPORTANT NOTE: Not sure why legacy driver does this,
 * but HW_BEACON_BASE7 is 0x0200 bytes below HW_BEACON_BASE6.
 */
#define HW_BEACON_BASE0 0x7800
#define HW_BEACON_BASE1 0x7a00
#define HW_BEACON_BASE2 0x7c00
#define HW_BEACON_BASE3 0x7e00
#define HW_BEACON_BASE4 0x7200
#define HW_BEACON_BASE5 0x7400
#define HW_BEACON_BASE6 0x5dc0
#define HW_BEACON_BASE7 0x5bc0

/*
 * Map beacon index 0..7 to its base address across the three segments.
 * FIX: __index is now parenthesized in every expansion site; the original
 * left it bare in the arithmetic, so an expression argument such as
 * HW_BEACON_OFFSET(i + 1) would expand with the wrong precedence.
 */
#define HW_BEACON_OFFSET(__index) \
 ( ((__index) < 4) ? ( HW_BEACON_BASE0 + ((__index) * 0x0200) ) : \
 (((__index) < 6) ? ( HW_BEACON_BASE4 + (((__index) - 4) * 0x0200) ) : \
 (HW_BEACON_BASE6 - (((__index) - 6) * 0x0200))) )
1468
1469/*
1470 * 8051 firmware image. 68 * 8051 firmware image.
1471 */ 69 */
1472#define FIRMWARE_RT2870 "rt2870.bin" 70#define FIRMWARE_RT2870 "rt2870.bin"
1473#define FIRMWARE_IMAGE_BASE 0x3000 71#define FIRMWARE_IMAGE_BASE 0x3000
1474 72
1475/* 73/*
1476 * BBP registers.
1477 * The wordsize of the BBP is 8 bits.
1478 */
1479
/*
 * BBP 1: TX Antenna
 * TX_POWER: bits 0-2, TX_ANTENNA: bits 3-4.
 */
#define BBP1_TX_POWER FIELD8(0x07)
#define BBP1_TX_ANTENNA FIELD8(0x18)

/*
 * BBP 3: RX Antenna
 * RX_ANTENNA: bits 3-4, HT40_PLUS: bit 5.
 */
#define BBP3_RX_ANTENNA FIELD8(0x18)
#define BBP3_HT40_PLUS FIELD8(0x20)

/*
 * BBP 4: Bandwidth
 * TX_BF: bit 0, BANDWIDTH: bits 3-4.
 */
#define BBP4_TX_BF FIELD8(0x01)
#define BBP4_BANDWIDTH FIELD8(0x18)
1497
/*
 * RFCSR registers
 * The wordsize of the RFCSR is 8 bits.
 */

/*
 * RFCSR 6:
 */
#define RFCSR6_R FIELD8(0x03)

/*
 * RFCSR 7:
 */
#define RFCSR7_RF_TUNING FIELD8(0x01)

/*
 * RFCSR 12:
 */
#define RFCSR12_TX_POWER FIELD8(0x1f)

/*
 * RFCSR 22:
 */
#define RFCSR22_BASEBAND_LOOPBACK FIELD8(0x01)

/*
 * RFCSR 23:
 */
#define RFCSR23_FREQ_OFFSET FIELD8(0x7f)

/*
 * RFCSR 30:
 */
#define RFCSR30_RF_CALIBRATION FIELD8(0x80)
1532
/*
 * RF registers
 */

/*
 * RF 2: antenna enable bits.
 */
#define RF2_ANTENNA_RX2 FIELD32(0x00000040)
#define RF2_ANTENNA_TX1 FIELD32(0x00004000)
#define RF2_ANTENNA_RX1 FIELD32(0x00020000)

/*
 * RF 3: TX power fields.
 * NOTE(review): the A-band fields overlap the G-band mask (0x3e00) —
 * the register is interpreted per-band; verify against vendor docs.
 */
#define RF3_TXPOWER_G FIELD32(0x00003e00)
#define RF3_TXPOWER_A_7DBM_BOOST FIELD32(0x00000200)
#define RF3_TXPOWER_A FIELD32(0x00003c00)

/*
 * RF 4: TX power, frequency offset and HT40 flag.
 */
#define RF4_TXPOWER_G FIELD32(0x000007c0)
#define RF4_TXPOWER_A_7DBM_BOOST FIELD32(0x00000040)
#define RF4_TXPOWER_A FIELD32(0x00000780)
#define RF4_FREQ_OFFSET FIELD32(0x001f8000)
#define RF4_HT40 FIELD32(0x00200000)
1559
/*
 * EEPROM content.
 * The wordsize of the EEPROM is 16 bits.
 * All offsets below are 16-bit word offsets, not byte offsets.
 */

/*
 * EEPROM Version
 */
#define EEPROM_VERSION 0x0001
#define EEPROM_VERSION_FAE FIELD16(0x00ff)
#define EEPROM_VERSION_VERSION FIELD16(0xff00)

/*
 * HW MAC address.
 * Stored as three 16-bit words, two address bytes per word.
 */
#define EEPROM_MAC_ADDR_0 0x0002
#define EEPROM_MAC_ADDR_BYTE0 FIELD16(0x00ff)
#define EEPROM_MAC_ADDR_BYTE1 FIELD16(0xff00)
#define EEPROM_MAC_ADDR_1 0x0003
#define EEPROM_MAC_ADDR_BYTE2 FIELD16(0x00ff)
#define EEPROM_MAC_ADDR_BYTE3 FIELD16(0xff00)
#define EEPROM_MAC_ADDR_2 0x0004
#define EEPROM_MAC_ADDR_BYTE4 FIELD16(0x00ff)
#define EEPROM_MAC_ADDR_BYTE5 FIELD16(0xff00)

/*
 * EEPROM ANTENNA config
 * RXPATH: 1: 1R, 2: 2R, 3: 3R
 * TXPATH: 1: 1T, 2: 2T
 */
#define EEPROM_ANTENNA 0x001a
#define EEPROM_ANTENNA_RXPATH FIELD16(0x000f)
#define EEPROM_ANTENNA_TXPATH FIELD16(0x00f0)
#define EEPROM_ANTENNA_RF_TYPE FIELD16(0x0f00)

/*
 * EEPROM NIC config
 * CARDBUS_ACCEL: 0 - enable, 1 - disable
 */
#define EEPROM_NIC 0x001b
#define EEPROM_NIC_HW_RADIO FIELD16(0x0001)
#define EEPROM_NIC_DYNAMIC_TX_AGC FIELD16(0x0002)
#define EEPROM_NIC_EXTERNAL_LNA_BG FIELD16(0x0004)
#define EEPROM_NIC_EXTERNAL_LNA_A FIELD16(0x0008)
#define EEPROM_NIC_CARDBUS_ACCEL FIELD16(0x0010)
#define EEPROM_NIC_BW40M_SB_BG FIELD16(0x0020)
#define EEPROM_NIC_BW40M_SB_A FIELD16(0x0040)
#define EEPROM_NIC_WPS_PBC FIELD16(0x0080)
#define EEPROM_NIC_BW40M_BG FIELD16(0x0100)
#define EEPROM_NIC_BW40M_A FIELD16(0x0200)

/*
 * EEPROM frequency
 * NOTE(review): LED_MODE (0x7f00) and LED_POLARITY (0x1000) overlap at
 * bit 12, as inherited from the legacy driver — verify before relying
 * on both fields simultaneously.
 */
#define EEPROM_FREQ 0x001d
#define EEPROM_FREQ_OFFSET FIELD16(0x00ff)
#define EEPROM_FREQ_LED_MODE FIELD16(0x7f00)
#define EEPROM_FREQ_LED_POLARITY FIELD16(0x1000)
1618
/*
 * EEPROM LED
 * POLARITY_RDY_G: Polarity RDY_G setting.
 * POLARITY_RDY_A: Polarity RDY_A setting.
 * POLARITY_ACT: Polarity ACT setting.
 * POLARITY_GPIO_0: Polarity GPIO0 setting.
 * POLARITY_GPIO_1: Polarity GPIO1 setting.
 * POLARITY_GPIO_2: Polarity GPIO2 setting.
 * POLARITY_GPIO_3: Polarity GPIO3 setting.
 * POLARITY_GPIO_4: Polarity GPIO4 setting.
 * LED_MODE: Led mode.
 * The EEPROM_LED_* field masks below apply to each of the three words.
 */
#define EEPROM_LED1 0x001e
#define EEPROM_LED2 0x001f
#define EEPROM_LED3 0x0020
#define EEPROM_LED_POLARITY_RDY_BG FIELD16(0x0001)
#define EEPROM_LED_POLARITY_RDY_A FIELD16(0x0002)
#define EEPROM_LED_POLARITY_ACT FIELD16(0x0004)
#define EEPROM_LED_POLARITY_GPIO_0 FIELD16(0x0008)
#define EEPROM_LED_POLARITY_GPIO_1 FIELD16(0x0010)
#define EEPROM_LED_POLARITY_GPIO_2 FIELD16(0x0020)
#define EEPROM_LED_POLARITY_GPIO_3 FIELD16(0x0040)
#define EEPROM_LED_POLARITY_GPIO_4 FIELD16(0x0080)
#define EEPROM_LED_LED_MODE FIELD16(0x1f00)

/*
 * EEPROM LNA
 */
#define EEPROM_LNA 0x0022
#define EEPROM_LNA_BG FIELD16(0x00ff)
#define EEPROM_LNA_A0 FIELD16(0xff00)

/*
 * EEPROM RSSI BG offset
 */
#define EEPROM_RSSI_BG 0x0023
#define EEPROM_RSSI_BG_OFFSET0 FIELD16(0x00ff)
#define EEPROM_RSSI_BG_OFFSET1 FIELD16(0xff00)

/*
 * EEPROM RSSI BG2 offset
 */
#define EEPROM_RSSI_BG2 0x0024
#define EEPROM_RSSI_BG2_OFFSET2 FIELD16(0x00ff)
#define EEPROM_RSSI_BG2_LNA_A1 FIELD16(0xff00)

/*
 * EEPROM RSSI A offset
 */
#define EEPROM_RSSI_A 0x0025
#define EEPROM_RSSI_A_OFFSET0 FIELD16(0x00ff)
#define EEPROM_RSSI_A_OFFSET1 FIELD16(0xff00)

/*
 * EEPROM RSSI A2 offset
 */
#define EEPROM_RSSI_A2 0x0026
#define EEPROM_RSSI_A2_OFFSET2 FIELD16(0x00ff)
#define EEPROM_RSSI_A2_LNA_A2 FIELD16(0xff00)
1678
/*
 * EEPROM TXpower delta: 20MHZ AND 40 MHZ use different power.
 * This is delta in 40MHZ.
 * VALUE: Tx Power dalta value (MAX=4)
 * TYPE: 1: Plus the delta value, 0: minus the delta value
 * TXPOWER: Enable:
 */
#define EEPROM_TXPOWER_DELTA 0x0028
#define EEPROM_TXPOWER_DELTA_VALUE FIELD16(0x003f)
#define EEPROM_TXPOWER_DELTA_TYPE FIELD16(0x0040)
#define EEPROM_TXPOWER_DELTA_TXPOWER FIELD16(0x0080)

/*
 * EEPROM TXPOWER 802.11BG
 * Two signed power values per 16-bit word; _SIZE is in words.
 */
#define EEPROM_TXPOWER_BG1 0x0029
#define EEPROM_TXPOWER_BG2 0x0030
#define EEPROM_TXPOWER_BG_SIZE 7
#define EEPROM_TXPOWER_BG_1 FIELD16(0x00ff)
#define EEPROM_TXPOWER_BG_2 FIELD16(0xff00)

/*
 * EEPROM TXPOWER 802.11A
 */
#define EEPROM_TXPOWER_A1 0x003c
#define EEPROM_TXPOWER_A2 0x0053
#define EEPROM_TXPOWER_A_SIZE 6
#define EEPROM_TXPOWER_A_1 FIELD16(0x00ff)
#define EEPROM_TXPOWER_A_2 FIELD16(0xff00)

/*
 * EEPROM TXpower byrate: 20MHZ power
 */
#define EEPROM_TXPOWER_BYRATE 0x006f

/*
 * EEPROM BBP.
 * Each word holds one register initialization: value byte + register id.
 */
#define EEPROM_BBP_START 0x0078
#define EEPROM_BBP_SIZE 16
#define EEPROM_BBP_VALUE FIELD16(0x00ff)
#define EEPROM_BBP_REG_ID FIELD16(0xff00)
1721
/*
 * MCU mailbox commands.
 * Command byte values written to the host-to-MCU mailbox.
 */
#define MCU_SLEEP 0x30
#define MCU_WAKEUP 0x31
#define MCU_RADIO_OFF 0x35
#define MCU_CURRENT 0x36
#define MCU_LED 0x50
#define MCU_LED_STRENGTH 0x51
#define MCU_LED_1 0x52
#define MCU_LED_2 0x53
#define MCU_LED_3 0x54
#define MCU_RADAR 0x60
#define MCU_BOOT_SIGNAL 0x72
#define MCU_BBP_SIGNAL 0x80
#define MCU_POWER_SAVE 0x83

/*
 * MCU mailbox tokens
 * Note: "WAKUP" spelling (sic) is kept — the identifier is used by callers.
 */
#define TOKEN_WAKUP 3
1743
1744/*
1745 * DMA descriptor defines. 74 * DMA descriptor defines.
1746 */ 75 */
1747#define TXD_DESC_SIZE ( 4 * sizeof(__le32) )
1748#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) ) 76#define TXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
1749#define TXWI_DESC_SIZE ( 4 * sizeof(__le32) ) 77#define RXINFO_DESC_SIZE ( 1 * sizeof(__le32) )
1750#define RXD_DESC_SIZE ( 1 * sizeof(__le32) )
1751#define RXWI_DESC_SIZE ( 4 * sizeof(__le32) )
1752
/*
 * TX descriptor format for TX, PRIO and Beacon Ring.
 */

/*
 * Word0
 * SD_PTR0: DMA address of the first segment.
 */
#define TXD_W0_SD_PTR0 FIELD32(0xffffffff)

/*
 * Word1
 * SD_LEN0/SD_LEN1: segment lengths; LAST_SEC0/1: last-section flags;
 * DMA_DONE: set by hardware when the descriptor has been processed.
 */
#define TXD_W1_SD_LEN1 FIELD32(0x00003fff)
#define TXD_W1_LAST_SEC1 FIELD32(0x00004000)
#define TXD_W1_BURST FIELD32(0x00008000)
#define TXD_W1_SD_LEN0 FIELD32(0x3fff0000)
#define TXD_W1_LAST_SEC0 FIELD32(0x40000000)
#define TXD_W1_DMA_DONE FIELD32(0x80000000)

/*
 * Word2
 * SD_PTR1: DMA address of the second segment.
 */
#define TXD_W2_SD_PTR1 FIELD32(0xffffffff)

/*
 * Word3
 * WIV: Wireless Info Valid. 1: Driver filled WI, 0: DMA needs to copy WI
 * QSEL: Select on-chip FIFO ID for 2nd-stage output scheduler.
 * 0:MGMT, 1:HCCA 2:EDCA
 */
#define TXD_W3_WIV FIELD32(0x01000000)
#define TXD_W3_QSEL FIELD32(0x06000000)
#define TXD_W3_TCO FIELD32(0x20000000)
#define TXD_W3_UCO FIELD32(0x40000000)
#define TXD_W3_ICO FIELD32(0x80000000)
1788 78
1789/* 79/*
1790 * TX Info structure 80 * TX Info structure
@@ -1807,52 +97,6 @@ struct mac_iveiv_entry {
1807#define TXINFO_W0_USB_DMA_TX_BURST FIELD32(0x80000000) 97#define TXINFO_W0_USB_DMA_TX_BURST FIELD32(0x80000000)
1808 98
1809/* 99/*
1810 * TX WI structure
1811 */
1812
/*
 * TXWI Word0
 * FRAG: 1 To inform TKIP engine this is a fragment.
 * MIMO_PS: The remote peer is in dynamic MIMO-PS mode
 * TX_OP: 0:HT TXOP rule , 1:PIFS TX ,2:Backoff, 3:sifs
 * BW: Channel bandwidth 20MHz or 40 MHz
 * STBC: 1: STBC support MCS =0-7, 2,3 : RESERVED
 */
#define TXWI_W0_FRAG FIELD32(0x00000001)
#define TXWI_W0_MIMO_PS FIELD32(0x00000002)
#define TXWI_W0_CF_ACK FIELD32(0x00000004)
#define TXWI_W0_TS FIELD32(0x00000008)
#define TXWI_W0_AMPDU FIELD32(0x00000010)
#define TXWI_W0_MPDU_DENSITY FIELD32(0x000000e0)
#define TXWI_W0_TX_OP FIELD32(0x00000300)
#define TXWI_W0_MCS FIELD32(0x007f0000)
#define TXWI_W0_BW FIELD32(0x00800000)
#define TXWI_W0_SHORT_GI FIELD32(0x01000000)
#define TXWI_W0_STBC FIELD32(0x06000000)
#define TXWI_W0_IFS FIELD32(0x08000000)
#define TXWI_W0_PHYMODE FIELD32(0xc0000000)

/*
 * Word1
 * ACK: request acknowledgement; NSEQ: no sequence-number assignment;
 * MPDU_TOTAL_BYTE_COUNT: frame length; PACKETID: driver packet tag.
 */
#define TXWI_W1_ACK FIELD32(0x00000001)
#define TXWI_W1_NSEQ FIELD32(0x00000002)
#define TXWI_W1_BW_WIN_SIZE FIELD32(0x000000fc)
#define TXWI_W1_WIRELESS_CLI_ID FIELD32(0x0000ff00)
#define TXWI_W1_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
#define TXWI_W1_PACKETID FIELD32(0xf0000000)

/*
 * Word2: full-word IV.
 */
#define TXWI_W2_IV FIELD32(0xffffffff)

/*
 * Word3: full-word EIV.
 */
#define TXWI_W3_EIV FIELD32(0xffffffff)
1854
1855/*
1856 * RX descriptor format for RX Ring. 100 * RX descriptor format for RX Ring.
1857 */ 101 */
1858 102
@@ -1888,64 +132,4 @@ struct mac_iveiv_entry {
1888#define RXD_W0_LAST_AMSDU FIELD32(0x00080000) 132#define RXD_W0_LAST_AMSDU FIELD32(0x00080000)
1889#define RXD_W0_PLCP_SIGNAL FIELD32(0xfff00000) 133#define RXD_W0_PLCP_SIGNAL FIELD32(0xfff00000)
1890 134
/*
 * RX WI structure
 */

/*
 * Word0
 * WIRELESS_CLI_ID: WCID of the transmitting station; TID: traffic id.
 */
#define RXWI_W0_WIRELESS_CLI_ID FIELD32(0x000000ff)
#define RXWI_W0_KEY_INDEX FIELD32(0x00000300)
#define RXWI_W0_BSSID FIELD32(0x00001c00)
#define RXWI_W0_UDF FIELD32(0x0000e000)
#define RXWI_W0_MPDU_TOTAL_BYTE_COUNT FIELD32(0x0fff0000)
#define RXWI_W0_TID FIELD32(0xf0000000)

/*
 * Word1
 * FRAG/SEQUENCE: 802.11 sequence control; MCS/BW/SHORT_GI/STBC/PHYMODE:
 * PHY parameters of the received frame.
 */
#define RXWI_W1_FRAG FIELD32(0x0000000f)
#define RXWI_W1_SEQUENCE FIELD32(0x0000fff0)
#define RXWI_W1_MCS FIELD32(0x007f0000)
#define RXWI_W1_BW FIELD32(0x00800000)
#define RXWI_W1_SHORT_GI FIELD32(0x01000000)
#define RXWI_W1_STBC FIELD32(0x06000000)
#define RXWI_W1_PHYMODE FIELD32(0xc0000000)

/*
 * Word2: per-chain RSSI values.
 */
#define RXWI_W2_RSSI0 FIELD32(0x000000ff)
#define RXWI_W2_RSSI1 FIELD32(0x0000ff00)
#define RXWI_W2_RSSI2 FIELD32(0x00ff0000)

/*
 * Word3: per-chain SNR values.
 */
#define RXWI_W3_SNR0 FIELD32(0x000000ff)
#define RXWI_W3_SNR1 FIELD32(0x0000ff00)
1928
/*
 * Macros for converting txpower from EEPROM to mac80211 value
 * and from mac80211 value to register value.
 */
#define MIN_G_TXPOWER 0
#define MIN_A_TXPOWER -7
#define MAX_G_TXPOWER 31
#define MAX_A_TXPOWER 15
#define DEFAULT_TXPOWER 5

/*
 * FIX: the *_FROM_DEV expansions are now wrapped in outer parentheses.
 * The original conditional expression was unparenthesized, so a use such
 * as "2 * TXPOWER_G_FROM_DEV(x)" expanded with the wrong precedence.
 * Out-of-range EEPROM values fall back to DEFAULT_TXPOWER.
 */
#define TXPOWER_G_FROM_DEV(__txpower) \
 (((__txpower) > MAX_G_TXPOWER) ? DEFAULT_TXPOWER : (__txpower))

#define TXPOWER_G_TO_DEV(__txpower) \
 clamp_t(char, (__txpower), MIN_G_TXPOWER, MAX_G_TXPOWER)

#define TXPOWER_A_FROM_DEV(__txpower) \
 (((__txpower) > MAX_A_TXPOWER) ? DEFAULT_TXPOWER : (__txpower))

#define TXPOWER_A_TO_DEV(__txpower) \
 clamp_t(char, (__txpower), MIN_A_TXPOWER, MAX_A_TXPOWER)
1950
1951#endif /* RT2800USB_H */ 135#endif /* RT2800USB_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00.h b/drivers/net/wireless/rt2x00/rt2x00.h
index 27bc6b7fbfde..c83dbaefd57a 100644
--- a/drivers/net/wireless/rt2x00/rt2x00.h
+++ b/drivers/net/wireless/rt2x00/rt2x00.h
@@ -144,6 +144,11 @@ struct avg_val {
144 int avg_weight; 144 int avg_weight;
145}; 145};
146 146
147enum rt2x00_chip_intf {
148 RT2X00_CHIP_INTF_PCI,
149 RT2X00_CHIP_INTF_USB,
150};
151
147/* 152/*
148 * Chipset identification 153 * Chipset identification
149 * The chipset on the device is composed of a RT and RF chip. 154 * The chipset on the device is composed of a RT and RF chip.
@@ -158,10 +163,19 @@ struct rt2x00_chip {
158#define RT2561 0x0302 163#define RT2561 0x0302
159#define RT2661 0x0401 164#define RT2661 0x0401
160#define RT2571 0x1300 165#define RT2571 0x1300
166#define RT2860 0x0601 /* 2.4GHz PCI/CB */
167#define RT2860D 0x0681 /* 2.4GHz, 5GHz PCI/CB */
168#define RT2890 0x0701 /* 2.4GHz PCIe */
169#define RT2890D 0x0781 /* 2.4GHz, 5GHz PCIe */
170#define RT2880 0x2880 /* WSOC */
171#define RT3052 0x3052 /* WSOC */
172#define RT3090 0x3090 /* 2.4GHz PCIe */
161#define RT2870 0x1600 173#define RT2870 0x1600
162 174
163 u16 rf; 175 u16 rf;
164 u32 rev; 176 u32 rev;
177
178 enum rt2x00_chip_intf intf;
165}; 179};
166 180
167/* 181/*
@@ -835,9 +849,23 @@ struct rt2x00_dev {
835 * Firmware image. 849 * Firmware image.
836 */ 850 */
837 const struct firmware *fw; 851 const struct firmware *fw;
852
853 /*
854 * Driver specific data.
855 */
856 void *priv;
838}; 857};
839 858
840/* 859/*
860 * Register defines.
861 * Some registers require multiple attempts before success,
862 * in those cases REGISTER_BUSY_COUNT attempts should be
863 * taken with a REGISTER_BUSY_DELAY interval.
864 */
865#define REGISTER_BUSY_COUNT 5
866#define REGISTER_BUSY_DELAY 100
867
868/*
841 * Generic RF access. 869 * Generic RF access.
842 * The RF is being accessed by word index. 870 * The RF is being accessed by word index.
843 */ 871 */
@@ -925,6 +953,28 @@ static inline bool rt2x00_check_rev(const struct rt2x00_chip *chipset,
925 return ((chipset->rev & mask) == rev); 953 return ((chipset->rev & mask) == rev);
926} 954}
927 955
956static inline void rt2x00_set_chip_intf(struct rt2x00_dev *rt2x00dev,
957 enum rt2x00_chip_intf intf)
958{
959 rt2x00dev->chip.intf = intf;
960}
961
962static inline bool rt2x00_intf(const struct rt2x00_chip *chipset,
963 enum rt2x00_chip_intf intf)
964{
965 return (chipset->intf == intf);
966}
967
968static inline bool rt2x00_intf_is_pci(struct rt2x00_dev *rt2x00dev)
969{
970 return rt2x00_intf(&rt2x00dev->chip, RT2X00_CHIP_INTF_PCI);
971}
972
973static inline bool rt2x00_intf_is_usb(struct rt2x00_dev *rt2x00dev)
974{
975 return rt2x00_intf(&rt2x00dev->chip, RT2X00_CHIP_INTF_USB);
976}
977
928/** 978/**
929 * rt2x00queue_map_txskb - Map a skb into DMA for TX purposes. 979 * rt2x00queue_map_txskb - Map a skb into DMA for TX purposes.
930 * @rt2x00dev: Pointer to &struct rt2x00_dev. 980 * @rt2x00dev: Pointer to &struct rt2x00_dev.
diff --git a/drivers/net/wireless/rt2x00/rt2x00leds.h b/drivers/net/wireless/rt2x00/rt2x00leds.h
index 1046977e6a12..8e03c045e037 100644
--- a/drivers/net/wireless/rt2x00/rt2x00leds.h
+++ b/drivers/net/wireless/rt2x00/rt2x00leds.h
@@ -33,8 +33,6 @@ enum led_type {
33 LED_TYPE_QUALITY, 33 LED_TYPE_QUALITY,
34}; 34};
35 35
36#ifdef CONFIG_RT2X00_LIB_LEDS
37
38struct rt2x00_led { 36struct rt2x00_led {
39 struct rt2x00_dev *rt2x00dev; 37 struct rt2x00_dev *rt2x00dev;
40 struct led_classdev led_dev; 38 struct led_classdev led_dev;
@@ -45,6 +43,4 @@ struct rt2x00_led {
45#define LED_REGISTERED ( 1 << 1 ) 43#define LED_REGISTERED ( 1 << 1 )
46}; 44};
47 45
48#endif /* CONFIG_RT2X00_LIB_LEDS */
49
50#endif /* RT2X00LEDS_H */ 46#endif /* RT2X00LEDS_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00pci.h b/drivers/net/wireless/rt2x00/rt2x00pci.h
index 15a12487e04b..ae33eebe9a6f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00pci.h
+++ b/drivers/net/wireless/rt2x00/rt2x00pci.h
@@ -35,15 +35,6 @@
35#define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops) 35#define PCI_DEVICE_DATA(__ops) .driver_data = (kernel_ulong_t)(__ops)
36 36
37/* 37/*
38 * Register defines.
39 * Some registers require multiple attempts before success,
40 * in those cases REGISTER_BUSY_COUNT attempts should be
41 * taken with a REGISTER_BUSY_DELAY interval.
42 */
43#define REGISTER_BUSY_COUNT 5
44#define REGISTER_BUSY_DELAY 100
45
46/*
47 * Register access. 38 * Register access.
48 */ 39 */
49static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev, 40static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
@@ -53,10 +44,9 @@ static inline void rt2x00pci_register_read(struct rt2x00_dev *rt2x00dev,
53 *value = readl(rt2x00dev->csr.base + offset); 44 *value = readl(rt2x00dev->csr.base + offset);
54} 45}
55 46
56static inline void 47static inline void rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev,
57rt2x00pci_register_multiread(struct rt2x00_dev *rt2x00dev, 48 const unsigned int offset,
58 const unsigned int offset, 49 void *value, const u32 length)
59 void *value, const u16 length)
60{ 50{
61 memcpy_fromio(value, rt2x00dev->csr.base + offset, length); 51 memcpy_fromio(value, rt2x00dev->csr.base + offset, length);
62} 52}
@@ -68,10 +58,10 @@ static inline void rt2x00pci_register_write(struct rt2x00_dev *rt2x00dev,
68 writel(value, rt2x00dev->csr.base + offset); 58 writel(value, rt2x00dev->csr.base + offset);
69} 59}
70 60
71static inline void 61static inline void rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev,
72rt2x00pci_register_multiwrite(struct rt2x00_dev *rt2x00dev, 62 const unsigned int offset,
73 const unsigned int offset, 63 const void *value,
74 const void *value, const u16 length) 64 const u32 length)
75{ 65{
76 memcpy_toio(rt2x00dev->csr.base + offset, value, length); 66 memcpy_toio(rt2x00dev->csr.base + offset, value, length);
77} 67}
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.c b/drivers/net/wireless/rt2x00/rt2x00soc.c
new file mode 100644
index 000000000000..539568c48953
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.c
@@ -0,0 +1,159 @@
1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2x00soc
23 Abstract: rt2x00 generic soc device routines.
24 */
25
26#include <linux/bug.h>
27#include <linux/kernel.h>
28#include <linux/module.h>
29#include <linux/platform_device.h>
30
31#include "rt2x00.h"
32#include "rt2x00soc.h"
33
34static void rt2x00soc_free_reg(struct rt2x00_dev *rt2x00dev)
35{
36 kfree(rt2x00dev->rf);
37 rt2x00dev->rf = NULL;
38
39 kfree(rt2x00dev->eeprom);
40 rt2x00dev->eeprom = NULL;
41}
42
43static int rt2x00soc_alloc_reg(struct rt2x00_dev *rt2x00dev)
44{
45 struct platform_device *pdev = to_platform_device(rt2x00dev->dev);
46 struct resource *res;
47
48 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
49 if (!res)
50 return -ENODEV;
51
52 rt2x00dev->csr.base = (void __iomem *)KSEG1ADDR(res->start);
53 if (!rt2x00dev->csr.base)
54 goto exit;
55
56 rt2x00dev->eeprom = kzalloc(rt2x00dev->ops->eeprom_size, GFP_KERNEL);
57 if (!rt2x00dev->eeprom)
58 goto exit;
59
60 rt2x00dev->rf = kzalloc(rt2x00dev->ops->rf_size, GFP_KERNEL);
61 if (!rt2x00dev->rf)
62 goto exit;
63
64 return 0;
65
66exit:
67 ERROR_PROBE("Failed to allocate registers.\n");
68 rt2x00soc_free_reg(rt2x00dev);
69
70 return -ENOMEM;
71}
72
73int rt2x00soc_probe(struct platform_device *pdev,
74 const unsigned short chipset,
75 const struct rt2x00_ops *ops)
76{
77 struct ieee80211_hw *hw;
78 struct rt2x00_dev *rt2x00dev;
79 int retval;
80
81 hw = ieee80211_alloc_hw(sizeof(struct rt2x00_dev), ops->hw);
82 if (!hw) {
83 ERROR_PROBE("Failed to allocate hardware.\n");
84 return -ENOMEM;
85 }
86
87 platform_set_drvdata(pdev, hw);
88
89 rt2x00dev = hw->priv;
90 rt2x00dev->dev = &pdev->dev;
91 rt2x00dev->ops = ops;
92 rt2x00dev->hw = hw;
93 rt2x00dev->irq = platform_get_irq(pdev, 0);
94 rt2x00dev->name = pdev->dev.driver->name;
95
96 rt2x00_set_chip_rt(rt2x00dev, chipset);
97
98 retval = rt2x00soc_alloc_reg(rt2x00dev);
99 if (retval)
100 goto exit_free_device;
101
102 retval = rt2x00lib_probe_dev(rt2x00dev);
103 if (retval)
104 goto exit_free_reg;
105
106 return 0;
107
108exit_free_reg:
109 rt2x00soc_free_reg(rt2x00dev);
110
111exit_free_device:
112 ieee80211_free_hw(hw);
113
114 return retval;
115}
116
117int rt2x00soc_remove(struct platform_device *pdev)
118{
119 struct ieee80211_hw *hw = platform_get_drvdata(pdev);
120 struct rt2x00_dev *rt2x00dev = hw->priv;
121
122 /*
123 * Free all allocated data.
124 */
125 rt2x00lib_remove_dev(rt2x00dev);
126 rt2x00soc_free_reg(rt2x00dev);
127 ieee80211_free_hw(hw);
128
129 return 0;
130}
131EXPORT_SYMBOL_GPL(rt2x00soc_remove);
132
133#ifdef CONFIG_PM
134int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state)
135{
136 struct ieee80211_hw *hw = platform_get_drvdata(pdev);
137 struct rt2x00_dev *rt2x00dev = hw->priv;
138
139 return rt2x00lib_suspend(rt2x00dev, state);
140}
141EXPORT_SYMBOL_GPL(rt2x00soc_suspend);
142
143int rt2x00soc_resume(struct platform_device *pdev)
144{
145 struct ieee80211_hw *hw = platform_get_drvdata(pdev);
146 struct rt2x00_dev *rt2x00dev = hw->priv;
147
148 return rt2x00lib_resume(rt2x00dev);
149}
150EXPORT_SYMBOL_GPL(rt2x00soc_resume);
151#endif /* CONFIG_PM */
152
153/*
154 * rt2x00soc module information.
155 */
156MODULE_AUTHOR(DRV_PROJECT);
157MODULE_VERSION(DRV_VERSION);
158MODULE_DESCRIPTION("rt2x00 soc library");
159MODULE_LICENSE("GPL");
diff --git a/drivers/net/wireless/rt2x00/rt2x00soc.h b/drivers/net/wireless/rt2x00/rt2x00soc.h
new file mode 100644
index 000000000000..5cf114ac2b9c
--- /dev/null
+++ b/drivers/net/wireless/rt2x00/rt2x00soc.h
@@ -0,0 +1,52 @@
1/*
2 Copyright (C) 2004 - 2009 rt2x00 SourceForge Project
3 <http://rt2x00.serialmonkey.com>
4
5 This program is free software; you can redistribute it and/or modify
6 it under the terms of the GNU General Public License as published by
7 the Free Software Foundation; either version 2 of the License, or
8 (at your option) any later version.
9
10 This program is distributed in the hope that it will be useful,
11 but WITHOUT ANY WARRANTY; without even the implied warranty of
12 MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 GNU General Public License for more details.
14
15 You should have received a copy of the GNU General Public License
16 along with this program; if not, write to the
17 Free Software Foundation, Inc.,
18 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21/*
22 Module: rt2x00soc
23 Abstract: Data structures for the rt2x00soc module.
24 */
25
26#ifndef RT2X00SOC_H
27#define RT2X00SOC_H
28
29#define KSEG1ADDR(__ptr) __ptr
30
31#define __rt2x00soc_probe(__chipset, __ops) \
32static int __rt2x00soc_probe(struct platform_device *pdev) \
33{ \
34 return rt2x00soc_probe(pdev, (__chipset), (__ops)); \
35}
36
37/*
38 * SoC driver handlers.
39 */
40int rt2x00soc_probe(struct platform_device *pdev,
41 const unsigned short chipset,
42 const struct rt2x00_ops *ops);
43int rt2x00soc_remove(struct platform_device *pdev);
44#ifdef CONFIG_PM
45int rt2x00soc_suspend(struct platform_device *pdev, pm_message_t state);
46int rt2x00soc_resume(struct platform_device *pdev);
47#else
48#define rt2x00soc_suspend NULL
49#define rt2x00soc_resume NULL
50#endif /* CONFIG_PM */
51
52#endif /* RT2X00SOC_H */
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.c b/drivers/net/wireless/rt2x00/rt2x00usb.c
index f02b48a90593..c9cbdaa1073f 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.c
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.c
@@ -160,7 +160,7 @@ EXPORT_SYMBOL_GPL(rt2x00usb_vendor_request_large_buff);
160 160
161int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev, 161int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
162 const unsigned int offset, 162 const unsigned int offset,
163 struct rt2x00_field32 field, 163 const struct rt2x00_field32 field,
164 u32 *reg) 164 u32 *reg)
165{ 165{
166 unsigned int i; 166 unsigned int i;
diff --git a/drivers/net/wireless/rt2x00/rt2x00usb.h b/drivers/net/wireless/rt2x00/rt2x00usb.h
index bd2d59c85f1b..9943e428bc21 100644
--- a/drivers/net/wireless/rt2x00/rt2x00usb.h
+++ b/drivers/net/wireless/rt2x00/rt2x00usb.h
@@ -39,17 +39,11 @@
39#define USB_DEVICE_DATA(__ops) .driver_info = (kernel_ulong_t)(__ops) 39#define USB_DEVICE_DATA(__ops) .driver_info = (kernel_ulong_t)(__ops)
40 40
41/* 41/*
42 * Register defines.
43 * Some registers require multiple attempts before success,
44 * in those cases REGISTER_BUSY_COUNT attempts should be
45 * taken with a REGISTER_BUSY_DELAY interval.
46 * For USB vendor requests we need to pass a timeout 42 * For USB vendor requests we need to pass a timeout
47 * time in ms, for this we use the REGISTER_TIMEOUT, 43 * time in ms, for this we use the REGISTER_TIMEOUT,
48 * however when loading firmware a higher value is 44 * however when loading firmware a higher value is
49 * required. In that case we use the REGISTER_TIMEOUT_FIRMWARE. 45 * required. In that case we use the REGISTER_TIMEOUT_FIRMWARE.
50 */ 46 */
51#define REGISTER_BUSY_COUNT 5
52#define REGISTER_BUSY_DELAY 100
53#define REGISTER_TIMEOUT 500 47#define REGISTER_TIMEOUT 500
54#define REGISTER_TIMEOUT_FIRMWARE 1000 48#define REGISTER_TIMEOUT_FIRMWARE 1000
55 49
@@ -232,7 +226,7 @@ static inline int rt2x00usb_eeprom_read(struct rt2x00_dev *rt2x00dev,
232} 226}
233 227
234/** 228/**
235 * rt2x00usb_regbusy_read - Read 32bit register word 229 * rt2x00usb_register_read - Read 32bit register word
236 * @rt2x00dev: Device pointer, see &struct rt2x00_dev. 230 * @rt2x00dev: Device pointer, see &struct rt2x00_dev.
237 * @offset: Register offset 231 * @offset: Register offset
238 * @value: Pointer to where register contents should be stored 232 * @value: Pointer to where register contents should be stored
@@ -340,12 +334,13 @@ static inline void rt2x00usb_register_write_lock(struct rt2x00_dev *rt2x00dev,
340 * through rt2x00usb_vendor_request_buff(). 334 * through rt2x00usb_vendor_request_buff().
341 */ 335 */
342static inline void rt2x00usb_register_multiwrite(struct rt2x00_dev *rt2x00dev, 336static inline void rt2x00usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
343 const unsigned int offset, 337 const unsigned int offset,
344 void *value, const u32 length) 338 const void *value,
339 const u32 length)
345{ 340{
346 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE, 341 rt2x00usb_vendor_request_buff(rt2x00dev, USB_MULTI_WRITE,
347 USB_VENDOR_REQUEST_OUT, offset, 342 USB_VENDOR_REQUEST_OUT, offset,
348 value, length, 343 (void *)value, length,
349 REGISTER_TIMEOUT32(length)); 344 REGISTER_TIMEOUT32(length));
350} 345}
351 346
@@ -364,7 +359,7 @@ static inline void rt2x00usb_register_multiwrite(struct rt2x00_dev *rt2x00dev,
364 */ 359 */
365int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev, 360int rt2x00usb_regbusy_read(struct rt2x00_dev *rt2x00dev,
366 const unsigned int offset, 361 const unsigned int offset,
367 struct rt2x00_field32 field, 362 const struct rt2x00_field32 field,
368 u32 *reg); 363 u32 *reg);
369 364
370/* 365/*
diff --git a/drivers/net/wireless/rtl818x/rtl8187.h b/drivers/net/wireless/rtl818x/rtl8187.h
index bf9175a8c1f4..abb4907cf296 100644
--- a/drivers/net/wireless/rtl818x/rtl8187.h
+++ b/drivers/net/wireless/rtl818x/rtl8187.h
@@ -119,7 +119,6 @@ struct rtl8187_priv {
119 } hw_rev; 119 } hw_rev;
120 struct sk_buff_head rx_queue; 120 struct sk_buff_head rx_queue;
121 u8 signal; 121 u8 signal;
122 u8 quality;
123 u8 noise; 122 u8 noise;
124 u8 slot_time; 123 u8 slot_time;
125 u8 aifsn[4]; 124 u8 aifsn[4];
diff --git a/drivers/net/wireless/rtl818x/rtl8187_dev.c b/drivers/net/wireless/rtl818x/rtl8187_dev.c
index 2017ccc00145..76973b8c7099 100644
--- a/drivers/net/wireless/rtl818x/rtl8187_dev.c
+++ b/drivers/net/wireless/rtl818x/rtl8187_dev.c
@@ -320,7 +320,6 @@ static void rtl8187_rx_cb(struct urb *urb)
320 struct ieee80211_rx_status rx_status = { 0 }; 320 struct ieee80211_rx_status rx_status = { 0 };
321 int rate, signal; 321 int rate, signal;
322 u32 flags; 322 u32 flags;
323 u32 quality;
324 unsigned long f; 323 unsigned long f;
325 324
326 spin_lock_irqsave(&priv->rx_queue.lock, f); 325 spin_lock_irqsave(&priv->rx_queue.lock, f);
@@ -338,10 +337,9 @@ static void rtl8187_rx_cb(struct urb *urb)
338 (typeof(hdr))(skb_tail_pointer(skb) - sizeof(*hdr)); 337 (typeof(hdr))(skb_tail_pointer(skb) - sizeof(*hdr));
339 flags = le32_to_cpu(hdr->flags); 338 flags = le32_to_cpu(hdr->flags);
340 /* As with the RTL8187B below, the AGC is used to calculate 339 /* As with the RTL8187B below, the AGC is used to calculate
341 * signal strength and quality. In this case, the scaling 340 * signal strength. In this case, the scaling
342 * constants are derived from the output of p54usb. 341 * constants are derived from the output of p54usb.
343 */ 342 */
344 quality = 130 - ((41 * hdr->agc) >> 6);
345 signal = -4 - ((27 * hdr->agc) >> 6); 343 signal = -4 - ((27 * hdr->agc) >> 6);
346 rx_status.antenna = (hdr->signal >> 7) & 1; 344 rx_status.antenna = (hdr->signal >> 7) & 1;
347 rx_status.mactime = le64_to_cpu(hdr->mac_time); 345 rx_status.mactime = le64_to_cpu(hdr->mac_time);
@@ -354,23 +352,18 @@ static void rtl8187_rx_cb(struct urb *urb)
354 * In testing, none of these quantities show qualitative 352 * In testing, none of these quantities show qualitative
355 * agreement with AP signal strength, except for the AGC, 353 * agreement with AP signal strength, except for the AGC,
356 * which is inversely proportional to the strength of the 354 * which is inversely proportional to the strength of the
357 * signal. In the following, the quality and signal strength 355 * signal. In the following, the signal strength
358 * are derived from the AGC. The arbitrary scaling constants 356 * is derived from the AGC. The arbitrary scaling constants
359 * are chosen to make the results close to the values obtained 357 * are chosen to make the results close to the values obtained
360 * for a BCM4312 using b43 as the driver. The noise is ignored 358 * for a BCM4312 using b43 as the driver. The noise is ignored
361 * for now. 359 * for now.
362 */ 360 */
363 flags = le32_to_cpu(hdr->flags); 361 flags = le32_to_cpu(hdr->flags);
364 quality = 170 - hdr->agc;
365 signal = 14 - hdr->agc / 2; 362 signal = 14 - hdr->agc / 2;
366 rx_status.antenna = (hdr->rssi >> 7) & 1; 363 rx_status.antenna = (hdr->rssi >> 7) & 1;
367 rx_status.mactime = le64_to_cpu(hdr->mac_time); 364 rx_status.mactime = le64_to_cpu(hdr->mac_time);
368 } 365 }
369 366
370 if (quality > 100)
371 quality = 100;
372 rx_status.qual = quality;
373 priv->quality = quality;
374 rx_status.signal = signal; 367 rx_status.signal = signal;
375 priv->signal = signal; 368 priv->signal = signal;
376 rate = (flags >> 20) & 0xF; 369 rate = (flags >> 20) & 0xF;
diff --git a/drivers/net/wireless/wl12xx/Kconfig b/drivers/net/wireless/wl12xx/Kconfig
index 88060e117541..785e0244e305 100644
--- a/drivers/net/wireless/wl12xx/Kconfig
+++ b/drivers/net/wireless/wl12xx/Kconfig
@@ -1,6 +1,6 @@
1menuconfig WL12XX 1menuconfig WL12XX
2 tristate "TI wl12xx driver support" 2 tristate "TI wl12xx driver support"
3 depends on MAC80211 && WLAN_80211 && EXPERIMENTAL 3 depends on MAC80211 && EXPERIMENTAL
4 ---help--- 4 ---help---
5 This will enable TI wl12xx driver support. The drivers make 5 This will enable TI wl12xx driver support. The drivers make
6 use of the mac80211 stack. 6 use of the mac80211 stack.
@@ -42,6 +42,7 @@ config WL1251_SDIO
42config WL1271 42config WL1271
43 tristate "TI wl1271 support" 43 tristate "TI wl1271 support"
44 depends on WL12XX && SPI_MASTER && GENERIC_HARDIRQS 44 depends on WL12XX && SPI_MASTER && GENERIC_HARDIRQS
45 depends on INET
45 select FW_LOADER 46 select FW_LOADER
46 select CRC7 47 select CRC7
47 ---help--- 48 ---help---
diff --git a/drivers/net/wireless/wl12xx/wl1251_main.c b/drivers/net/wireless/wl12xx/wl1251_main.c
index 1103256ad989..da3bf1cebc08 100644
--- a/drivers/net/wireless/wl12xx/wl1251_main.c
+++ b/drivers/net/wireless/wl12xx/wl1251_main.c
@@ -183,8 +183,11 @@ static int wl1251_chip_wakeup(struct wl1251 *wl)
183 wl1251_debug(DEBUG_BOOT, "chip id 0x%x (1251 PG12)", 183 wl1251_debug(DEBUG_BOOT, "chip id 0x%x (1251 PG12)",
184 wl->chip_id); 184 wl->chip_id);
185 break; 185 break;
186 case CHIP_ID_1251_PG10:
187 case CHIP_ID_1251_PG11: 186 case CHIP_ID_1251_PG11:
187 wl1251_debug(DEBUG_BOOT, "chip id 0x%x (1251 PG11)",
188 wl->chip_id);
189 break;
190 case CHIP_ID_1251_PG10:
188 default: 191 default:
189 wl1251_error("unsupported chip id: 0x%x", wl->chip_id); 192 wl1251_error("unsupported chip id: 0x%x", wl->chip_id);
190 ret = -ENODEV; 193 ret = -ENODEV;
@@ -1308,7 +1311,8 @@ int wl1251_init_ieee80211(struct wl1251 *wl)
1308 wl->hw->channel_change_time = 10000; 1311 wl->hw->channel_change_time = 10000;
1309 1312
1310 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | 1313 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
1311 IEEE80211_HW_NOISE_DBM; 1314 IEEE80211_HW_NOISE_DBM |
1315 IEEE80211_HW_SUPPORTS_PS;
1312 1316
1313 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 1317 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
1314 wl->hw->wiphy->max_scan_ssids = 1; 1318 wl->hw->wiphy->max_scan_ssids = 1;
@@ -1426,4 +1430,4 @@ EXPORT_SYMBOL_GPL(wl1251_free_hw);
1426MODULE_DESCRIPTION("TI wl1251 Wireles LAN Driver Core"); 1430MODULE_DESCRIPTION("TI wl1251 Wireles LAN Driver Core");
1427MODULE_LICENSE("GPL"); 1431MODULE_LICENSE("GPL");
1428MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>"); 1432MODULE_AUTHOR("Kalle Valo <kalle.valo@nokia.com>");
1429MODULE_ALIAS("spi:wl12xx"); 1433MODULE_ALIAS("spi:wl1251");
diff --git a/drivers/net/wireless/wl12xx/wl1251_netlink.h b/drivers/net/wireless/wl12xx/wl1251_netlink.h
deleted file mode 100644
index ee36695e134e..000000000000
--- a/drivers/net/wireless/wl12xx/wl1251_netlink.h
+++ /dev/null
@@ -1,30 +0,0 @@
1/*
2 * This file is part of wl1251
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 *
6 * Contact: Kalle Valo <kalle.valo@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#ifndef __WL1251_NETLINK_H__
25#define __WL1251_NETLINK_H__
26
27int wl1251_nl_register(void);
28void wl1251_nl_unregister(void);
29
30#endif /* __WL1251_NETLINK_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1251_rx.c b/drivers/net/wireless/wl12xx/wl1251_rx.c
index 17c54b59ef86..601fe0d67827 100644
--- a/drivers/net/wireless/wl12xx/wl1251_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1251_rx.c
@@ -153,7 +153,7 @@ static void wl1251_rx_body(struct wl1251 *wl,
153 beacon ? "beacon" : ""); 153 beacon ? "beacon" : "");
154 154
155 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status)); 155 memcpy(IEEE80211_SKB_RXCB(skb), &status, sizeof(status));
156 ieee80211_rx(wl->hw, skb); 156 ieee80211_rx_ni(wl->hw, skb);
157} 157}
158 158
159static void wl1251_rx_ack(struct wl1251 *wl) 159static void wl1251_rx_ack(struct wl1251 *wl)
diff --git a/drivers/net/wireless/wl12xx/wl1251_spi.c b/drivers/net/wireless/wl12xx/wl1251_spi.c
index 14eff2b3d4c6..2cf8a2169d43 100644
--- a/drivers/net/wireless/wl12xx/wl1251_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1251_spi.c
@@ -307,7 +307,7 @@ static int __devexit wl1251_spi_remove(struct spi_device *spi)
307 307
308static struct spi_driver wl1251_spi_driver = { 308static struct spi_driver wl1251_spi_driver = {
309 .driver = { 309 .driver = {
310 .name = "wl12xx", 310 .name = "wl1251",
311 .bus = &spi_bus_type, 311 .bus = &spi_bus_type,
312 .owner = THIS_MODULE, 312 .owner = THIS_MODULE,
313 }, 313 },
diff --git a/drivers/net/wireless/wl12xx/wl1271.h b/drivers/net/wireless/wl12xx/wl1271.h
index 55818f94017b..94359b1a861f 100644
--- a/drivers/net/wireless/wl12xx/wl1271.h
+++ b/drivers/net/wireless/wl12xx/wl1271.h
@@ -32,6 +32,8 @@
32#include <linux/bitops.h> 32#include <linux/bitops.h>
33#include <net/mac80211.h> 33#include <net/mac80211.h>
34 34
35#include "wl1271_conf.h"
36
35#define DRIVER_NAME "wl1271" 37#define DRIVER_NAME "wl1271"
36#define DRIVER_PREFIX DRIVER_NAME ": " 38#define DRIVER_PREFIX DRIVER_NAME ": "
37 39
@@ -97,21 +99,42 @@ enum {
97 } while (0) 99 } while (0)
98 100
99#define WL1271_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN | \ 101#define WL1271_DEFAULT_RX_CONFIG (CFG_UNI_FILTER_EN | \
100 CFG_BSSID_FILTER_EN) 102 CFG_BSSID_FILTER_EN | \
103 CFG_MC_FILTER_EN)
101 104
102#define WL1271_DEFAULT_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \ 105#define WL1271_DEFAULT_RX_FILTER (CFG_RX_RCTS_ACK | CFG_RX_PRSP_EN | \
103 CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \ 106 CFG_RX_MGMT_EN | CFG_RX_DATA_EN | \
104 CFG_RX_CTL_EN | CFG_RX_BCN_EN | \ 107 CFG_RX_CTL_EN | CFG_RX_BCN_EN | \
105 CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN) 108 CFG_RX_AUTH_EN | CFG_RX_ASSOC_EN)
106 109
110#define WL1271_DEFAULT_BASIC_RATE_SET (CONF_TX_RATE_MASK_ALL)
111
107#define WL1271_FW_NAME "wl1271-fw.bin" 112#define WL1271_FW_NAME "wl1271-fw.bin"
108#define WL1271_NVS_NAME "wl1271-nvs.bin" 113#define WL1271_NVS_NAME "wl1271-nvs.bin"
109 114
110#define WL1271_BUSY_WORD_LEN 8 115/*
116 * Enable/disable 802.11a support for WL1273
117 */
118#undef WL1271_80211A_ENABLED
119
120/*
121 * FIXME: for the wl1271, a busy word count of 1 here will result in a more
122 * optimal SPI interface. There is some SPI bug however, causing RXS time outs
123 * with this mode occasionally on boot, so lets have three for now. A value of
124 * three should make sure, that the chipset will always be ready, though this
125 * will impact throughput and latencies slightly.
126 */
127#define WL1271_BUSY_WORD_CNT 3
128#define WL1271_BUSY_WORD_LEN (WL1271_BUSY_WORD_CNT * sizeof(u32))
111 129
112#define WL1271_ELP_HW_STATE_ASLEEP 0 130#define WL1271_ELP_HW_STATE_ASLEEP 0
113#define WL1271_ELP_HW_STATE_IRQ 1 131#define WL1271_ELP_HW_STATE_IRQ 1
114 132
133#define WL1271_DEFAULT_BEACON_INT 100
134#define WL1271_DEFAULT_DTIM_PERIOD 1
135
136#define ACX_TX_DESCRIPTORS 32
137
115enum wl1271_state { 138enum wl1271_state {
116 WL1271_STATE_OFF, 139 WL1271_STATE_OFF,
117 WL1271_STATE_ON, 140 WL1271_STATE_ON,
@@ -134,6 +157,8 @@ struct wl1271_partition {
134struct wl1271_partition_set { 157struct wl1271_partition_set {
135 struct wl1271_partition mem; 158 struct wl1271_partition mem;
136 struct wl1271_partition reg; 159 struct wl1271_partition reg;
160 struct wl1271_partition mem2;
161 struct wl1271_partition mem3;
137}; 162};
138 163
139struct wl1271; 164struct wl1271;
@@ -258,15 +283,15 @@ struct wl1271_debugfs {
258 283
259/* FW status registers */ 284/* FW status registers */
260struct wl1271_fw_status { 285struct wl1271_fw_status {
261 u32 intr; 286 __le32 intr;
262 u8 fw_rx_counter; 287 u8 fw_rx_counter;
263 u8 drv_rx_counter; 288 u8 drv_rx_counter;
264 u8 reserved; 289 u8 reserved;
265 u8 tx_results_counter; 290 u8 tx_results_counter;
266 u32 rx_pkt_descs[NUM_RX_PKT_DESC]; 291 __le32 rx_pkt_descs[NUM_RX_PKT_DESC];
267 u32 tx_released_blks[NUM_TX_QUEUES]; 292 __le32 tx_released_blks[NUM_TX_QUEUES];
268 u32 fw_localtime; 293 __le32 fw_localtime;
269 u32 padding[2]; 294 __le32 padding[2];
270} __attribute__ ((packed)); 295} __attribute__ ((packed));
271 296
272struct wl1271_rx_mem_pool_addr { 297struct wl1271_rx_mem_pool_addr {
@@ -274,6 +299,15 @@ struct wl1271_rx_mem_pool_addr {
274 u32 addr_extra; 299 u32 addr_extra;
275}; 300};
276 301
302struct wl1271_scan {
303 u8 state;
304 u8 ssid[IW_ESSID_MAX_SIZE+1];
305 size_t ssid_len;
306 u8 active;
307 u8 high_prio;
308 u8 probe_requests;
309};
310
277struct wl1271 { 311struct wl1271 {
278 struct ieee80211_hw *hw; 312 struct ieee80211_hw *hw;
279 bool mac80211_registered; 313 bool mac80211_registered;
@@ -288,10 +322,7 @@ struct wl1271 {
288 enum wl1271_state state; 322 enum wl1271_state state;
289 struct mutex mutex; 323 struct mutex mutex;
290 324
291 int physical_mem_addr; 325 struct wl1271_partition_set part;
292 int physical_reg_addr;
293 int virtual_mem_addr;
294 int virtual_reg_addr;
295 326
296 struct wl1271_chip chip; 327 struct wl1271_chip chip;
297 328
@@ -308,7 +339,6 @@ struct wl1271 {
308 u8 bss_type; 339 u8 bss_type;
309 u8 ssid[IW_ESSID_MAX_SIZE + 1]; 340 u8 ssid[IW_ESSID_MAX_SIZE + 1];
310 u8 ssid_len; 341 u8 ssid_len;
311 u8 listen_int;
312 int channel; 342 int channel;
313 343
314 struct wl1271_acx_mem_map *target_mem_map; 344 struct wl1271_acx_mem_map *target_mem_map;
@@ -332,10 +362,14 @@ struct wl1271 {
332 bool tx_queue_stopped; 362 bool tx_queue_stopped;
333 363
334 struct work_struct tx_work; 364 struct work_struct tx_work;
335 struct work_struct filter_work;
336 365
337 /* Pending TX frames */ 366 /* Pending TX frames */
338 struct sk_buff *tx_frames[16]; 367 struct sk_buff *tx_frames[ACX_TX_DESCRIPTORS];
368
369 /* Security sequence number counters */
370 u8 tx_security_last_seq;
371 u16 tx_security_seq_16;
372 u32 tx_security_seq_32;
339 373
340 /* FW Rx counter */ 374 /* FW Rx counter */
341 u32 rx_counter; 375 u32 rx_counter;
@@ -354,10 +388,17 @@ struct wl1271 {
354 388
355 /* Are we currently scanning */ 389 /* Are we currently scanning */
356 bool scanning; 390 bool scanning;
391 struct wl1271_scan scan;
357 392
358 /* Our association ID */ 393 /* Our association ID */
359 u16 aid; 394 u16 aid;
360 395
396 /* currently configured rate set */
397 u32 basic_rate_set;
398
399 /* The current band */
400 enum ieee80211_band band;
401
361 /* Default key (for WEP) */ 402 /* Default key (for WEP) */
362 u32 default_key; 403 u32 default_key;
363 404
@@ -368,6 +409,7 @@ struct wl1271 {
368 bool elp; 409 bool elp;
369 410
370 struct completion *elp_compl; 411 struct completion *elp_compl;
412 struct delayed_work elp_work;
371 413
372 /* we can be in psm, but not in elp, we have to differentiate */ 414 /* we can be in psm, but not in elp, we have to differentiate */
373 bool psm; 415 bool psm;
@@ -375,6 +417,9 @@ struct wl1271 {
375 /* PSM mode requested */ 417 /* PSM mode requested */
376 bool psm_requested; 418 bool psm_requested;
377 419
420 /* retry counter for PSM entries */
421 u8 psm_entry_retry;
422
378 /* in dBm */ 423 /* in dBm */
379 int power_level; 424 int power_level;
380 425
@@ -383,11 +428,20 @@ struct wl1271 {
383 428
384 u32 buffer_32; 429 u32 buffer_32;
385 u32 buffer_cmd; 430 u32 buffer_cmd;
386 u8 buffer_busyword[WL1271_BUSY_WORD_LEN]; 431 u32 buffer_busyword[WL1271_BUSY_WORD_CNT];
387 struct wl1271_rx_descriptor *rx_descriptor;
388 432
389 struct wl1271_fw_status *fw_status; 433 struct wl1271_fw_status *fw_status;
390 struct wl1271_tx_hw_res_if *tx_res_if; 434 struct wl1271_tx_hw_res_if *tx_res_if;
435
436 struct ieee80211_vif *vif;
437
438 /* Used for a workaround to send disconnect before rejoining */
439 bool joined;
440
441 /* Current chipset configuration */
442 struct conf_drv_settings conf;
443
444 struct list_head list;
391}; 445};
392 446
393int wl1271_plt_start(struct wl1271 *wl); 447int wl1271_plt_start(struct wl1271 *wl);
@@ -404,4 +458,13 @@ int wl1271_plt_stop(struct wl1271 *wl);
404/* WL1271 needs a 200ms sleep after power on */ 458/* WL1271 needs a 200ms sleep after power on */
405#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */ 459#define WL1271_POWER_ON_SLEEP 200 /* in miliseconds */
406 460
461static inline bool wl1271_11a_enabled(void)
462{
463#ifdef WL1271_80211A_ENABLED
464 return true;
465#else
466 return false;
467#endif
468}
469
407#endif 470#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.c b/drivers/net/wireless/wl12xx/wl1271_acx.c
index f622a4092615..5cc89bbdac7a 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.c
@@ -34,8 +34,7 @@
34#include "wl1271_spi.h" 34#include "wl1271_spi.h"
35#include "wl1271_ps.h" 35#include "wl1271_ps.h"
36 36
37int wl1271_acx_wake_up_conditions(struct wl1271 *wl, u8 wake_up_event, 37int wl1271_acx_wake_up_conditions(struct wl1271 *wl)
38 u8 listen_interval)
39{ 38{
40 struct acx_wake_up_condition *wake_up; 39 struct acx_wake_up_condition *wake_up;
41 int ret; 40 int ret;
@@ -48,8 +47,8 @@ int wl1271_acx_wake_up_conditions(struct wl1271 *wl, u8 wake_up_event,
48 goto out; 47 goto out;
49 } 48 }
50 49
51 wake_up->wake_up_event = wake_up_event; 50 wake_up->wake_up_event = wl->conf.conn.wake_up_event;
52 wake_up->listen_interval = listen_interval; 51 wake_up->listen_interval = wl->conf.conn.listen_interval;
53 52
54 ret = wl1271_cmd_configure(wl, ACX_WAKE_UP_CONDITIONS, 53 ret = wl1271_cmd_configure(wl, ACX_WAKE_UP_CONDITIONS,
55 wake_up, sizeof(*wake_up)); 54 wake_up, sizeof(*wake_up));
@@ -137,7 +136,12 @@ int wl1271_acx_tx_power(struct wl1271 *wl, int power)
137 goto out; 136 goto out;
138 } 137 }
139 138
140 acx->current_tx_power = power * 10; 139 /*
140 * FIXME: This is a workaround needed while we don't the correct
141 * calibration, to avoid distortions
142 */
143 /* acx->current_tx_power = power * 10; */
144 acx->current_tx_power = 120;
141 145
142 ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx)); 146 ret = wl1271_cmd_configure(wl, DOT11_CUR_TX_PWR, acx, sizeof(*acx));
143 if (ret < 0) { 147 if (ret < 0) {
@@ -193,7 +197,7 @@ int wl1271_acx_mem_map(struct wl1271 *wl, struct acx_header *mem_map,
193 return 0; 197 return 0;
194} 198}
195 199
196int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl, u32 life_time) 200int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl)
197{ 201{
198 struct acx_rx_msdu_lifetime *acx; 202 struct acx_rx_msdu_lifetime *acx;
199 int ret; 203 int ret;
@@ -206,7 +210,7 @@ int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl, u32 life_time)
206 goto out; 210 goto out;
207 } 211 }
208 212
209 acx->lifetime = life_time; 213 acx->lifetime = cpu_to_le32(wl->conf.rx.rx_msdu_life_time);
210 ret = wl1271_cmd_configure(wl, DOT11_RX_MSDU_LIFE_TIME, 214 ret = wl1271_cmd_configure(wl, DOT11_RX_MSDU_LIFE_TIME,
211 acx, sizeof(*acx)); 215 acx, sizeof(*acx));
212 if (ret < 0) { 216 if (ret < 0) {
@@ -232,8 +236,8 @@ int wl1271_acx_rx_config(struct wl1271 *wl, u32 config, u32 filter)
232 goto out; 236 goto out;
233 } 237 }
234 238
235 rx_config->config_options = config; 239 rx_config->config_options = cpu_to_le32(config);
236 rx_config->filter_options = filter; 240 rx_config->filter_options = cpu_to_le32(filter);
237 241
238 ret = wl1271_cmd_configure(wl, ACX_RX_CFG, 242 ret = wl1271_cmd_configure(wl, ACX_RX_CFG,
239 rx_config, sizeof(*rx_config)); 243 rx_config, sizeof(*rx_config));
@@ -260,7 +264,7 @@ int wl1271_acx_pd_threshold(struct wl1271 *wl)
260 goto out; 264 goto out;
261 } 265 }
262 266
263 /* FIXME: threshold value not set */ 267 pd->threshold = cpu_to_le32(wl->conf.rx.packet_detection_threshold);
264 268
265 ret = wl1271_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd)); 269 ret = wl1271_cmd_configure(wl, ACX_PD_THRESHOLD, pd, sizeof(*pd));
266 if (ret < 0) { 270 if (ret < 0) {
@@ -300,7 +304,8 @@ out:
300 return ret; 304 return ret;
301} 305}
302 306
303int wl1271_acx_group_address_tbl(struct wl1271 *wl) 307int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
308 void *mc_list, u32 mc_list_len)
304{ 309{
305 struct acx_dot11_grp_addr_tbl *acx; 310 struct acx_dot11_grp_addr_tbl *acx;
306 int ret; 311 int ret;
@@ -314,9 +319,9 @@ int wl1271_acx_group_address_tbl(struct wl1271 *wl)
314 } 319 }
315 320
316 /* MAC filtering */ 321 /* MAC filtering */
317 acx->enabled = 0; 322 acx->enabled = enable;
318 acx->num_groups = 0; 323 acx->num_groups = mc_list_len;
319 memset(acx->mac_table, 0, ADDRESS_GROUP_MAX_LEN); 324 memcpy(acx->mac_table, mc_list, mc_list_len * ETH_ALEN);
320 325
321 ret = wl1271_cmd_configure(wl, DOT11_GROUP_ADDRESS_TBL, 326 ret = wl1271_cmd_configure(wl, DOT11_GROUP_ADDRESS_TBL,
322 acx, sizeof(*acx)); 327 acx, sizeof(*acx));
@@ -343,8 +348,8 @@ int wl1271_acx_service_period_timeout(struct wl1271 *wl)
343 348
344 wl1271_debug(DEBUG_ACX, "acx service period timeout"); 349 wl1271_debug(DEBUG_ACX, "acx service period timeout");
345 350
346 rx_timeout->ps_poll_timeout = RX_TIMEOUT_PS_POLL_DEF; 351 rx_timeout->ps_poll_timeout = cpu_to_le16(wl->conf.rx.ps_poll_timeout);
347 rx_timeout->upsd_timeout = RX_TIMEOUT_UPSD_DEF; 352 rx_timeout->upsd_timeout = cpu_to_le16(wl->conf.rx.upsd_timeout);
348 353
349 ret = wl1271_cmd_configure(wl, ACX_SERVICE_PERIOD_TIMEOUT, 354 ret = wl1271_cmd_configure(wl, ACX_SERVICE_PERIOD_TIMEOUT,
350 rx_timeout, sizeof(*rx_timeout)); 355 rx_timeout, sizeof(*rx_timeout));
@@ -372,7 +377,7 @@ int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold)
372 goto out; 377 goto out;
373 } 378 }
374 379
375 rts->threshold = rts_threshold; 380 rts->threshold = cpu_to_le16(rts_threshold);
376 381
377 ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts)); 382 ret = wl1271_cmd_configure(wl, DOT11_RTS_THRESHOLD, rts, sizeof(*rts));
378 if (ret < 0) { 383 if (ret < 0) {
@@ -385,20 +390,29 @@ out:
385 return ret; 390 return ret;
386} 391}
387 392
388int wl1271_acx_beacon_filter_opt(struct wl1271 *wl) 393int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter)
389{ 394{
390 struct acx_beacon_filter_option *beacon_filter; 395 struct acx_beacon_filter_option *beacon_filter = NULL;
391 int ret; 396 int ret = 0;
392 397
393 wl1271_debug(DEBUG_ACX, "acx beacon filter opt"); 398 wl1271_debug(DEBUG_ACX, "acx beacon filter opt");
394 399
400 if (enable_filter &&
401 wl->conf.conn.bcn_filt_mode == CONF_BCN_FILT_MODE_DISABLED)
402 goto out;
403
395 beacon_filter = kzalloc(sizeof(*beacon_filter), GFP_KERNEL); 404 beacon_filter = kzalloc(sizeof(*beacon_filter), GFP_KERNEL);
396 if (!beacon_filter) { 405 if (!beacon_filter) {
397 ret = -ENOMEM; 406 ret = -ENOMEM;
398 goto out; 407 goto out;
399 } 408 }
400 409
401 beacon_filter->enable = 0; 410 beacon_filter->enable = enable_filter;
411
412 /*
413 * When set to zero, and the filter is enabled, beacons
414 * without the unicast TIM bit set are dropped.
415 */
402 beacon_filter->max_num_beacons = 0; 416 beacon_filter->max_num_beacons = 0;
403 417
404 ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_OPT, 418 ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_OPT,
@@ -416,7 +430,9 @@ out:
416int wl1271_acx_beacon_filter_table(struct wl1271 *wl) 430int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
417{ 431{
418 struct acx_beacon_filter_ie_table *ie_table; 432 struct acx_beacon_filter_ie_table *ie_table;
433 int i, idx = 0;
419 int ret; 434 int ret;
435 bool vendor_spec = false;
420 436
421 wl1271_debug(DEBUG_ACX, "acx beacon filter table"); 437 wl1271_debug(DEBUG_ACX, "acx beacon filter table");
422 438
@@ -426,8 +442,32 @@ int wl1271_acx_beacon_filter_table(struct wl1271 *wl)
426 goto out; 442 goto out;
427 } 443 }
428 444
445 /* configure default beacon pass-through rules */
429 ie_table->num_ie = 0; 446 ie_table->num_ie = 0;
430 memset(ie_table->table, 0, BEACON_FILTER_TABLE_MAX_SIZE); 447 for (i = 0; i < wl->conf.conn.bcn_filt_ie_count; i++) {
448 struct conf_bcn_filt_rule *r = &(wl->conf.conn.bcn_filt_ie[i]);
449 ie_table->table[idx++] = r->ie;
450 ie_table->table[idx++] = r->rule;
451
452 if (r->ie == WLAN_EID_VENDOR_SPECIFIC) {
453 /* only one vendor specific ie allowed */
454 if (vendor_spec)
455 continue;
456
457 /* for vendor specific rules configure the
458 additional fields */
459 memcpy(&(ie_table->table[idx]), r->oui,
460 CONF_BCN_IE_OUI_LEN);
461 idx += CONF_BCN_IE_OUI_LEN;
462 ie_table->table[idx++] = r->type;
463 memcpy(&(ie_table->table[idx]), r->version,
464 CONF_BCN_IE_VER_LEN);
465 idx += CONF_BCN_IE_VER_LEN;
466 vendor_spec = true;
467 }
468
469 ie_table->num_ie++;
470 }
431 471
432 ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_TABLE, 472 ret = wl1271_cmd_configure(wl, ACX_BEACON_FILTER_TABLE,
433 ie_table, sizeof(*ie_table)); 473 ie_table, sizeof(*ie_table));
@@ -441,6 +481,36 @@ out:
441 return ret; 481 return ret;
442} 482}
443 483
484int wl1271_acx_conn_monit_params(struct wl1271 *wl)
485{
486 struct acx_conn_monit_params *acx;
487 int ret;
488
489 wl1271_debug(DEBUG_ACX, "acx connection monitor parameters");
490
491 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
492 if (!acx) {
493 ret = -ENOMEM;
494 goto out;
495 }
496
497 acx->synch_fail_thold = cpu_to_le32(wl->conf.conn.synch_fail_thold);
498 acx->bss_lose_timeout = cpu_to_le32(wl->conf.conn.bss_lose_timeout);
499
500 ret = wl1271_cmd_configure(wl, ACX_CONN_MONIT_PARAMS,
501 acx, sizeof(*acx));
502 if (ret < 0) {
503 wl1271_warning("failed to set connection monitor "
504 "parameters: %d", ret);
505 goto out;
506 }
507
508out:
509 kfree(acx);
510 return ret;
511}
512
513
444int wl1271_acx_sg_enable(struct wl1271 *wl) 514int wl1271_acx_sg_enable(struct wl1271 *wl)
445{ 515{
446 struct acx_bt_wlan_coex *pta; 516 struct acx_bt_wlan_coex *pta;
@@ -470,6 +540,7 @@ out:
470int wl1271_acx_sg_cfg(struct wl1271 *wl) 540int wl1271_acx_sg_cfg(struct wl1271 *wl)
471{ 541{
472 struct acx_bt_wlan_coex_param *param; 542 struct acx_bt_wlan_coex_param *param;
543 struct conf_sg_settings *c = &wl->conf.sg;
473 int ret; 544 int ret;
474 545
475 wl1271_debug(DEBUG_ACX, "acx sg cfg"); 546 wl1271_debug(DEBUG_ACX, "acx sg cfg");
@@ -481,34 +552,19 @@ int wl1271_acx_sg_cfg(struct wl1271 *wl)
481 } 552 }
482 553
483 /* BT-WLAN coext parameters */ 554 /* BT-WLAN coext parameters */
484 param->min_rate = RATE_INDEX_24MBPS; 555 param->per_threshold = cpu_to_le32(c->per_threshold);
485 param->bt_hp_max_time = PTA_BT_HP_MAXTIME_DEF; 556 param->max_scan_compensation_time =
486 param->wlan_hp_max_time = PTA_WLAN_HP_MAX_TIME_DEF; 557 cpu_to_le32(c->max_scan_compensation_time);
487 param->sense_disable_timer = PTA_SENSE_DISABLE_TIMER_DEF; 558 param->nfs_sample_interval = cpu_to_le16(c->nfs_sample_interval);
488 param->rx_time_bt_hp = PTA_PROTECTIVE_RX_TIME_DEF; 559 param->load_ratio = c->load_ratio;
489 param->tx_time_bt_hp = PTA_PROTECTIVE_TX_TIME_DEF; 560 param->auto_ps_mode = c->auto_ps_mode;
490 param->rx_time_bt_hp_fast = PTA_PROTECTIVE_RX_TIME_FAST_DEF; 561 param->probe_req_compensation = c->probe_req_compensation;
491 param->tx_time_bt_hp_fast = PTA_PROTECTIVE_TX_TIME_FAST_DEF; 562 param->scan_window_compensation = c->scan_window_compensation;
492 param->wlan_cycle_fast = PTA_CYCLE_TIME_FAST_DEF; 563 param->antenna_config = c->antenna_config;
493 param->bt_anti_starvation_period = PTA_ANTI_STARVE_PERIOD_DEF; 564 param->beacon_miss_threshold = c->beacon_miss_threshold;
494 param->next_bt_lp_packet = PTA_TIMEOUT_NEXT_BT_LP_PACKET_DEF; 565 param->rate_adaptation_threshold =
495 param->wake_up_beacon = PTA_TIME_BEFORE_BEACON_DEF; 566 cpu_to_le32(c->rate_adaptation_threshold);
496 param->hp_dm_max_guard_time = PTA_HPDM_MAX_TIME_DEF; 567 param->rate_adaptation_snr = c->rate_adaptation_snr;
497 param->next_wlan_packet = PTA_TIME_OUT_NEXT_WLAN_DEF;
498 param->antenna_type = PTA_ANTENNA_TYPE_DEF;
499 param->signal_type = PTA_SIGNALING_TYPE_DEF;
500 param->afh_leverage_on = PTA_AFH_LEVERAGE_ON_DEF;
501 param->quiet_cycle_num = PTA_NUMBER_QUIET_CYCLE_DEF;
502 param->max_cts = PTA_MAX_NUM_CTS_DEF;
503 param->wlan_packets_num = PTA_NUMBER_OF_WLAN_PACKETS_DEF;
504 param->bt_packets_num = PTA_NUMBER_OF_BT_PACKETS_DEF;
505 param->missed_rx_avalanche = PTA_RX_FOR_AVALANCHE_DEF;
506 param->wlan_elp_hp = PTA_ELP_HP_DEF;
507 param->bt_anti_starvation_cycles = PTA_ANTI_STARVE_NUM_CYCLE_DEF;
508 param->ack_mode_dual_ant = PTA_ACK_MODE_DEF;
509 param->pa_sd_enable = PTA_ALLOW_PA_SD_DEF;
510 param->pta_auto_mode_enable = PTA_AUTO_MODE_NO_CTS_DEF;
511 param->bt_hp_respected_num = PTA_BT_HP_RESPECTED_DEF;
512 568
513 ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param)); 569 ret = wl1271_cmd_configure(wl, ACX_SG_CFG, param, sizeof(*param));
514 if (ret < 0) { 570 if (ret < 0) {
@@ -534,8 +590,8 @@ int wl1271_acx_cca_threshold(struct wl1271 *wl)
534 goto out; 590 goto out;
535 } 591 }
536 592
537 detection->rx_cca_threshold = CCA_THRSH_DISABLE_ENERGY_D; 593 detection->rx_cca_threshold = cpu_to_le16(wl->conf.rx.rx_cca_threshold);
538 detection->tx_energy_detection = 0; 594 detection->tx_energy_detection = wl->conf.tx.tx_energy_detection;
539 595
540 ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD, 596 ret = wl1271_cmd_configure(wl, ACX_CCA_THRESHOLD,
541 detection, sizeof(*detection)); 597 detection, sizeof(*detection));
@@ -562,10 +618,10 @@ int wl1271_acx_bcn_dtim_options(struct wl1271 *wl)
562 goto out; 618 goto out;
563 } 619 }
564 620
565 bb->beacon_rx_timeout = BCN_RX_TIMEOUT_DEF_VALUE; 621 bb->beacon_rx_timeout = cpu_to_le16(wl->conf.conn.beacon_rx_timeout);
566 bb->broadcast_timeout = BROADCAST_RX_TIMEOUT_DEF_VALUE; 622 bb->broadcast_timeout = cpu_to_le16(wl->conf.conn.broadcast_timeout);
567 bb->rx_broadcast_in_ps = RX_BROADCAST_IN_PS_DEF_VALUE; 623 bb->rx_broadcast_in_ps = wl->conf.conn.rx_broadcast_in_ps;
568 bb->ps_poll_threshold = CONSECUTIVE_PS_POLL_FAILURE_DEF; 624 bb->ps_poll_threshold = wl->conf.conn.ps_poll_threshold;
569 625
570 ret = wl1271_cmd_configure(wl, ACX_BCN_DTIM_OPTIONS, bb, sizeof(*bb)); 626 ret = wl1271_cmd_configure(wl, ACX_BCN_DTIM_OPTIONS, bb, sizeof(*bb));
571 if (ret < 0) { 627 if (ret < 0) {
@@ -591,7 +647,7 @@ int wl1271_acx_aid(struct wl1271 *wl, u16 aid)
591 goto out; 647 goto out;
592 } 648 }
593 649
594 acx_aid->aid = aid; 650 acx_aid->aid = cpu_to_le16(aid);
595 651
596 ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid)); 652 ret = wl1271_cmd_configure(wl, ACX_AID, acx_aid, sizeof(*acx_aid));
597 if (ret < 0) { 653 if (ret < 0) {
@@ -618,9 +674,8 @@ int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask)
618 } 674 }
619 675
620 /* high event mask is unused */ 676 /* high event mask is unused */
621 mask->high_event_mask = 0xffffffff; 677 mask->high_event_mask = cpu_to_le32(0xffffffff);
622 678 mask->event_mask = cpu_to_le32(event_mask);
623 mask->event_mask = event_mask;
624 679
625 ret = wl1271_cmd_configure(wl, ACX_EVENT_MBOX_MASK, 680 ret = wl1271_cmd_configure(wl, ACX_EVENT_MBOX_MASK,
626 mask, sizeof(*mask)); 681 mask, sizeof(*mask));
@@ -703,9 +758,10 @@ int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats)
703 return 0; 758 return 0;
704} 759}
705 760
706int wl1271_acx_rate_policies(struct wl1271 *wl) 761int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates)
707{ 762{
708 struct acx_rate_policy *acx; 763 struct acx_rate_policy *acx;
764 struct conf_tx_rate_class *c = &wl->conf.tx.rc_conf;
709 int ret = 0; 765 int ret = 0;
710 766
711 wl1271_debug(DEBUG_ACX, "acx rate policies"); 767 wl1271_debug(DEBUG_ACX, "acx rate policies");
@@ -718,11 +774,11 @@ int wl1271_acx_rate_policies(struct wl1271 *wl)
718 } 774 }
719 775
720 /* configure one default (one-size-fits-all) rate class */ 776 /* configure one default (one-size-fits-all) rate class */
721 acx->rate_class_cnt = 1; 777 acx->rate_class_cnt = cpu_to_le32(1);
722 acx->rate_class[0].enabled_rates = ACX_RATE_MASK_ALL; 778 acx->rate_class[0].enabled_rates = cpu_to_le32(enabled_rates);
723 acx->rate_class[0].short_retry_limit = ACX_RATE_RETRY_LIMIT; 779 acx->rate_class[0].short_retry_limit = c->short_retry_limit;
724 acx->rate_class[0].long_retry_limit = ACX_RATE_RETRY_LIMIT; 780 acx->rate_class[0].long_retry_limit = c->long_retry_limit;
725 acx->rate_class[0].aflags = 0; 781 acx->rate_class[0].aflags = c->aflags;
726 782
727 ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx)); 783 ret = wl1271_cmd_configure(wl, ACX_RATE_POLICY, acx, sizeof(*acx));
728 if (ret < 0) { 784 if (ret < 0) {
@@ -749,22 +805,14 @@ int wl1271_acx_ac_cfg(struct wl1271 *wl)
749 goto out; 805 goto out;
750 } 806 }
751 807
752 /* 808 for (i = 0; i < wl->conf.tx.ac_conf_count; i++) {
753 * FIXME: Configure each AC with appropriate values (most suitable 809 struct conf_tx_ac_category *c = &(wl->conf.tx.ac_conf[i]);
754 * values will probably be different for each AC. 810 acx->ac = c->ac;
755 */ 811 acx->cw_min = c->cw_min;
756 for (i = 0; i < WL1271_ACX_AC_COUNT; i++) { 812 acx->cw_max = cpu_to_le16(c->cw_max);
757 acx->ac = i; 813 acx->aifsn = c->aifsn;
758
759 /*
760 * FIXME: The following default values originate from
761 * the TI reference driver. What do they mean?
762 */
763 acx->cw_min = 15;
764 acx->cw_max = 63;
765 acx->aifsn = 3;
766 acx->reserved = 0; 814 acx->reserved = 0;
767 acx->tx_op_limit = 0; 815 acx->tx_op_limit = cpu_to_le16(c->tx_op_limit);
768 816
769 ret = wl1271_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx)); 817 ret = wl1271_cmd_configure(wl, ACX_AC_CFG, acx, sizeof(*acx));
770 if (ret < 0) { 818 if (ret < 0) {
@@ -793,12 +841,15 @@ int wl1271_acx_tid_cfg(struct wl1271 *wl)
793 goto out; 841 goto out;
794 } 842 }
795 843
796 /* FIXME: configure each TID with a different AC reference */ 844 for (i = 0; i < wl->conf.tx.tid_conf_count; i++) {
797 for (i = 0; i < WL1271_ACX_TID_COUNT; i++) { 845 struct conf_tx_tid *c = &(wl->conf.tx.tid_conf[i]);
798 acx->queue_id = i; 846 acx->queue_id = c->queue_id;
799 acx->tsid = WL1271_ACX_AC_BE; 847 acx->channel_type = c->channel_type;
800 acx->ps_scheme = WL1271_ACX_PS_SCHEME_LEGACY; 848 acx->tsid = c->tsid;
801 acx->ack_policy = WL1271_ACX_ACK_POLICY_LEGACY; 849 acx->ps_scheme = c->ps_scheme;
850 acx->ack_policy = c->ack_policy;
851 acx->apsd_conf[0] = cpu_to_le32(c->apsd_conf[0]);
852 acx->apsd_conf[1] = cpu_to_le32(c->apsd_conf[1]);
802 853
803 ret = wl1271_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx)); 854 ret = wl1271_cmd_configure(wl, ACX_TID_CFG, acx, sizeof(*acx));
804 if (ret < 0) { 855 if (ret < 0) {
@@ -826,7 +877,7 @@ int wl1271_acx_frag_threshold(struct wl1271 *wl)
826 goto out; 877 goto out;
827 } 878 }
828 879
829 acx->frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD; 880 acx->frag_threshold = cpu_to_le16(wl->conf.tx.frag_threshold);
830 ret = wl1271_cmd_configure(wl, ACX_FRAG_CFG, acx, sizeof(*acx)); 881 ret = wl1271_cmd_configure(wl, ACX_FRAG_CFG, acx, sizeof(*acx));
831 if (ret < 0) { 882 if (ret < 0) {
832 wl1271_warning("Setting of frag threshold failed: %d", ret); 883 wl1271_warning("Setting of frag threshold failed: %d", ret);
@@ -852,8 +903,8 @@ int wl1271_acx_tx_config_options(struct wl1271 *wl)
852 goto out; 903 goto out;
853 } 904 }
854 905
855 acx->tx_compl_timeout = WL1271_ACX_TX_COMPL_TIMEOUT; 906 acx->tx_compl_timeout = cpu_to_le16(wl->conf.tx.tx_compl_timeout);
856 acx->tx_compl_threshold = WL1271_ACX_TX_COMPL_THRESHOLD; 907 acx->tx_compl_threshold = cpu_to_le16(wl->conf.tx.tx_compl_threshold);
857 ret = wl1271_cmd_configure(wl, ACX_TX_CONFIG_OPT, acx, sizeof(*acx)); 908 ret = wl1271_cmd_configure(wl, ACX_TX_CONFIG_OPT, acx, sizeof(*acx));
858 if (ret < 0) { 909 if (ret < 0) {
859 wl1271_warning("Setting of tx options failed: %d", ret); 910 wl1271_warning("Setting of tx options failed: %d", ret);
@@ -879,11 +930,11 @@ int wl1271_acx_mem_cfg(struct wl1271 *wl)
879 } 930 }
880 931
881 /* memory config */ 932 /* memory config */
882 mem_conf->num_stations = cpu_to_le16(DEFAULT_NUM_STATIONS); 933 mem_conf->num_stations = DEFAULT_NUM_STATIONS;
883 mem_conf->rx_mem_block_num = ACX_RX_MEM_BLOCKS; 934 mem_conf->rx_mem_block_num = ACX_RX_MEM_BLOCKS;
884 mem_conf->tx_min_mem_block_num = ACX_TX_MIN_MEM_BLOCKS; 935 mem_conf->tx_min_mem_block_num = ACX_TX_MIN_MEM_BLOCKS;
885 mem_conf->num_ssid_profiles = ACX_NUM_SSID_PROFILES; 936 mem_conf->num_ssid_profiles = ACX_NUM_SSID_PROFILES;
886 mem_conf->total_tx_descriptors = ACX_TX_DESCRIPTORS; 937 mem_conf->total_tx_descriptors = cpu_to_le32(ACX_TX_DESCRIPTORS);
887 938
888 ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf, 939 ret = wl1271_cmd_configure(wl, ACX_MEM_CFG, mem_conf,
889 sizeof(*mem_conf)); 940 sizeof(*mem_conf));
@@ -906,7 +957,7 @@ int wl1271_acx_init_mem_config(struct wl1271 *wl)
906 return ret; 957 return ret;
907 958
908 wl->target_mem_map = kzalloc(sizeof(struct wl1271_acx_mem_map), 959 wl->target_mem_map = kzalloc(sizeof(struct wl1271_acx_mem_map),
909 GFP_KERNEL); 960 GFP_KERNEL);
910 if (!wl->target_mem_map) { 961 if (!wl->target_mem_map) {
911 wl1271_error("couldn't allocate target memory map"); 962 wl1271_error("couldn't allocate target memory map");
912 return -ENOMEM; 963 return -ENOMEM;
@@ -923,7 +974,8 @@ int wl1271_acx_init_mem_config(struct wl1271 *wl)
923 } 974 }
924 975
925 /* initialize TX block book keeping */ 976 /* initialize TX block book keeping */
926 wl->tx_blocks_available = wl->target_mem_map->num_tx_mem_blocks; 977 wl->tx_blocks_available =
978 le32_to_cpu(wl->target_mem_map->num_tx_mem_blocks);
927 wl1271_debug(DEBUG_TX, "available tx blocks: %d", 979 wl1271_debug(DEBUG_TX, "available tx blocks: %d",
928 wl->tx_blocks_available); 980 wl->tx_blocks_available);
929 981
@@ -943,10 +995,10 @@ int wl1271_acx_init_rx_interrupt(struct wl1271 *wl)
943 goto out; 995 goto out;
944 } 996 }
945 997
946 rx_conf->threshold = WL1271_RX_INTR_THRESHOLD_DEF; 998 rx_conf->threshold = cpu_to_le16(wl->conf.rx.irq_pkt_threshold);
947 rx_conf->timeout = WL1271_RX_INTR_TIMEOUT_DEF; 999 rx_conf->timeout = cpu_to_le16(wl->conf.rx.irq_timeout);
948 rx_conf->mblk_threshold = USHORT_MAX; /* Disabled */ 1000 rx_conf->mblk_threshold = cpu_to_le16(wl->conf.rx.irq_blk_threshold);
949 rx_conf->queue_type = RX_QUEUE_TYPE_RX_LOW_PRIORITY; 1001 rx_conf->queue_type = wl->conf.rx.queue_type;
950 1002
951 ret = wl1271_cmd_configure(wl, ACX_RX_CONFIG_OPT, rx_conf, 1003 ret = wl1271_cmd_configure(wl, ACX_RX_CONFIG_OPT, rx_conf,
952 sizeof(*rx_conf)); 1004 sizeof(*rx_conf));
@@ -959,3 +1011,124 @@ out:
959 kfree(rx_conf); 1011 kfree(rx_conf);
960 return ret; 1012 return ret;
961} 1013}
1014
1015int wl1271_acx_smart_reflex(struct wl1271 *wl)
1016{
1017 struct acx_smart_reflex_state *sr_state = NULL;
1018 struct acx_smart_reflex_config_params *sr_param = NULL;
1019 int i, ret;
1020
1021 wl1271_debug(DEBUG_ACX, "acx smart reflex");
1022
1023 sr_param = kzalloc(sizeof(*sr_param), GFP_KERNEL);
1024 if (!sr_param) {
1025 ret = -ENOMEM;
1026 goto out;
1027 }
1028
1029 for (i = 0; i < CONF_SR_ERR_TBL_COUNT; i++) {
1030 struct conf_mart_reflex_err_table *e =
1031 &(wl->conf.init.sr_err_tbl[i]);
1032
1033 sr_param->error_table[i].len = e->len;
1034 sr_param->error_table[i].upper_limit = e->upper_limit;
1035 memcpy(sr_param->error_table[i].values, e->values, e->len);
1036 }
1037
1038 ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_PARAMS,
1039 sr_param, sizeof(*sr_param));
1040 if (ret < 0) {
1041 wl1271_warning("failed to set smart reflex params: %d", ret);
1042 goto out;
1043 }
1044
1045 sr_state = kzalloc(sizeof(*sr_state), GFP_KERNEL);
1046 if (!sr_state) {
1047 ret = -ENOMEM;
1048 goto out;
1049 }
1050
1051 /* enable smart reflex */
1052 sr_state->enable = wl->conf.init.sr_enable;
1053
1054 ret = wl1271_cmd_configure(wl, ACX_SET_SMART_REFLEX_STATE,
1055 sr_state, sizeof(*sr_state));
1056 if (ret < 0) {
1057 wl1271_warning("failed to set smart reflex params: %d", ret);
1058 goto out;
1059 }
1060
1061out:
1062 kfree(sr_state);
1063 kfree(sr_param);
1064 return ret;
1065
1066}
1067
1068int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable)
1069{
1070 struct wl1271_acx_bet_enable *acx = NULL;
1071 int ret = 0;
1072
1073 wl1271_debug(DEBUG_ACX, "acx bet enable");
1074
1075 if (enable && wl->conf.conn.bet_enable == CONF_BET_MODE_DISABLE)
1076 goto out;
1077
1078 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1079 if (!acx) {
1080 ret = -ENOMEM;
1081 goto out;
1082 }
1083
1084 acx->enable = enable ? CONF_BET_MODE_ENABLE : CONF_BET_MODE_DISABLE;
1085 acx->max_consecutive = wl->conf.conn.bet_max_consecutive;
1086
1087 ret = wl1271_cmd_configure(wl, ACX_BET_ENABLE, acx, sizeof(*acx));
1088 if (ret < 0) {
1089 wl1271_warning("acx bet enable failed: %d", ret);
1090 goto out;
1091 }
1092
1093out:
1094 kfree(acx);
1095 return ret;
1096}
1097
1098int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address,
1099 u8 version)
1100{
1101 struct wl1271_acx_arp_filter *acx;
1102 int ret;
1103
1104 wl1271_debug(DEBUG_ACX, "acx arp ip filter, enable: %d", enable);
1105
1106 acx = kzalloc(sizeof(*acx), GFP_KERNEL);
1107 if (!acx) {
1108 ret = -ENOMEM;
1109 goto out;
1110 }
1111
1112 acx->version = version;
1113 acx->enable = enable;
1114
1115 if (enable == true) {
1116 if (version == ACX_IPV4_VERSION)
1117 memcpy(acx->address, address, ACX_IPV4_ADDR_SIZE);
1118 else if (version == ACX_IPV6_VERSION)
1119 memcpy(acx->address, address, sizeof(acx->address));
1120 else
1121 wl1271_error("Invalid IP version");
1122 }
1123
1124 ret = wl1271_cmd_configure(wl, ACX_ARP_IP_FILTER,
1125 acx, sizeof(*acx));
1126 if (ret < 0) {
1127 wl1271_warning("failed to set arp ip filter: %d", ret);
1128 goto out;
1129 }
1130
1131out:
1132 kfree(acx);
1133 return ret;
1134}
diff --git a/drivers/net/wireless/wl12xx/wl1271_acx.h b/drivers/net/wireless/wl12xx/wl1271_acx.h
index 9068daaf0ddf..2ce0a8128542 100644
--- a/drivers/net/wireless/wl12xx/wl1271_acx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_acx.h
@@ -61,8 +61,9 @@
61 WL1271_ACX_INTR_HW_AVAILABLE | \ 61 WL1271_ACX_INTR_HW_AVAILABLE | \
62 WL1271_ACX_INTR_DATA) 62 WL1271_ACX_INTR_DATA)
63 63
64#define WL1271_INTR_MASK (WL1271_ACX_INTR_EVENT_A | \ 64#define WL1271_INTR_MASK (WL1271_ACX_INTR_EVENT_A | \
65 WL1271_ACX_INTR_EVENT_B | \ 65 WL1271_ACX_INTR_EVENT_B | \
66 WL1271_ACX_INTR_HW_AVAILABLE | \
66 WL1271_ACX_INTR_DATA) 67 WL1271_ACX_INTR_DATA)
67 68
68/* Target's information element */ 69/* Target's information element */
@@ -70,11 +71,11 @@ struct acx_header {
70 struct wl1271_cmd_header cmd; 71 struct wl1271_cmd_header cmd;
71 72
72 /* acx (or information element) header */ 73 /* acx (or information element) header */
73 u16 id; 74 __le16 id;
74 75
75 /* payload length (not including headers */ 76 /* payload length (not including headers */
76 u16 len; 77 __le16 len;
77}; 78} __attribute__ ((packed));
78 79
79struct acx_error_counter { 80struct acx_error_counter {
80 struct acx_header header; 81 struct acx_header header;
@@ -82,21 +83,21 @@ struct acx_error_counter {
82 /* The number of PLCP errors since the last time this */ 83 /* The number of PLCP errors since the last time this */
83 /* information element was interrogated. This field is */ 84 /* information element was interrogated. This field is */
84 /* automatically cleared when it is interrogated.*/ 85 /* automatically cleared when it is interrogated.*/
85 u32 PLCP_error; 86 __le32 PLCP_error;
86 87
87 /* The number of FCS errors since the last time this */ 88 /* The number of FCS errors since the last time this */
88 /* information element was interrogated. This field is */ 89 /* information element was interrogated. This field is */
89 /* automatically cleared when it is interrogated.*/ 90 /* automatically cleared when it is interrogated.*/
90 u32 FCS_error; 91 __le32 FCS_error;
91 92
92 /* The number of MPDUs without PLCP header errors received*/ 93 /* The number of MPDUs without PLCP header errors received*/
93 /* since the last time this information element was interrogated. */ 94 /* since the last time this information element was interrogated. */
94 /* This field is automatically cleared when it is interrogated.*/ 95 /* This field is automatically cleared when it is interrogated.*/
95 u32 valid_frame; 96 __le32 valid_frame;
96 97
97 /* the number of missed sequence numbers in the squentially */ 98 /* the number of missed sequence numbers in the squentially */
98 /* values of frames seq numbers */ 99 /* values of frames seq numbers */
99 u32 seq_num_miss; 100 __le32 seq_num_miss;
100} __attribute__ ((packed)); 101} __attribute__ ((packed));
101 102
102struct acx_revision { 103struct acx_revision {
@@ -125,7 +126,7 @@ struct acx_revision {
125 * (1 = first spin, 2 = second spin, and so on). 126 * (1 = first spin, 2 = second spin, and so on).
126 * bits 24 - 31: Chip ID - The WiLink chip ID. 127 * bits 24 - 31: Chip ID - The WiLink chip ID.
127 */ 128 */
128 u32 hw_version; 129 __le32 hw_version;
129} __attribute__ ((packed)); 130} __attribute__ ((packed));
130 131
131enum wl1271_psm_mode { 132enum wl1271_psm_mode {
@@ -170,7 +171,6 @@ enum {
170#define DP_RX_PACKET_RING_CHUNK_NUM 2 171#define DP_RX_PACKET_RING_CHUNK_NUM 2
171#define DP_TX_PACKET_RING_CHUNK_NUM 2 172#define DP_TX_PACKET_RING_CHUNK_NUM 2
172#define DP_TX_COMPLETE_TIME_OUT 20 173#define DP_TX_COMPLETE_TIME_OUT 20
173#define FW_TX_CMPLT_BLOCK_SIZE 16
174 174
175#define TX_MSDU_LIFETIME_MIN 0 175#define TX_MSDU_LIFETIME_MIN 0
176#define TX_MSDU_LIFETIME_MAX 3000 176#define TX_MSDU_LIFETIME_MAX 3000
@@ -186,7 +186,7 @@ struct acx_rx_msdu_lifetime {
186 * The maximum amount of time, in TU, before the 186 * The maximum amount of time, in TU, before the
187 * firmware discards the MSDU. 187 * firmware discards the MSDU.
188 */ 188 */
189 u32 lifetime; 189 __le32 lifetime;
190} __attribute__ ((packed)); 190} __attribute__ ((packed));
191 191
192/* 192/*
@@ -273,14 +273,14 @@ struct acx_rx_msdu_lifetime {
273struct acx_rx_config { 273struct acx_rx_config {
274 struct acx_header header; 274 struct acx_header header;
275 275
276 u32 config_options; 276 __le32 config_options;
277 u32 filter_options; 277 __le32 filter_options;
278} __attribute__ ((packed)); 278} __attribute__ ((packed));
279 279
280struct acx_packet_detection { 280struct acx_packet_detection {
281 struct acx_header header; 281 struct acx_header header;
282 282
283 u32 threshold; 283 __le32 threshold;
284} __attribute__ ((packed)); 284} __attribute__ ((packed));
285 285
286 286
@@ -302,8 +302,8 @@ struct acx_slot {
302} __attribute__ ((packed)); 302} __attribute__ ((packed));
303 303
304 304
305#define ADDRESS_GROUP_MAX (8) 305#define ACX_MC_ADDRESS_GROUP_MAX (8)
306#define ADDRESS_GROUP_MAX_LEN (ETH_ALEN * ADDRESS_GROUP_MAX) 306#define ADDRESS_GROUP_MAX_LEN (ETH_ALEN * ACX_MC_ADDRESS_GROUP_MAX)
307 307
308struct acx_dot11_grp_addr_tbl { 308struct acx_dot11_grp_addr_tbl {
309 struct acx_header header; 309 struct acx_header header;
@@ -314,40 +314,17 @@ struct acx_dot11_grp_addr_tbl {
314 u8 mac_table[ADDRESS_GROUP_MAX_LEN]; 314 u8 mac_table[ADDRESS_GROUP_MAX_LEN];
315} __attribute__ ((packed)); 315} __attribute__ ((packed));
316 316
317
318#define RX_TIMEOUT_PS_POLL_MIN 0
319#define RX_TIMEOUT_PS_POLL_MAX (200000)
320#define RX_TIMEOUT_PS_POLL_DEF (15)
321#define RX_TIMEOUT_UPSD_MIN 0
322#define RX_TIMEOUT_UPSD_MAX (200000)
323#define RX_TIMEOUT_UPSD_DEF (15)
324
325struct acx_rx_timeout { 317struct acx_rx_timeout {
326 struct acx_header header; 318 struct acx_header header;
327 319
328 /* 320 __le16 ps_poll_timeout;
329 * The longest time the STA will wait to receive 321 __le16 upsd_timeout;
330 * traffic from the AP after a PS-poll has been
331 * transmitted.
332 */
333 u16 ps_poll_timeout;
334
335 /*
336 * The longest time the STA will wait to receive
337 * traffic from the AP after a frame has been sent
338 * from an UPSD enabled queue.
339 */
340 u16 upsd_timeout;
341} __attribute__ ((packed)); 322} __attribute__ ((packed));
342 323
343#define RTS_THRESHOLD_MIN 0
344#define RTS_THRESHOLD_MAX 4096
345#define RTS_THRESHOLD_DEF 2347
346
347struct acx_rts_threshold { 324struct acx_rts_threshold {
348 struct acx_header header; 325 struct acx_header header;
349 326
350 u16 threshold; 327 __le16 threshold;
351 u8 pad[2]; 328 u8 pad[2];
352} __attribute__ ((packed)); 329} __attribute__ ((packed));
353 330
@@ -408,6 +385,13 @@ struct acx_beacon_filter_ie_table {
408 u8 pad[3]; 385 u8 pad[3];
409} __attribute__ ((packed)); 386} __attribute__ ((packed));
410 387
388struct acx_conn_monit_params {
389 struct acx_header header;
390
391 __le32 synch_fail_thold; /* number of beacons missed */
392 __le32 bss_lose_timeout; /* number of TU's from synch fail */
393} __attribute__ ((packed));
394
411enum { 395enum {
412 SG_ENABLE = 0, 396 SG_ENABLE = 0,
413 SG_DISABLE, 397 SG_DISABLE,
@@ -431,6 +415,25 @@ struct acx_bt_wlan_coex {
431 u8 pad[3]; 415 u8 pad[3];
432} __attribute__ ((packed)); 416} __attribute__ ((packed));
433 417
418struct acx_smart_reflex_state {
419 struct acx_header header;
420
421 u8 enable;
422 u8 padding[3];
423} __attribute__ ((packed));
424
425struct smart_reflex_err_table {
426 u8 len;
427 s8 upper_limit;
428 s8 values[14];
429} __attribute__ ((packed));
430
431struct acx_smart_reflex_config_params {
432 struct acx_header header;
433
434 struct smart_reflex_err_table error_table[3];
435} __attribute__ ((packed));
436
434#define PTA_ANTENNA_TYPE_DEF (0) 437#define PTA_ANTENNA_TYPE_DEF (0)
435#define PTA_BT_HP_MAXTIME_DEF (2000) 438#define PTA_BT_HP_MAXTIME_DEF (2000)
436#define PTA_WLAN_HP_MAX_TIME_DEF (5000) 439#define PTA_WLAN_HP_MAX_TIME_DEF (5000)
@@ -463,150 +466,34 @@ struct acx_bt_wlan_coex {
463struct acx_bt_wlan_coex_param { 466struct acx_bt_wlan_coex_param {
464 struct acx_header header; 467 struct acx_header header;
465 468
466 /* 469 __le32 per_threshold;
467 * The minimum rate of a received WLAN packet in the STA, 470 __le32 max_scan_compensation_time;
468 * during protective mode, of which a new BT-HP request 471 __le16 nfs_sample_interval;
469 * during this Rx will always be respected and gain the antenna. 472 u8 load_ratio;
470 */ 473 u8 auto_ps_mode;
471 u32 min_rate; 474 u8 probe_req_compensation;
472 475 u8 scan_window_compensation;
473 /* Max time the BT HP will be respected. */ 476 u8 antenna_config;
474 u16 bt_hp_max_time; 477 u8 beacon_miss_threshold;
475 478 __le32 rate_adaptation_threshold;
476 /* Max time the WLAN HP will be respected. */ 479 s8 rate_adaptation_snr;
477 u16 wlan_hp_max_time; 480 u8 padding[3];
478
479 /*
480 * The time between the last BT activity
481 * and the moment when the sense mode returns
482 * to SENSE_INACTIVE.
483 */
484 u16 sense_disable_timer;
485
486 /* Time before the next BT HP instance */
487 u16 rx_time_bt_hp;
488 u16 tx_time_bt_hp;
489
490 /* range: 10-20000 default: 1500 */
491 u16 rx_time_bt_hp_fast;
492 u16 tx_time_bt_hp_fast;
493
494 /* range: 2000-65535 default: 8700 */
495 u16 wlan_cycle_fast;
496
497 /* range: 0 - 15000 (Msec) default: 1000 */
498 u16 bt_anti_starvation_period;
499
500 /* range 400-10000(Usec) default: 3000 */
501 u16 next_bt_lp_packet;
502
503 /* Deafult: worst case for BT DH5 traffic */
504 u16 wake_up_beacon;
505
506 /* range: 0-50000(Usec) default: 1050 */
507 u16 hp_dm_max_guard_time;
508
509 /*
510 * This is to prevent both BT & WLAN antenna
511 * starvation.
512 * Range: 100-50000(Usec) default:2550
513 */
514 u16 next_wlan_packet;
515
516 /* 0 -> shared antenna */
517 u8 antenna_type;
518
519 /*
520 * 0 -> TI legacy
521 * 1 -> Palau
522 */
523 u8 signal_type;
524
525 /*
526 * BT AFH status
527 * 0 -> no AFH
528 * 1 -> from dedicated GPIO
529 * 2 -> AFH on (from host)
530 */
531 u8 afh_leverage_on;
532
533 /*
534 * The number of cycles during which no
535 * TX will be sent after 1 cycle of RX
536 * transaction in protective mode
537 */
538 u8 quiet_cycle_num;
539
540 /*
541 * The maximum number of CTSs that will
542 * be sent for receiving RX packet in
543 * protective mode
544 */
545 u8 max_cts;
546
547 /*
548 * The number of WLAN packets
549 * transferred in common mode before
550 * switching to BT.
551 */
552 u8 wlan_packets_num;
553
554 /*
555 * The number of BT packets
556 * transferred in common mode before
557 * switching to WLAN.
558 */
559 u8 bt_packets_num;
560
561 /* range: 1-255 default: 5 */
562 u8 missed_rx_avalanche;
563
564 /* range: 0-1 default: 1 */
565 u8 wlan_elp_hp;
566
567 /* range: 0 - 15 default: 4 */
568 u8 bt_anti_starvation_cycles;
569
570 u8 ack_mode_dual_ant;
571
572 /*
573 * Allow PA_SD assertion/de-assertion
574 * during enabled BT activity.
575 */
576 u8 pa_sd_enable;
577
578 /*
579 * Enable/Disable PTA in auto mode:
580 * Support Both Active & P.S modes
581 */
582 u8 pta_auto_mode_enable;
583
584 /* range: 0 - 20 default: 1 */
585 u8 bt_hp_respected_num;
586} __attribute__ ((packed)); 481} __attribute__ ((packed));
587 482
588#define CCA_THRSH_ENABLE_ENERGY_D 0x140A
589#define CCA_THRSH_DISABLE_ENERGY_D 0xFFEF
590
591struct acx_energy_detection { 483struct acx_energy_detection {
592 struct acx_header header; 484 struct acx_header header;
593 485
594 /* The RX Clear Channel Assessment threshold in the PHY */ 486 /* The RX Clear Channel Assessment threshold in the PHY */
595 u16 rx_cca_threshold; 487 __le16 rx_cca_threshold;
596 u8 tx_energy_detection; 488 u8 tx_energy_detection;
597 u8 pad; 489 u8 pad;
598} __attribute__ ((packed)); 490} __attribute__ ((packed));
599 491
600#define BCN_RX_TIMEOUT_DEF_VALUE 10000
601#define BROADCAST_RX_TIMEOUT_DEF_VALUE 20000
602#define RX_BROADCAST_IN_PS_DEF_VALUE 1
603#define CONSECUTIVE_PS_POLL_FAILURE_DEF 4
604
605struct acx_beacon_broadcast { 492struct acx_beacon_broadcast {
606 struct acx_header header; 493 struct acx_header header;
607 494
608 u16 beacon_rx_timeout; 495 __le16 beacon_rx_timeout;
609 u16 broadcast_timeout; 496 __le16 broadcast_timeout;
610 497
611 /* Enables receiving of broadcast packets in PS mode */ 498 /* Enables receiving of broadcast packets in PS mode */
612 u8 rx_broadcast_in_ps; 499 u8 rx_broadcast_in_ps;
@@ -619,8 +506,8 @@ struct acx_beacon_broadcast {
619struct acx_event_mask { 506struct acx_event_mask {
620 struct acx_header header; 507 struct acx_header header;
621 508
622 u32 event_mask; 509 __le32 event_mask;
623 u32 high_event_mask; /* Unused */ 510 __le32 high_event_mask; /* Unused */
624} __attribute__ ((packed)); 511} __attribute__ ((packed));
625 512
626#define CFG_RX_FCS BIT(2) 513#define CFG_RX_FCS BIT(2)
@@ -657,11 +544,15 @@ struct acx_event_mask {
657#define SCAN_TRIGGERED BIT(2) 544#define SCAN_TRIGGERED BIT(2)
658#define SCAN_PRIORITY_HIGH BIT(3) 545#define SCAN_PRIORITY_HIGH BIT(3)
659 546
547/* When set, disable HW encryption */
548#define DF_ENCRYPTION_DISABLE 0x01
549#define DF_SNIFF_MODE_ENABLE 0x80
550
660struct acx_feature_config { 551struct acx_feature_config {
661 struct acx_header header; 552 struct acx_header header;
662 553
663 u32 options; 554 __le32 options;
664 u32 data_flow_options; 555 __le32 data_flow_options;
665} __attribute__ ((packed)); 556} __attribute__ ((packed));
666 557
667struct acx_current_tx_power { 558struct acx_current_tx_power {
@@ -671,14 +562,6 @@ struct acx_current_tx_power {
671 u8 padding[3]; 562 u8 padding[3];
672} __attribute__ ((packed)); 563} __attribute__ ((packed));
673 564
674enum acx_wake_up_event {
675 WAKE_UP_EVENT_BEACON_BITMAP = 0x01, /* Wake on every Beacon*/
676 WAKE_UP_EVENT_DTIM_BITMAP = 0x02, /* Wake on every DTIM*/
677 WAKE_UP_EVENT_N_DTIM_BITMAP = 0x04, /* Wake on every Nth DTIM */
678 WAKE_UP_EVENT_N_BEACONS_BITMAP = 0x08, /* Wake on every Nth Beacon */
679 WAKE_UP_EVENT_BITS_MASK = 0x0F
680};
681
682struct acx_wake_up_condition { 565struct acx_wake_up_condition {
683 struct acx_header header; 566 struct acx_header header;
684 567
@@ -693,7 +576,7 @@ struct acx_aid {
693 /* 576 /*
694 * To be set when associated with an AP. 577 * To be set when associated with an AP.
695 */ 578 */
696 u16 aid; 579 __le16 aid;
697 u8 pad[2]; 580 u8 pad[2];
698} __attribute__ ((packed)); 581} __attribute__ ((packed));
699 582
@@ -725,152 +608,152 @@ struct acx_ctsprotect {
725} __attribute__ ((packed)); 608} __attribute__ ((packed));
726 609
727struct acx_tx_statistics { 610struct acx_tx_statistics {
728 u32 internal_desc_overflow; 611 __le32 internal_desc_overflow;
729} __attribute__ ((packed)); 612} __attribute__ ((packed));
730 613
731struct acx_rx_statistics { 614struct acx_rx_statistics {
732 u32 out_of_mem; 615 __le32 out_of_mem;
733 u32 hdr_overflow; 616 __le32 hdr_overflow;
734 u32 hw_stuck; 617 __le32 hw_stuck;
735 u32 dropped; 618 __le32 dropped;
736 u32 fcs_err; 619 __le32 fcs_err;
737 u32 xfr_hint_trig; 620 __le32 xfr_hint_trig;
738 u32 path_reset; 621 __le32 path_reset;
739 u32 reset_counter; 622 __le32 reset_counter;
740} __attribute__ ((packed)); 623} __attribute__ ((packed));
741 624
742struct acx_dma_statistics { 625struct acx_dma_statistics {
743 u32 rx_requested; 626 __le32 rx_requested;
744 u32 rx_errors; 627 __le32 rx_errors;
745 u32 tx_requested; 628 __le32 tx_requested;
746 u32 tx_errors; 629 __le32 tx_errors;
747} __attribute__ ((packed)); 630} __attribute__ ((packed));
748 631
749struct acx_isr_statistics { 632struct acx_isr_statistics {
750 /* host command complete */ 633 /* host command complete */
751 u32 cmd_cmplt; 634 __le32 cmd_cmplt;
752 635
753 /* fiqisr() */ 636 /* fiqisr() */
754 u32 fiqs; 637 __le32 fiqs;
755 638
756 /* (INT_STS_ND & INT_TRIG_RX_HEADER) */ 639 /* (INT_STS_ND & INT_TRIG_RX_HEADER) */
757 u32 rx_headers; 640 __le32 rx_headers;
758 641
759 /* (INT_STS_ND & INT_TRIG_RX_CMPLT) */ 642 /* (INT_STS_ND & INT_TRIG_RX_CMPLT) */
760 u32 rx_completes; 643 __le32 rx_completes;
761 644
762 /* (INT_STS_ND & INT_TRIG_NO_RX_BUF) */ 645 /* (INT_STS_ND & INT_TRIG_NO_RX_BUF) */
763 u32 rx_mem_overflow; 646 __le32 rx_mem_overflow;
764 647
765 /* (INT_STS_ND & INT_TRIG_S_RX_RDY) */ 648 /* (INT_STS_ND & INT_TRIG_S_RX_RDY) */
766 u32 rx_rdys; 649 __le32 rx_rdys;
767 650
768 /* irqisr() */ 651 /* irqisr() */
769 u32 irqs; 652 __le32 irqs;
770 653
771 /* (INT_STS_ND & INT_TRIG_TX_PROC) */ 654 /* (INT_STS_ND & INT_TRIG_TX_PROC) */
772 u32 tx_procs; 655 __le32 tx_procs;
773 656
774 /* (INT_STS_ND & INT_TRIG_DECRYPT_DONE) */ 657 /* (INT_STS_ND & INT_TRIG_DECRYPT_DONE) */
775 u32 decrypt_done; 658 __le32 decrypt_done;
776 659
777 /* (INT_STS_ND & INT_TRIG_DMA0) */ 660 /* (INT_STS_ND & INT_TRIG_DMA0) */
778 u32 dma0_done; 661 __le32 dma0_done;
779 662
780 /* (INT_STS_ND & INT_TRIG_DMA1) */ 663 /* (INT_STS_ND & INT_TRIG_DMA1) */
781 u32 dma1_done; 664 __le32 dma1_done;
782 665
783 /* (INT_STS_ND & INT_TRIG_TX_EXC_CMPLT) */ 666 /* (INT_STS_ND & INT_TRIG_TX_EXC_CMPLT) */
784 u32 tx_exch_complete; 667 __le32 tx_exch_complete;
785 668
786 /* (INT_STS_ND & INT_TRIG_COMMAND) */ 669 /* (INT_STS_ND & INT_TRIG_COMMAND) */
787 u32 commands; 670 __le32 commands;
788 671
789 /* (INT_STS_ND & INT_TRIG_RX_PROC) */ 672 /* (INT_STS_ND & INT_TRIG_RX_PROC) */
790 u32 rx_procs; 673 __le32 rx_procs;
791 674
792 /* (INT_STS_ND & INT_TRIG_PM_802) */ 675 /* (INT_STS_ND & INT_TRIG_PM_802) */
793 u32 hw_pm_mode_changes; 676 __le32 hw_pm_mode_changes;
794 677
795 /* (INT_STS_ND & INT_TRIG_ACKNOWLEDGE) */ 678 /* (INT_STS_ND & INT_TRIG_ACKNOWLEDGE) */
796 u32 host_acknowledges; 679 __le32 host_acknowledges;
797 680
798 /* (INT_STS_ND & INT_TRIG_PM_PCI) */ 681 /* (INT_STS_ND & INT_TRIG_PM_PCI) */
799 u32 pci_pm; 682 __le32 pci_pm;
800 683
801 /* (INT_STS_ND & INT_TRIG_ACM_WAKEUP) */ 684 /* (INT_STS_ND & INT_TRIG_ACM_WAKEUP) */
802 u32 wakeups; 685 __le32 wakeups;
803 686
804 /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */ 687 /* (INT_STS_ND & INT_TRIG_LOW_RSSI) */
805 u32 low_rssi; 688 __le32 low_rssi;
806} __attribute__ ((packed)); 689} __attribute__ ((packed));
807 690
808struct acx_wep_statistics { 691struct acx_wep_statistics {
809 /* WEP address keys configured */ 692 /* WEP address keys configured */
810 u32 addr_key_count; 693 __le32 addr_key_count;
811 694
812 /* default keys configured */ 695 /* default keys configured */
813 u32 default_key_count; 696 __le32 default_key_count;
814 697
815 u32 reserved; 698 __le32 reserved;
816 699
817 /* number of times that WEP key not found on lookup */ 700 /* number of times that WEP key not found on lookup */
818 u32 key_not_found; 701 __le32 key_not_found;
819 702
820 /* number of times that WEP key decryption failed */ 703 /* number of times that WEP key decryption failed */
821 u32 decrypt_fail; 704 __le32 decrypt_fail;
822 705
823 /* WEP packets decrypted */ 706 /* WEP packets decrypted */
824 u32 packets; 707 __le32 packets;
825 708
826 /* WEP decrypt interrupts */ 709 /* WEP decrypt interrupts */
827 u32 interrupt; 710 __le32 interrupt;
828} __attribute__ ((packed)); 711} __attribute__ ((packed));
829 712
830#define ACX_MISSED_BEACONS_SPREAD 10 713#define ACX_MISSED_BEACONS_SPREAD 10
831 714
832struct acx_pwr_statistics { 715struct acx_pwr_statistics {
833 /* the amount of enters into power save mode (both PD & ELP) */ 716 /* the amount of enters into power save mode (both PD & ELP) */
834 u32 ps_enter; 717 __le32 ps_enter;
835 718
836 /* the amount of enters into ELP mode */ 719 /* the amount of enters into ELP mode */
837 u32 elp_enter; 720 __le32 elp_enter;
838 721
839 /* the amount of missing beacon interrupts to the host */ 722 /* the amount of missing beacon interrupts to the host */
840 u32 missing_bcns; 723 __le32 missing_bcns;
841 724
842 /* the amount of wake on host-access times */ 725 /* the amount of wake on host-access times */
843 u32 wake_on_host; 726 __le32 wake_on_host;
844 727
845 /* the amount of wake on timer-expire */ 728 /* the amount of wake on timer-expire */
846 u32 wake_on_timer_exp; 729 __le32 wake_on_timer_exp;
847 730
848 /* the number of packets that were transmitted with PS bit set */ 731 /* the number of packets that were transmitted with PS bit set */
849 u32 tx_with_ps; 732 __le32 tx_with_ps;
850 733
851 /* the number of packets that were transmitted with PS bit clear */ 734 /* the number of packets that were transmitted with PS bit clear */
852 u32 tx_without_ps; 735 __le32 tx_without_ps;
853 736
854 /* the number of received beacons */ 737 /* the number of received beacons */
855 u32 rcvd_beacons; 738 __le32 rcvd_beacons;
856 739
857 /* the number of entering into PowerOn (power save off) */ 740 /* the number of entering into PowerOn (power save off) */
858 u32 power_save_off; 741 __le32 power_save_off;
859 742
860 /* the number of entries into power save mode */ 743 /* the number of entries into power save mode */
861 u16 enable_ps; 744 __le16 enable_ps;
862 745
863 /* 746 /*
864 * the number of exits from power save, not including failed PS 747 * the number of exits from power save, not including failed PS
865 * transitions 748 * transitions
866 */ 749 */
867 u16 disable_ps; 750 __le16 disable_ps;
868 751
869 /* 752 /*
870 * the number of times the TSF counter was adjusted because 753 * the number of times the TSF counter was adjusted because
871 * of drift 754 * of drift
872 */ 755 */
873 u32 fix_tsf_ps; 756 __le32 fix_tsf_ps;
874 757
875 /* Gives statistics about the spread continuous missed beacons. 758 /* Gives statistics about the spread continuous missed beacons.
876 * The 16 LSB are dedicated for the PS mode. 759 * The 16 LSB are dedicated for the PS mode.
@@ -881,53 +764,53 @@ struct acx_pwr_statistics {
881 * ... 764 * ...
882 * cont_miss_bcns_spread[9] - ten and more continuous missed beacons. 765 * cont_miss_bcns_spread[9] - ten and more continuous missed beacons.
883 */ 766 */
884 u32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD]; 767 __le32 cont_miss_bcns_spread[ACX_MISSED_BEACONS_SPREAD];
885 768
886 /* the number of beacons in awake mode */ 769 /* the number of beacons in awake mode */
887 u32 rcvd_awake_beacons; 770 __le32 rcvd_awake_beacons;
888} __attribute__ ((packed)); 771} __attribute__ ((packed));
889 772
890struct acx_mic_statistics { 773struct acx_mic_statistics {
891 u32 rx_pkts; 774 __le32 rx_pkts;
892 u32 calc_failure; 775 __le32 calc_failure;
893} __attribute__ ((packed)); 776} __attribute__ ((packed));
894 777
895struct acx_aes_statistics { 778struct acx_aes_statistics {
896 u32 encrypt_fail; 779 __le32 encrypt_fail;
897 u32 decrypt_fail; 780 __le32 decrypt_fail;
898 u32 encrypt_packets; 781 __le32 encrypt_packets;
899 u32 decrypt_packets; 782 __le32 decrypt_packets;
900 u32 encrypt_interrupt; 783 __le32 encrypt_interrupt;
901 u32 decrypt_interrupt; 784 __le32 decrypt_interrupt;
902} __attribute__ ((packed)); 785} __attribute__ ((packed));
903 786
904struct acx_event_statistics { 787struct acx_event_statistics {
905 u32 heart_beat; 788 __le32 heart_beat;
906 u32 calibration; 789 __le32 calibration;
907 u32 rx_mismatch; 790 __le32 rx_mismatch;
908 u32 rx_mem_empty; 791 __le32 rx_mem_empty;
909 u32 rx_pool; 792 __le32 rx_pool;
910 u32 oom_late; 793 __le32 oom_late;
911 u32 phy_transmit_error; 794 __le32 phy_transmit_error;
912 u32 tx_stuck; 795 __le32 tx_stuck;
913} __attribute__ ((packed)); 796} __attribute__ ((packed));
914 797
915struct acx_ps_statistics { 798struct acx_ps_statistics {
916 u32 pspoll_timeouts; 799 __le32 pspoll_timeouts;
917 u32 upsd_timeouts; 800 __le32 upsd_timeouts;
918 u32 upsd_max_sptime; 801 __le32 upsd_max_sptime;
919 u32 upsd_max_apturn; 802 __le32 upsd_max_apturn;
920 u32 pspoll_max_apturn; 803 __le32 pspoll_max_apturn;
921 u32 pspoll_utilization; 804 __le32 pspoll_utilization;
922 u32 upsd_utilization; 805 __le32 upsd_utilization;
923} __attribute__ ((packed)); 806} __attribute__ ((packed));
924 807
925struct acx_rxpipe_statistics { 808struct acx_rxpipe_statistics {
926 u32 rx_prep_beacon_drop; 809 __le32 rx_prep_beacon_drop;
927 u32 descr_host_int_trig_rx_data; 810 __le32 descr_host_int_trig_rx_data;
928 u32 beacon_buffer_thres_host_int_trig_rx_data; 811 __le32 beacon_buffer_thres_host_int_trig_rx_data;
929 u32 missed_beacon_host_int_trig_rx_data; 812 __le32 missed_beacon_host_int_trig_rx_data;
930 u32 tx_xfr_host_int_trig_rx_data; 813 __le32 tx_xfr_host_int_trig_rx_data;
931} __attribute__ ((packed)); 814} __attribute__ ((packed));
932 815
933struct acx_statistics { 816struct acx_statistics {
@@ -946,13 +829,8 @@ struct acx_statistics {
946 struct acx_rxpipe_statistics rxpipe; 829 struct acx_rxpipe_statistics rxpipe;
947} __attribute__ ((packed)); 830} __attribute__ ((packed));
948 831
949#define ACX_MAX_RATE_CLASSES 8
950#define ACX_RATE_MASK_UNSPECIFIED 0
951#define ACX_RATE_MASK_ALL 0x1eff
952#define ACX_RATE_RETRY_LIMIT 10
953
954struct acx_rate_class { 832struct acx_rate_class {
955 u32 enabled_rates; 833 __le32 enabled_rates;
956 u8 short_retry_limit; 834 u8 short_retry_limit;
957 u8 long_retry_limit; 835 u8 long_retry_limit;
958 u8 aflags; 836 u8 aflags;
@@ -962,47 +840,20 @@ struct acx_rate_class {
962struct acx_rate_policy { 840struct acx_rate_policy {
963 struct acx_header header; 841 struct acx_header header;
964 842
965 u32 rate_class_cnt; 843 __le32 rate_class_cnt;
966 struct acx_rate_class rate_class[ACX_MAX_RATE_CLASSES]; 844 struct acx_rate_class rate_class[CONF_TX_MAX_RATE_CLASSES];
967} __attribute__ ((packed)); 845} __attribute__ ((packed));
968 846
969#define WL1271_ACX_AC_COUNT 4
970
971struct acx_ac_cfg { 847struct acx_ac_cfg {
972 struct acx_header header; 848 struct acx_header header;
973 u8 ac; 849 u8 ac;
974 u8 cw_min; 850 u8 cw_min;
975 u16 cw_max; 851 __le16 cw_max;
976 u8 aifsn; 852 u8 aifsn;
977 u8 reserved; 853 u8 reserved;
978 u16 tx_op_limit; 854 __le16 tx_op_limit;
979} __attribute__ ((packed)); 855} __attribute__ ((packed));
980 856
981enum wl1271_acx_ac {
982 WL1271_ACX_AC_BE = 0,
983 WL1271_ACX_AC_BK = 1,
984 WL1271_ACX_AC_VI = 2,
985 WL1271_ACX_AC_VO = 3,
986 WL1271_ACX_AC_CTS2SELF = 4,
987 WL1271_ACX_AC_ANY_TID = 0x1F,
988 WL1271_ACX_AC_INVALID = 0xFF,
989};
990
991enum wl1271_acx_ps_scheme {
992 WL1271_ACX_PS_SCHEME_LEGACY = 0,
993 WL1271_ACX_PS_SCHEME_UPSD_TRIGGER = 1,
994 WL1271_ACX_PS_SCHEME_LEGACY_PSPOLL = 2,
995 WL1271_ACX_PS_SCHEME_SAPSD = 3,
996};
997
998enum wl1271_acx_ack_policy {
999 WL1271_ACX_ACK_POLICY_LEGACY = 0,
1000 WL1271_ACX_ACK_POLICY_NO_ACK = 1,
1001 WL1271_ACX_ACK_POLICY_BLOCK = 2,
1002};
1003
1004#define WL1271_ACX_TID_COUNT 7
1005
1006struct acx_tid_config { 857struct acx_tid_config {
1007 struct acx_header header; 858 struct acx_header header;
1008 u8 queue_id; 859 u8 queue_id;
@@ -1011,22 +862,19 @@ struct acx_tid_config {
1011 u8 ps_scheme; 862 u8 ps_scheme;
1012 u8 ack_policy; 863 u8 ack_policy;
1013 u8 padding[3]; 864 u8 padding[3];
1014 u32 apsd_conf[2]; 865 __le32 apsd_conf[2];
1015} __attribute__ ((packed)); 866} __attribute__ ((packed));
1016 867
1017struct acx_frag_threshold { 868struct acx_frag_threshold {
1018 struct acx_header header; 869 struct acx_header header;
1019 u16 frag_threshold; 870 __le16 frag_threshold;
1020 u8 padding[2]; 871 u8 padding[2];
1021} __attribute__ ((packed)); 872} __attribute__ ((packed));
1022 873
1023#define WL1271_ACX_TX_COMPL_TIMEOUT 5
1024#define WL1271_ACX_TX_COMPL_THRESHOLD 5
1025
1026struct acx_tx_config_options { 874struct acx_tx_config_options {
1027 struct acx_header header; 875 struct acx_header header;
1028 u16 tx_compl_timeout; /* msec */ 876 __le16 tx_compl_timeout; /* msec */
1029 u16 tx_compl_threshold; /* number of packets */ 877 __le16 tx_compl_threshold; /* number of packets */
1030} __attribute__ ((packed)); 878} __attribute__ ((packed));
1031 879
1032#define ACX_RX_MEM_BLOCKS 64 880#define ACX_RX_MEM_BLOCKS 64
@@ -1041,79 +889,87 @@ struct wl1271_acx_config_memory {
1041 u8 tx_min_mem_block_num; 889 u8 tx_min_mem_block_num;
1042 u8 num_stations; 890 u8 num_stations;
1043 u8 num_ssid_profiles; 891 u8 num_ssid_profiles;
1044 u32 total_tx_descriptors; 892 __le32 total_tx_descriptors;
1045} __attribute__ ((packed)); 893} __attribute__ ((packed));
1046 894
1047struct wl1271_acx_mem_map { 895struct wl1271_acx_mem_map {
1048 struct acx_header header; 896 struct acx_header header;
1049 897
1050 void *code_start; 898 __le32 code_start;
1051 void *code_end; 899 __le32 code_end;
1052 900
1053 void *wep_defkey_start; 901 __le32 wep_defkey_start;
1054 void *wep_defkey_end; 902 __le32 wep_defkey_end;
1055 903
1056 void *sta_table_start; 904 __le32 sta_table_start;
1057 void *sta_table_end; 905 __le32 sta_table_end;
1058 906
1059 void *packet_template_start; 907 __le32 packet_template_start;
1060 void *packet_template_end; 908 __le32 packet_template_end;
1061 909
1062 /* Address of the TX result interface (control block) */ 910 /* Address of the TX result interface (control block) */
1063 u32 tx_result; 911 __le32 tx_result;
1064 u32 tx_result_queue_start; 912 __le32 tx_result_queue_start;
1065 913
1066 void *queue_memory_start; 914 __le32 queue_memory_start;
1067 void *queue_memory_end; 915 __le32 queue_memory_end;
1068 916
1069 u32 packet_memory_pool_start; 917 __le32 packet_memory_pool_start;
1070 u32 packet_memory_pool_end; 918 __le32 packet_memory_pool_end;
1071 919
1072 void *debug_buffer1_start; 920 __le32 debug_buffer1_start;
1073 void *debug_buffer1_end; 921 __le32 debug_buffer1_end;
1074 922
1075 void *debug_buffer2_start; 923 __le32 debug_buffer2_start;
1076 void *debug_buffer2_end; 924 __le32 debug_buffer2_end;
1077 925
1078 /* Number of blocks FW allocated for TX packets */ 926 /* Number of blocks FW allocated for TX packets */
1079 u32 num_tx_mem_blocks; 927 __le32 num_tx_mem_blocks;
1080 928
1081 /* Number of blocks FW allocated for RX packets */ 929 /* Number of blocks FW allocated for RX packets */
1082 u32 num_rx_mem_blocks; 930 __le32 num_rx_mem_blocks;
1083 931
1084 /* the following 4 fields are valid in SLAVE mode only */ 932 /* the following 4 fields are valid in SLAVE mode only */
1085 u8 *tx_cbuf; 933 u8 *tx_cbuf;
1086 u8 *rx_cbuf; 934 u8 *rx_cbuf;
1087 void *rx_ctrl; 935 __le32 rx_ctrl;
1088 void *tx_ctrl; 936 __le32 tx_ctrl;
1089} __attribute__ ((packed)); 937} __attribute__ ((packed));
1090 938
1091enum wl1271_acx_rx_queue_type {
1092 RX_QUEUE_TYPE_RX_LOW_PRIORITY, /* All except the high priority */
1093 RX_QUEUE_TYPE_RX_HIGH_PRIORITY, /* Management and voice packets */
1094 RX_QUEUE_TYPE_NUM,
1095 RX_QUEUE_TYPE_MAX = USHORT_MAX
1096};
1097
1098#define WL1271_RX_INTR_THRESHOLD_DEF 0 /* no pacing, send interrupt on
1099 * every event */
1100#define WL1271_RX_INTR_THRESHOLD_MIN 0
1101#define WL1271_RX_INTR_THRESHOLD_MAX 15
1102
1103#define WL1271_RX_INTR_TIMEOUT_DEF 5
1104#define WL1271_RX_INTR_TIMEOUT_MIN 1
1105#define WL1271_RX_INTR_TIMEOUT_MAX 100
1106
1107struct wl1271_acx_rx_config_opt { 939struct wl1271_acx_rx_config_opt {
1108 struct acx_header header; 940 struct acx_header header;
1109 941
1110 u16 mblk_threshold; 942 __le16 mblk_threshold;
1111 u16 threshold; 943 __le16 threshold;
1112 u16 timeout; 944 __le16 timeout;
1113 u8 queue_type; 945 u8 queue_type;
1114 u8 reserved; 946 u8 reserved;
1115} __attribute__ ((packed)); 947} __attribute__ ((packed));
1116 948
949
950struct wl1271_acx_bet_enable {
951 struct acx_header header;
952
953 u8 enable;
954 u8 max_consecutive;
955 u8 padding[2];
956} __attribute__ ((packed));
957
958#define ACX_IPV4_VERSION 4
959#define ACX_IPV6_VERSION 6
960#define ACX_IPV4_ADDR_SIZE 4
961struct wl1271_acx_arp_filter {
962 struct acx_header header;
963 u8 version; /* ACX_IPV4_VERSION, ACX_IPV6_VERSION */
964 u8 enable; /* 1 to enable ARP filtering, 0 to disable */
965 u8 padding[2];
966 u8 address[16]; /* The configured device IP address - all ARP
967 requests directed to this IP address will pass
968 through. For IPv4, the first four bytes are
969 used. */
970} __attribute__((packed));
971
972
1117enum { 973enum {
1118 ACX_WAKE_UP_CONDITIONS = 0x0002, 974 ACX_WAKE_UP_CONDITIONS = 0x0002,
1119 ACX_MEM_CFG = 0x0003, 975 ACX_MEM_CFG = 0x0003,
@@ -1170,6 +1026,9 @@ enum {
1170 ACX_PEER_HT_CAP = 0x0057, 1026 ACX_PEER_HT_CAP = 0x0057,
1171 ACX_HT_BSS_OPERATION = 0x0058, 1027 ACX_HT_BSS_OPERATION = 0x0058,
1172 ACX_COEX_ACTIVITY = 0x0059, 1028 ACX_COEX_ACTIVITY = 0x0059,
1029 ACX_SET_SMART_REFLEX_DEBUG = 0x005A,
1030 ACX_SET_SMART_REFLEX_STATE = 0x005B,
1031 ACX_SET_SMART_REFLEX_PARAMS = 0x005F,
1173 DOT11_RX_MSDU_LIFE_TIME = 0x1004, 1032 DOT11_RX_MSDU_LIFE_TIME = 0x1004,
1174 DOT11_CUR_TX_PWR = 0x100D, 1033 DOT11_CUR_TX_PWR = 0x100D,
1175 DOT11_RX_DOT11_MODE = 0x1012, 1034 DOT11_RX_DOT11_MODE = 0x1012,
@@ -1182,23 +1041,24 @@ enum {
1182}; 1041};
1183 1042
1184 1043
1185int wl1271_acx_wake_up_conditions(struct wl1271 *wl, u8 wake_up_event, 1044int wl1271_acx_wake_up_conditions(struct wl1271 *wl);
1186 u8 listen_interval);
1187int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth); 1045int wl1271_acx_sleep_auth(struct wl1271 *wl, u8 sleep_auth);
1188int wl1271_acx_fw_version(struct wl1271 *wl, char *buf, size_t len); 1046int wl1271_acx_fw_version(struct wl1271 *wl, char *buf, size_t len);
1189int wl1271_acx_tx_power(struct wl1271 *wl, int power); 1047int wl1271_acx_tx_power(struct wl1271 *wl, int power);
1190int wl1271_acx_feature_cfg(struct wl1271 *wl); 1048int wl1271_acx_feature_cfg(struct wl1271 *wl);
1191int wl1271_acx_mem_map(struct wl1271 *wl, 1049int wl1271_acx_mem_map(struct wl1271 *wl,
1192 struct acx_header *mem_map, size_t len); 1050 struct acx_header *mem_map, size_t len);
1193int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl, u32 life_time); 1051int wl1271_acx_rx_msdu_life_time(struct wl1271 *wl);
1194int wl1271_acx_rx_config(struct wl1271 *wl, u32 config, u32 filter); 1052int wl1271_acx_rx_config(struct wl1271 *wl, u32 config, u32 filter);
1195int wl1271_acx_pd_threshold(struct wl1271 *wl); 1053int wl1271_acx_pd_threshold(struct wl1271 *wl);
1196int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time); 1054int wl1271_acx_slot(struct wl1271 *wl, enum acx_slot_type slot_time);
1197int wl1271_acx_group_address_tbl(struct wl1271 *wl); 1055int wl1271_acx_group_address_tbl(struct wl1271 *wl, bool enable,
1056 void *mc_list, u32 mc_list_len);
1198int wl1271_acx_service_period_timeout(struct wl1271 *wl); 1057int wl1271_acx_service_period_timeout(struct wl1271 *wl);
1199int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold); 1058int wl1271_acx_rts_threshold(struct wl1271 *wl, u16 rts_threshold);
1200int wl1271_acx_beacon_filter_opt(struct wl1271 *wl); 1059int wl1271_acx_beacon_filter_opt(struct wl1271 *wl, bool enable_filter);
1201int wl1271_acx_beacon_filter_table(struct wl1271 *wl); 1060int wl1271_acx_beacon_filter_table(struct wl1271 *wl);
1061int wl1271_acx_conn_monit_params(struct wl1271 *wl);
1202int wl1271_acx_sg_enable(struct wl1271 *wl); 1062int wl1271_acx_sg_enable(struct wl1271 *wl);
1203int wl1271_acx_sg_cfg(struct wl1271 *wl); 1063int wl1271_acx_sg_cfg(struct wl1271 *wl);
1204int wl1271_acx_cca_threshold(struct wl1271 *wl); 1064int wl1271_acx_cca_threshold(struct wl1271 *wl);
@@ -1207,9 +1067,9 @@ int wl1271_acx_aid(struct wl1271 *wl, u16 aid);
1207int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask); 1067int wl1271_acx_event_mbox_mask(struct wl1271 *wl, u32 event_mask);
1208int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble); 1068int wl1271_acx_set_preamble(struct wl1271 *wl, enum acx_preamble_type preamble);
1209int wl1271_acx_cts_protect(struct wl1271 *wl, 1069int wl1271_acx_cts_protect(struct wl1271 *wl,
1210 enum acx_ctsprotect_type ctsprotect); 1070 enum acx_ctsprotect_type ctsprotect);
1211int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats); 1071int wl1271_acx_statistics(struct wl1271 *wl, struct acx_statistics *stats);
1212int wl1271_acx_rate_policies(struct wl1271 *wl); 1072int wl1271_acx_rate_policies(struct wl1271 *wl, u32 enabled_rates);
1213int wl1271_acx_ac_cfg(struct wl1271 *wl); 1073int wl1271_acx_ac_cfg(struct wl1271 *wl);
1214int wl1271_acx_tid_cfg(struct wl1271 *wl); 1074int wl1271_acx_tid_cfg(struct wl1271 *wl);
1215int wl1271_acx_frag_threshold(struct wl1271 *wl); 1075int wl1271_acx_frag_threshold(struct wl1271 *wl);
@@ -1217,5 +1077,9 @@ int wl1271_acx_tx_config_options(struct wl1271 *wl);
1217int wl1271_acx_mem_cfg(struct wl1271 *wl); 1077int wl1271_acx_mem_cfg(struct wl1271 *wl);
1218int wl1271_acx_init_mem_config(struct wl1271 *wl); 1078int wl1271_acx_init_mem_config(struct wl1271 *wl);
1219int wl1271_acx_init_rx_interrupt(struct wl1271 *wl); 1079int wl1271_acx_init_rx_interrupt(struct wl1271 *wl);
1080int wl1271_acx_smart_reflex(struct wl1271 *wl);
1081int wl1271_acx_bet_enable(struct wl1271 *wl, bool enable);
1082int wl1271_acx_arp_ip_filter(struct wl1271 *wl, bool enable, u8 *address,
1083 u8 version);
1220 1084
1221#endif /* __WL1271_ACX_H__ */ 1085#endif /* __WL1271_ACX_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.c b/drivers/net/wireless/wl12xx/wl1271_boot.c
index 8228ef474a7e..b7c96454cca3 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.c
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.c
@@ -39,6 +39,14 @@ static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
39 .start = REGISTERS_BASE, 39 .start = REGISTERS_BASE,
40 .size = 0x00008800 40 .size = 0x00008800
41 }, 41 },
42 .mem2 = {
43 .start = 0x00000000,
44 .size = 0x00000000
45 },
46 .mem3 = {
47 .start = 0x00000000,
48 .size = 0x00000000
49 },
42 }, 50 },
43 51
44 [PART_WORK] = { 52 [PART_WORK] = {
@@ -48,7 +56,15 @@ static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
48 }, 56 },
49 .reg = { 57 .reg = {
50 .start = REGISTERS_BASE, 58 .start = REGISTERS_BASE,
51 .size = 0x0000b000 59 .size = 0x0000a000
60 },
61 .mem2 = {
62 .start = 0x003004f8,
63 .size = 0x00000004
64 },
65 .mem3 = {
66 .start = 0x00040404,
67 .size = 0x00000000
52 }, 68 },
53 }, 69 },
54 70
@@ -60,6 +76,14 @@ static struct wl1271_partition_set part_table[PART_TABLE_LEN] = {
60 .reg = { 76 .reg = {
61 .start = DRPW_BASE, 77 .start = DRPW_BASE,
62 .size = 0x00006000 78 .size = 0x00006000
79 },
80 .mem2 = {
81 .start = 0x00000000,
82 .size = 0x00000000
83 },
84 .mem3 = {
85 .start = 0x00000000,
86 .size = 0x00000000
63 } 87 }
64 } 88 }
65}; 89};
@@ -69,19 +93,19 @@ static void wl1271_boot_set_ecpu_ctrl(struct wl1271 *wl, u32 flag)
69 u32 cpu_ctrl; 93 u32 cpu_ctrl;
70 94
71 /* 10.5.0 run the firmware (I) */ 95 /* 10.5.0 run the firmware (I) */
72 cpu_ctrl = wl1271_reg_read32(wl, ACX_REG_ECPU_CONTROL); 96 cpu_ctrl = wl1271_spi_read32(wl, ACX_REG_ECPU_CONTROL);
73 97
74 /* 10.5.1 run the firmware (II) */ 98 /* 10.5.1 run the firmware (II) */
75 cpu_ctrl |= flag; 99 cpu_ctrl |= flag;
76 wl1271_reg_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl); 100 wl1271_spi_write32(wl, ACX_REG_ECPU_CONTROL, cpu_ctrl);
77} 101}
78 102
79static void wl1271_boot_fw_version(struct wl1271 *wl) 103static void wl1271_boot_fw_version(struct wl1271 *wl)
80{ 104{
81 struct wl1271_static_data static_data; 105 struct wl1271_static_data static_data;
82 106
83 wl1271_spi_mem_read(wl, wl->cmd_box_addr, 107 wl1271_spi_read(wl, wl->cmd_box_addr,
84 &static_data, sizeof(static_data)); 108 &static_data, sizeof(static_data), false);
85 109
86 strncpy(wl->chip.fw_ver, static_data.fw_version, 110 strncpy(wl->chip.fw_ver, static_data.fw_version,
87 sizeof(wl->chip.fw_ver)); 111 sizeof(wl->chip.fw_ver));
@@ -93,8 +117,9 @@ static void wl1271_boot_fw_version(struct wl1271 *wl)
93static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf, 117static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
94 size_t fw_data_len, u32 dest) 118 size_t fw_data_len, u32 dest)
95{ 119{
120 struct wl1271_partition_set partition;
96 int addr, chunk_num, partition_limit; 121 int addr, chunk_num, partition_limit;
97 u8 *p; 122 u8 *p, *chunk;
98 123
99 /* whal_FwCtrl_LoadFwImageSm() */ 124 /* whal_FwCtrl_LoadFwImageSm() */
100 125
@@ -103,16 +128,20 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
103 wl1271_debug(DEBUG_BOOT, "fw_data_len %zd chunk_size %d", 128 wl1271_debug(DEBUG_BOOT, "fw_data_len %zd chunk_size %d",
104 fw_data_len, CHUNK_SIZE); 129 fw_data_len, CHUNK_SIZE);
105 130
106
107 if ((fw_data_len % 4) != 0) { 131 if ((fw_data_len % 4) != 0) {
108 wl1271_error("firmware length not multiple of four"); 132 wl1271_error("firmware length not multiple of four");
109 return -EIO; 133 return -EIO;
110 } 134 }
111 135
112 wl1271_set_partition(wl, dest, 136 chunk = kmalloc(CHUNK_SIZE, GFP_KERNEL);
113 part_table[PART_DOWN].mem.size, 137 if (!chunk) {
114 part_table[PART_DOWN].reg.start, 138 wl1271_error("allocation for firmware upload chunk failed");
115 part_table[PART_DOWN].reg.size); 139 return -ENOMEM;
140 }
141
142 memcpy(&partition, &part_table[PART_DOWN], sizeof(partition));
143 partition.mem.start = dest;
144 wl1271_set_partition(wl, &partition);
116 145
117 /* 10.1 set partition limit and chunk num */ 146 /* 10.1 set partition limit and chunk num */
118 chunk_num = 0; 147 chunk_num = 0;
@@ -125,21 +154,17 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
125 addr = dest + chunk_num * CHUNK_SIZE; 154 addr = dest + chunk_num * CHUNK_SIZE;
126 partition_limit = chunk_num * CHUNK_SIZE + 155 partition_limit = chunk_num * CHUNK_SIZE +
127 part_table[PART_DOWN].mem.size; 156 part_table[PART_DOWN].mem.size;
128 157 partition.mem.start = addr;
129 /* FIXME: Over 80 chars! */ 158 wl1271_set_partition(wl, &partition);
130 wl1271_set_partition(wl,
131 addr,
132 part_table[PART_DOWN].mem.size,
133 part_table[PART_DOWN].reg.start,
134 part_table[PART_DOWN].reg.size);
135 } 159 }
136 160
137 /* 10.3 upload the chunk */ 161 /* 10.3 upload the chunk */
138 addr = dest + chunk_num * CHUNK_SIZE; 162 addr = dest + chunk_num * CHUNK_SIZE;
139 p = buf + chunk_num * CHUNK_SIZE; 163 p = buf + chunk_num * CHUNK_SIZE;
164 memcpy(chunk, p, CHUNK_SIZE);
140 wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x", 165 wl1271_debug(DEBUG_BOOT, "uploading fw chunk 0x%p to 0x%x",
141 p, addr); 166 p, addr);
142 wl1271_spi_mem_write(wl, addr, p, CHUNK_SIZE); 167 wl1271_spi_write(wl, addr, chunk, CHUNK_SIZE, false);
143 168
144 chunk_num++; 169 chunk_num++;
145 } 170 }
@@ -147,28 +172,31 @@ static int wl1271_boot_upload_firmware_chunk(struct wl1271 *wl, void *buf,
147 /* 10.4 upload the last chunk */ 172 /* 10.4 upload the last chunk */
148 addr = dest + chunk_num * CHUNK_SIZE; 173 addr = dest + chunk_num * CHUNK_SIZE;
149 p = buf + chunk_num * CHUNK_SIZE; 174 p = buf + chunk_num * CHUNK_SIZE;
175 memcpy(chunk, p, fw_data_len % CHUNK_SIZE);
150 wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x", 176 wl1271_debug(DEBUG_BOOT, "uploading fw last chunk (%zd B) 0x%p to 0x%x",
151 fw_data_len % CHUNK_SIZE, p, addr); 177 fw_data_len % CHUNK_SIZE, p, addr);
152 wl1271_spi_mem_write(wl, addr, p, fw_data_len % CHUNK_SIZE); 178 wl1271_spi_write(wl, addr, chunk, fw_data_len % CHUNK_SIZE, false);
153 179
180 kfree(chunk);
154 return 0; 181 return 0;
155} 182}
156 183
157static int wl1271_boot_upload_firmware(struct wl1271 *wl) 184static int wl1271_boot_upload_firmware(struct wl1271 *wl)
158{ 185{
159 u32 chunks, addr, len; 186 u32 chunks, addr, len;
187 int ret = 0;
160 u8 *fw; 188 u8 *fw;
161 189
162 fw = wl->fw; 190 fw = wl->fw;
163 chunks = be32_to_cpup((u32 *) fw); 191 chunks = be32_to_cpup((__be32 *) fw);
164 fw += sizeof(u32); 192 fw += sizeof(u32);
165 193
166 wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks); 194 wl1271_debug(DEBUG_BOOT, "firmware chunks to be uploaded: %u", chunks);
167 195
168 while (chunks--) { 196 while (chunks--) {
169 addr = be32_to_cpup((u32 *) fw); 197 addr = be32_to_cpup((__be32 *) fw);
170 fw += sizeof(u32); 198 fw += sizeof(u32);
171 len = be32_to_cpup((u32 *) fw); 199 len = be32_to_cpup((__be32 *) fw);
172 fw += sizeof(u32); 200 fw += sizeof(u32);
173 201
174 if (len > 300000) { 202 if (len > 300000) {
@@ -177,11 +205,13 @@ static int wl1271_boot_upload_firmware(struct wl1271 *wl)
177 } 205 }
178 wl1271_debug(DEBUG_BOOT, "chunk %d addr 0x%x len %u", 206 wl1271_debug(DEBUG_BOOT, "chunk %d addr 0x%x len %u",
179 chunks, addr, len); 207 chunks, addr, len);
180 wl1271_boot_upload_firmware_chunk(wl, fw, len, addr); 208 ret = wl1271_boot_upload_firmware_chunk(wl, fw, len, addr);
209 if (ret != 0)
210 break;
181 fw += len; 211 fw += len;
182 } 212 }
183 213
184 return 0; 214 return ret;
185} 215}
186 216
187static int wl1271_boot_upload_nvs(struct wl1271 *wl) 217static int wl1271_boot_upload_nvs(struct wl1271 *wl)
@@ -235,7 +265,7 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
235 wl1271_debug(DEBUG_BOOT, 265 wl1271_debug(DEBUG_BOOT,
236 "nvs burst write 0x%x: 0x%x", 266 "nvs burst write 0x%x: 0x%x",
237 dest_addr, val); 267 dest_addr, val);
238 wl1271_reg_write32(wl, dest_addr, val); 268 wl1271_spi_write32(wl, dest_addr, val);
239 269
240 nvs_ptr += 4; 270 nvs_ptr += 4;
241 dest_addr += 4; 271 dest_addr += 4;
@@ -253,20 +283,18 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
253 /* FIXME: The driver sets the partition here, but this is not needed, 283 /* FIXME: The driver sets the partition here, but this is not needed,
254 since it sets to the same one as currently in use */ 284 since it sets to the same one as currently in use */
255 /* Now we must set the partition correctly */ 285 /* Now we must set the partition correctly */
256 wl1271_set_partition(wl, 286 wl1271_set_partition(wl, &part_table[PART_WORK]);
257 part_table[PART_WORK].mem.start,
258 part_table[PART_WORK].mem.size,
259 part_table[PART_WORK].reg.start,
260 part_table[PART_WORK].reg.size);
261 287
262 /* Copy the NVS tables to a new block to ensure alignment */ 288 /* Copy the NVS tables to a new block to ensure alignment */
263 nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL); 289 nvs_aligned = kmemdup(nvs_ptr, nvs_len, GFP_KERNEL);
290 if (!nvs_aligned)
291 return -ENOMEM;
264 292
265 /* And finally we upload the NVS tables */ 293 /* And finally we upload the NVS tables */
266 /* FIXME: In wl1271, we upload everything at once. 294 /* FIXME: In wl1271, we upload everything at once.
267 No endianness handling needed here?! The ref driver doesn't do 295 No endianness handling needed here?! The ref driver doesn't do
268 anything about it at this point */ 296 anything about it at this point */
269 wl1271_spi_mem_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len); 297 wl1271_spi_write(wl, CMD_MBOX_ADDRESS, nvs_aligned, nvs_len, false);
270 298
271 kfree(nvs_aligned); 299 kfree(nvs_aligned);
272 return 0; 300 return 0;
@@ -275,9 +303,9 @@ static int wl1271_boot_upload_nvs(struct wl1271 *wl)
275static void wl1271_boot_enable_interrupts(struct wl1271 *wl) 303static void wl1271_boot_enable_interrupts(struct wl1271 *wl)
276{ 304{
277 enable_irq(wl->irq); 305 enable_irq(wl->irq);
278 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, 306 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK,
279 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK)); 307 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
280 wl1271_reg_write32(wl, HI_CFG, HI_CFG_DEF_VAL); 308 wl1271_spi_write32(wl, HI_CFG, HI_CFG_DEF_VAL);
281} 309}
282 310
283static int wl1271_boot_soft_reset(struct wl1271 *wl) 311static int wl1271_boot_soft_reset(struct wl1271 *wl)
@@ -286,12 +314,13 @@ static int wl1271_boot_soft_reset(struct wl1271 *wl)
286 u32 boot_data; 314 u32 boot_data;
287 315
288 /* perform soft reset */ 316 /* perform soft reset */
289 wl1271_reg_write32(wl, ACX_REG_SLV_SOFT_RESET, ACX_SLV_SOFT_RESET_BIT); 317 wl1271_spi_write32(wl, ACX_REG_SLV_SOFT_RESET,
318 ACX_SLV_SOFT_RESET_BIT);
290 319
291 /* SOFT_RESET is self clearing */ 320 /* SOFT_RESET is self clearing */
292 timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME); 321 timeout = jiffies + usecs_to_jiffies(SOFT_RESET_MAX_TIME);
293 while (1) { 322 while (1) {
294 boot_data = wl1271_reg_read32(wl, ACX_REG_SLV_SOFT_RESET); 323 boot_data = wl1271_spi_read32(wl, ACX_REG_SLV_SOFT_RESET);
295 wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data); 324 wl1271_debug(DEBUG_BOOT, "soft reset bootdata 0x%x", boot_data);
296 if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0) 325 if ((boot_data & ACX_SLV_SOFT_RESET_BIT) == 0)
297 break; 326 break;
@@ -307,10 +336,10 @@ static int wl1271_boot_soft_reset(struct wl1271 *wl)
307 } 336 }
308 337
309 /* disable Rx/Tx */ 338 /* disable Rx/Tx */
310 wl1271_reg_write32(wl, ENABLE, 0x0); 339 wl1271_spi_write32(wl, ENABLE, 0x0);
311 340
312 /* disable auto calibration on start*/ 341 /* disable auto calibration on start*/
313 wl1271_reg_write32(wl, SPARE_A2, 0xffff); 342 wl1271_spi_write32(wl, SPARE_A2, 0xffff);
314 343
315 return 0; 344 return 0;
316} 345}
@@ -322,7 +351,7 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
322 351
323 wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT); 352 wl1271_boot_set_ecpu_ctrl(wl, ECPU_CONTROL_HALT);
324 353
325 chip_id = wl1271_reg_read32(wl, CHIP_ID_B); 354 chip_id = wl1271_spi_read32(wl, CHIP_ID_B);
326 355
327 wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id); 356 wl1271_debug(DEBUG_BOOT, "chip id after firmware boot: 0x%x", chip_id);
328 357
@@ -335,7 +364,8 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
335 loop = 0; 364 loop = 0;
336 while (loop++ < INIT_LOOP) { 365 while (loop++ < INIT_LOOP) {
337 udelay(INIT_LOOP_DELAY); 366 udelay(INIT_LOOP_DELAY);
338 interrupt = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); 367 interrupt = wl1271_spi_read32(wl,
368 ACX_REG_INTERRUPT_NO_CLEAR);
339 369
340 if (interrupt == 0xffffffff) { 370 if (interrupt == 0xffffffff) {
341 wl1271_error("error reading hardware complete " 371 wl1271_error("error reading hardware complete "
@@ -344,30 +374,26 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
344 } 374 }
345 /* check that ACX_INTR_INIT_COMPLETE is enabled */ 375 /* check that ACX_INTR_INIT_COMPLETE is enabled */
346 else if (interrupt & WL1271_ACX_INTR_INIT_COMPLETE) { 376 else if (interrupt & WL1271_ACX_INTR_INIT_COMPLETE) {
347 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_ACK, 377 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_ACK,
348 WL1271_ACX_INTR_INIT_COMPLETE); 378 WL1271_ACX_INTR_INIT_COMPLETE);
349 break; 379 break;
350 } 380 }
351 } 381 }
352 382
353 if (loop >= INIT_LOOP) { 383 if (loop > INIT_LOOP) {
354 wl1271_error("timeout waiting for the hardware to " 384 wl1271_error("timeout waiting for the hardware to "
355 "complete initialization"); 385 "complete initialization");
356 return -EIO; 386 return -EIO;
357 } 387 }
358 388
359 /* get hardware config command mail box */ 389 /* get hardware config command mail box */
360 wl->cmd_box_addr = wl1271_reg_read32(wl, REG_COMMAND_MAILBOX_PTR); 390 wl->cmd_box_addr = wl1271_spi_read32(wl, REG_COMMAND_MAILBOX_PTR);
361 391
362 /* get hardware config event mail box */ 392 /* get hardware config event mail box */
363 wl->event_box_addr = wl1271_reg_read32(wl, REG_EVENT_MAILBOX_PTR); 393 wl->event_box_addr = wl1271_spi_read32(wl, REG_EVENT_MAILBOX_PTR);
364 394
365 /* set the working partition to its "running" mode offset */ 395 /* set the working partition to its "running" mode offset */
366 wl1271_set_partition(wl, 396 wl1271_set_partition(wl, &part_table[PART_WORK]);
367 part_table[PART_WORK].mem.start,
368 part_table[PART_WORK].mem.size,
369 part_table[PART_WORK].reg.start,
370 part_table[PART_WORK].reg.size);
371 397
372 wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x event_box_addr 0x%x", 398 wl1271_debug(DEBUG_MAILBOX, "cmd_box_addr 0x%x event_box_addr 0x%x",
373 wl->cmd_box_addr, wl->event_box_addr); 399 wl->cmd_box_addr, wl->event_box_addr);
@@ -379,11 +405,10 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
379 * ready to receive event from the command mailbox 405 * ready to receive event from the command mailbox
380 */ 406 */
381 407
382 /* enable gpio interrupts */ 408 /* unmask required mbox events */
383 wl1271_boot_enable_interrupts(wl); 409 wl->event_mask = BSS_LOSE_EVENT_ID |
384 410 SCAN_COMPLETE_EVENT_ID |
385 /* unmask all mbox events */ 411 PS_REPORT_EVENT_ID;
386 wl->event_mask = 0xffffffff;
387 412
388 ret = wl1271_event_unmask(wl); 413 ret = wl1271_event_unmask(wl);
389 if (ret < 0) { 414 if (ret < 0) {
@@ -399,34 +424,13 @@ static int wl1271_boot_run_firmware(struct wl1271 *wl)
399 424
400static int wl1271_boot_write_irq_polarity(struct wl1271 *wl) 425static int wl1271_boot_write_irq_polarity(struct wl1271 *wl)
401{ 426{
402 u32 polarity, status, i; 427 u32 polarity;
403
404 wl1271_reg_write32(wl, OCP_POR_CTR, OCP_REG_POLARITY);
405 wl1271_reg_write32(wl, OCP_CMD, OCP_CMD_READ);
406
407 /* Wait until the command is complete (ie. bit 18 is set) */
408 for (i = 0; i < OCP_CMD_LOOP; i++) {
409 polarity = wl1271_reg_read32(wl, OCP_DATA_READ);
410 if (polarity & OCP_READY_MASK)
411 break;
412 }
413 if (i == OCP_CMD_LOOP) {
414 wl1271_error("OCP command timeout!");
415 return -EIO;
416 }
417 428
418 status = polarity & OCP_STATUS_MASK; 429 polarity = wl1271_top_reg_read(wl, OCP_REG_POLARITY);
419 if (status != OCP_STATUS_OK) {
420 wl1271_error("OCP command failed (%d)", status);
421 return -EIO;
422 }
423 430
424 /* We use HIGH polarity, so unset the LOW bit */ 431 /* We use HIGH polarity, so unset the LOW bit */
425 polarity &= ~POLARITY_LOW; 432 polarity &= ~POLARITY_LOW;
426 433 wl1271_top_reg_write(wl, OCP_REG_POLARITY, polarity);
427 wl1271_reg_write32(wl, OCP_POR_CTR, OCP_REG_POLARITY);
428 wl1271_reg_write32(wl, OCP_DATA_WRITE, polarity);
429 wl1271_reg_write32(wl, OCP_CMD, OCP_CMD_WRITE);
430 434
431 return 0; 435 return 0;
432} 436}
@@ -436,16 +440,32 @@ int wl1271_boot(struct wl1271 *wl)
436 int ret = 0; 440 int ret = 0;
437 u32 tmp, clk, pause; 441 u32 tmp, clk, pause;
438 442
439 if (REF_CLOCK == 0 || REF_CLOCK == 2) 443 if (REF_CLOCK == 0 || REF_CLOCK == 2 || REF_CLOCK == 4)
440 /* ref clk: 19.2/38.4 */ 444 /* ref clk: 19.2/38.4/38.4-XTAL */
441 clk = 0x3; 445 clk = 0x3;
442 else if (REF_CLOCK == 1 || REF_CLOCK == 3) 446 else if (REF_CLOCK == 1 || REF_CLOCK == 3)
443 /* ref clk: 26/52 */ 447 /* ref clk: 26/52 */
444 clk = 0x5; 448 clk = 0x5;
445 449
446 wl1271_reg_write32(wl, PLL_PARAMETERS, clk); 450 if (REF_CLOCK != 0) {
451 u16 val;
452 /* Set clock type */
453 val = wl1271_top_reg_read(wl, OCP_REG_CLK_TYPE);
454 val &= FREF_CLK_TYPE_BITS;
455 val |= CLK_REQ_PRCM;
456 wl1271_top_reg_write(wl, OCP_REG_CLK_TYPE, val);
457 } else {
458 u16 val;
459 /* Set clock polarity */
460 val = wl1271_top_reg_read(wl, OCP_REG_CLK_POLARITY);
461 val &= FREF_CLK_POLARITY_BITS;
462 val |= CLK_REQ_OUTN_SEL;
463 wl1271_top_reg_write(wl, OCP_REG_CLK_POLARITY, val);
464 }
465
466 wl1271_spi_write32(wl, PLL_PARAMETERS, clk);
447 467
448 pause = wl1271_reg_read32(wl, PLL_PARAMETERS); 468 pause = wl1271_spi_read32(wl, PLL_PARAMETERS);
449 469
450 wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause); 470 wl1271_debug(DEBUG_BOOT, "pause1 0x%x", pause);
451 471
@@ -454,39 +474,31 @@ int wl1271_boot(struct wl1271 *wl)
454 * 0x3ff (magic number ). How does 474 * 0x3ff (magic number ). How does
455 * this work?! */ 475 * this work?! */
456 pause |= WU_COUNTER_PAUSE_VAL; 476 pause |= WU_COUNTER_PAUSE_VAL;
457 wl1271_reg_write32(wl, WU_COUNTER_PAUSE, pause); 477 wl1271_spi_write32(wl, WU_COUNTER_PAUSE, pause);
458 478
459 /* Continue the ELP wake up sequence */ 479 /* Continue the ELP wake up sequence */
460 wl1271_reg_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL); 480 wl1271_spi_write32(wl, WELP_ARM_COMMAND, WELP_ARM_COMMAND_VAL);
461 udelay(500); 481 udelay(500);
462 482
463 wl1271_set_partition(wl, 483 wl1271_set_partition(wl, &part_table[PART_DRPW]);
464 part_table[PART_DRPW].mem.start,
465 part_table[PART_DRPW].mem.size,
466 part_table[PART_DRPW].reg.start,
467 part_table[PART_DRPW].reg.size);
468 484
469 /* Read-modify-write DRPW_SCRATCH_START register (see next state) 485 /* Read-modify-write DRPW_SCRATCH_START register (see next state)
470 to be used by DRPw FW. The RTRIM value will be added by the FW 486 to be used by DRPw FW. The RTRIM value will be added by the FW
471 before taking DRPw out of reset */ 487 before taking DRPw out of reset */
472 488
473 wl1271_debug(DEBUG_BOOT, "DRPW_SCRATCH_START %08x", DRPW_SCRATCH_START); 489 wl1271_debug(DEBUG_BOOT, "DRPW_SCRATCH_START %08x", DRPW_SCRATCH_START);
474 clk = wl1271_reg_read32(wl, DRPW_SCRATCH_START); 490 clk = wl1271_spi_read32(wl, DRPW_SCRATCH_START);
475 491
476 wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk); 492 wl1271_debug(DEBUG_BOOT, "clk2 0x%x", clk);
477 493
478 /* 2 */ 494 /* 2 */
479 clk |= (REF_CLOCK << 1) << 4; 495 clk |= (REF_CLOCK << 1) << 4;
480 wl1271_reg_write32(wl, DRPW_SCRATCH_START, clk); 496 wl1271_spi_write32(wl, DRPW_SCRATCH_START, clk);
481 497
482 wl1271_set_partition(wl, 498 wl1271_set_partition(wl, &part_table[PART_WORK]);
483 part_table[PART_WORK].mem.start,
484 part_table[PART_WORK].mem.size,
485 part_table[PART_WORK].reg.start,
486 part_table[PART_WORK].reg.size);
487 499
488 /* Disable interrupts */ 500 /* Disable interrupts */
489 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL); 501 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
490 502
491 ret = wl1271_boot_soft_reset(wl); 503 ret = wl1271_boot_soft_reset(wl);
492 if (ret < 0) 504 if (ret < 0)
@@ -501,21 +513,22 @@ int wl1271_boot(struct wl1271 *wl)
501 * ACX_EEPROMLESS_IND_REG */ 513 * ACX_EEPROMLESS_IND_REG */
502 wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG"); 514 wl1271_debug(DEBUG_BOOT, "ACX_EEPROMLESS_IND_REG");
503 515
504 wl1271_reg_write32(wl, ACX_EEPROMLESS_IND_REG, ACX_EEPROMLESS_IND_REG); 516 wl1271_spi_write32(wl, ACX_EEPROMLESS_IND_REG,
517 ACX_EEPROMLESS_IND_REG);
505 518
506 tmp = wl1271_reg_read32(wl, CHIP_ID_B); 519 tmp = wl1271_spi_read32(wl, CHIP_ID_B);
507 520
508 wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp); 521 wl1271_debug(DEBUG_BOOT, "chip id 0x%x", tmp);
509 522
510 /* 6. read the EEPROM parameters */ 523 /* 6. read the EEPROM parameters */
511 tmp = wl1271_reg_read32(wl, SCR_PAD2); 524 tmp = wl1271_spi_read32(wl, SCR_PAD2);
512 525
513 ret = wl1271_boot_write_irq_polarity(wl); 526 ret = wl1271_boot_write_irq_polarity(wl);
514 if (ret < 0) 527 if (ret < 0)
515 goto out; 528 goto out;
516 529
517 /* FIXME: Need to check whether this is really what we want */ 530 /* FIXME: Need to check whether this is really what we want */
518 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, 531 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK,
519 WL1271_ACX_ALL_EVENTS_VECTOR); 532 WL1271_ACX_ALL_EVENTS_VECTOR);
520 533
521 /* WL1271: The reference driver skips steps 7 to 10 (jumps directly 534 /* WL1271: The reference driver skips steps 7 to 10 (jumps directly
@@ -530,6 +543,9 @@ int wl1271_boot(struct wl1271 *wl)
530 if (ret < 0) 543 if (ret < 0)
531 goto out; 544 goto out;
532 545
546 /* Enable firmware interrupts now */
547 wl1271_boot_enable_interrupts(wl);
548
533 /* set the wl1271 default filters */ 549 /* set the wl1271 default filters */
534 wl->rx_config = WL1271_DEFAULT_RX_CONFIG; 550 wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
535 wl->rx_filter = WL1271_DEFAULT_RX_FILTER; 551 wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
diff --git a/drivers/net/wireless/wl12xx/wl1271_boot.h b/drivers/net/wireless/wl12xx/wl1271_boot.h
index b0d8fb46a439..412443ee655a 100644
--- a/drivers/net/wireless/wl12xx/wl1271_boot.h
+++ b/drivers/net/wireless/wl12xx/wl1271_boot.h
@@ -50,23 +50,17 @@ struct wl1271_static_data {
50#define WU_COUNTER_PAUSE_VAL 0x3FF 50#define WU_COUNTER_PAUSE_VAL 0x3FF
51#define WELP_ARM_COMMAND_VAL 0x4 51#define WELP_ARM_COMMAND_VAL 0x4
52 52
53#define OCP_CMD_LOOP 32 53#define OCP_REG_POLARITY 0x0064
54 54#define OCP_REG_CLK_TYPE 0x0448
55#define OCP_CMD_WRITE 0x1 55#define OCP_REG_CLK_POLARITY 0x0cb2
56#define OCP_CMD_READ 0x2
57
58#define OCP_READY_MASK BIT(18)
59#define OCP_STATUS_MASK (BIT(16) | BIT(17))
60
61#define OCP_STATUS_NO_RESP 0x00000
62#define OCP_STATUS_OK 0x10000
63#define OCP_STATUS_REQ_FAILED 0x20000
64#define OCP_STATUS_RESP_ERROR 0x30000
65
66#define OCP_REG_POLARITY 0x30032
67 56
68#define CMD_MBOX_ADDRESS 0x407B4 57#define CMD_MBOX_ADDRESS 0x407B4
69 58
70#define POLARITY_LOW BIT(1) 59#define POLARITY_LOW BIT(1)
71 60
61#define FREF_CLK_TYPE_BITS 0xfffffe7f
62#define CLK_REQ_PRCM 0x100
63#define FREF_CLK_POLARITY_BITS 0xfffff8ff
64#define CLK_REQ_OUTN_SEL 0x700
65
72#endif 66#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.c b/drivers/net/wireless/wl12xx/wl1271_cmd.c
index 2a4351ff54dc..990eb01b4c71 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.c
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.c
@@ -42,26 +42,28 @@
42 * @buf: buffer containing the command, must work with dma 42 * @buf: buffer containing the command, must work with dma
43 * @len: length of the buffer 43 * @len: length of the buffer
44 */ 44 */
45int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len) 45int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
46 size_t res_len)
46{ 47{
47 struct wl1271_cmd_header *cmd; 48 struct wl1271_cmd_header *cmd;
48 unsigned long timeout; 49 unsigned long timeout;
49 u32 intr; 50 u32 intr;
50 int ret = 0; 51 int ret = 0;
52 u16 status;
51 53
52 cmd = buf; 54 cmd = buf;
53 cmd->id = id; 55 cmd->id = cpu_to_le16(id);
54 cmd->status = 0; 56 cmd->status = 0;
55 57
56 WARN_ON(len % 4 != 0); 58 WARN_ON(len % 4 != 0);
57 59
58 wl1271_spi_mem_write(wl, wl->cmd_box_addr, buf, len); 60 wl1271_spi_write(wl, wl->cmd_box_addr, buf, len, false);
59 61
60 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD); 62 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_CMD);
61 63
62 timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT); 64 timeout = jiffies + msecs_to_jiffies(WL1271_COMMAND_TIMEOUT);
63 65
64 intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); 66 intr = wl1271_spi_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
65 while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) { 67 while (!(intr & WL1271_ACX_INTR_CMD_COMPLETE)) {
66 if (time_after(jiffies, timeout)) { 68 if (time_after(jiffies, timeout)) {
67 wl1271_error("command complete timeout"); 69 wl1271_error("command complete timeout");
@@ -71,17 +73,28 @@ int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len)
71 73
72 msleep(1); 74 msleep(1);
73 75
74 intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR); 76 intr = wl1271_spi_read32(wl, ACX_REG_INTERRUPT_NO_CLEAR);
77 }
78
79 /* read back the status code of the command */
80 if (res_len == 0)
81 res_len = sizeof(struct wl1271_cmd_header);
82 wl1271_spi_read(wl, wl->cmd_box_addr, cmd, res_len, false);
83
84 status = le16_to_cpu(cmd->status);
85 if (status != CMD_STATUS_SUCCESS) {
86 wl1271_error("command execute failure %d", status);
87 ret = -EIO;
75 } 88 }
76 89
77 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_ACK, 90 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_ACK,
78 WL1271_ACX_INTR_CMD_COMPLETE); 91 WL1271_ACX_INTR_CMD_COMPLETE);
79 92
80out: 93out:
81 return ret; 94 return ret;
82} 95}
83 96
84int wl1271_cmd_cal_channel_tune(struct wl1271 *wl) 97static int wl1271_cmd_cal_channel_tune(struct wl1271 *wl)
85{ 98{
86 struct wl1271_cmd_cal_channel_tune *cmd; 99 struct wl1271_cmd_cal_channel_tune *cmd;
87 int ret = 0; 100 int ret = 0;
@@ -104,7 +117,7 @@ int wl1271_cmd_cal_channel_tune(struct wl1271 *wl)
104 return ret; 117 return ret;
105} 118}
106 119
107int wl1271_cmd_cal_update_ref_point(struct wl1271 *wl) 120static int wl1271_cmd_cal_update_ref_point(struct wl1271 *wl)
108{ 121{
109 struct wl1271_cmd_cal_update_ref_point *cmd; 122 struct wl1271_cmd_cal_update_ref_point *cmd;
110 int ret = 0; 123 int ret = 0;
@@ -129,7 +142,7 @@ int wl1271_cmd_cal_update_ref_point(struct wl1271 *wl)
129 return ret; 142 return ret;
130} 143}
131 144
132int wl1271_cmd_cal_p2g(struct wl1271 *wl) 145static int wl1271_cmd_cal_p2g(struct wl1271 *wl)
133{ 146{
134 struct wl1271_cmd_cal_p2g *cmd; 147 struct wl1271_cmd_cal_p2g *cmd;
135 int ret = 0; 148 int ret = 0;
@@ -150,7 +163,7 @@ int wl1271_cmd_cal_p2g(struct wl1271 *wl)
150 return ret; 163 return ret;
151} 164}
152 165
153int wl1271_cmd_cal(struct wl1271 *wl) 166static int wl1271_cmd_cal(struct wl1271 *wl)
154{ 167{
155 /* 168 /*
156 * FIXME: we must make sure that we're not sleeping when calibration 169 * FIXME: we must make sure that we're not sleeping when calibration
@@ -175,11 +188,9 @@ int wl1271_cmd_cal(struct wl1271 *wl)
175 return ret; 188 return ret;
176} 189}
177 190
178int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval, 191int wl1271_cmd_join(struct wl1271 *wl)
179 u16 beacon_interval, u8 wait)
180{ 192{
181 static bool do_cal = true; 193 static bool do_cal = true;
182 unsigned long timeout;
183 struct wl1271_cmd_join *join; 194 struct wl1271_cmd_join *join;
184 int ret, i; 195 int ret, i;
185 u8 *bssid; 196 u8 *bssid;
@@ -193,6 +204,18 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval,
193 do_cal = false; 204 do_cal = false;
194 } 205 }
195 206
207 /* FIXME: This is a workaround, because with the current stack, we
208 * cannot know when we have disassociated. So, if we have already
209 * joined, we disconnect before joining again. */
210 if (wl->joined) {
211 ret = wl1271_cmd_disconnect(wl);
212 if (ret < 0) {
213 wl1271_error("failed to disconnect before rejoining");
214 goto out;
215 }
216
217 wl->joined = false;
218 }
196 219
197 join = kzalloc(sizeof(*join), GFP_KERNEL); 220 join = kzalloc(sizeof(*join), GFP_KERNEL);
198 if (!join) { 221 if (!join) {
@@ -207,15 +230,34 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval,
207 for (i = 0; i < ETH_ALEN; i++) 230 for (i = 0; i < ETH_ALEN; i++)
208 bssid[i] = wl->bssid[ETH_ALEN - i - 1]; 231 bssid[i] = wl->bssid[ETH_ALEN - i - 1];
209 232
210 join->rx_config_options = wl->rx_config; 233 join->rx_config_options = cpu_to_le32(wl->rx_config);
211 join->rx_filter_options = wl->rx_filter; 234 join->rx_filter_options = cpu_to_le32(wl->rx_filter);
235 join->bss_type = wl->bss_type;
212 236
213 join->basic_rate_set = RATE_MASK_1MBPS | RATE_MASK_2MBPS | 237 /*
214 RATE_MASK_5_5MBPS | RATE_MASK_11MBPS; 238 * FIXME: disable temporarily all filters because after commit
239 * 9cef8737 "mac80211: fix managed mode BSSID handling" broke
240 * association. The filter logic needs to be implemented properly
241 * and once that is done, this hack can be removed.
242 */
243 join->rx_config_options = cpu_to_le32(0);
244 join->rx_filter_options = cpu_to_le32(WL1271_DEFAULT_RX_FILTER);
245
246 if (wl->band == IEEE80211_BAND_2GHZ)
247 join->basic_rate_set = cpu_to_le32(CONF_HW_BIT_RATE_1MBPS |
248 CONF_HW_BIT_RATE_2MBPS |
249 CONF_HW_BIT_RATE_5_5MBPS |
250 CONF_HW_BIT_RATE_11MBPS);
251 else {
252 join->bss_type |= WL1271_JOIN_CMD_BSS_TYPE_5GHZ;
253 join->basic_rate_set = cpu_to_le32(CONF_HW_BIT_RATE_6MBPS |
254 CONF_HW_BIT_RATE_12MBPS |
255 CONF_HW_BIT_RATE_24MBPS);
256 }
257
258 join->beacon_interval = cpu_to_le16(WL1271_DEFAULT_BEACON_INT);
259 join->dtim_interval = WL1271_DEFAULT_DTIM_PERIOD;
215 260
216 join->beacon_interval = beacon_interval;
217 join->dtim_interval = dtim_interval;
218 join->bss_type = bss_type;
219 join->channel = wl->channel; 261 join->channel = wl->channel;
220 join->ssid_len = wl->ssid_len; 262 join->ssid_len = wl->ssid_len;
221 memcpy(join->ssid, wl->ssid, wl->ssid_len); 263 memcpy(join->ssid, wl->ssid, wl->ssid_len);
@@ -228,21 +270,24 @@ int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval,
228 270
229 join->ctrl |= wl->session_counter << WL1271_JOIN_CMD_TX_SESSION_OFFSET; 271 join->ctrl |= wl->session_counter << WL1271_JOIN_CMD_TX_SESSION_OFFSET;
230 272
273 /* reset TX security counters */
274 wl->tx_security_last_seq = 0;
275 wl->tx_security_seq_16 = 0;
276 wl->tx_security_seq_32 = 0;
231 277
232 ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join)); 278 ret = wl1271_cmd_send(wl, CMD_START_JOIN, join, sizeof(*join), 0);
233 if (ret < 0) { 279 if (ret < 0) {
234 wl1271_error("failed to initiate cmd join"); 280 wl1271_error("failed to initiate cmd join");
235 goto out_free; 281 goto out_free;
236 } 282 }
237 283
238 timeout = msecs_to_jiffies(JOIN_TIMEOUT); 284 wl->joined = true;
239 285
240 /* 286 /*
241 * ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to 287 * ugly hack: we should wait for JOIN_EVENT_COMPLETE_ID but to
242 * simplify locking we just sleep instead, for now 288 * simplify locking we just sleep instead, for now
243 */ 289 */
244 if (wait) 290 msleep(10);
245 msleep(10);
246 291
247out_free: 292out_free:
248 kfree(join); 293 kfree(join);
@@ -262,34 +307,21 @@ out:
262int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer) 307int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer)
263{ 308{
264 int ret; 309 int ret;
310 size_t res_len = 0;
265 311
266 wl1271_debug(DEBUG_CMD, "cmd test"); 312 wl1271_debug(DEBUG_CMD, "cmd test");
267 313
268 ret = wl1271_cmd_send(wl, CMD_TEST, buf, buf_len); 314 if (answer)
315 res_len = buf_len;
316
317 ret = wl1271_cmd_send(wl, CMD_TEST, buf, buf_len, res_len);
269 318
270 if (ret < 0) { 319 if (ret < 0) {
271 wl1271_warning("TEST command failed"); 320 wl1271_warning("TEST command failed");
272 return ret; 321 return ret;
273 } 322 }
274 323
275 if (answer) { 324 return ret;
276 struct wl1271_command *cmd_answer;
277
278 /*
279 * The test command got in, we can read the answer.
280 * The answer would be a wl1271_command, where the
281 * parameter array contains the actual answer.
282 */
283 wl1271_spi_mem_read(wl, wl->cmd_box_addr, buf, buf_len);
284
285 cmd_answer = buf;
286
287 if (cmd_answer->header.status != CMD_STATUS_SUCCESS)
288 wl1271_error("TEST command answer error: %d",
289 cmd_answer->header.status);
290 }
291
292 return 0;
293} 325}
294 326
295/** 327/**
@@ -307,26 +339,15 @@ int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len)
307 339
308 wl1271_debug(DEBUG_CMD, "cmd interrogate"); 340 wl1271_debug(DEBUG_CMD, "cmd interrogate");
309 341
310 acx->id = id; 342 acx->id = cpu_to_le16(id);
311 343
312 /* payload length, does not include any headers */ 344 /* payload length, does not include any headers */
313 acx->len = len - sizeof(*acx); 345 acx->len = cpu_to_le16(len - sizeof(*acx));
314 346
315 ret = wl1271_cmd_send(wl, CMD_INTERROGATE, acx, sizeof(*acx)); 347 ret = wl1271_cmd_send(wl, CMD_INTERROGATE, acx, sizeof(*acx), len);
316 if (ret < 0) { 348 if (ret < 0)
317 wl1271_error("INTERROGATE command failed"); 349 wl1271_error("INTERROGATE command failed");
318 goto out;
319 }
320
321 /* the interrogate command got in, we can read the answer */
322 wl1271_spi_mem_read(wl, wl->cmd_box_addr, buf, len);
323 350
324 acx = buf;
325 if (acx->cmd.status != CMD_STATUS_SUCCESS)
326 wl1271_error("INTERROGATE command error: %d",
327 acx->cmd.status);
328
329out:
330 return ret; 351 return ret;
331} 352}
332 353
@@ -345,12 +366,12 @@ int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len)
345 366
346 wl1271_debug(DEBUG_CMD, "cmd configure"); 367 wl1271_debug(DEBUG_CMD, "cmd configure");
347 368
348 acx->id = id; 369 acx->id = cpu_to_le16(id);
349 370
350 /* payload length, does not include any headers */ 371 /* payload length, does not include any headers */
351 acx->len = len - sizeof(*acx); 372 acx->len = cpu_to_le16(len - sizeof(*acx));
352 373
353 ret = wl1271_cmd_send(wl, CMD_CONFIGURE, acx, len); 374 ret = wl1271_cmd_send(wl, CMD_CONFIGURE, acx, len, 0);
354 if (ret < 0) { 375 if (ret < 0) {
355 wl1271_warning("CONFIGURE command NOK"); 376 wl1271_warning("CONFIGURE command NOK");
356 return ret; 377 return ret;
@@ -383,7 +404,7 @@ int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
383 cmd_tx = CMD_DISABLE_TX; 404 cmd_tx = CMD_DISABLE_TX;
384 } 405 }
385 406
386 ret = wl1271_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd)); 407 ret = wl1271_cmd_send(wl, cmd_rx, cmd, sizeof(*cmd), 0);
387 if (ret < 0) { 408 if (ret < 0) {
388 wl1271_error("rx %s cmd for channel %d failed", 409 wl1271_error("rx %s cmd for channel %d failed",
389 enable ? "start" : "stop", channel); 410 enable ? "start" : "stop", channel);
@@ -393,7 +414,7 @@ int wl1271_cmd_data_path(struct wl1271 *wl, u8 channel, bool enable)
393 wl1271_debug(DEBUG_BOOT, "rx %s cmd channel %d", 414 wl1271_debug(DEBUG_BOOT, "rx %s cmd channel %d",
394 enable ? "start" : "stop", channel); 415 enable ? "start" : "stop", channel);
395 416
396 ret = wl1271_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd)); 417 ret = wl1271_cmd_send(wl, cmd_tx, cmd, sizeof(*cmd), 0);
397 if (ret < 0) { 418 if (ret < 0) {
398 wl1271_error("tx %s cmd for channel %d failed", 419 wl1271_error("tx %s cmd for channel %d failed",
399 enable ? "start" : "stop", channel); 420 enable ? "start" : "stop", channel);
@@ -414,8 +435,7 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
414 int ret = 0; 435 int ret = 0;
415 436
416 /* FIXME: this should be in ps.c */ 437 /* FIXME: this should be in ps.c */
417 ret = wl1271_acx_wake_up_conditions(wl, WAKE_UP_EVENT_DTIM_BITMAP, 438 ret = wl1271_acx_wake_up_conditions(wl);
418 wl->listen_int);
419 if (ret < 0) { 439 if (ret < 0) {
420 wl1271_error("couldn't set wake up conditions"); 440 wl1271_error("couldn't set wake up conditions");
421 goto out; 441 goto out;
@@ -433,10 +453,10 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode)
433 ps_params->send_null_data = 1; 453 ps_params->send_null_data = 1;
434 ps_params->retries = 5; 454 ps_params->retries = 5;
435 ps_params->hang_over_period = 128; 455 ps_params->hang_over_period = 128;
436 ps_params->null_data_rate = 1; /* 1 Mbps */ 456 ps_params->null_data_rate = cpu_to_le32(1); /* 1 Mbps */
437 457
438 ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params, 458 ret = wl1271_cmd_send(wl, CMD_SET_PS_MODE, ps_params,
439 sizeof(*ps_params)); 459 sizeof(*ps_params), 0);
440 if (ret < 0) { 460 if (ret < 0) {
441 wl1271_error("cmd set_ps_mode failed"); 461 wl1271_error("cmd set_ps_mode failed");
442 goto out; 462 goto out;
@@ -464,22 +484,17 @@ int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
464 WARN_ON(len > MAX_READ_SIZE); 484 WARN_ON(len > MAX_READ_SIZE);
465 len = min_t(size_t, len, MAX_READ_SIZE); 485 len = min_t(size_t, len, MAX_READ_SIZE);
466 486
467 cmd->addr = addr; 487 cmd->addr = cpu_to_le32(addr);
468 cmd->size = len; 488 cmd->size = cpu_to_le32(len);
469 489
470 ret = wl1271_cmd_send(wl, CMD_READ_MEMORY, cmd, sizeof(*cmd)); 490 ret = wl1271_cmd_send(wl, CMD_READ_MEMORY, cmd, sizeof(*cmd),
491 sizeof(*cmd));
471 if (ret < 0) { 492 if (ret < 0) {
472 wl1271_error("read memory command failed: %d", ret); 493 wl1271_error("read memory command failed: %d", ret);
473 goto out; 494 goto out;
474 } 495 }
475 496
476 /* the read command got in, we can now read the answer */ 497 /* the read command got in */
477 wl1271_spi_mem_read(wl, wl->cmd_box_addr, cmd, sizeof(*cmd));
478
479 if (cmd->header.status != CMD_STATUS_SUCCESS)
480 wl1271_error("error in read command result: %d",
481 cmd->header.status);
482
483 memcpy(answer, cmd->value, len); 498 memcpy(answer, cmd->value, len);
484 499
485out: 500out:
@@ -488,14 +503,31 @@ out:
488} 503}
489 504
490int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len, 505int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
491 u8 active_scan, u8 high_prio, u8 num_channels, 506 u8 active_scan, u8 high_prio, u8 band,
492 u8 probe_requests) 507 u8 probe_requests)
493{ 508{
494 509
495 struct wl1271_cmd_trigger_scan_to *trigger = NULL; 510 struct wl1271_cmd_trigger_scan_to *trigger = NULL;
496 struct wl1271_cmd_scan *params = NULL; 511 struct wl1271_cmd_scan *params = NULL;
497 int i, ret; 512 struct ieee80211_channel *channels;
513 int i, j, n_ch, ret;
498 u16 scan_options = 0; 514 u16 scan_options = 0;
515 u8 ieee_band;
516
517 if (band == WL1271_SCAN_BAND_2_4_GHZ)
518 ieee_band = IEEE80211_BAND_2GHZ;
519 else if (band == WL1271_SCAN_BAND_DUAL && wl1271_11a_enabled())
520 ieee_band = IEEE80211_BAND_2GHZ;
521 else if (band == WL1271_SCAN_BAND_5_GHZ && wl1271_11a_enabled())
522 ieee_band = IEEE80211_BAND_5GHZ;
523 else
524 return -EINVAL;
525
526 if (wl->hw->wiphy->bands[ieee_band]->channels == NULL)
527 return -EINVAL;
528
529 channels = wl->hw->wiphy->bands[ieee_band]->channels;
530 n_ch = wl->hw->wiphy->bands[ieee_band]->n_channels;
499 531
500 if (wl->scanning) 532 if (wl->scanning)
501 return -EINVAL; 533 return -EINVAL;
@@ -512,32 +544,43 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
512 scan_options |= WL1271_SCAN_OPT_PASSIVE; 544 scan_options |= WL1271_SCAN_OPT_PASSIVE;
513 if (high_prio) 545 if (high_prio)
514 scan_options |= WL1271_SCAN_OPT_PRIORITY_HIGH; 546 scan_options |= WL1271_SCAN_OPT_PRIORITY_HIGH;
515 params->params.scan_options = scan_options; 547 params->params.scan_options = cpu_to_le16(scan_options);
516 548
517 params->params.num_channels = num_channels;
518 params->params.num_probe_requests = probe_requests; 549 params->params.num_probe_requests = probe_requests;
519 params->params.tx_rate = cpu_to_le32(RATE_MASK_2MBPS); 550 /* Let the fw autodetect suitable tx_rate for probes */
551 params->params.tx_rate = 0;
520 params->params.tid_trigger = 0; 552 params->params.tid_trigger = 0;
521 params->params.scan_tag = WL1271_SCAN_DEFAULT_TAG; 553 params->params.scan_tag = WL1271_SCAN_DEFAULT_TAG;
522 554
523 for (i = 0; i < num_channels; i++) { 555 if (band == WL1271_SCAN_BAND_DUAL)
524 params->channels[i].min_duration = 556 params->params.band = WL1271_SCAN_BAND_2_4_GHZ;
525 cpu_to_le32(WL1271_SCAN_CHAN_MIN_DURATION); 557 else
526 params->channels[i].max_duration = 558 params->params.band = band;
527 cpu_to_le32(WL1271_SCAN_CHAN_MAX_DURATION); 559
528 memset(&params->channels[i].bssid_lsb, 0xff, 4); 560 for (i = 0, j = 0; i < n_ch && i < WL1271_SCAN_MAX_CHANNELS; i++) {
529 memset(&params->channels[i].bssid_msb, 0xff, 2); 561 if (!(channels[i].flags & IEEE80211_CHAN_DISABLED)) {
530 params->channels[i].early_termination = 0; 562 params->channels[j].min_duration =
531 params->channels[i].tx_power_att = WL1271_SCAN_CURRENT_TX_PWR; 563 cpu_to_le32(WL1271_SCAN_CHAN_MIN_DURATION);
532 params->channels[i].channel = i + 1; 564 params->channels[j].max_duration =
565 cpu_to_le32(WL1271_SCAN_CHAN_MAX_DURATION);
566 memset(&params->channels[j].bssid_lsb, 0xff, 4);
567 memset(&params->channels[j].bssid_msb, 0xff, 2);
568 params->channels[j].early_termination = 0;
569 params->channels[j].tx_power_att =
570 WL1271_SCAN_CURRENT_TX_PWR;
571 params->channels[j].channel = channels[i].hw_value;
572 j++;
573 }
533 } 574 }
534 575
576 params->params.num_channels = j;
577
535 if (len && ssid) { 578 if (len && ssid) {
536 params->params.ssid_len = len; 579 params->params.ssid_len = len;
537 memcpy(params->params.ssid, ssid, len); 580 memcpy(params->params.ssid, ssid, len);
538 } 581 }
539 582
540 ret = wl1271_cmd_build_probe_req(wl, ssid, len); 583 ret = wl1271_cmd_build_probe_req(wl, ssid, len, ieee_band);
541 if (ret < 0) { 584 if (ret < 0) {
542 wl1271_error("PROBE request template failed"); 585 wl1271_error("PROBE request template failed");
543 goto out; 586 goto out;
@@ -553,7 +596,7 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
553 trigger->timeout = 0; 596 trigger->timeout = 0;
554 597
555 ret = wl1271_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger, 598 ret = wl1271_cmd_send(wl, CMD_TRIGGER_SCAN_TO, trigger,
556 sizeof(*trigger)); 599 sizeof(*trigger), 0);
557 if (ret < 0) { 600 if (ret < 0) {
558 wl1271_error("trigger scan to failed for hw scan"); 601 wl1271_error("trigger scan to failed for hw scan");
559 goto out; 602 goto out;
@@ -562,20 +605,24 @@ int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
562 wl1271_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params)); 605 wl1271_dump(DEBUG_SCAN, "SCAN: ", params, sizeof(*params));
563 606
564 wl->scanning = true; 607 wl->scanning = true;
608 if (wl1271_11a_enabled()) {
609 wl->scan.state = band;
610 if (band == WL1271_SCAN_BAND_DUAL) {
611 wl->scan.active = active_scan;
612 wl->scan.high_prio = high_prio;
613 wl->scan.probe_requests = probe_requests;
614 if (len && ssid) {
615 wl->scan.ssid_len = len;
616 memcpy(wl->scan.ssid, ssid, len);
617 } else
618 wl->scan.ssid_len = 0;
619 }
620 }
565 621
566 ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params)); 622 ret = wl1271_cmd_send(wl, CMD_SCAN, params, sizeof(*params), 0);
567 if (ret < 0) { 623 if (ret < 0) {
568 wl1271_error("SCAN failed"); 624 wl1271_error("SCAN failed");
569 goto out;
570 }
571
572 wl1271_spi_mem_read(wl, wl->cmd_box_addr, params, sizeof(*params));
573
574 if (params->header.status != CMD_STATUS_SUCCESS) {
575 wl1271_error("Scan command error: %d",
576 params->header.status);
577 wl->scanning = false; 625 wl->scanning = false;
578 ret = -EIO;
579 goto out; 626 goto out;
580 } 627 }
581 628
@@ -603,14 +650,14 @@ int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
603 650
604 cmd->len = cpu_to_le16(buf_len); 651 cmd->len = cpu_to_le16(buf_len);
605 cmd->template_type = template_id; 652 cmd->template_type = template_id;
606 cmd->enabled_rates = ACX_RATE_MASK_UNSPECIFIED; 653 cmd->enabled_rates = cpu_to_le32(wl->conf.tx.rc_conf.enabled_rates);
607 cmd->short_retry_limit = ACX_RATE_RETRY_LIMIT; 654 cmd->short_retry_limit = wl->conf.tx.rc_conf.short_retry_limit;
608 cmd->long_retry_limit = ACX_RATE_RETRY_LIMIT; 655 cmd->long_retry_limit = wl->conf.tx.rc_conf.long_retry_limit;
609 656
610 if (buf) 657 if (buf)
611 memcpy(cmd->template_data, buf, buf_len); 658 memcpy(cmd->template_data, buf, buf_len);
612 659
613 ret = wl1271_cmd_send(wl, CMD_SET_TEMPLATE, cmd, sizeof(*cmd)); 660 ret = wl1271_cmd_send(wl, CMD_SET_TEMPLATE, cmd, sizeof(*cmd), 0);
614 if (ret < 0) { 661 if (ret < 0) {
615 wl1271_warning("cmd set_template failed: %d", ret); 662 wl1271_warning("cmd set_template failed: %d", ret);
616 goto out_free; 663 goto out_free;
@@ -623,30 +670,62 @@ out:
623 return ret; 670 return ret;
624} 671}
625 672
626static int wl1271_build_basic_rates(char *rates) 673static int wl1271_build_basic_rates(char *rates, u8 band)
627{ 674{
628 u8 index = 0; 675 u8 index = 0;
629 676
630 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB; 677 if (band == IEEE80211_BAND_2GHZ) {
631 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB; 678 rates[index++] =
632 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB; 679 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_1MB;
633 rates[index++] = IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB; 680 rates[index++] =
681 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_2MB;
682 rates[index++] =
683 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_5MB;
684 rates[index++] =
685 IEEE80211_BASIC_RATE_MASK | IEEE80211_CCK_RATE_11MB;
686 } else if (band == IEEE80211_BAND_5GHZ) {
687 rates[index++] =
688 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_6MB;
689 rates[index++] =
690 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_12MB;
691 rates[index++] =
692 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
693 } else {
694 wl1271_error("build_basic_rates invalid band: %d", band);
695 }
634 696
635 return index; 697 return index;
636} 698}
637 699
638static int wl1271_build_extended_rates(char *rates) 700static int wl1271_build_extended_rates(char *rates, u8 band)
639{ 701{
640 u8 index = 0; 702 u8 index = 0;
641 703
642 rates[index++] = IEEE80211_OFDM_RATE_6MB; 704 if (band == IEEE80211_BAND_2GHZ) {
643 rates[index++] = IEEE80211_OFDM_RATE_9MB; 705 rates[index++] = IEEE80211_OFDM_RATE_6MB;
644 rates[index++] = IEEE80211_OFDM_RATE_12MB; 706 rates[index++] = IEEE80211_OFDM_RATE_9MB;
645 rates[index++] = IEEE80211_OFDM_RATE_18MB; 707 rates[index++] = IEEE80211_OFDM_RATE_12MB;
646 rates[index++] = IEEE80211_OFDM_RATE_24MB; 708 rates[index++] = IEEE80211_OFDM_RATE_18MB;
647 rates[index++] = IEEE80211_OFDM_RATE_36MB; 709 rates[index++] = IEEE80211_OFDM_RATE_24MB;
648 rates[index++] = IEEE80211_OFDM_RATE_48MB; 710 rates[index++] = IEEE80211_OFDM_RATE_36MB;
649 rates[index++] = IEEE80211_OFDM_RATE_54MB; 711 rates[index++] = IEEE80211_OFDM_RATE_48MB;
712 rates[index++] = IEEE80211_OFDM_RATE_54MB;
713 } else if (band == IEEE80211_BAND_5GHZ) {
714 rates[index++] =
715 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_9MB;
716 rates[index++] =
717 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_18MB;
718 rates[index++] =
719 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_24MB;
720 rates[index++] =
721 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_36MB;
722 rates[index++] =
723 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_48MB;
724 rates[index++] =
725 IEEE80211_BASIC_RATE_MASK | IEEE80211_OFDM_RATE_54MB;
726 } else {
727 wl1271_error("build_basic_rates invalid band: %d", band);
728 }
650 729
651 return index; 730 return index;
652} 731}
@@ -665,7 +744,8 @@ int wl1271_cmd_build_null_data(struct wl1271 *wl)
665 744
666 memcpy(template.header.sa, wl->mac_addr, ETH_ALEN); 745 memcpy(template.header.sa, wl->mac_addr, ETH_ALEN);
667 template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA | 746 template.header.frame_ctl = cpu_to_le16(IEEE80211_FTYPE_DATA |
668 IEEE80211_STYPE_NULLFUNC); 747 IEEE80211_STYPE_NULLFUNC |
748 IEEE80211_FCTL_TODS);
669 749
670 return wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, &template, 750 return wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, &template,
671 sizeof(template)); 751 sizeof(template));
@@ -678,7 +758,10 @@ int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid)
678 758
679 memcpy(template.bssid, wl->bssid, ETH_ALEN); 759 memcpy(template.bssid, wl->bssid, ETH_ALEN);
680 memcpy(template.ta, wl->mac_addr, ETH_ALEN); 760 memcpy(template.ta, wl->mac_addr, ETH_ALEN);
681 template.aid = aid; 761
762 /* aid in PS-Poll has its two MSBs each set to 1 */
763 template.aid = cpu_to_le16(1 << 15 | 1 << 14 | aid);
764
682 template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL); 765 template.fc = cpu_to_le16(IEEE80211_FTYPE_CTL | IEEE80211_STYPE_PSPOLL);
683 766
684 return wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, &template, 767 return wl1271_cmd_template_set(wl, CMD_TEMPL_PS_POLL, &template,
@@ -686,12 +769,14 @@ int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid)
686 769
687} 770}
688 771
689int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len) 772int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len,
773 u8 band)
690{ 774{
691 struct wl12xx_probe_req_template template; 775 struct wl12xx_probe_req_template template;
692 struct wl12xx_ie_rates *rates; 776 struct wl12xx_ie_rates *rates;
693 char *ptr; 777 char *ptr;
694 u16 size; 778 u16 size;
779 int ret;
695 780
696 ptr = (char *)&template; 781 ptr = (char *)&template;
697 size = sizeof(struct ieee80211_header); 782 size = sizeof(struct ieee80211_header);
@@ -713,20 +798,25 @@ int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len)
713 /* Basic Rates */ 798 /* Basic Rates */
714 rates = (struct wl12xx_ie_rates *)ptr; 799 rates = (struct wl12xx_ie_rates *)ptr;
715 rates->header.id = WLAN_EID_SUPP_RATES; 800 rates->header.id = WLAN_EID_SUPP_RATES;
716 rates->header.len = wl1271_build_basic_rates(rates->rates); 801 rates->header.len = wl1271_build_basic_rates(rates->rates, band);
717 size += sizeof(struct wl12xx_ie_header) + rates->header.len; 802 size += sizeof(struct wl12xx_ie_header) + rates->header.len;
718 ptr += sizeof(struct wl12xx_ie_header) + rates->header.len; 803 ptr += sizeof(struct wl12xx_ie_header) + rates->header.len;
719 804
720 /* Extended rates */ 805 /* Extended rates */
721 rates = (struct wl12xx_ie_rates *)ptr; 806 rates = (struct wl12xx_ie_rates *)ptr;
722 rates->header.id = WLAN_EID_EXT_SUPP_RATES; 807 rates->header.id = WLAN_EID_EXT_SUPP_RATES;
723 rates->header.len = wl1271_build_extended_rates(rates->rates); 808 rates->header.len = wl1271_build_extended_rates(rates->rates, band);
724 size += sizeof(struct wl12xx_ie_header) + rates->header.len; 809 size += sizeof(struct wl12xx_ie_header) + rates->header.len;
725 810
726 wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size); 811 wl1271_dump(DEBUG_SCAN, "PROBE REQ: ", &template, size);
727 812
728 return wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4, 813 if (band == IEEE80211_BAND_2GHZ)
729 &template, size); 814 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
815 &template, size);
816 else
817 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
818 &template, size);
819 return ret;
730} 820}
731 821
732int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id) 822int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id)
@@ -743,10 +833,10 @@ int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id)
743 } 833 }
744 834
745 cmd->id = id; 835 cmd->id = id;
746 cmd->key_action = KEY_SET_ID; 836 cmd->key_action = cpu_to_le16(KEY_SET_ID);
747 cmd->key_type = KEY_WEP; 837 cmd->key_type = KEY_WEP;
748 838
749 ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd)); 839 ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
750 if (ret < 0) { 840 if (ret < 0) {
751 wl1271_warning("cmd set_default_wep_key failed: %d", ret); 841 wl1271_warning("cmd set_default_wep_key failed: %d", ret);
752 goto out; 842 goto out;
@@ -759,7 +849,8 @@ out:
759} 849}
760 850
761int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, 851int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
762 u8 key_size, const u8 *key, const u8 *addr) 852 u8 key_size, const u8 *key, const u8 *addr,
853 u32 tx_seq_32, u16 tx_seq_16)
763{ 854{
764 struct wl1271_cmd_set_keys *cmd; 855 struct wl1271_cmd_set_keys *cmd;
765 int ret = 0; 856 int ret = 0;
@@ -773,16 +864,18 @@ int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
773 if (key_type != KEY_WEP) 864 if (key_type != KEY_WEP)
774 memcpy(cmd->addr, addr, ETH_ALEN); 865 memcpy(cmd->addr, addr, ETH_ALEN);
775 866
776 cmd->key_action = action; 867 cmd->key_action = cpu_to_le16(action);
777 cmd->key_size = key_size; 868 cmd->key_size = key_size;
778 cmd->key_type = key_type; 869 cmd->key_type = key_type;
779 870
871 cmd->ac_seq_num16[0] = cpu_to_le16(tx_seq_16);
872 cmd->ac_seq_num32[0] = cpu_to_le32(tx_seq_32);
873
780 /* we have only one SSID profile */ 874 /* we have only one SSID profile */
781 cmd->ssid_profile = 0; 875 cmd->ssid_profile = 0;
782 876
783 cmd->id = id; 877 cmd->id = id;
784 878
785 /* FIXME: this is from wl1251, needs to be checked */
786 if (key_type == KEY_TKIP) { 879 if (key_type == KEY_TKIP) {
787 /* 880 /*
788 * We get the key in the following form: 881 * We get the key in the following form:
@@ -800,7 +893,7 @@ int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
800 893
801 wl1271_dump(DEBUG_CRYPT, "TARGET KEY: ", cmd, sizeof(*cmd)); 894 wl1271_dump(DEBUG_CRYPT, "TARGET KEY: ", cmd, sizeof(*cmd));
802 895
803 ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd)); 896 ret = wl1271_cmd_send(wl, CMD_SET_KEYS, cmd, sizeof(*cmd), 0);
804 if (ret < 0) { 897 if (ret < 0) {
805 wl1271_warning("could not set keys"); 898 wl1271_warning("could not set keys");
806 goto out; 899 goto out;
@@ -811,3 +904,34 @@ out:
811 904
812 return ret; 905 return ret;
813} 906}
907
908int wl1271_cmd_disconnect(struct wl1271 *wl)
909{
910 struct wl1271_cmd_disconnect *cmd;
911 int ret = 0;
912
913 wl1271_debug(DEBUG_CMD, "cmd disconnect");
914
915 cmd = kzalloc(sizeof(*cmd), GFP_KERNEL);
916 if (!cmd) {
917 ret = -ENOMEM;
918 goto out;
919 }
920
921 cmd->rx_config_options = cpu_to_le32(wl->rx_config);
922 cmd->rx_filter_options = cpu_to_le32(wl->rx_filter);
923 /* disconnect reason is not used in immediate disconnections */
924 cmd->type = DISCONNECT_IMMEDIATE;
925
926 ret = wl1271_cmd_send(wl, CMD_DISCONNECT, cmd, sizeof(*cmd), 0);
927 if (ret < 0) {
928 wl1271_error("failed to send disconnect command");
929 goto out_free;
930 }
931
932out_free:
933 kfree(cmd);
934
935out:
936 return ret;
937}
diff --git a/drivers/net/wireless/wl12xx/wl1271_cmd.h b/drivers/net/wireless/wl12xx/wl1271_cmd.h
index 951a8447a516..9d7061b3c8a0 100644
--- a/drivers/net/wireless/wl12xx/wl1271_cmd.h
+++ b/drivers/net/wireless/wl12xx/wl1271_cmd.h
@@ -29,9 +29,9 @@
29 29
30struct acx_header; 30struct acx_header;
31 31
32int wl1271_cmd_send(struct wl1271 *wl, u16 type, void *buf, size_t buf_len); 32int wl1271_cmd_send(struct wl1271 *wl, u16 id, void *buf, size_t len,
33int wl1271_cmd_join(struct wl1271 *wl, u8 bss_type, u8 dtim_interval, 33 size_t res_len);
34 u16 beacon_interval, u8 wait); 34int wl1271_cmd_join(struct wl1271 *wl);
35int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer); 35int wl1271_cmd_test(struct wl1271 *wl, void *buf, size_t buf_len, u8 answer);
36int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len); 36int wl1271_cmd_interrogate(struct wl1271 *wl, u16 id, void *buf, size_t len);
37int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len); 37int wl1271_cmd_configure(struct wl1271 *wl, u16 id, void *buf, size_t len);
@@ -40,16 +40,19 @@ int wl1271_cmd_ps_mode(struct wl1271 *wl, u8 ps_mode);
40int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer, 40int wl1271_cmd_read_memory(struct wl1271 *wl, u32 addr, void *answer,
41 size_t len); 41 size_t len);
42int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len, 42int wl1271_cmd_scan(struct wl1271 *wl, u8 *ssid, size_t len,
43 u8 active_scan, u8 high_prio, u8 num_channels, 43 u8 active_scan, u8 high_prio, u8 band,
44 u8 probe_requests); 44 u8 probe_requests);
45int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id, 45int wl1271_cmd_template_set(struct wl1271 *wl, u16 template_id,
46 void *buf, size_t buf_len); 46 void *buf, size_t buf_len);
47int wl1271_cmd_build_null_data(struct wl1271 *wl); 47int wl1271_cmd_build_null_data(struct wl1271 *wl);
48int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid); 48int wl1271_cmd_build_ps_poll(struct wl1271 *wl, u16 aid);
49int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len); 49int wl1271_cmd_build_probe_req(struct wl1271 *wl, u8 *ssid, size_t ssid_len,
50 u8 band);
50int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id); 51int wl1271_cmd_set_default_wep_key(struct wl1271 *wl, u8 id);
51int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type, 52int wl1271_cmd_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
52 u8 key_size, const u8 *key, const u8 *addr); 53 u8 key_size, const u8 *key, const u8 *addr,
54 u32 tx_seq_32, u16 tx_seq_16);
55int wl1271_cmd_disconnect(struct wl1271 *wl);
53 56
54enum wl1271_commands { 57enum wl1271_commands {
55 CMD_INTERROGATE = 1, /*use this to read information elements*/ 58 CMD_INTERROGATE = 1, /*use this to read information elements*/
@@ -118,8 +121,8 @@ enum cmd_templ {
118#define WL1271_CMD_TEMPL_MAX_SIZE 252 121#define WL1271_CMD_TEMPL_MAX_SIZE 252
119 122
120struct wl1271_cmd_header { 123struct wl1271_cmd_header {
121 u16 id; 124 __le16 id;
122 u16 status; 125 __le16 status;
123 /* payload */ 126 /* payload */
124 u8 data[0]; 127 u8 data[0];
125} __attribute__ ((packed)); 128} __attribute__ ((packed));
@@ -172,17 +175,17 @@ struct cmd_read_write_memory {
172 struct wl1271_cmd_header header; 175 struct wl1271_cmd_header header;
173 176
174 /* The address of the memory to read from or write to.*/ 177 /* The address of the memory to read from or write to.*/
175 u32 addr; 178 __le32 addr;
176 179
177 /* The amount of data in bytes to read from or write to the WiLink 180 /* The amount of data in bytes to read from or write to the WiLink
178 * device.*/ 181 * device.*/
179 u32 size; 182 __le32 size;
180 183
181 /* The actual value read from or written to the Wilink. The source 184 /* The actual value read from or written to the Wilink. The source
182 of this field is the Host in WRITE command or the Wilink in READ 185 of this field is the Host in WRITE command or the Wilink in READ
183 command. */ 186 command. */
184 u8 value[MAX_READ_SIZE]; 187 u8 value[MAX_READ_SIZE];
185}; 188} __attribute__ ((packed));
186 189
187#define CMDMBOX_HEADER_LEN 4 190#define CMDMBOX_HEADER_LEN 4
188#define CMDMBOX_INFO_ELEM_HEADER_LEN 4 191#define CMDMBOX_INFO_ELEM_HEADER_LEN 4
@@ -196,22 +199,23 @@ enum {
196 199
197#define WL1271_JOIN_CMD_CTRL_TX_FLUSH 0x80 /* Firmware flushes all Tx */ 200#define WL1271_JOIN_CMD_CTRL_TX_FLUSH 0x80 /* Firmware flushes all Tx */
198#define WL1271_JOIN_CMD_TX_SESSION_OFFSET 1 201#define WL1271_JOIN_CMD_TX_SESSION_OFFSET 1
202#define WL1271_JOIN_CMD_BSS_TYPE_5GHZ 0x10
199 203
200struct wl1271_cmd_join { 204struct wl1271_cmd_join {
201 struct wl1271_cmd_header header; 205 struct wl1271_cmd_header header;
202 206
203 u32 bssid_lsb; 207 __le32 bssid_lsb;
204 u16 bssid_msb; 208 __le16 bssid_msb;
205 u16 beacon_interval; /* in TBTTs */ 209 __le16 beacon_interval; /* in TBTTs */
206 u32 rx_config_options; 210 __le32 rx_config_options;
207 u32 rx_filter_options; 211 __le32 rx_filter_options;
208 212
209 /* 213 /*
210 * The target uses this field to determine the rate at 214 * The target uses this field to determine the rate at
211 * which to transmit control frame responses (such as 215 * which to transmit control frame responses (such as
212 * ACK or CTS frames). 216 * ACK or CTS frames).
213 */ 217 */
214 u32 basic_rate_set; 218 __le32 basic_rate_set;
215 u8 dtim_interval; 219 u8 dtim_interval;
216 /* 220 /*
217 * bits 0-2: This bitwise field specifies the type 221 * bits 0-2: This bitwise field specifies the type
@@ -240,10 +244,10 @@ struct cmd_enabledisable_path {
240struct wl1271_cmd_template_set { 244struct wl1271_cmd_template_set {
241 struct wl1271_cmd_header header; 245 struct wl1271_cmd_header header;
242 246
243 u16 len; 247 __le16 len;
244 u8 template_type; 248 u8 template_type;
245 u8 index; /* relevant only for KLV_TEMPLATE type */ 249 u8 index; /* relevant only for KLV_TEMPLATE type */
246 u32 enabled_rates; 250 __le32 enabled_rates;
247 u8 short_retry_limit; 251 u8 short_retry_limit;
248 u8 long_retry_limit; 252 u8 long_retry_limit;
249 u8 aflags; 253 u8 aflags;
@@ -280,18 +284,13 @@ struct wl1271_cmd_ps_params {
280 * to power save mode. 284 * to power save mode.
281 */ 285 */
282 u8 hang_over_period; 286 u8 hang_over_period;
283 u32 null_data_rate; 287 __le32 null_data_rate;
284} __attribute__ ((packed)); 288} __attribute__ ((packed));
285 289
286/* HW encryption keys */ 290/* HW encryption keys */
287#define NUM_ACCESS_CATEGORIES_COPY 4 291#define NUM_ACCESS_CATEGORIES_COPY 4
288#define MAX_KEY_SIZE 32 292#define MAX_KEY_SIZE 32
289 293
290/* When set, disable HW encryption */
291#define DF_ENCRYPTION_DISABLE 0x01
292/* When set, disable HW decryption */
293#define DF_SNIFF_MODE_ENABLE 0x80
294
295enum wl1271_cmd_key_action { 294enum wl1271_cmd_key_action {
296 KEY_ADD_OR_REPLACE = 1, 295 KEY_ADD_OR_REPLACE = 1,
297 KEY_REMOVE = 2, 296 KEY_REMOVE = 2,
@@ -316,9 +315,9 @@ struct wl1271_cmd_set_keys {
316 u8 addr[ETH_ALEN]; 315 u8 addr[ETH_ALEN];
317 316
318 /* key_action_e */ 317 /* key_action_e */
319 u16 key_action; 318 __le16 key_action;
320 319
321 u16 reserved_1; 320 __le16 reserved_1;
322 321
323 /* key size in bytes */ 322 /* key size in bytes */
324 u8 key_size; 323 u8 key_size;
@@ -334,8 +333,8 @@ struct wl1271_cmd_set_keys {
334 u8 id; 333 u8 id;
335 u8 reserved_2[6]; 334 u8 reserved_2[6];
336 u8 key[MAX_KEY_SIZE]; 335 u8 key[MAX_KEY_SIZE];
337 u16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY]; 336 __le16 ac_seq_num16[NUM_ACCESS_CATEGORIES_COPY];
338 u32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY]; 337 __le32 ac_seq_num32[NUM_ACCESS_CATEGORIES_COPY];
339} __attribute__ ((packed)); 338} __attribute__ ((packed));
340 339
341 340
@@ -347,19 +346,22 @@ struct wl1271_cmd_set_keys {
347#define WL1271_SCAN_OPT_PRIORITY_HIGH 4 346#define WL1271_SCAN_OPT_PRIORITY_HIGH 4
348#define WL1271_SCAN_CHAN_MIN_DURATION 30000 /* TU */ 347#define WL1271_SCAN_CHAN_MIN_DURATION 30000 /* TU */
349#define WL1271_SCAN_CHAN_MAX_DURATION 60000 /* TU */ 348#define WL1271_SCAN_CHAN_MAX_DURATION 60000 /* TU */
349#define WL1271_SCAN_BAND_2_4_GHZ 0
350#define WL1271_SCAN_BAND_5_GHZ 1
351#define WL1271_SCAN_BAND_DUAL 2
350 352
351struct basic_scan_params { 353struct basic_scan_params {
352 u32 rx_config_options; 354 __le32 rx_config_options;
353 u32 rx_filter_options; 355 __le32 rx_filter_options;
354 /* Scan option flags (WL1271_SCAN_OPT_*) */ 356 /* Scan option flags (WL1271_SCAN_OPT_*) */
355 u16 scan_options; 357 __le16 scan_options;
356 /* Number of scan channels in the list (maximum 30) */ 358 /* Number of scan channels in the list (maximum 30) */
357 u8 num_channels; 359 u8 num_channels;
358 /* This field indicates the number of probe requests to send 360 /* This field indicates the number of probe requests to send
359 per channel for an active scan */ 361 per channel for an active scan */
360 u8 num_probe_requests; 362 u8 num_probe_requests;
361 /* Rate bit field for sending the probes */ 363 /* Rate bit field for sending the probes */
362 u32 tx_rate; 364 __le32 tx_rate;
363 u8 tid_trigger; 365 u8 tid_trigger;
364 u8 ssid_len; 366 u8 ssid_len;
365 /* in order to align */ 367 /* in order to align */
@@ -374,10 +376,10 @@ struct basic_scan_params {
374 376
375struct basic_scan_channel_params { 377struct basic_scan_channel_params {
376 /* Duration in TU to wait for frames on a channel for active scan */ 378 /* Duration in TU to wait for frames on a channel for active scan */
377 u32 min_duration; 379 __le32 min_duration;
378 u32 max_duration; 380 __le32 max_duration;
379 u32 bssid_lsb; 381 __le32 bssid_lsb;
380 u16 bssid_msb; 382 __le16 bssid_msb;
381 u8 early_termination; 383 u8 early_termination;
382 u8 tx_power_att; 384 u8 tx_power_att;
383 u8 channel; 385 u8 channel;
@@ -397,13 +399,13 @@ struct wl1271_cmd_scan {
397struct wl1271_cmd_trigger_scan_to { 399struct wl1271_cmd_trigger_scan_to {
398 struct wl1271_cmd_header header; 400 struct wl1271_cmd_header header;
399 401
400 u32 timeout; 402 __le32 timeout;
401}; 403} __attribute__ ((packed));
402 404
403struct wl1271_cmd_test_header { 405struct wl1271_cmd_test_header {
404 u8 id; 406 u8 id;
405 u8 padding[3]; 407 u8 padding[3];
406}; 408} __attribute__ ((packed));
407 409
408enum wl1271_channel_tune_bands { 410enum wl1271_channel_tune_bands {
409 WL1271_CHANNEL_TUNE_BAND_2_4, 411 WL1271_CHANNEL_TUNE_BAND_2_4,
@@ -425,7 +427,7 @@ struct wl1271_cmd_cal_channel_tune {
425 u8 band; 427 u8 band;
426 u8 channel; 428 u8 channel;
427 429
428 u16 radio_status; 430 __le16 radio_status;
429} __attribute__ ((packed)); 431} __attribute__ ((packed));
430 432
431struct wl1271_cmd_cal_update_ref_point { 433struct wl1271_cmd_cal_update_ref_point {
@@ -433,8 +435,8 @@ struct wl1271_cmd_cal_update_ref_point {
433 435
434 struct wl1271_cmd_test_header test; 436 struct wl1271_cmd_test_header test;
435 437
436 s32 ref_power; 438 __le32 ref_power;
437 s32 ref_detector; 439 __le32 ref_detector;
438 u8 sub_band; 440 u8 sub_band;
439 u8 padding[3]; 441 u8 padding[3];
440} __attribute__ ((packed)); 442} __attribute__ ((packed));
@@ -449,16 +451,42 @@ struct wl1271_cmd_cal_p2g {
449 451
450 struct wl1271_cmd_test_header test; 452 struct wl1271_cmd_test_header test;
451 453
452 u16 len; 454 __le16 len;
453 u8 buf[MAX_TLV_LENGTH]; 455 u8 buf[MAX_TLV_LENGTH];
454 u8 type; 456 u8 type;
455 u8 padding; 457 u8 padding;
456 458
457 s16 radio_status; 459 __le16 radio_status;
458 u8 nvs_version[MAX_NVS_VERSION_LENGTH]; 460 u8 nvs_version[MAX_NVS_VERSION_LENGTH];
459 461
460 u8 sub_band_mask; 462 u8 sub_band_mask;
461 u8 padding2; 463 u8 padding2;
462} __attribute__ ((packed)); 464} __attribute__ ((packed));
463 465
466
467/*
468 * There are three types of disconnections:
469 *
470 * DISCONNECT_IMMEDIATE: the fw doesn't send any frames
471 * DISCONNECT_DEAUTH: the fw generates a DEAUTH request with the reason
472 * we have passed
473 * DISCONNECT_DISASSOC: the fw generates a DESASSOC request with the reason
474 * we have passed
475 */
476enum wl1271_disconnect_type {
477 DISCONNECT_IMMEDIATE,
478 DISCONNECT_DEAUTH,
479 DISCONNECT_DISASSOC
480};
481
482struct wl1271_cmd_disconnect {
483 __le32 rx_config_options;
484 __le32 rx_filter_options;
485
486 __le16 reason;
487 u8 type;
488
489 u8 padding;
490} __attribute__ ((packed));
491
464#endif /* __WL1271_CMD_H__ */ 492#endif /* __WL1271_CMD_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_conf.h b/drivers/net/wireless/wl12xx/wl1271_conf.h
new file mode 100644
index 000000000000..565373ede265
--- /dev/null
+++ b/drivers/net/wireless/wl12xx/wl1271_conf.h
@@ -0,0 +1,919 @@
1/*
2 * This file is part of wl1271
3 *
4 * Copyright (C) 2009 Nokia Corporation
5 *
6 * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7 *
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License
10 * version 2 as published by the Free Software Foundation.
11 *
12 * This program is distributed in the hope that it will be useful, but
13 * WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * General Public License for more details.
16 *
17 * You should have received a copy of the GNU General Public License
18 * along with this program; if not, write to the Free Software
19 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20 * 02110-1301 USA
21 *
22 */
23
24#ifndef __WL1271_CONF_H__
25#define __WL1271_CONF_H__
26
27enum {
28 CONF_HW_BIT_RATE_1MBPS = BIT(0),
29 CONF_HW_BIT_RATE_2MBPS = BIT(1),
30 CONF_HW_BIT_RATE_5_5MBPS = BIT(2),
31 CONF_HW_BIT_RATE_6MBPS = BIT(3),
32 CONF_HW_BIT_RATE_9MBPS = BIT(4),
33 CONF_HW_BIT_RATE_11MBPS = BIT(5),
34 CONF_HW_BIT_RATE_12MBPS = BIT(6),
35 CONF_HW_BIT_RATE_18MBPS = BIT(7),
36 CONF_HW_BIT_RATE_22MBPS = BIT(8),
37 CONF_HW_BIT_RATE_24MBPS = BIT(9),
38 CONF_HW_BIT_RATE_36MBPS = BIT(10),
39 CONF_HW_BIT_RATE_48MBPS = BIT(11),
40 CONF_HW_BIT_RATE_54MBPS = BIT(12),
41 CONF_HW_BIT_RATE_MCS_0 = BIT(13),
42 CONF_HW_BIT_RATE_MCS_1 = BIT(14),
43 CONF_HW_BIT_RATE_MCS_2 = BIT(15),
44 CONF_HW_BIT_RATE_MCS_3 = BIT(16),
45 CONF_HW_BIT_RATE_MCS_4 = BIT(17),
46 CONF_HW_BIT_RATE_MCS_5 = BIT(18),
47 CONF_HW_BIT_RATE_MCS_6 = BIT(19),
48 CONF_HW_BIT_RATE_MCS_7 = BIT(20)
49};
50
51enum {
52 CONF_HW_RATE_INDEX_1MBPS = 0,
53 CONF_HW_RATE_INDEX_2MBPS = 1,
54 CONF_HW_RATE_INDEX_5_5MBPS = 2,
55 CONF_HW_RATE_INDEX_6MBPS = 3,
56 CONF_HW_RATE_INDEX_9MBPS = 4,
57 CONF_HW_RATE_INDEX_11MBPS = 5,
58 CONF_HW_RATE_INDEX_12MBPS = 6,
59 CONF_HW_RATE_INDEX_18MBPS = 7,
60 CONF_HW_RATE_INDEX_22MBPS = 8,
61 CONF_HW_RATE_INDEX_24MBPS = 9,
62 CONF_HW_RATE_INDEX_36MBPS = 10,
63 CONF_HW_RATE_INDEX_48MBPS = 11,
64 CONF_HW_RATE_INDEX_54MBPS = 12,
65 CONF_HW_RATE_INDEX_MAX = CONF_HW_RATE_INDEX_54MBPS,
66};
67
68struct conf_sg_settings {
69 /*
70 * Defines the PER threshold in PPM of the BT voice of which reaching
71 * this value will trigger raising the priority of the BT voice by
72 * the BT IP until next NFS sample interval time as defined in
73 * nfs_sample_interval.
74 *
75 * Unit: PER value in PPM (parts per million)
76 * #Error_packets / #Total_packets
77
78 * Range: u32
79 */
80 u32 per_threshold;
81
82 /*
83 * This value is an absolute time in micro-seconds to limit the
84 * maximum scan duration compensation while in SG
85 */
86 u32 max_scan_compensation_time;
87
88 /* Defines the PER threshold of the BT voice of which reaching this
89 * value will trigger raising the priority of the BT voice until next
90 * NFS sample interval time as defined in sample_interval.
91 *
92 * Unit: msec
93 * Range: 1-65000
94 */
95 u16 nfs_sample_interval;
96
97 /*
98 * Defines the load ratio for the BT.
99 * The WLAN ratio is: 100 - load_ratio
100 *
101 * Unit: Percent
102 * Range: 0-100
103 */
104 u8 load_ratio;
105
106 /*
107 * true - Co-ex is allowed to enter/exit P.S automatically and
108 * transparently to the host
109 *
110 * false - Co-ex is disallowed to enter/exit P.S and will trigger an
111 * event to the host to notify for the need to enter/exit P.S
112 * due to BT change state
113 *
114 */
115 u8 auto_ps_mode;
116
117 /*
118 * This parameter defines the compensation percentage of num of probe
119 * requests in case scan is initiated during BT voice/BT ACL
120 * guaranteed link.
121 *
122 * Unit: Percent
123 * Range: 0-255 (0 - No compensation)
124 */
125 u8 probe_req_compensation;
126
127 /*
128 * This parameter defines the compensation percentage of scan window
129 * size in case scan is initiated during BT voice/BT ACL Guaranteed
130 * link.
131 *
132 * Unit: Percent
133 * Range: 0-255 (0 - No compensation)
134 */
135 u8 scan_window_compensation;
136
137 /*
138 * Defines the antenna configuration.
139 *
140 * Range: 0 - Single Antenna; 1 - Dual Antenna
141 */
142 u8 antenna_config;
143
144 /*
145 * The percent out of the Max consecutive beacon miss roaming trigger
146 * which is the threshold for raising the priority of beacon
147 * reception.
148 *
149 * Range: 1-100
150 * N = MaxConsecutiveBeaconMiss
151 * P = coexMaxConsecutiveBeaconMissPrecent
152 * Threshold = MIN( N-1, round(N * P / 100))
153 */
154 u8 beacon_miss_threshold;
155
156 /*
157 * The RX rate threshold below which rate adaptation is assumed to be
158 * occurring at the AP which will raise priority for ACTIVE_RX and RX
159 * SP.
160 *
161 * Range: HW_BIT_RATE_*
162 */
163 u32 rate_adaptation_threshold;
164
165 /*
166 * The SNR above which the RX rate threshold indicating AP rate
167 * adaptation is valid
168 *
169 * Range: -128 - 127
170 */
171 s8 rate_adaptation_snr;
172};
173
174enum conf_rx_queue_type {
175 CONF_RX_QUEUE_TYPE_LOW_PRIORITY, /* All except the high priority */
176 CONF_RX_QUEUE_TYPE_HIGH_PRIORITY, /* Management and voice packets */
177};
178
179struct conf_rx_settings {
180 /*
181 * The maximum amount of time, in TU, before the
182 * firmware discards the MSDU.
183 *
184 * Range: 0 - 0xFFFFFFFF
185 */
186 u32 rx_msdu_life_time;
187
188 /*
189 * Packet detection threshold in the PHY.
190 *
191 * FIXME: details unknown.
192 */
193 u32 packet_detection_threshold;
194
195 /*
196 * The longest time the STA will wait to receive traffic from the AP
197 * after a PS-poll has been transmitted.
198 *
199 * Range: 0 - 200000
200 */
201 u16 ps_poll_timeout;
202 /*
203 * The longest time the STA will wait to receive traffic from the AP
204 * after a frame has been sent from an UPSD enabled queue.
205 *
206 * Range: 0 - 200000
207 */
208 u16 upsd_timeout;
209
210 /*
211 * The number of octets in an MPDU, below which an RTS/CTS
212 * handshake is not performed.
213 *
214 * Range: 0 - 4096
215 */
216 u16 rts_threshold;
217
218 /*
219 * The RX Clear Channel Assessment threshold in the PHY
220 * (the energy threshold).
221 *
222 * Range: ENABLE_ENERGY_D == 0x140A
223 * DISABLE_ENERGY_D == 0xFFEF
224 */
225 u16 rx_cca_threshold;
226
227 /*
228 * Occupied Rx mem-blocks number which requires interrupting the host
229 * (0 = no buffering, 0xffff = disabled).
230 *
231 * Range: u16
232 */
233 u16 irq_blk_threshold;
234
235 /*
236 * Rx packets number which requires interrupting the host
237 * (0 = no buffering).
238 *
239 * Range: u16
240 */
241 u16 irq_pkt_threshold;
242
243 /*
244 * Max time in msec the FW may delay RX-Complete interrupt.
245 *
246 * Range: 1 - 100
247 */
248 u16 irq_timeout;
249
250 /*
251 * The RX queue type.
252 *
253 * Range: RX_QUEUE_TYPE_RX_LOW_PRIORITY, RX_QUEUE_TYPE_RX_HIGH_PRIORITY,
254 */
255 u8 queue_type;
256};
257
258#define CONF_TX_MAX_RATE_CLASSES 8
259
260#define CONF_TX_RATE_MASK_UNSPECIFIED 0
261#define CONF_TX_RATE_MASK_ALL 0x1eff
262#define CONF_TX_RATE_RETRY_LIMIT 10
263
264struct conf_tx_rate_class {
265
266 /*
267 * The rates enabled for this rate class.
268 *
269 * Range: CONF_HW_BIT_RATE_* bit mask
270 */
271 u32 enabled_rates;
272
273 /*
274 * The dot11 short retry limit used for TX retries.
275 *
276 * Range: u8
277 */
278 u8 short_retry_limit;
279
280 /*
281 * The dot11 long retry limit used for TX retries.
282 *
283 * Range: u8
284 */
285 u8 long_retry_limit;
286
287 /*
288 * Flags controlling the attributes of TX transmission.
289 *
290 * Range: bit 0: Truncate - when set, FW attempts to send a frame stop
291 * when the total valid per-rate attempts have
292 * been exhausted; otherwise transmissions
293 * will continue at the lowest available rate
294 * until the appropriate one of the
295 * short_retry_limit, long_retry_limit,
296 * dot11_max_transmit_msdu_life_time, or
297 * max_tx_life_time, is exhausted.
298 * 1: Preamble Override - indicates if the preamble type
299 * should be used in TX.
300 * 2: Preamble Type - the type of the preamble to be used by
301 * the policy (0 - long preamble, 1 - short preamble.
302 */
303 u8 aflags;
304};
305
306#define CONF_TX_MAX_AC_COUNT 4
307
308/* Slot number setting to start transmission at PIFS interval */
309#define CONF_TX_AIFS_PIFS 1
310/* Slot number setting to start transmission at DIFS interval normal
311 * DCF access */
312#define CONF_TX_AIFS_DIFS 2
313
314
315enum conf_tx_ac {
316 CONF_TX_AC_BE = 0, /* best effort / legacy */
317 CONF_TX_AC_BK = 1, /* background */
318 CONF_TX_AC_VI = 2, /* video */
319 CONF_TX_AC_VO = 3, /* voice */
320 CONF_TX_AC_CTS2SELF = 4, /* fictious AC, follows AC_VO */
321 CONF_TX_AC_ANY_TID = 0x1f
322};
323
324struct conf_tx_ac_category {
325 /*
326 * The AC class identifier.
327 *
328 * Range: enum conf_tx_ac
329 */
330 u8 ac;
331
332 /*
333 * The contention window minimum size (in slots) for the access
334 * class.
335 *
336 * Range: u8
337 */
338 u8 cw_min;
339
340 /*
341 * The contention window maximum size (in slots) for the access
342 * class.
343 *
344 * Range: u8
345 */
346 u16 cw_max;
347
348 /*
349 * The AIF value (in slots) for the access class.
350 *
351 * Range: u8
352 */
353 u8 aifsn;
354
355 /*
356 * The TX Op Limit (in microseconds) for the access class.
357 *
358 * Range: u16
359 */
360 u16 tx_op_limit;
361};
362
363#define CONF_TX_MAX_TID_COUNT 7
364
365enum {
366 CONF_CHANNEL_TYPE_DCF = 0, /* DC/LEGACY*/
367 CONF_CHANNEL_TYPE_EDCF = 1, /* EDCA*/
368 CONF_CHANNEL_TYPE_HCCA = 2, /* HCCA*/
369};
370
371enum {
372 CONF_PS_SCHEME_LEGACY = 0,
373 CONF_PS_SCHEME_UPSD_TRIGGER = 1,
374 CONF_PS_SCHEME_LEGACY_PSPOLL = 2,
375 CONF_PS_SCHEME_SAPSD = 3,
376};
377
378enum {
379 CONF_ACK_POLICY_LEGACY = 0,
380 CONF_ACK_POLICY_NO_ACK = 1,
381 CONF_ACK_POLICY_BLOCK = 2,
382};
383
384
385struct conf_tx_tid {
386 u8 queue_id;
387 u8 channel_type;
388 u8 tsid;
389 u8 ps_scheme;
390 u8 ack_policy;
391 u32 apsd_conf[2];
392};
393
394struct conf_tx_settings {
395 /*
396 * The TX ED value for TELEC Enable/Disable.
397 *
398 * Range: 0, 1
399 */
400 u8 tx_energy_detection;
401
402 /*
403 * Configuration for rate classes for TX (currently only one
404 * rate class supported.)
405 */
406 struct conf_tx_rate_class rc_conf;
407
408 /*
409 * Configuration for access categories for TX rate control.
410 */
411 u8 ac_conf_count;
412 struct conf_tx_ac_category ac_conf[CONF_TX_MAX_AC_COUNT];
413
414 /*
415 * Configuration for TID parameters.
416 */
417 u8 tid_conf_count;
418 struct conf_tx_tid tid_conf[CONF_TX_MAX_TID_COUNT];
419
420 /*
421 * The TX fragmentation threshold.
422 *
423 * Range: u16
424 */
425 u16 frag_threshold;
426
427 /*
428 * Max time in msec the FW may delay frame TX-Complete interrupt.
429 *
430 * Range: u16
431 */
432 u16 tx_compl_timeout;
433
434 /*
435 * Completed TX packet count which requires to issue the TX-Complete
436 * interrupt.
437 *
438 * Range: u16
439 */
440 u16 tx_compl_threshold;
441
442};
443
444enum {
445 CONF_WAKE_UP_EVENT_BEACON = 0x01, /* Wake on every Beacon*/
446 CONF_WAKE_UP_EVENT_DTIM = 0x02, /* Wake on every DTIM*/
447 CONF_WAKE_UP_EVENT_N_DTIM = 0x04, /* Wake every Nth DTIM */
448 CONF_WAKE_UP_EVENT_N_BEACONS = 0x08, /* Wake every Nth beacon */
449 CONF_WAKE_UP_EVENT_BITS_MASK = 0x0F
450};
451
452#define CONF_MAX_BCN_FILT_IE_COUNT 32
453
454#define CONF_BCN_RULE_PASS_ON_CHANGE BIT(0)
455#define CONF_BCN_RULE_PASS_ON_APPEARANCE BIT(1)
456
457#define CONF_BCN_IE_OUI_LEN 3
458#define CONF_BCN_IE_VER_LEN 2
459
460struct conf_bcn_filt_rule {
461 /*
462 * IE number to which to associate a rule.
463 *
464 * Range: u8
465 */
466 u8 ie;
467
468 /*
469 * Rule to associate with the specific ie.
470 *
471 * Range: CONF_BCN_RULE_PASS_ON_*
472 */
473 u8 rule;
474
475 /*
476 * OUI for the vendor specifie IE (221)
477 */
478 u8 oui[CONF_BCN_IE_OUI_LEN];
479
480 /*
481 * Type for the vendor specifie IE (221)
482 */
483 u8 type;
484
485 /*
486 * Version for the vendor specifie IE (221)
487 */
488 u8 version[CONF_BCN_IE_VER_LEN];
489};
490
491#define CONF_MAX_RSSI_SNR_TRIGGERS 8
492
493enum {
494 CONF_TRIG_METRIC_RSSI_BEACON = 0,
495 CONF_TRIG_METRIC_RSSI_DATA,
496 CONF_TRIG_METRIC_SNR_BEACON,
497 CONF_TRIG_METRIC_SNR_DATA
498};
499
500enum {
501 CONF_TRIG_EVENT_TYPE_LEVEL = 0,
502 CONF_TRIG_EVENT_TYPE_EDGE
503};
504
505enum {
506 CONF_TRIG_EVENT_DIR_LOW = 0,
507 CONF_TRIG_EVENT_DIR_HIGH,
508 CONF_TRIG_EVENT_DIR_BIDIR
509};
510
511
/*
 * One RSSI/SNR signal trigger rule. Up to CONF_MAX_RSSI_SNR_TRIGGERS of
 * these may be configured (see conf_conn_settings.sig_trigger[]).
 */
struct conf_sig_trigger {
	/*
	 * The RSSI / SNR threshold value.
	 *
	 * FIXME: what is the range?
	 */
	s16 threshold;

	/*
	 * Minimum delay between two trigger events for this trigger in ms.
	 *
	 * Range: 0 - 60000
	 */
	u16 pacing;

	/*
	 * The measurement data source for this trigger.
	 *
	 * Range: CONF_TRIG_METRIC_*
	 */
	u8 metric;

	/*
	 * The trigger type of this trigger.
	 *
	 * Range: CONF_TRIG_EVENT_TYPE_*
	 */
	u8 type;

	/*
	 * The direction of the trigger.
	 *
	 * Range: CONF_TRIG_EVENT_DIR_*
	 */
	u8 direction;

	/*
	 * Hysteresis range of the trigger around the threshold (in dB)
	 *
	 * Range: u8
	 */
	u8 hysteresis;

	/*
	 * Index of the trigger rule.
	 *
	 * Range: 0 - CONF_MAX_RSSI_SNR_TRIGGERS-1
	 */
	u8 index;

	/*
	 * Enable / disable this rule (to use for clearing rules.)
	 *
	 * Range: 1 - Enabled, 2 - Not enabled
	 * (NOTE(review): 2 rather than 0 for "not enabled" looks odd but is
	 * what the firmware interface appears to expect — confirm against
	 * the firmware API documentation.)
	 */
	u8 enable;
};
569
/*
 * Weights used when averaging the RSSI/SNR measurements that feed the
 * signal triggers above.
 */
struct conf_sig_weights {

	/*
	 * RSSI from beacons average weight.
	 *
	 * Range: u8
	 */
	u8 rssi_bcn_avg_weight;

	/*
	 * RSSI from data average weight.
	 *
	 * Range: u8
	 */
	u8 rssi_pkt_avg_weight;

	/*
	 * SNR from beacons average weight.
	 *
	 * Range: u8
	 */
	u8 snr_bcn_avg_weight;

	/*
	 * SNR from data average weight.
	 *
	 * Range: u8
	 */
	u8 snr_pkt_avg_weight;
};
600
/* Beacon filtering on/off switch */
enum conf_bcn_filt_mode {
	CONF_BCN_FILT_MODE_DISABLED = 0,
	CONF_BCN_FILT_MODE_ENABLED = 1
};

/* Beacon early termination (BET) on/off switch */
enum conf_bet_mode {
	CONF_BET_MODE_DISABLE = 0,
	CONF_BET_MODE_ENABLE = 1,
};
610
/*
 * Connection-management configuration: power-save wake-up policy, beacon
 * filtering, link-loss thresholds and PSM retry behaviour.
 */
struct conf_conn_settings {
	/*
	 * Firmware wakeup conditions configuration. The host may set only
	 * one bit.
	 *
	 * Range: CONF_WAKE_UP_EVENT_*
	 */
	u8 wake_up_event;

	/*
	 * Listen interval for beacons or Dtims.
	 *
	 * Range: 0 for beacon and Dtim wakeup
	 *        1-10 for x Dtims
	 *        1-255 for x beacons
	 */
	u8 listen_interval;

	/*
	 * Enable or disable the beacon filtering.
	 *
	 * Range: CONF_BCN_FILT_MODE_*
	 */
	enum conf_bcn_filt_mode bcn_filt_mode;

	/*
	 * Configure Beacon filter pass-thru rules.
	 * bcn_filt_ie_count gives the number of valid entries in
	 * bcn_filt_ie[].
	 */
	u8 bcn_filt_ie_count;
	struct conf_bcn_filt_rule bcn_filt_ie[CONF_MAX_BCN_FILT_IE_COUNT];

	/*
	 * The number of consecutive beacons to lose, before the firmware
	 * becomes out of synch.
	 *
	 * Range: u32
	 */
	u32 synch_fail_thold;

	/*
	 * After out-of-synch, the number of TU's to wait without a further
	 * received beacon (or probe response) before issuing the BSS_EVENT_LOSE
	 * event.
	 *
	 * Range: u32
	 */
	u32 bss_lose_timeout;

	/*
	 * Beacon receive timeout.
	 *
	 * Range: u32
	 */
	u32 beacon_rx_timeout;

	/*
	 * Broadcast receive timeout.
	 *
	 * Range: u32
	 */
	u32 broadcast_timeout;

	/*
	 * Enable/disable reception of broadcast packets in power save mode
	 *
	 * Range: 1 - enable, 0 - disable
	 */
	u8 rx_broadcast_in_ps;

	/*
	 * Consecutive PS Poll failures before sending event to driver
	 *
	 * Range: u8
	 */
	u8 ps_poll_threshold;

	/*
	 * Configuration of signal (rssi/snr) triggers.
	 * sig_trigger_count gives the number of valid entries in
	 * sig_trigger[].
	 */
	u8 sig_trigger_count;
	struct conf_sig_trigger sig_trigger[CONF_MAX_RSSI_SNR_TRIGGERS];

	/*
	 * Configuration of signal average weights.
	 */
	struct conf_sig_weights sig_weights;

	/*
	 * Specifies if beacon early termination procedure is enabled or
	 * disabled.
	 *
	 * Range: CONF_BET_MODE_*
	 */
	u8 bet_enable;

	/*
	 * Specifies the maximum number of consecutive beacons that may be
	 * early terminated. After this number is reached at least one full
	 * beacon must be correctly received in FW before beacon ET
	 * resumes.
	 *
	 * Range 0 - 255
	 */
	u8 bet_max_consecutive;

	/*
	 * Specifies the maximum number of times to try PSM entry if it fails
	 * (if sending the appropriate null-func message fails.)
	 *
	 * Range 0 - 255
	 */
	u8 psm_entry_retries;
};
724
/* Maximum number of entries in a Smart Reflex error table */
#define CONF_SR_ERR_TBL_MAX_VALUES 14

/*
 * One Smart Reflex error table.
 * NOTE(review): "mart" looks like a typo for "smart"; the name is kept
 * because other code refers to this struct by its current name.
 */
struct conf_mart_reflex_err_table {
	/*
	 * Length of the error table values table.
	 *
	 * Range: 0 - CONF_SR_ERR_TBL_MAX_VALUES
	 */
	u8 len;

	/*
	 * Smart Reflex error table upper limit.
	 *
	 * Range: s8
	 */
	s8 upper_limit;

	/*
	 * Smart Reflex error table values.
	 *
	 * Range: s8
	 */
	s8 values[CONF_SR_ERR_TBL_MAX_VALUES];
};
749
/* RF reference clock speed: 19.2, 26, 38.4 or 52 MHz */
enum {
	CONF_REF_CLK_19_2_E,
	CONF_REF_CLK_26_E,
	CONF_REF_CLK_38_4_E,
	CONF_REF_CLK_52_E
};

/* Single-band (2.4 GHz only) vs. dual-band (2.4 + 5 GHz) operation */
enum single_dual_band_enum {
	CONF_SINGLE_BAND,
	CONF_DUAL_BAND
};
761
/*
 * General hardware initialization parameters, written to the firmware
 * via the INI-file general-parameter test command.
 */
struct conf_general_parms {
	/*
	 * RF Reference Clock type / speed
	 *
	 * Range: CONF_REF_CLK_*
	 */
	u8 ref_clk;

	/*
	 * Settling time of the reference clock after boot.
	 *
	 * Range: u8
	 */
	u8 settling_time;

	/*
	 * Flag defining whether clock is valid on wakeup.
	 *
	 * Range: 0 - not valid on wakeup, 1 - valid on wakeup
	 */
	u8 clk_valid_on_wakeup;

	/*
	 * DC-to-DC mode.
	 *
	 * Range: Unknown
	 */
	u8 dc2dcmode;

	/*
	 * Flag defining whether used as single or dual-band.
	 *
	 * Range: CONF_SINGLE_BAND, CONF_DUAL_BAND
	 */
	u8 single_dual_band;

	/*
	 * TX bip fem autodetect flag.
	 *
	 * Range: Unknown
	 */
	u8 tx_bip_fem_autodetect;

	/*
	 * TX bip fem manufacturer.
	 *
	 * Range: Unknown
	 */
	u8 tx_bip_fem_manufacturer;

	/*
	 * Settings flags.
	 *
	 * Range: Unknown
	 */
	u8 settings;
};
819
/* Array sizes for the radio parameter tables below */
#define CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE 15
#define CONF_NUMBER_OF_SUB_BANDS_5              7
#define CONF_NUMBER_OF_RATE_GROUPS              6
#define CONF_NUMBER_OF_CHANNELS_2_4             14
#define CONF_NUMBER_OF_CHANNELS_5               35

/*
 * Radio calibration parameters for both bands, written to the firmware
 * via the INI-file radio-parameter test command.
 */
struct conf_radio_parms {
	/*
	 * Static radio parameters for 2.4GHz
	 *
	 * Range: unknown
	 */
	u8 rx_trace_loss;
	u8 tx_trace_loss;
	s8 rx_rssi_and_proc_compens[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];

	/*
	 * Static radio parameters for 5GHz
	 *
	 * Range: unknown
	 */
	u8 rx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
	u8 tx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
	s8 rx_rssi_and_proc_compens_5[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];

	/*
	 * Dynamic radio parameters for 2.4GHz
	 *
	 * Range: unknown
	 */
	s16 tx_ref_pd_voltage;
	s8  tx_ref_power;
	s8  tx_offset_db;

	s8  tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
	s8  tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];

	s8  tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
	s8  tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
	s8  tx_pdv_rate_offsets[CONF_NUMBER_OF_RATE_GROUPS];

	u8  tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
	u8  rx_fem_insertion_loss;

	/*
	 * Dynamic radio parameters for 5GHz
	 *
	 * Range: unknown
	 */
	s16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
	s8  tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
	s8  tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];

	s8  tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
	s8  tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];

	s8  tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
	s8  tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];

	/* FIXME: this is inconsistent with the types for 2.4GHz
	 * (tx_ibias is u8 and rx_fem_insertion_loss is u8 there) */
	s8  tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
	s8  rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
};
883
/* Number of Smart Reflex error tables */
#define CONF_SR_ERR_TBL_COUNT 3

/*
 * Hardware initialization settings: Smart Reflex, general and radio
 * parameters.
 */
struct conf_init_settings {
	/*
	 * Configure Smart Reflex error table values.
	 */
	struct conf_mart_reflex_err_table sr_err_tbl[CONF_SR_ERR_TBL_COUNT];

	/*
	 * Smart Reflex enable flag.
	 *
	 * Range: 1 - Smart Reflex enabled, 0 - Smart Reflex disabled
	 */
	u8 sr_enable;

	/*
	 * Configure general parameters.
	 */
	struct conf_general_parms genparam;

	/*
	 * Configure radio parameters.
	 */
	struct conf_radio_parms radioparam;

};
910
/*
 * Top-level driver configuration, aggregating all configuration groups.
 * (sg/rx/tx member struct definitions appear earlier in this header.)
 */
struct conf_drv_settings {
	struct conf_sg_settings sg;     /* soft gemini (BT coexistence) */
	struct conf_rx_settings rx;     /* receive path */
	struct conf_tx_settings tx;     /* transmit path */
	struct conf_conn_settings conn; /* connection management */
	struct conf_init_settings init; /* hardware initialization */
};
918
919#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.c b/drivers/net/wireless/wl12xx/wl1271_event.c
index f3afd4a6ff33..e135d894b42a 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.c
+++ b/drivers/net/wireless/wl12xx/wl1271_event.c
@@ -26,23 +26,82 @@
26#include "wl1271_spi.h" 26#include "wl1271_spi.h"
27#include "wl1271_event.h" 27#include "wl1271_event.h"
28#include "wl1271_ps.h" 28#include "wl1271_ps.h"
29#include "wl12xx_80211.h"
29 30
30static int wl1271_event_scan_complete(struct wl1271 *wl, 31static int wl1271_event_scan_complete(struct wl1271 *wl,
31 struct event_mailbox *mbox) 32 struct event_mailbox *mbox)
32{ 33{
34 int size = sizeof(struct wl12xx_probe_req_template);
33 wl1271_debug(DEBUG_EVENT, "status: 0x%x", 35 wl1271_debug(DEBUG_EVENT, "status: 0x%x",
34 mbox->scheduled_scan_status); 36 mbox->scheduled_scan_status);
35 37
36 if (wl->scanning) { 38 if (wl->scanning) {
37 mutex_unlock(&wl->mutex); 39 if (wl->scan.state == WL1271_SCAN_BAND_DUAL) {
38 ieee80211_scan_completed(wl->hw, false); 40 wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_2_4,
39 mutex_lock(&wl->mutex); 41 NULL, size);
40 wl->scanning = false; 42 /* 2.4 GHz band scanned, scan 5 GHz band, pretend
43 * to the wl1271_cmd_scan function that we are not
44 * scanning as it checks that.
45 */
46 wl->scanning = false;
47 wl1271_cmd_scan(wl, wl->scan.ssid, wl->scan.ssid_len,
48 wl->scan.active,
49 wl->scan.high_prio,
50 WL1271_SCAN_BAND_5_GHZ,
51 wl->scan.probe_requests);
52 } else {
53 if (wl->scan.state == WL1271_SCAN_BAND_2_4_GHZ)
54 wl1271_cmd_template_set(wl,
55 CMD_TEMPL_CFG_PROBE_REQ_2_4,
56 NULL, size);
57 else
58 wl1271_cmd_template_set(wl,
59 CMD_TEMPL_CFG_PROBE_REQ_5,
60 NULL, size);
61
62 mutex_unlock(&wl->mutex);
63 ieee80211_scan_completed(wl->hw, false);
64 mutex_lock(&wl->mutex);
65 wl->scanning = false;
66 }
41 } 67 }
42
43 return 0; 68 return 0;
44} 69}
45 70
71static int wl1271_event_ps_report(struct wl1271 *wl,
72 struct event_mailbox *mbox,
73 bool *beacon_loss)
74{
75 int ret = 0;
76
77 wl1271_debug(DEBUG_EVENT, "ps_status: 0x%x", mbox->ps_status);
78
79 switch (mbox->ps_status) {
80 case EVENT_ENTER_POWER_SAVE_FAIL:
81 if (wl->psm_entry_retry < wl->conf.conn.psm_entry_retries) {
82 wl->psm_entry_retry++;
83 wl1271_error("PSM entry failed, retrying %d\n",
84 wl->psm_entry_retry);
85 ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE);
86 } else {
87 wl->psm_entry_retry = 0;
88 *beacon_loss = true;
89 }
90 break;
91 case EVENT_ENTER_POWER_SAVE_SUCCESS:
92 wl->psm_entry_retry = 0;
93 break;
94 case EVENT_EXIT_POWER_SAVE_FAIL:
95 wl1271_info("PSM exit failed");
96 break;
97 case EVENT_EXIT_POWER_SAVE_SUCCESS:
98 default:
99 break;
100 }
101
102 return ret;
103}
104
46static void wl1271_event_mbox_dump(struct event_mailbox *mbox) 105static void wl1271_event_mbox_dump(struct event_mailbox *mbox)
47{ 106{
48 wl1271_debug(DEBUG_EVENT, "MBOX DUMP:"); 107 wl1271_debug(DEBUG_EVENT, "MBOX DUMP:");
@@ -54,10 +113,12 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
54{ 113{
55 int ret; 114 int ret;
56 u32 vector; 115 u32 vector;
116 bool beacon_loss = false;
57 117
58 wl1271_event_mbox_dump(mbox); 118 wl1271_event_mbox_dump(mbox);
59 119
60 vector = mbox->events_vector & ~(mbox->events_mask); 120 vector = le32_to_cpu(mbox->events_vector);
121 vector &= ~(le32_to_cpu(mbox->events_mask));
61 wl1271_debug(DEBUG_EVENT, "vector: 0x%x", vector); 122 wl1271_debug(DEBUG_EVENT, "vector: 0x%x", vector);
62 123
63 if (vector & SCAN_COMPLETE_EVENT_ID) { 124 if (vector & SCAN_COMPLETE_EVENT_ID) {
@@ -66,14 +127,34 @@ static int wl1271_event_process(struct wl1271 *wl, struct event_mailbox *mbox)
66 return ret; 127 return ret;
67 } 128 }
68 129
69 if (vector & BSS_LOSE_EVENT_ID) { 130 /*
131 * The BSS_LOSE_EVENT_ID is only needed while psm (and hence beacon
132 * filtering) is enabled. Without PSM, the stack will receive all
133 * beacons and can detect beacon loss by itself.
134 */
135 if (vector & BSS_LOSE_EVENT_ID && wl->psm) {
70 wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT"); 136 wl1271_debug(DEBUG_EVENT, "BSS_LOSE_EVENT");
71 137
72 if (wl->psm_requested && wl->psm) { 138 /* indicate to the stack, that beacons have been lost */
73 ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE); 139 beacon_loss = true;
74 if (ret < 0) 140 }
75 return ret; 141
76 } 142 if (vector & PS_REPORT_EVENT_ID) {
143 wl1271_debug(DEBUG_EVENT, "PS_REPORT_EVENT");
144 ret = wl1271_event_ps_report(wl, mbox, &beacon_loss);
145 if (ret < 0)
146 return ret;
147 }
148
149 if (beacon_loss) {
150 /* Obviously, it's dangerous to release the mutex while
151 we are holding many of the variables in the wl struct.
152 That's why it's done last in the function, and care must
153 be taken that nothing more is done after this function
154 returns. */
155 mutex_unlock(&wl->mutex);
156 ieee80211_beacon_loss(wl->vif);
157 mutex_lock(&wl->mutex);
77 } 158 }
78 159
79 return 0; 160 return 0;
@@ -92,14 +173,14 @@ int wl1271_event_unmask(struct wl1271 *wl)
92 173
93void wl1271_event_mbox_config(struct wl1271 *wl) 174void wl1271_event_mbox_config(struct wl1271 *wl)
94{ 175{
95 wl->mbox_ptr[0] = wl1271_reg_read32(wl, REG_EVENT_MAILBOX_PTR); 176 wl->mbox_ptr[0] = wl1271_spi_read32(wl, REG_EVENT_MAILBOX_PTR);
96 wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox); 177 wl->mbox_ptr[1] = wl->mbox_ptr[0] + sizeof(struct event_mailbox);
97 178
98 wl1271_debug(DEBUG_EVENT, "MBOX ptrs: 0x%x 0x%x", 179 wl1271_debug(DEBUG_EVENT, "MBOX ptrs: 0x%x 0x%x",
99 wl->mbox_ptr[0], wl->mbox_ptr[1]); 180 wl->mbox_ptr[0], wl->mbox_ptr[1]);
100} 181}
101 182
102int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num) 183int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num, bool do_ack)
103{ 184{
104 struct event_mailbox mbox; 185 struct event_mailbox mbox;
105 int ret; 186 int ret;
@@ -110,8 +191,8 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
110 return -EINVAL; 191 return -EINVAL;
111 192
112 /* first we read the mbox descriptor */ 193 /* first we read the mbox descriptor */
113 wl1271_spi_mem_read(wl, wl->mbox_ptr[mbox_num], &mbox, 194 wl1271_spi_read(wl, wl->mbox_ptr[mbox_num], &mbox,
114 sizeof(struct event_mailbox)); 195 sizeof(struct event_mailbox), false);
115 196
116 /* process the descriptor */ 197 /* process the descriptor */
117 ret = wl1271_event_process(wl, &mbox); 198 ret = wl1271_event_process(wl, &mbox);
@@ -119,7 +200,9 @@ int wl1271_event_handle(struct wl1271 *wl, u8 mbox_num)
119 return ret; 200 return ret;
120 201
121 /* then we let the firmware know it can go on...*/ 202 /* then we let the firmware know it can go on...*/
122 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_TRIG, INTR_TRIG_EVENT_ACK); 203 if (do_ack)
204 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_TRIG,
205 INTR_TRIG_EVENT_ACK);
123 206
124 return 0; 207 return 0;
125} 208}
diff --git a/drivers/net/wireless/wl12xx/wl1271_event.h b/drivers/net/wireless/wl12xx/wl1271_event.h
index 2cdce7c34bf0..4e3f55ebb1a8 100644
--- a/drivers/net/wireless/wl12xx/wl1271_event.h
+++ b/drivers/net/wireless/wl12xx/wl1271_event.h
@@ -63,36 +63,43 @@ enum {
63 EVENT_MBOX_ALL_EVENT_ID = 0x7fffffff, 63 EVENT_MBOX_ALL_EVENT_ID = 0x7fffffff,
64}; 64};
65 65
66enum {
67 EVENT_ENTER_POWER_SAVE_FAIL = 0,
68 EVENT_ENTER_POWER_SAVE_SUCCESS,
69 EVENT_EXIT_POWER_SAVE_FAIL,
70 EVENT_EXIT_POWER_SAVE_SUCCESS,
71};
72
66struct event_debug_report { 73struct event_debug_report {
67 u8 debug_event_id; 74 u8 debug_event_id;
68 u8 num_params; 75 u8 num_params;
69 u16 pad; 76 __le16 pad;
70 u32 report_1; 77 __le32 report_1;
71 u32 report_2; 78 __le32 report_2;
72 u32 report_3; 79 __le32 report_3;
73} __attribute__ ((packed)); 80} __attribute__ ((packed));
74 81
75#define NUM_OF_RSSI_SNR_TRIGGERS 8 82#define NUM_OF_RSSI_SNR_TRIGGERS 8
76 83
77struct event_mailbox { 84struct event_mailbox {
78 u32 events_vector; 85 __le32 events_vector;
79 u32 events_mask; 86 __le32 events_mask;
80 u32 reserved_1; 87 __le32 reserved_1;
81 u32 reserved_2; 88 __le32 reserved_2;
82 89
83 u8 dbg_event_id; 90 u8 dbg_event_id;
84 u8 num_relevant_params; 91 u8 num_relevant_params;
85 u16 reserved_3; 92 __le16 reserved_3;
86 u32 event_report_p1; 93 __le32 event_report_p1;
87 u32 event_report_p2; 94 __le32 event_report_p2;
88 u32 event_report_p3; 95 __le32 event_report_p3;
89 96
90 u8 number_of_scan_results; 97 u8 number_of_scan_results;
91 u8 scan_tag; 98 u8 scan_tag;
92 u8 reserved_4[2]; 99 u8 reserved_4[2];
93 u32 compl_scheduled_scan_status; 100 __le32 compl_scheduled_scan_status;
94 101
95 u16 scheduled_scan_attended_channels; 102 __le16 scheduled_scan_attended_channels;
96 u8 soft_gemini_sense_info; 103 u8 soft_gemini_sense_info;
97 u8 soft_gemini_protective_info; 104 u8 soft_gemini_protective_info;
98 s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS]; 105 s8 rssi_snr_trigger_metric[NUM_OF_RSSI_SNR_TRIGGERS];
@@ -105,6 +112,6 @@ struct event_mailbox {
105 112
106int wl1271_event_unmask(struct wl1271 *wl); 113int wl1271_event_unmask(struct wl1271 *wl);
107void wl1271_event_mbox_config(struct wl1271 *wl); 114void wl1271_event_mbox_config(struct wl1271 *wl);
108int wl1271_event_handle(struct wl1271 *wl, u8 mbox); 115int wl1271_event_handle(struct wl1271 *wl, u8 mbox, bool do_ack);
109 116
110#endif 117#endif
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.c b/drivers/net/wireless/wl12xx/wl1271_init.c
index 490df217605a..7c2017f480ea 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.c
+++ b/drivers/net/wireless/wl12xx/wl1271_init.c
@@ -59,6 +59,14 @@ static int wl1271_init_templates_config(struct wl1271 *wl)
59 if (ret < 0) 59 if (ret < 0)
60 return ret; 60 return ret;
61 61
62 if (wl1271_11a_enabled()) {
63 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_CFG_PROBE_REQ_5,
64 NULL,
65 sizeof(struct wl12xx_probe_req_template));
66 if (ret < 0)
67 return ret;
68 }
69
62 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL, 70 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_NULL_DATA, NULL,
63 sizeof(struct wl12xx_null_data_template)); 71 sizeof(struct wl12xx_null_data_template));
64 if (ret < 0) 72 if (ret < 0)
@@ -94,7 +102,7 @@ static int wl1271_init_rx_config(struct wl1271 *wl, u32 config, u32 filter)
94{ 102{
95 int ret; 103 int ret;
96 104
97 ret = wl1271_acx_rx_msdu_life_time(wl, RX_MSDU_LIFETIME_DEF); 105 ret = wl1271_acx_rx_msdu_life_time(wl);
98 if (ret < 0) 106 if (ret < 0)
99 return ret; 107 return ret;
100 108
@@ -117,7 +125,7 @@ static int wl1271_init_phy_config(struct wl1271 *wl)
117 if (ret < 0) 125 if (ret < 0)
118 return ret; 126 return ret;
119 127
120 ret = wl1271_acx_group_address_tbl(wl); 128 ret = wl1271_acx_group_address_tbl(wl, true, NULL, 0);
121 if (ret < 0) 129 if (ret < 0)
122 return ret; 130 return ret;
123 131
@@ -125,7 +133,7 @@ static int wl1271_init_phy_config(struct wl1271 *wl)
125 if (ret < 0) 133 if (ret < 0)
126 return ret; 134 return ret;
127 135
128 ret = wl1271_acx_rts_threshold(wl, RTS_THRESHOLD_DEF); 136 ret = wl1271_acx_rts_threshold(wl, wl->conf.rx.rts_threshold);
129 if (ret < 0) 137 if (ret < 0)
130 return ret; 138 return ret;
131 139
@@ -136,7 +144,8 @@ static int wl1271_init_beacon_filter(struct wl1271 *wl)
136{ 144{
137 int ret; 145 int ret;
138 146
139 ret = wl1271_acx_beacon_filter_opt(wl); 147 /* disable beacon filtering at this stage */
148 ret = wl1271_acx_beacon_filter_opt(wl, false);
140 if (ret < 0) 149 if (ret < 0)
141 return ret; 150 return ret;
142 151
@@ -187,6 +196,7 @@ static int wl1271_init_beacon_broadcast(struct wl1271 *wl)
187static int wl1271_init_general_parms(struct wl1271 *wl) 196static int wl1271_init_general_parms(struct wl1271 *wl)
188{ 197{
189 struct wl1271_general_parms *gen_parms; 198 struct wl1271_general_parms *gen_parms;
199 struct conf_general_parms *g = &wl->conf.init.genparam;
190 int ret; 200 int ret;
191 201
192 gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL); 202 gen_parms = kzalloc(sizeof(*gen_parms), GFP_KERNEL);
@@ -195,15 +205,14 @@ static int wl1271_init_general_parms(struct wl1271 *wl)
195 205
196 gen_parms->id = TEST_CMD_INI_FILE_GENERAL_PARAM; 206 gen_parms->id = TEST_CMD_INI_FILE_GENERAL_PARAM;
197 207
198 gen_parms->ref_clk = REF_CLK_38_4_E; 208 gen_parms->ref_clk = g->ref_clk;
199 /* FIXME: magic numbers */ 209 gen_parms->settling_time = g->settling_time;
200 gen_parms->settling_time = 5; 210 gen_parms->clk_valid_on_wakeup = g->clk_valid_on_wakeup;
201 gen_parms->clk_valid_on_wakeup = 0; 211 gen_parms->dc2dcmode = g->dc2dcmode;
202 gen_parms->dc2dcmode = 0; 212 gen_parms->single_dual_band = g->single_dual_band;
203 gen_parms->single_dual_band = 0; 213 gen_parms->tx_bip_fem_autodetect = g->tx_bip_fem_autodetect;
204 gen_parms->tx_bip_fem_autodetect = 1; 214 gen_parms->tx_bip_fem_manufacturer = g->tx_bip_fem_manufacturer;
205 gen_parms->tx_bip_fem_manufacturer = 1; 215 gen_parms->settings = g->settings;
206 gen_parms->settings = 1;
207 216
208 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0); 217 ret = wl1271_cmd_test(wl, gen_parms, sizeof(*gen_parms), 0);
209 if (ret < 0) { 218 if (ret < 0) {
@@ -217,32 +226,9 @@ static int wl1271_init_general_parms(struct wl1271 *wl)
217 226
218static int wl1271_init_radio_parms(struct wl1271 *wl) 227static int wl1271_init_radio_parms(struct wl1271 *wl)
219{ 228{
220 /*
221 * FIXME: All these magic numbers should be moved to some place where
222 * they can be configured (separate file?)
223 */
224
225 struct wl1271_radio_parms *radio_parms; 229 struct wl1271_radio_parms *radio_parms;
226 int ret; 230 struct conf_radio_parms *r = &wl->conf.init.radioparam;
227 u8 compensation[] = { 0xec, 0xf6, 0x00, 0x0c, 0x18, 0xf8, 0xfc, 0x00, 231 int i, ret;
228 0x08, 0x10, 0xf0, 0xf8, 0x00, 0x0a, 0x14 };
229
230 u8 tx_rate_limits_normal[] = { 0x1e, 0x1f, 0x22, 0x24, 0x28, 0x29 };
231 u8 tx_rate_limits_degraded[] = { 0x1b, 0x1c, 0x1e, 0x20, 0x24, 0x25 };
232
233 u8 tx_channel_limits_11b[] = { 0x22, 0x50, 0x50, 0x50,
234 0x50, 0x50, 0x50, 0x50,
235 0x50, 0x50, 0x22, 0x50,
236 0x22, 0x50 };
237
238 u8 tx_channel_limits_ofdm[] = { 0x20, 0x50, 0x50, 0x50,
239 0x50, 0x50, 0x50, 0x50,
240 0x50, 0x50, 0x20, 0x50,
241 0x20, 0x50 };
242
243 u8 tx_pdv_rate_offsets[] = { 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
244
245 u8 tx_ibias[] = { 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x27 };
246 232
247 radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL); 233 radio_parms = kzalloc(sizeof(*radio_parms), GFP_KERNEL);
248 if (!radio_parms) 234 if (!radio_parms)
@@ -251,33 +237,59 @@ static int wl1271_init_radio_parms(struct wl1271 *wl)
251 radio_parms->id = TEST_CMD_INI_FILE_RADIO_PARAM; 237 radio_parms->id = TEST_CMD_INI_FILE_RADIO_PARAM;
252 238
253 /* Static radio parameters */ 239 /* Static radio parameters */
254 radio_parms->rx_trace_loss = 10; 240 radio_parms->rx_trace_loss = r->rx_trace_loss;
255 radio_parms->tx_trace_loss = 10; 241 radio_parms->tx_trace_loss = r->tx_trace_loss;
256 memcpy(radio_parms->rx_rssi_and_proc_compens, compensation, 242 memcpy(radio_parms->rx_rssi_and_proc_compens,
257 sizeof(compensation)); 243 r->rx_rssi_and_proc_compens,
258 244 CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE);
259 /* We don't set the 5GHz -- N/A */ 245
246 memcpy(radio_parms->rx_trace_loss_5, r->rx_trace_loss_5,
247 CONF_NUMBER_OF_SUB_BANDS_5);
248 memcpy(radio_parms->tx_trace_loss_5, r->tx_trace_loss_5,
249 CONF_NUMBER_OF_SUB_BANDS_5);
250 memcpy(radio_parms->rx_rssi_and_proc_compens_5,
251 r->rx_rssi_and_proc_compens_5,
252 CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE);
260 253
261 /* Dynamic radio parameters */ 254 /* Dynamic radio parameters */
262 radio_parms->tx_ref_pd_voltage = cpu_to_le16(0x24e); 255 radio_parms->tx_ref_pd_voltage = cpu_to_le16(r->tx_ref_pd_voltage);
263 radio_parms->tx_ref_power = 0x78; 256 radio_parms->tx_ref_power = r->tx_ref_power;
264 radio_parms->tx_offset_db = 0x0; 257 radio_parms->tx_offset_db = r->tx_offset_db;
265 258
266 memcpy(radio_parms->tx_rate_limits_normal, tx_rate_limits_normal, 259 memcpy(radio_parms->tx_rate_limits_normal, r->tx_rate_limits_normal,
267 sizeof(tx_rate_limits_normal)); 260 CONF_NUMBER_OF_RATE_GROUPS);
268 memcpy(radio_parms->tx_rate_limits_degraded, tx_rate_limits_degraded, 261 memcpy(radio_parms->tx_rate_limits_degraded, r->tx_rate_limits_degraded,
269 sizeof(tx_rate_limits_degraded)); 262 CONF_NUMBER_OF_RATE_GROUPS);
270 263
271 memcpy(radio_parms->tx_channel_limits_11b, tx_channel_limits_11b, 264 memcpy(radio_parms->tx_channel_limits_11b, r->tx_channel_limits_11b,
272 sizeof(tx_channel_limits_11b)); 265 CONF_NUMBER_OF_CHANNELS_2_4);
273 memcpy(radio_parms->tx_channel_limits_ofdm, tx_channel_limits_ofdm, 266 memcpy(radio_parms->tx_channel_limits_ofdm, r->tx_channel_limits_ofdm,
274 sizeof(tx_channel_limits_ofdm)); 267 CONF_NUMBER_OF_CHANNELS_2_4);
275 memcpy(radio_parms->tx_pdv_rate_offsets, tx_pdv_rate_offsets, 268 memcpy(radio_parms->tx_pdv_rate_offsets, r->tx_pdv_rate_offsets,
276 sizeof(tx_pdv_rate_offsets)); 269 CONF_NUMBER_OF_RATE_GROUPS);
277 memcpy(radio_parms->tx_ibias, tx_ibias, 270 memcpy(radio_parms->tx_ibias, r->tx_ibias, CONF_NUMBER_OF_RATE_GROUPS);
278 sizeof(tx_ibias)); 271
279 272 radio_parms->rx_fem_insertion_loss = r->rx_fem_insertion_loss;
280 radio_parms->rx_fem_insertion_loss = 0x14; 273
274 for (i = 0; i < CONF_NUMBER_OF_SUB_BANDS_5; i++)
275 radio_parms->tx_ref_pd_voltage_5[i] =
276 cpu_to_le16(r->tx_ref_pd_voltage_5[i]);
277 memcpy(radio_parms->tx_ref_power_5, r->tx_ref_power_5,
278 CONF_NUMBER_OF_SUB_BANDS_5);
279 memcpy(radio_parms->tx_offset_db_5, r->tx_offset_db_5,
280 CONF_NUMBER_OF_SUB_BANDS_5);
281 memcpy(radio_parms->tx_rate_limits_normal_5,
282 r->tx_rate_limits_normal_5, CONF_NUMBER_OF_RATE_GROUPS);
283 memcpy(radio_parms->tx_rate_limits_degraded_5,
284 r->tx_rate_limits_degraded_5, CONF_NUMBER_OF_RATE_GROUPS);
285 memcpy(radio_parms->tx_channel_limits_ofdm_5,
286 r->tx_channel_limits_ofdm_5, CONF_NUMBER_OF_CHANNELS_5);
287 memcpy(radio_parms->tx_pdv_rate_offsets_5, r->tx_pdv_rate_offsets_5,
288 CONF_NUMBER_OF_RATE_GROUPS);
289 memcpy(radio_parms->tx_ibias_5, r->tx_ibias_5,
290 CONF_NUMBER_OF_RATE_GROUPS);
291 memcpy(radio_parms->rx_fem_insertion_loss_5,
292 r->rx_fem_insertion_loss_5, CONF_NUMBER_OF_SUB_BANDS_5);
281 293
282 ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0); 294 ret = wl1271_cmd_test(wl, radio_parms, sizeof(*radio_parms), 0);
283 if (ret < 0) 295 if (ret < 0)
@@ -291,12 +303,15 @@ int wl1271_hw_init(struct wl1271 *wl)
291{ 303{
292 int ret; 304 int ret;
293 305
306 /* FIXME: the following parameter setting functions return error
307 * codes - the reason is so far unknown. The -EIO is therefore
308 * ignored for the time being. */
294 ret = wl1271_init_general_parms(wl); 309 ret = wl1271_init_general_parms(wl);
295 if (ret < 0) 310 if (ret < 0 && ret != -EIO)
296 return ret; 311 return ret;
297 312
298 ret = wl1271_init_radio_parms(wl); 313 ret = wl1271_init_radio_parms(wl);
299 if (ret < 0) 314 if (ret < 0 && ret != -EIO)
300 return ret; 315 return ret;
301 316
302 /* Template settings */ 317 /* Template settings */
@@ -311,8 +326,8 @@ int wl1271_hw_init(struct wl1271 *wl)
311 326
312 /* RX config */ 327 /* RX config */
313 ret = wl1271_init_rx_config(wl, 328 ret = wl1271_init_rx_config(wl,
314 RX_CFG_PROMISCUOUS | RX_CFG_TSF, 329 RX_CFG_PROMISCUOUS | RX_CFG_TSF,
315 RX_FILTER_OPTION_DEF); 330 RX_FILTER_OPTION_DEF);
316 /* RX_CONFIG_OPTION_ANY_DST_ANY_BSS, 331 /* RX_CONFIG_OPTION_ANY_DST_ANY_BSS,
317 RX_FILTER_OPTION_FILTER_ALL); */ 332 RX_FILTER_OPTION_FILTER_ALL); */
318 if (ret < 0) 333 if (ret < 0)
@@ -323,6 +338,11 @@ int wl1271_hw_init(struct wl1271 *wl)
323 if (ret < 0) 338 if (ret < 0)
324 goto out_free_memmap; 339 goto out_free_memmap;
325 340
341 /* Initialize connection monitoring thresholds */
342 ret = wl1271_acx_conn_monit_params(wl);
343 if (ret < 0)
344 goto out_free_memmap;
345
326 /* Beacon filtering */ 346 /* Beacon filtering */
327 ret = wl1271_init_beacon_filter(wl); 347 ret = wl1271_init_beacon_filter(wl);
328 if (ret < 0) 348 if (ret < 0)
@@ -369,7 +389,7 @@ int wl1271_hw_init(struct wl1271 *wl)
369 goto out_free_memmap; 389 goto out_free_memmap;
370 390
371 /* Configure TX rate classes */ 391 /* Configure TX rate classes */
372 ret = wl1271_acx_rate_policies(wl); 392 ret = wl1271_acx_rate_policies(wl, CONF_TX_RATE_MASK_ALL);
373 if (ret < 0) 393 if (ret < 0)
374 goto out_free_memmap; 394 goto out_free_memmap;
375 395
@@ -388,10 +408,16 @@ int wl1271_hw_init(struct wl1271 *wl)
388 if (ret < 0) 408 if (ret < 0)
389 goto out_free_memmap; 409 goto out_free_memmap;
390 410
411 /* Configure smart reflex */
412 ret = wl1271_acx_smart_reflex(wl);
413 if (ret < 0)
414 goto out_free_memmap;
415
391 return 0; 416 return 0;
392 417
393 out_free_memmap: 418 out_free_memmap:
394 kfree(wl->target_mem_map); 419 kfree(wl->target_mem_map);
420 wl->target_mem_map = NULL;
395 421
396 return ret; 422 return ret;
397} 423}
diff --git a/drivers/net/wireless/wl12xx/wl1271_init.h b/drivers/net/wireless/wl12xx/wl1271_init.h
index bd8ff0fa2272..6e21ceee76a6 100644
--- a/drivers/net/wireless/wl12xx/wl1271_init.h
+++ b/drivers/net/wireless/wl12xx/wl1271_init.h
@@ -48,19 +48,6 @@ struct wl1271_general_parms {
48 u8 settings; 48 u8 settings;
49} __attribute__ ((packed)); 49} __attribute__ ((packed));
50 50
51enum ref_clk_enum {
52 REF_CLK_19_2_E,
53 REF_CLK_26_E,
54 REF_CLK_38_4_E,
55 REF_CLK_52_E
56};
57
58#define RSSI_AND_PROCESS_COMPENSATION_SIZE 15
59#define NUMBER_OF_SUB_BANDS_5 7
60#define NUMBER_OF_RATE_GROUPS 6
61#define NUMBER_OF_CHANNELS_2_4 14
62#define NUMBER_OF_CHANNELS_5 35
63
64struct wl1271_radio_parms { 51struct wl1271_radio_parms {
65 u8 id; 52 u8 id;
66 u8 padding[3]; 53 u8 padding[3];
@@ -69,45 +56,45 @@ struct wl1271_radio_parms {
69 /* 2.4GHz */ 56 /* 2.4GHz */
70 u8 rx_trace_loss; 57 u8 rx_trace_loss;
71 u8 tx_trace_loss; 58 u8 tx_trace_loss;
72 s8 rx_rssi_and_proc_compens[RSSI_AND_PROCESS_COMPENSATION_SIZE]; 59 s8 rx_rssi_and_proc_compens[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
73 60
74 /* 5GHz */ 61 /* 5GHz */
75 u8 rx_trace_loss_5[NUMBER_OF_SUB_BANDS_5]; 62 u8 rx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
76 u8 tx_trace_loss_5[NUMBER_OF_SUB_BANDS_5]; 63 u8 tx_trace_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
77 s8 rx_rssi_and_proc_compens_5[RSSI_AND_PROCESS_COMPENSATION_SIZE]; 64 s8 rx_rssi_and_proc_compens_5[CONF_RSSI_AND_PROCESS_COMPENSATION_SIZE];
78 65
79 /* Dynamic radio parameters */ 66 /* Dynamic radio parameters */
80 /* 2.4GHz */ 67 /* 2.4GHz */
81 s16 tx_ref_pd_voltage; 68 __le16 tx_ref_pd_voltage;
82 s8 tx_ref_power; 69 s8 tx_ref_power;
83 s8 tx_offset_db; 70 s8 tx_offset_db;
84 71
85 s8 tx_rate_limits_normal[NUMBER_OF_RATE_GROUPS]; 72 s8 tx_rate_limits_normal[CONF_NUMBER_OF_RATE_GROUPS];
86 s8 tx_rate_limits_degraded[NUMBER_OF_RATE_GROUPS]; 73 s8 tx_rate_limits_degraded[CONF_NUMBER_OF_RATE_GROUPS];
87 74
88 s8 tx_channel_limits_11b[NUMBER_OF_CHANNELS_2_4]; 75 s8 tx_channel_limits_11b[CONF_NUMBER_OF_CHANNELS_2_4];
89 s8 tx_channel_limits_ofdm[NUMBER_OF_CHANNELS_2_4]; 76 s8 tx_channel_limits_ofdm[CONF_NUMBER_OF_CHANNELS_2_4];
90 s8 tx_pdv_rate_offsets[NUMBER_OF_RATE_GROUPS]; 77 s8 tx_pdv_rate_offsets[CONF_NUMBER_OF_RATE_GROUPS];
91 78
92 u8 tx_ibias[NUMBER_OF_RATE_GROUPS]; 79 u8 tx_ibias[CONF_NUMBER_OF_RATE_GROUPS];
93 u8 rx_fem_insertion_loss; 80 u8 rx_fem_insertion_loss;
94 81
95 u8 padding2; 82 u8 padding2;
96 83
97 /* 5GHz */ 84 /* 5GHz */
98 s16 tx_ref_pd_voltage_5[NUMBER_OF_SUB_BANDS_5]; 85 __le16 tx_ref_pd_voltage_5[CONF_NUMBER_OF_SUB_BANDS_5];
99 s8 tx_ref_power_5[NUMBER_OF_SUB_BANDS_5]; 86 s8 tx_ref_power_5[CONF_NUMBER_OF_SUB_BANDS_5];
100 s8 tx_offset_db_5[NUMBER_OF_SUB_BANDS_5]; 87 s8 tx_offset_db_5[CONF_NUMBER_OF_SUB_BANDS_5];
101 88
102 s8 tx_rate_limits_normal_5[NUMBER_OF_RATE_GROUPS]; 89 s8 tx_rate_limits_normal_5[CONF_NUMBER_OF_RATE_GROUPS];
103 s8 tx_rate_limits_degraded_5[NUMBER_OF_RATE_GROUPS]; 90 s8 tx_rate_limits_degraded_5[CONF_NUMBER_OF_RATE_GROUPS];
104 91
105 s8 tx_channel_limits_ofdm_5[NUMBER_OF_CHANNELS_5]; 92 s8 tx_channel_limits_ofdm_5[CONF_NUMBER_OF_CHANNELS_5];
106 s8 tx_pdv_rate_offsets_5[NUMBER_OF_RATE_GROUPS]; 93 s8 tx_pdv_rate_offsets_5[CONF_NUMBER_OF_RATE_GROUPS];
107 94
108 /* FIXME: this is inconsistent with the types for 2.4GHz */ 95 /* FIXME: this is inconsistent with the types for 2.4GHz */
109 s8 tx_ibias_5[NUMBER_OF_RATE_GROUPS]; 96 s8 tx_ibias_5[CONF_NUMBER_OF_RATE_GROUPS];
110 s8 rx_fem_insertion_loss_5[NUMBER_OF_SUB_BANDS_5]; 97 s8 rx_fem_insertion_loss_5[CONF_NUMBER_OF_SUB_BANDS_5];
111 98
112 u8 padding3[2]; 99 u8 padding3[2];
113} __attribute__ ((packed)); 100} __attribute__ ((packed));
diff --git a/drivers/net/wireless/wl12xx/wl1271_main.c b/drivers/net/wireless/wl12xx/wl1271_main.c
index 27298b19d5bd..d2149fcd3cf1 100644
--- a/drivers/net/wireless/wl12xx/wl1271_main.c
+++ b/drivers/net/wireless/wl12xx/wl1271_main.c
@@ -30,7 +30,9 @@
30#include <linux/spi/spi.h> 30#include <linux/spi/spi.h>
31#include <linux/crc32.h> 31#include <linux/crc32.h>
32#include <linux/etherdevice.h> 32#include <linux/etherdevice.h>
33#include <linux/vmalloc.h>
33#include <linux/spi/wl12xx.h> 34#include <linux/spi/wl12xx.h>
35#include <linux/inetdevice.h>
34 36
35#include "wl1271.h" 37#include "wl1271.h"
36#include "wl12xx_80211.h" 38#include "wl12xx_80211.h"
@@ -45,6 +47,309 @@
45#include "wl1271_cmd.h" 47#include "wl1271_cmd.h"
46#include "wl1271_boot.h" 48#include "wl1271_boot.h"
47 49
50static struct conf_drv_settings default_conf = {
51 .sg = {
52 .per_threshold = 7500,
53 .max_scan_compensation_time = 120000,
54 .nfs_sample_interval = 400,
55 .load_ratio = 50,
56 .auto_ps_mode = 0,
57 .probe_req_compensation = 170,
58 .scan_window_compensation = 50,
59 .antenna_config = 0,
60 .beacon_miss_threshold = 60,
61 .rate_adaptation_threshold = CONF_HW_BIT_RATE_12MBPS,
62 .rate_adaptation_snr = 0
63 },
64 .rx = {
65 .rx_msdu_life_time = 512000,
66 .packet_detection_threshold = 0,
67 .ps_poll_timeout = 15,
68 .upsd_timeout = 15,
69 .rts_threshold = 2347,
70 .rx_cca_threshold = 0xFFEF,
71 .irq_blk_threshold = 0,
72 .irq_pkt_threshold = USHORT_MAX,
73 .irq_timeout = 5,
74 .queue_type = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
75 },
76 .tx = {
77 .tx_energy_detection = 0,
78 .rc_conf = {
79 .enabled_rates = CONF_TX_RATE_MASK_UNSPECIFIED,
80 .short_retry_limit = 10,
81 .long_retry_limit = 10,
82 .aflags = 0
83 },
84 .ac_conf_count = 4,
85 .ac_conf = {
86 [0] = {
87 .ac = CONF_TX_AC_BE,
88 .cw_min = 15,
89 .cw_max = 63,
90 .aifsn = 3,
91 .tx_op_limit = 0,
92 },
93 [1] = {
94 .ac = CONF_TX_AC_BK,
95 .cw_min = 15,
96 .cw_max = 63,
97 .aifsn = 7,
98 .tx_op_limit = 0,
99 },
100 [2] = {
101 .ac = CONF_TX_AC_VI,
102 .cw_min = 15,
103 .cw_max = 63,
104 .aifsn = CONF_TX_AIFS_PIFS,
105 .tx_op_limit = 3008,
106 },
107 [3] = {
108 .ac = CONF_TX_AC_VO,
109 .cw_min = 15,
110 .cw_max = 63,
111 .aifsn = CONF_TX_AIFS_PIFS,
112 .tx_op_limit = 1504,
113 },
114 },
115 .tid_conf_count = 7,
116 .tid_conf = {
117 [0] = {
118 .queue_id = 0,
119 .channel_type = CONF_CHANNEL_TYPE_DCF,
120 .tsid = CONF_TX_AC_BE,
121 .ps_scheme = CONF_PS_SCHEME_LEGACY,
122 .ack_policy = CONF_ACK_POLICY_LEGACY,
123 .apsd_conf = {0, 0},
124 },
125 [1] = {
126 .queue_id = 1,
127 .channel_type = CONF_CHANNEL_TYPE_DCF,
128 .tsid = CONF_TX_AC_BE,
129 .ps_scheme = CONF_PS_SCHEME_LEGACY,
130 .ack_policy = CONF_ACK_POLICY_LEGACY,
131 .apsd_conf = {0, 0},
132 },
133 [2] = {
134 .queue_id = 2,
135 .channel_type = CONF_CHANNEL_TYPE_DCF,
136 .tsid = CONF_TX_AC_BE,
137 .ps_scheme = CONF_PS_SCHEME_LEGACY,
138 .ack_policy = CONF_ACK_POLICY_LEGACY,
139 .apsd_conf = {0, 0},
140 },
141 [3] = {
142 .queue_id = 3,
143 .channel_type = CONF_CHANNEL_TYPE_DCF,
144 .tsid = CONF_TX_AC_BE,
145 .ps_scheme = CONF_PS_SCHEME_LEGACY,
146 .ack_policy = CONF_ACK_POLICY_LEGACY,
147 .apsd_conf = {0, 0},
148 },
149 [4] = {
150 .queue_id = 4,
151 .channel_type = CONF_CHANNEL_TYPE_DCF,
152 .tsid = CONF_TX_AC_BE,
153 .ps_scheme = CONF_PS_SCHEME_LEGACY,
154 .ack_policy = CONF_ACK_POLICY_LEGACY,
155 .apsd_conf = {0, 0},
156 },
157 [5] = {
158 .queue_id = 5,
159 .channel_type = CONF_CHANNEL_TYPE_DCF,
160 .tsid = CONF_TX_AC_BE,
161 .ps_scheme = CONF_PS_SCHEME_LEGACY,
162 .ack_policy = CONF_ACK_POLICY_LEGACY,
163 .apsd_conf = {0, 0},
164 },
165 [6] = {
166 .queue_id = 6,
167 .channel_type = CONF_CHANNEL_TYPE_DCF,
168 .tsid = CONF_TX_AC_BE,
169 .ps_scheme = CONF_PS_SCHEME_LEGACY,
170 .ack_policy = CONF_ACK_POLICY_LEGACY,
171 .apsd_conf = {0, 0},
172 }
173 },
174 .frag_threshold = IEEE80211_MAX_FRAG_THRESHOLD,
175 .tx_compl_timeout = 5,
176 .tx_compl_threshold = 5
177 },
178 .conn = {
179 .wake_up_event = CONF_WAKE_UP_EVENT_DTIM,
180 .listen_interval = 0,
181 .bcn_filt_mode = CONF_BCN_FILT_MODE_ENABLED,
182 .bcn_filt_ie_count = 1,
183 .bcn_filt_ie = {
184 [0] = {
185 .ie = WLAN_EID_CHANNEL_SWITCH,
186 .rule = CONF_BCN_RULE_PASS_ON_APPEARANCE,
187 }
188 },
189 .synch_fail_thold = 5,
190 .bss_lose_timeout = 100,
191 .beacon_rx_timeout = 10000,
192 .broadcast_timeout = 20000,
193 .rx_broadcast_in_ps = 1,
194 .ps_poll_threshold = 4,
195 .sig_trigger_count = 2,
196 .sig_trigger = {
197 [0] = {
198 .threshold = -75,
199 .pacing = 500,
200 .metric = CONF_TRIG_METRIC_RSSI_BEACON,
201 .type = CONF_TRIG_EVENT_TYPE_EDGE,
202 .direction = CONF_TRIG_EVENT_DIR_LOW,
203 .hysteresis = 2,
204 .index = 0,
205 .enable = 1
206 },
207 [1] = {
208 .threshold = -75,
209 .pacing = 500,
210 .metric = CONF_TRIG_METRIC_RSSI_BEACON,
211 .type = CONF_TRIG_EVENT_TYPE_EDGE,
212 .direction = CONF_TRIG_EVENT_DIR_HIGH,
213 .hysteresis = 2,
214 .index = 1,
215 .enable = 1
216 }
217 },
218 .sig_weights = {
219 .rssi_bcn_avg_weight = 10,
220 .rssi_pkt_avg_weight = 10,
221 .snr_bcn_avg_weight = 10,
222 .snr_pkt_avg_weight = 10
223 },
224 .bet_enable = CONF_BET_MODE_ENABLE,
225 .bet_max_consecutive = 100,
226 .psm_entry_retries = 3
227 },
228 .init = {
229 .sr_err_tbl = {
230 [0] = {
231 .len = 7,
232 .upper_limit = 0x03,
233 .values = {
234 0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
235 0x00 }
236 },
237 [1] = {
238 .len = 7,
239 .upper_limit = 0x03,
240 .values = {
241 0x18, 0x10, 0x05, 0xf6, 0xf0, 0xe8,
242 0x00 }
243 },
244 [2] = {
245 .len = 7,
246 .upper_limit = 0x03,
247 .values = {
248 0x18, 0x10, 0x05, 0xfb, 0xf0, 0xe8,
249 0x00 }
250 }
251 },
252 .sr_enable = 1,
253 .genparam = {
254 /*
255 * FIXME: The correct value CONF_REF_CLK_38_4_E
256 * causes the firmware to crash on boot.
257 * The value 5 apparently is an
258 * unnoficial XTAL configuration of the
259 * same frequency, which appears to work.
260 */
261 .ref_clk = 5,
262 .settling_time = 5,
263 .clk_valid_on_wakeup = 0,
264 .dc2dcmode = 0,
265 .single_dual_band = CONF_SINGLE_BAND,
266 .tx_bip_fem_autodetect = 0,
267 .tx_bip_fem_manufacturer = 1,
268 .settings = 1,
269 },
270 .radioparam = {
271 .rx_trace_loss = 10,
272 .tx_trace_loss = 10,
273 .rx_rssi_and_proc_compens = {
274 0xec, 0xf6, 0x00, 0x0c, 0x18, 0xf8,
275 0xfc, 0x00, 0x08, 0x10, 0xf0, 0xf8,
276 0x00, 0x0a, 0x14 },
277 .rx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 },
278 .tx_trace_loss_5 = { 0, 0, 0, 0, 0, 0, 0 },
279 .rx_rssi_and_proc_compens_5 = {
280 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
281 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
282 0x00, 0x00, 0x00 },
283 .tx_ref_pd_voltage = 0x24e,
284 .tx_ref_power = 0x78,
285 .tx_offset_db = 0x0,
286 .tx_rate_limits_normal = {
287 0x1e, 0x1f, 0x22, 0x24, 0x28, 0x29 },
288 .tx_rate_limits_degraded = {
289 0x1b, 0x1c, 0x1e, 0x20, 0x24, 0x25 },
290 .tx_channel_limits_11b = {
291 0x22, 0x50, 0x50, 0x50, 0x50, 0x50,
292 0x50, 0x50, 0x50, 0x50, 0x22, 0x50,
293 0x22, 0x50 },
294 .tx_channel_limits_ofdm = {
295 0x20, 0x50, 0x50, 0x50, 0x50, 0x50,
296 0x50, 0x50, 0x50, 0x50, 0x20, 0x50,
297 0x20, 0x50 },
298 .tx_pdv_rate_offsets = {
299 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
300 .tx_ibias = {
301 0x1a, 0x1a, 0x1a, 0x1a, 0x1a, 0x27 },
302 .rx_fem_insertion_loss = 0x14,
303 .tx_ref_pd_voltage_5 = {
304 0x0190, 0x01a4, 0x01c3, 0x01d8,
305 0x020a, 0x021c },
306 .tx_ref_power_5 = {
307 0x80, 0x80, 0x80, 0x80, 0x80, 0x80, 0x80 },
308 .tx_offset_db_5 = {
309 0x00, 0x00, 0x00, 0x00, 0x00, 0x00 },
310 .tx_rate_limits_normal_5 = {
311 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
312 .tx_rate_limits_degraded_5 = {
313 0x1b, 0x1e, 0x21, 0x23, 0x27, 0x00 },
314 .tx_channel_limits_ofdm_5 = {
315 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
316 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
317 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
318 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50, 0x50,
319 0x50, 0x50, 0x50 },
320 .tx_pdv_rate_offsets_5 = {
321 0x01, 0x02, 0x02, 0x02, 0x02, 0x00 },
322 .tx_ibias_5 = {
323 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 },
324 .rx_fem_insertion_loss_5 = {
325 0x10, 0x10, 0x10, 0x10, 0x10, 0x10, 0x10 }
326 }
327 }
328};
329
330static LIST_HEAD(wl_list);
331
332static void wl1271_conf_init(struct wl1271 *wl)
333{
334
335 /*
336 * This function applies the default configuration to the driver. This
337 * function is invoked upon driver load (spi probe.)
338 *
339 * The configuration is stored in a run-time structure in order to
340 * facilitate for run-time adjustment of any of the parameters. Making
341 * changes to the configuration structure will apply the new values on
342 * the next interface up (wl1271_op_start.)
343 */
344
345 /* apply driver default configuration */
346 memcpy(&wl->conf, &default_conf, sizeof(default_conf));
347
348 if (wl1271_11a_enabled())
349 wl->conf.init.genparam.single_dual_band = CONF_DUAL_BAND;
350}
351
352
48static int wl1271_plt_init(struct wl1271 *wl) 353static int wl1271_plt_init(struct wl1271 *wl)
49{ 354{
50 int ret; 355 int ret;
@@ -75,20 +380,14 @@ static void wl1271_power_on(struct wl1271 *wl)
75 wl->set_power(true); 380 wl->set_power(true);
76} 381}
77 382
78static void wl1271_fw_status(struct wl1271 *wl, struct wl1271_fw_status *status) 383static void wl1271_fw_status(struct wl1271 *wl,
384 struct wl1271_fw_status *status)
79{ 385{
80 u32 total = 0; 386 u32 total = 0;
81 int i; 387 int i;
82 388
83 /* 389 wl1271_spi_read(wl, FW_STATUS_ADDR, status,
84 * FIXME: Reading the FW status directly from the registers seems to 390 sizeof(*status), false);
85 * be the right thing to do, but it doesn't work. And in the
86 * reference driver, there is a workaround called
87 * USE_SDIO_24M_WORKAROUND, which reads the status from memory
88 * instead, so we do the same here.
89 */
90
91 wl1271_spi_mem_read(wl, STATUS_MEM_ADDRESS, status, sizeof(*status));
92 391
93 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, " 392 wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
94 "drv_rx_counter = %d, tx_results_counter = %d)", 393 "drv_rx_counter = %d, tx_results_counter = %d)",
@@ -99,25 +398,28 @@ static void wl1271_fw_status(struct wl1271 *wl, struct wl1271_fw_status *status)
99 398
100 /* update number of available TX blocks */ 399 /* update number of available TX blocks */
101 for (i = 0; i < NUM_TX_QUEUES; i++) { 400 for (i = 0; i < NUM_TX_QUEUES; i++) {
102 u32 cnt = status->tx_released_blks[i] - wl->tx_blocks_freed[i]; 401 u32 cnt = le32_to_cpu(status->tx_released_blks[i]) -
103 wl->tx_blocks_freed[i] = status->tx_released_blks[i]; 402 wl->tx_blocks_freed[i];
403
404 wl->tx_blocks_freed[i] =
405 le32_to_cpu(status->tx_released_blks[i]);
104 wl->tx_blocks_available += cnt; 406 wl->tx_blocks_available += cnt;
105 total += cnt; 407 total += cnt;
106 } 408 }
107 409
108 /* if more blocks are available now, schedule some tx work */ 410 /* if more blocks are available now, schedule some tx work */
109 if (total && !skb_queue_empty(&wl->tx_queue)) 411 if (total && !skb_queue_empty(&wl->tx_queue))
110 schedule_work(&wl->tx_work); 412 ieee80211_queue_work(wl->hw, &wl->tx_work);
111 413
112 /* update the host-chipset time offset */ 414 /* update the host-chipset time offset */
113 wl->time_offset = jiffies_to_usecs(jiffies) - status->fw_localtime; 415 wl->time_offset = jiffies_to_usecs(jiffies) -
416 le32_to_cpu(status->fw_localtime);
114} 417}
115 418
116#define WL1271_IRQ_MAX_LOOPS 10
117static void wl1271_irq_work(struct work_struct *work) 419static void wl1271_irq_work(struct work_struct *work)
118{ 420{
119 u32 intr, ctr = WL1271_IRQ_MAX_LOOPS;
120 int ret; 421 int ret;
422 u32 intr;
121 struct wl1271 *wl = 423 struct wl1271 *wl =
122 container_of(work, struct wl1271, irq_work); 424 container_of(work, struct wl1271, irq_work);
123 425
@@ -132,9 +434,10 @@ static void wl1271_irq_work(struct work_struct *work)
132 if (ret < 0) 434 if (ret < 0)
133 goto out; 435 goto out;
134 436
135 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL); 437 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK, WL1271_ACX_INTR_ALL);
136 438
137 intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR); 439 wl1271_fw_status(wl, wl->fw_status);
440 intr = le32_to_cpu(wl->fw_status->intr);
138 if (!intr) { 441 if (!intr) {
139 wl1271_debug(DEBUG_IRQ, "Zero interrupt received."); 442 wl1271_debug(DEBUG_IRQ, "Zero interrupt received.");
140 goto out_sleep; 443 goto out_sleep;
@@ -142,46 +445,39 @@ static void wl1271_irq_work(struct work_struct *work)
142 445
143 intr &= WL1271_INTR_MASK; 446 intr &= WL1271_INTR_MASK;
144 447
145 do { 448 if (intr & WL1271_ACX_INTR_EVENT_A) {
146 wl1271_fw_status(wl, wl->fw_status); 449 bool do_ack = (intr & WL1271_ACX_INTR_EVENT_B) ? false : true;
147 450 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
148 451 wl1271_event_handle(wl, 0, do_ack);
149 if (intr & (WL1271_ACX_INTR_EVENT_A | 452 }
150 WL1271_ACX_INTR_EVENT_B)) {
151 wl1271_debug(DEBUG_IRQ,
152 "WL1271_ACX_INTR_EVENT (0x%x)", intr);
153 if (intr & WL1271_ACX_INTR_EVENT_A)
154 wl1271_event_handle(wl, 0);
155 else
156 wl1271_event_handle(wl, 1);
157 }
158 453
159 if (intr & WL1271_ACX_INTR_INIT_COMPLETE) 454 if (intr & WL1271_ACX_INTR_EVENT_B) {
160 wl1271_debug(DEBUG_IRQ, 455 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
161 "WL1271_ACX_INTR_INIT_COMPLETE"); 456 wl1271_event_handle(wl, 1, true);
457 }
162 458
163 if (intr & WL1271_ACX_INTR_HW_AVAILABLE) 459 if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
164 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE"); 460 wl1271_debug(DEBUG_IRQ,
461 "WL1271_ACX_INTR_INIT_COMPLETE");
165 462
166 if (intr & WL1271_ACX_INTR_DATA) { 463 if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
167 u8 tx_res_cnt = wl->fw_status->tx_results_counter - 464 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
168 wl->tx_results_count;
169 465
170 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA"); 466 if (intr & WL1271_ACX_INTR_DATA) {
467 u8 tx_res_cnt = wl->fw_status->tx_results_counter -
468 wl->tx_results_count;
171 469
172 /* check for tx results */ 470 wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");
173 if (tx_res_cnt)
174 wl1271_tx_complete(wl, tx_res_cnt);
175 471
176 wl1271_rx(wl, wl->fw_status); 472 /* check for tx results */
177 } 473 if (tx_res_cnt)
474 wl1271_tx_complete(wl, tx_res_cnt);
178 475
179 intr = wl1271_reg_read32(wl, ACX_REG_INTERRUPT_CLEAR); 476 wl1271_rx(wl, wl->fw_status);
180 intr &= WL1271_INTR_MASK; 477 }
181 } while (intr && --ctr);
182 478
183out_sleep: 479out_sleep:
184 wl1271_reg_write32(wl, ACX_REG_INTERRUPT_MASK, 480 wl1271_spi_write32(wl, ACX_REG_INTERRUPT_MASK,
185 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK)); 481 WL1271_ACX_INTR_ALL & ~(WL1271_INTR_MASK));
186 wl1271_ps_elp_sleep(wl); 482 wl1271_ps_elp_sleep(wl);
187 483
@@ -205,7 +501,7 @@ static irqreturn_t wl1271_irq(int irq, void *cookie)
205 wl->elp_compl = NULL; 501 wl->elp_compl = NULL;
206 } 502 }
207 503
208 schedule_work(&wl->irq_work); 504 ieee80211_queue_work(wl->hw, &wl->irq_work);
209 spin_unlock_irqrestore(&wl->wl_lock, flags); 505 spin_unlock_irqrestore(&wl->wl_lock, flags);
210 506
211 return IRQ_HANDLED; 507 return IRQ_HANDLED;
@@ -231,7 +527,7 @@ static int wl1271_fetch_firmware(struct wl1271 *wl)
231 } 527 }
232 528
233 wl->fw_len = fw->size; 529 wl->fw_len = fw->size;
234 wl->fw = kmalloc(wl->fw_len, GFP_KERNEL); 530 wl->fw = vmalloc(wl->fw_len);
235 531
236 if (!wl->fw) { 532 if (!wl->fw) {
237 wl1271_error("could not allocate memory for the firmware"); 533 wl1271_error("could not allocate memory for the firmware");
@@ -292,7 +588,7 @@ static void wl1271_fw_wakeup(struct wl1271 *wl)
292 u32 elp_reg; 588 u32 elp_reg;
293 589
294 elp_reg = ELPCTRL_WAKE_UP; 590 elp_reg = ELPCTRL_WAKE_UP;
295 wl1271_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg); 591 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg);
296} 592}
297 593
298static int wl1271_setup(struct wl1271 *wl) 594static int wl1271_setup(struct wl1271 *wl)
@@ -314,6 +610,7 @@ static int wl1271_setup(struct wl1271 *wl)
314 610
315static int wl1271_chip_wakeup(struct wl1271 *wl) 611static int wl1271_chip_wakeup(struct wl1271 *wl)
316{ 612{
613 struct wl1271_partition_set partition;
317 int ret = 0; 614 int ret = 0;
318 615
319 wl1271_power_on(wl); 616 wl1271_power_on(wl);
@@ -323,11 +620,10 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
323 620
324 /* We don't need a real memory partition here, because we only want 621 /* We don't need a real memory partition here, because we only want
325 * to use the registers at this point. */ 622 * to use the registers at this point. */
326 wl1271_set_partition(wl, 623 memset(&partition, 0, sizeof(partition));
327 0x00000000, 624 partition.reg.start = REGISTERS_BASE;
328 0x00000000, 625 partition.reg.size = REGISTERS_DOWN_SIZE;
329 REGISTERS_BASE, 626 wl1271_set_partition(wl, &partition);
330 REGISTERS_DOWN_SIZE);
331 627
332 /* ELP module wake up */ 628 /* ELP module wake up */
333 wl1271_fw_wakeup(wl); 629 wl1271_fw_wakeup(wl);
@@ -335,7 +631,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
335 /* whal_FwCtrl_BootSm() */ 631 /* whal_FwCtrl_BootSm() */
336 632
337 /* 0. read chip id from CHIP_ID */ 633 /* 0. read chip id from CHIP_ID */
338 wl->chip.id = wl1271_reg_read32(wl, CHIP_ID_B); 634 wl->chip.id = wl1271_spi_read32(wl, CHIP_ID_B);
339 635
340 /* 1. check if chip id is valid */ 636 /* 1. check if chip id is valid */
341 637
@@ -346,7 +642,7 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
346 642
347 ret = wl1271_setup(wl); 643 ret = wl1271_setup(wl);
348 if (ret < 0) 644 if (ret < 0)
349 goto out; 645 goto out_power_off;
350 break; 646 break;
351 case CHIP_ID_1271_PG20: 647 case CHIP_ID_1271_PG20:
352 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)", 648 wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
@@ -354,56 +650,34 @@ static int wl1271_chip_wakeup(struct wl1271 *wl)
354 650
355 ret = wl1271_setup(wl); 651 ret = wl1271_setup(wl);
356 if (ret < 0) 652 if (ret < 0)
357 goto out; 653 goto out_power_off;
358 break; 654 break;
359 default: 655 default:
360 wl1271_error("unsupported chip id: 0x%x", wl->chip.id); 656 wl1271_error("unsupported chip id: 0x%x", wl->chip.id);
361 ret = -ENODEV; 657 ret = -ENODEV;
362 goto out; 658 goto out_power_off;
363 } 659 }
364 660
365 if (wl->fw == NULL) { 661 if (wl->fw == NULL) {
366 ret = wl1271_fetch_firmware(wl); 662 ret = wl1271_fetch_firmware(wl);
367 if (ret < 0) 663 if (ret < 0)
368 goto out; 664 goto out_power_off;
369 } 665 }
370 666
371 /* No NVS from netlink, try to get it from the filesystem */ 667 /* No NVS from netlink, try to get it from the filesystem */
372 if (wl->nvs == NULL) { 668 if (wl->nvs == NULL) {
373 ret = wl1271_fetch_nvs(wl); 669 ret = wl1271_fetch_nvs(wl);
374 if (ret < 0) 670 if (ret < 0)
375 goto out; 671 goto out_power_off;
376 } 672 }
377 673
378out: 674 goto out;
379 return ret;
380}
381 675
382static void wl1271_filter_work(struct work_struct *work) 676out_power_off:
383{ 677 wl1271_power_off(wl);
384 struct wl1271 *wl =
385 container_of(work, struct wl1271, filter_work);
386 int ret;
387
388 mutex_lock(&wl->mutex);
389
390 if (wl->state == WL1271_STATE_OFF)
391 goto out;
392
393 ret = wl1271_ps_elp_wakeup(wl, false);
394 if (ret < 0)
395 goto out;
396
397 /* FIXME: replace the magic numbers with proper definitions */
398 ret = wl1271_cmd_join(wl, wl->bss_type, 1, 100, 0);
399 if (ret < 0)
400 goto out_sleep;
401
402out_sleep:
403 wl1271_ps_elp_sleep(wl);
404 678
405out: 679out:
406 mutex_unlock(&wl->mutex); 680 return ret;
407} 681}
408 682
409int wl1271_plt_start(struct wl1271 *wl) 683int wl1271_plt_start(struct wl1271 *wl)
@@ -429,13 +703,26 @@ int wl1271_plt_start(struct wl1271 *wl)
429 703
430 ret = wl1271_boot(wl); 704 ret = wl1271_boot(wl);
431 if (ret < 0) 705 if (ret < 0)
432 goto out; 706 goto out_power_off;
433 707
434 wl1271_notice("firmware booted in PLT mode (%s)", wl->chip.fw_ver); 708 wl1271_notice("firmware booted in PLT mode (%s)", wl->chip.fw_ver);
435 709
436 ret = wl1271_plt_init(wl); 710 ret = wl1271_plt_init(wl);
437 if (ret < 0) 711 if (ret < 0)
438 goto out; 712 goto out_irq_disable;
713
714 /* Make sure power saving is disabled */
715 ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM);
716 if (ret < 0)
717 goto out_irq_disable;
718
719 goto out;
720
721out_irq_disable:
722 wl1271_disable_interrupts(wl);
723
724out_power_off:
725 wl1271_power_off(wl);
439 726
440out: 727out:
441 mutex_unlock(&wl->mutex); 728 mutex_unlock(&wl->mutex);
@@ -462,6 +749,7 @@ int wl1271_plt_stop(struct wl1271 *wl)
462 wl1271_power_off(wl); 749 wl1271_power_off(wl);
463 750
464 wl->state = WL1271_STATE_OFF; 751 wl->state = WL1271_STATE_OFF;
752 wl->rx_counter = 0;
465 753
466out: 754out:
467 mutex_unlock(&wl->mutex); 755 mutex_unlock(&wl->mutex);
@@ -481,7 +769,7 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
481 * before that, the tx_work will not be initialized! 769 * before that, the tx_work will not be initialized!
482 */ 770 */
483 771
484 schedule_work(&wl->tx_work); 772 ieee80211_queue_work(wl->hw, &wl->tx_work);
485 773
486 /* 774 /*
487 * The workqueue is slow to process the tx_queue and we need stop 775 * The workqueue is slow to process the tx_queue and we need stop
@@ -501,6 +789,93 @@ static int wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
501 return NETDEV_TX_OK; 789 return NETDEV_TX_OK;
502} 790}
503 791
792static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
793 void *arg)
794{
795 struct net_device *dev;
796 struct wireless_dev *wdev;
797 struct wiphy *wiphy;
798 struct ieee80211_hw *hw;
799 struct wl1271 *wl;
800 struct wl1271 *wl_temp;
801 struct in_device *idev;
802 struct in_ifaddr *ifa = arg;
803 int ret = 0;
804
805 /* FIXME: this ugly function should probably be implemented in the
806 * mac80211, and here should only be a simple callback handling actual
807 * setting of the filters. Now we need to dig up references to
808 * various structures to gain access to what we need.
809 * Also, because of this, there is no "initial" setting of the filter
810 * in "op_start", because we don't want to dig up struct net_device
811 * there - the filter will be set upon first change of the interface
812 * IP address. */
813
814 dev = ifa->ifa_dev->dev;
815
816 wdev = dev->ieee80211_ptr;
817 if (wdev == NULL)
818 return -ENODEV;
819
820 wiphy = wdev->wiphy;
821 if (wiphy == NULL)
822 return -ENODEV;
823
824 hw = wiphy_priv(wiphy);
825 if (hw == NULL)
826 return -ENODEV;
827
828 /* Check that the interface is one supported by this driver. */
829 wl_temp = hw->priv;
830 list_for_each_entry(wl, &wl_list, list) {
831 if (wl == wl_temp)
832 break;
833 }
834 if (wl == NULL)
835 return -ENODEV;
836
837 /* Get the interface IP address for the device. "ifa" will become
838 NULL if:
839 - there is no IPV4 protocol address configured
840 - there are multiple (virtual) IPV4 addresses configured
841 When "ifa" is NULL, filtering will be disabled.
842 */
843 ifa = NULL;
844 idev = dev->ip_ptr;
845 if (idev)
846 ifa = idev->ifa_list;
847
848 if (ifa && ifa->ifa_next)
849 ifa = NULL;
850
851 mutex_lock(&wl->mutex);
852
853 if (wl->state == WL1271_STATE_OFF)
854 goto out;
855
856 ret = wl1271_ps_elp_wakeup(wl, false);
857 if (ret < 0)
858 goto out;
859 if (ifa)
860 ret = wl1271_acx_arp_ip_filter(wl, true,
861 (u8 *)&ifa->ifa_address,
862 ACX_IPV4_VERSION);
863 else
864 ret = wl1271_acx_arp_ip_filter(wl, false, NULL,
865 ACX_IPV4_VERSION);
866 wl1271_ps_elp_sleep(wl);
867
868out:
869 mutex_unlock(&wl->mutex);
870
871 return ret;
872}
873
874static struct notifier_block wl1271_dev_notifier = {
875 .notifier_call = wl1271_dev_notify,
876};
877
878
504static int wl1271_op_start(struct ieee80211_hw *hw) 879static int wl1271_op_start(struct ieee80211_hw *hw)
505{ 880{
506 struct wl1271 *wl = hw->priv; 881 struct wl1271 *wl = hw->priv;
@@ -523,22 +898,32 @@ static int wl1271_op_start(struct ieee80211_hw *hw)
523 898
524 ret = wl1271_boot(wl); 899 ret = wl1271_boot(wl);
525 if (ret < 0) 900 if (ret < 0)
526 goto out; 901 goto out_power_off;
527 902
528 ret = wl1271_hw_init(wl); 903 ret = wl1271_hw_init(wl);
529 if (ret < 0) 904 if (ret < 0)
530 goto out; 905 goto out_irq_disable;
531 906
532 wl->state = WL1271_STATE_ON; 907 wl->state = WL1271_STATE_ON;
533 908
534 wl1271_info("firmware booted (%s)", wl->chip.fw_ver); 909 wl1271_info("firmware booted (%s)", wl->chip.fw_ver);
535 910
536out: 911 goto out;
537 if (ret < 0)
538 wl1271_power_off(wl);
539 912
913out_irq_disable:
914 wl1271_disable_interrupts(wl);
915
916out_power_off:
917 wl1271_power_off(wl);
918
919out:
540 mutex_unlock(&wl->mutex); 920 mutex_unlock(&wl->mutex);
541 921
922 if (!ret) {
923 list_add(&wl->list, &wl_list);
924 register_inetaddr_notifier(&wl1271_dev_notifier);
925 }
926
542 return ret; 927 return ret;
543} 928}
544 929
@@ -551,6 +936,9 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
551 936
552 wl1271_debug(DEBUG_MAC80211, "mac80211 stop"); 937 wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
553 938
939 unregister_inetaddr_notifier(&wl1271_dev_notifier);
940 list_del(&wl->list);
941
554 mutex_lock(&wl->mutex); 942 mutex_lock(&wl->mutex);
555 943
556 WARN_ON(wl->state != WL1271_STATE_ON); 944 WARN_ON(wl->state != WL1271_STATE_ON);
@@ -570,7 +958,6 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
570 958
571 cancel_work_sync(&wl->irq_work); 959 cancel_work_sync(&wl->irq_work);
572 cancel_work_sync(&wl->tx_work); 960 cancel_work_sync(&wl->tx_work);
573 cancel_work_sync(&wl->filter_work);
574 961
575 mutex_lock(&wl->mutex); 962 mutex_lock(&wl->mutex);
576 963
@@ -581,19 +968,25 @@ static void wl1271_op_stop(struct ieee80211_hw *hw)
581 memset(wl->bssid, 0, ETH_ALEN); 968 memset(wl->bssid, 0, ETH_ALEN);
582 memset(wl->ssid, 0, IW_ESSID_MAX_SIZE + 1); 969 memset(wl->ssid, 0, IW_ESSID_MAX_SIZE + 1);
583 wl->ssid_len = 0; 970 wl->ssid_len = 0;
584 wl->listen_int = 1;
585 wl->bss_type = MAX_BSS_TYPE; 971 wl->bss_type = MAX_BSS_TYPE;
972 wl->band = IEEE80211_BAND_2GHZ;
586 973
587 wl->rx_counter = 0; 974 wl->rx_counter = 0;
588 wl->elp = false; 975 wl->elp = false;
589 wl->psm = 0; 976 wl->psm = 0;
977 wl->psm_entry_retry = 0;
590 wl->tx_queue_stopped = false; 978 wl->tx_queue_stopped = false;
591 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 979 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
592 wl->tx_blocks_available = 0; 980 wl->tx_blocks_available = 0;
593 wl->tx_results_count = 0; 981 wl->tx_results_count = 0;
594 wl->tx_packets_count = 0; 982 wl->tx_packets_count = 0;
983 wl->tx_security_last_seq = 0;
984 wl->tx_security_seq_16 = 0;
985 wl->tx_security_seq_32 = 0;
595 wl->time_offset = 0; 986 wl->time_offset = 0;
596 wl->session_counter = 0; 987 wl->session_counter = 0;
988 wl->joined = false;
989
597 for (i = 0; i < NUM_TX_QUEUES; i++) 990 for (i = 0; i < NUM_TX_QUEUES; i++)
598 wl->tx_blocks_freed[i] = 0; 991 wl->tx_blocks_freed[i] = 0;
599 992
@@ -611,6 +1004,12 @@ static int wl1271_op_add_interface(struct ieee80211_hw *hw,
611 conf->type, conf->mac_addr); 1004 conf->type, conf->mac_addr);
612 1005
613 mutex_lock(&wl->mutex); 1006 mutex_lock(&wl->mutex);
1007 if (wl->vif) {
1008 ret = -EBUSY;
1009 goto out;
1010 }
1011
1012 wl->vif = conf->vif;
614 1013
615 switch (conf->type) { 1014 switch (conf->type) {
616 case NL80211_IFTYPE_STATION: 1015 case NL80211_IFTYPE_STATION:
@@ -634,7 +1033,12 @@ out:
634static void wl1271_op_remove_interface(struct ieee80211_hw *hw, 1033static void wl1271_op_remove_interface(struct ieee80211_hw *hw,
635 struct ieee80211_if_init_conf *conf) 1034 struct ieee80211_if_init_conf *conf)
636{ 1035{
1036 struct wl1271 *wl = hw->priv;
1037
1038 mutex_lock(&wl->mutex);
637 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface"); 1039 wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");
1040 wl->vif = NULL;
1041 mutex_unlock(&wl->mutex);
638} 1042}
639 1043
640#if 0 1044#if 0
@@ -657,23 +1061,24 @@ static int wl1271_op_config_interface(struct ieee80211_hw *hw,
657 if (ret < 0) 1061 if (ret < 0)
658 goto out; 1062 goto out;
659 1063
660 memcpy(wl->bssid, conf->bssid, ETH_ALEN); 1064 if (memcmp(wl->bssid, conf->bssid, ETH_ALEN)) {
1065 wl1271_debug(DEBUG_MAC80211, "bssid changed");
661 1066
662 ret = wl1271_cmd_build_null_data(wl); 1067 memcpy(wl->bssid, conf->bssid, ETH_ALEN);
663 if (ret < 0)
664 goto out_sleep;
665 1068
666 wl->ssid_len = conf->ssid_len; 1069 ret = wl1271_cmd_join(wl);
667 if (wl->ssid_len) 1070 if (ret < 0)
668 memcpy(wl->ssid, conf->ssid, wl->ssid_len); 1071 goto out_sleep;
669 1072
670 if (wl->bss_type != BSS_TYPE_IBSS) { 1073 ret = wl1271_cmd_build_null_data(wl);
671 /* FIXME: replace the magic numbers with proper definitions */
672 ret = wl1271_cmd_join(wl, wl->bss_type, 5, 100, 1);
673 if (ret < 0) 1074 if (ret < 0)
674 goto out_sleep; 1075 goto out_sleep;
675 } 1076 }
676 1077
1078 wl->ssid_len = conf->ssid_len;
1079 if (wl->ssid_len)
1080 memcpy(wl->ssid, conf->ssid, wl->ssid_len);
1081
677 if (conf->changed & IEEE80211_IFCC_BEACON) { 1082 if (conf->changed & IEEE80211_IFCC_BEACON) {
678 beacon = ieee80211_beacon_get(hw, vif); 1083 beacon = ieee80211_beacon_get(hw, vif);
679 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON, 1084 ret = wl1271_cmd_template_set(wl, CMD_TEMPL_BEACON,
@@ -691,12 +1096,6 @@ static int wl1271_op_config_interface(struct ieee80211_hw *hw,
691 1096
692 if (ret < 0) 1097 if (ret < 0)
693 goto out_sleep; 1098 goto out_sleep;
694
695 /* FIXME: replace the magic numbers with proper definitions */
696 ret = wl1271_cmd_join(wl, wl->bss_type, 1, 100, 0);
697
698 if (ret < 0)
699 goto out_sleep;
700 } 1099 }
701 1100
702out_sleep: 1101out_sleep:
@@ -724,26 +1123,22 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
724 1123
725 mutex_lock(&wl->mutex); 1124 mutex_lock(&wl->mutex);
726 1125
1126 wl->band = conf->channel->band;
1127
727 ret = wl1271_ps_elp_wakeup(wl, false); 1128 ret = wl1271_ps_elp_wakeup(wl, false);
728 if (ret < 0) 1129 if (ret < 0)
729 goto out; 1130 goto out;
730 1131
731 if (channel != wl->channel) { 1132 if (channel != wl->channel) {
732 u8 old_channel = wl->channel; 1133 /*
1134 * We assume that the stack will configure the right channel
1135 * before associating, so we don't need to send a join
1136 * command here. We will join the right channel when the
1137 * BSSID changes
1138 */
733 wl->channel = channel; 1139 wl->channel = channel;
734
735 /* FIXME: use beacon interval provided by mac80211 */
736 ret = wl1271_cmd_join(wl, wl->bss_type, 1, 100, 0);
737 if (ret < 0) {
738 wl->channel = old_channel;
739 goto out_sleep;
740 }
741 } 1140 }
742 1141
743 ret = wl1271_cmd_build_null_data(wl);
744 if (ret < 0)
745 goto out_sleep;
746
747 if (conf->flags & IEEE80211_CONF_PS && !wl->psm_requested) { 1142 if (conf->flags & IEEE80211_CONF_PS && !wl->psm_requested) {
748 wl1271_info("psm enabled"); 1143 wl1271_info("psm enabled");
749 1144
@@ -768,7 +1163,7 @@ static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
768 if (conf->power_level != wl->power_level) { 1163 if (conf->power_level != wl->power_level) {
769 ret = wl1271_acx_tx_power(wl, conf->power_level); 1164 ret = wl1271_acx_tx_power(wl, conf->power_level);
770 if (ret < 0) 1165 if (ret < 0)
771 goto out; 1166 goto out_sleep;
772 1167
773 wl->power_level = conf->power_level; 1168 wl->power_level = conf->power_level;
774 } 1169 }
@@ -782,6 +1177,45 @@ out:
782 return ret; 1177 return ret;
783} 1178}
784 1179
1180struct wl1271_filter_params {
1181 bool enabled;
1182 int mc_list_length;
1183 u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
1184};
1185
1186static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
1187 struct dev_addr_list *mc_list)
1188{
1189 struct wl1271_filter_params *fp;
1190 int i;
1191
1192 fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
1193 if (!fp) {
1194 wl1271_error("Out of memory setting filters.");
1195 return 0;
1196 }
1197
1198 /* update multicast filtering parameters */
1199 fp->enabled = true;
1200 if (mc_count > ACX_MC_ADDRESS_GROUP_MAX) {
1201 mc_count = 0;
1202 fp->enabled = false;
1203 }
1204
1205 fp->mc_list_length = 0;
1206 for (i = 0; i < mc_count; i++) {
1207 if (mc_list->da_addrlen == ETH_ALEN) {
1208 memcpy(fp->mc_list[fp->mc_list_length],
1209 mc_list->da_addr, ETH_ALEN);
1210 fp->mc_list_length++;
1211 } else
1212 wl1271_warning("Unknown mc address length.");
1213 mc_list = mc_list->next;
1214 }
1215
1216 return (u64)(unsigned long)fp;
1217}
1218
785#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \ 1219#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
786 FIF_ALLMULTI | \ 1220 FIF_ALLMULTI | \
787 FIF_FCSFAIL | \ 1221 FIF_FCSFAIL | \
@@ -791,28 +1225,53 @@ out:
791 1225
792static void wl1271_op_configure_filter(struct ieee80211_hw *hw, 1226static void wl1271_op_configure_filter(struct ieee80211_hw *hw,
793 unsigned int changed, 1227 unsigned int changed,
794 unsigned int *total,u64 multicast) 1228 unsigned int *total, u64 multicast)
795{ 1229{
1230 struct wl1271_filter_params *fp = (void *)(unsigned long)multicast;
796 struct wl1271 *wl = hw->priv; 1231 struct wl1271 *wl = hw->priv;
1232 int ret;
797 1233
798 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter"); 1234 wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter");
799 1235
1236 mutex_lock(&wl->mutex);
1237
1238 if (wl->state == WL1271_STATE_OFF)
1239 goto out;
1240
1241 ret = wl1271_ps_elp_wakeup(wl, false);
1242 if (ret < 0)
1243 goto out;
1244
800 *total &= WL1271_SUPPORTED_FILTERS; 1245 *total &= WL1271_SUPPORTED_FILTERS;
801 changed &= WL1271_SUPPORTED_FILTERS; 1246 changed &= WL1271_SUPPORTED_FILTERS;
802 1247
1248 if (*total & FIF_ALLMULTI)
1249 ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0);
1250 else if (fp)
1251 ret = wl1271_acx_group_address_tbl(wl, fp->enabled,
1252 fp->mc_list,
1253 fp->mc_list_length);
1254 if (ret < 0)
1255 goto out_sleep;
1256
1257 kfree(fp);
1258
1259 /* FIXME: We still need to set our filters properly */
1260
1261 /* determine, whether supported filter values have changed */
803 if (changed == 0) 1262 if (changed == 0)
804 return; 1263 goto out_sleep;
805 1264
806 /* FIXME: wl->rx_config and wl->rx_filter are not protected */ 1265 /* apply configured filters */
807 wl->rx_config = WL1271_DEFAULT_RX_CONFIG; 1266 ret = wl1271_acx_rx_config(wl, wl->rx_config, wl->rx_filter);
808 wl->rx_filter = WL1271_DEFAULT_RX_FILTER; 1267 if (ret < 0)
1268 goto out_sleep;
809 1269
810 /* 1270out_sleep:
811 * FIXME: workqueues need to be properly cancelled on stop(), for 1271 wl1271_ps_elp_sleep(wl);
812 * now let's just disable changing the filter settings. They will 1272
813 * be updated any on config(). 1273out:
814 */ 1274 mutex_unlock(&wl->mutex);
815 /* schedule_work(&wl->filter_work); */
816} 1275}
817 1276
818static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 1277static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
@@ -823,6 +1282,8 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
823 struct wl1271 *wl = hw->priv; 1282 struct wl1271 *wl = hw->priv;
824 const u8 *addr; 1283 const u8 *addr;
825 int ret; 1284 int ret;
1285 u32 tx_seq_32 = 0;
1286 u16 tx_seq_16 = 0;
826 u8 key_type; 1287 u8 key_type;
827 1288
828 static const u8 bcast_addr[ETH_ALEN] = 1289 static const u8 bcast_addr[ETH_ALEN] =
@@ -861,11 +1322,15 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
861 key_type = KEY_TKIP; 1322 key_type = KEY_TKIP;
862 1323
863 key_conf->hw_key_idx = key_conf->keyidx; 1324 key_conf->hw_key_idx = key_conf->keyidx;
1325 tx_seq_32 = wl->tx_security_seq_32;
1326 tx_seq_16 = wl->tx_security_seq_16;
864 break; 1327 break;
865 case ALG_CCMP: 1328 case ALG_CCMP:
866 key_type = KEY_AES; 1329 key_type = KEY_AES;
867 1330
868 key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 1331 key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
1332 tx_seq_32 = wl->tx_security_seq_32;
1333 tx_seq_16 = wl->tx_security_seq_16;
869 break; 1334 break;
870 default: 1335 default:
871 wl1271_error("Unknown key algo 0x%x", key_conf->alg); 1336 wl1271_error("Unknown key algo 0x%x", key_conf->alg);
@@ -879,7 +1344,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
879 ret = wl1271_cmd_set_key(wl, KEY_ADD_OR_REPLACE, 1344 ret = wl1271_cmd_set_key(wl, KEY_ADD_OR_REPLACE,
880 key_conf->keyidx, key_type, 1345 key_conf->keyidx, key_type,
881 key_conf->keylen, key_conf->key, 1346 key_conf->keylen, key_conf->key,
882 addr); 1347 addr, tx_seq_32, tx_seq_16);
883 if (ret < 0) { 1348 if (ret < 0) {
884 wl1271_error("Could not add or replace key"); 1349 wl1271_error("Could not add or replace key");
885 goto out_sleep; 1350 goto out_sleep;
@@ -890,7 +1355,7 @@ static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
890 ret = wl1271_cmd_set_key(wl, KEY_REMOVE, 1355 ret = wl1271_cmd_set_key(wl, KEY_REMOVE,
891 key_conf->keyidx, key_type, 1356 key_conf->keyidx, key_type,
892 key_conf->keylen, key_conf->key, 1357 key_conf->keylen, key_conf->key,
893 addr); 1358 addr, 0, 0);
894 if (ret < 0) { 1359 if (ret < 0) {
895 wl1271_error("Could not remove key"); 1360 wl1271_error("Could not remove key");
896 goto out_sleep; 1361 goto out_sleep;
@@ -921,13 +1386,13 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
921 struct wl1271 *wl = hw->priv; 1386 struct wl1271 *wl = hw->priv;
922 int ret; 1387 int ret;
923 u8 *ssid = NULL; 1388 u8 *ssid = NULL;
924 size_t ssid_len = 0; 1389 size_t len = 0;
925 1390
926 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan"); 1391 wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");
927 1392
928 if (req->n_ssids) { 1393 if (req->n_ssids) {
929 ssid = req->ssids[0].ssid; 1394 ssid = req->ssids[0].ssid;
930 ssid_len = req->ssids[0].ssid_len; 1395 len = req->ssids[0].ssid_len;
931 } 1396 }
932 1397
933 mutex_lock(&wl->mutex); 1398 mutex_lock(&wl->mutex);
@@ -936,7 +1401,12 @@ static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
936 if (ret < 0) 1401 if (ret < 0)
937 goto out; 1402 goto out;
938 1403
939 ret = wl1271_cmd_scan(hw->priv, ssid, ssid_len, 1, 0, 13, 3); 1404 if (wl1271_11a_enabled())
1405 ret = wl1271_cmd_scan(hw->priv, ssid, len, 1, 0,
1406 WL1271_SCAN_BAND_DUAL, 3);
1407 else
1408 ret = wl1271_cmd_scan(hw->priv, ssid, len, 1, 0,
1409 WL1271_SCAN_BAND_2_4_GHZ, 3);
940 1410
941 wl1271_ps_elp_sleep(wl); 1411 wl1271_ps_elp_sleep(wl);
942 1412
@@ -969,6 +1439,22 @@ out:
969 return ret; 1439 return ret;
970} 1440}
971 1441
1442static u32 wl1271_enabled_rates_get(struct wl1271 *wl, u64 basic_rate_set)
1443{
1444 struct ieee80211_supported_band *band;
1445 u32 enabled_rates = 0;
1446 int bit;
1447
1448 band = wl->hw->wiphy->bands[wl->band];
1449 for (bit = 0; bit < band->n_bitrates; bit++) {
1450 if (basic_rate_set & 0x1)
1451 enabled_rates |= band->bitrates[bit].hw_value;
1452 basic_rate_set >>= 1;
1453 }
1454
1455 return enabled_rates;
1456}
1457
972static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw, 1458static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
973 struct ieee80211_vif *vif, 1459 struct ieee80211_vif *vif,
974 struct ieee80211_bss_conf *bss_conf, 1460 struct ieee80211_bss_conf *bss_conf,
@@ -990,6 +1476,12 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
990 if (bss_conf->assoc) { 1476 if (bss_conf->assoc) {
991 wl->aid = bss_conf->aid; 1477 wl->aid = bss_conf->aid;
992 1478
1479 /*
1480 * with wl1271, we don't need to update the
1481 * beacon_int and dtim_period, because the firmware
1482 * updates it by itself when the first beacon is
1483 * received after a join.
1484 */
993 ret = wl1271_cmd_build_ps_poll(wl, wl->aid); 1485 ret = wl1271_cmd_build_ps_poll(wl, wl->aid);
994 if (ret < 0) 1486 if (ret < 0)
995 goto out_sleep; 1487 goto out_sleep;
@@ -1005,8 +1497,14 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1005 if (ret < 0) 1497 if (ret < 0)
1006 goto out_sleep; 1498 goto out_sleep;
1007 } 1499 }
1500 } else {
1501 /* use defaults when not associated */
1502 wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET;
1503 wl->aid = 0;
1008 } 1504 }
1505
1009 } 1506 }
1507
1010 if (changed & BSS_CHANGED_ERP_SLOT) { 1508 if (changed & BSS_CHANGED_ERP_SLOT) {
1011 if (bss_conf->use_short_slot) 1509 if (bss_conf->use_short_slot)
1012 ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT); 1510 ret = wl1271_acx_slot(wl, SLOT_TIME_SHORT);
@@ -1036,6 +1534,17 @@ static void wl1271_op_bss_info_changed(struct ieee80211_hw *hw,
1036 } 1534 }
1037 } 1535 }
1038 1536
1537 if (changed & BSS_CHANGED_BASIC_RATES) {
1538 wl->basic_rate_set = wl1271_enabled_rates_get(
1539 wl, bss_conf->basic_rates);
1540
1541 ret = wl1271_acx_rate_policies(wl, wl->basic_rate_set);
1542 if (ret < 0) {
1543 wl1271_warning("Set rate policies failed %d", ret);
1544 goto out_sleep;
1545 }
1546 }
1547
1039out_sleep: 1548out_sleep:
1040 wl1271_ps_elp_sleep(wl); 1549 wl1271_ps_elp_sleep(wl);
1041 1550
@@ -1047,44 +1556,44 @@ out:
1047/* can't be const, mac80211 writes to this */ 1556/* can't be const, mac80211 writes to this */
1048static struct ieee80211_rate wl1271_rates[] = { 1557static struct ieee80211_rate wl1271_rates[] = {
1049 { .bitrate = 10, 1558 { .bitrate = 10,
1050 .hw_value = 0x1, 1559 .hw_value = CONF_HW_BIT_RATE_1MBPS,
1051 .hw_value_short = 0x1, }, 1560 .hw_value_short = CONF_HW_BIT_RATE_1MBPS, },
1052 { .bitrate = 20, 1561 { .bitrate = 20,
1053 .hw_value = 0x2, 1562 .hw_value = CONF_HW_BIT_RATE_2MBPS,
1054 .hw_value_short = 0x2, 1563 .hw_value_short = CONF_HW_BIT_RATE_2MBPS,
1055 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 1564 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
1056 { .bitrate = 55, 1565 { .bitrate = 55,
1057 .hw_value = 0x4, 1566 .hw_value = CONF_HW_BIT_RATE_5_5MBPS,
1058 .hw_value_short = 0x4, 1567 .hw_value_short = CONF_HW_BIT_RATE_5_5MBPS,
1059 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 1568 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
1060 { .bitrate = 110, 1569 { .bitrate = 110,
1061 .hw_value = 0x20, 1570 .hw_value = CONF_HW_BIT_RATE_11MBPS,
1062 .hw_value_short = 0x20, 1571 .hw_value_short = CONF_HW_BIT_RATE_11MBPS,
1063 .flags = IEEE80211_RATE_SHORT_PREAMBLE }, 1572 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
1064 { .bitrate = 60, 1573 { .bitrate = 60,
1065 .hw_value = 0x8, 1574 .hw_value = CONF_HW_BIT_RATE_6MBPS,
1066 .hw_value_short = 0x8, }, 1575 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
1067 { .bitrate = 90, 1576 { .bitrate = 90,
1068 .hw_value = 0x10, 1577 .hw_value = CONF_HW_BIT_RATE_9MBPS,
1069 .hw_value_short = 0x10, }, 1578 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
1070 { .bitrate = 120, 1579 { .bitrate = 120,
1071 .hw_value = 0x40, 1580 .hw_value = CONF_HW_BIT_RATE_12MBPS,
1072 .hw_value_short = 0x40, }, 1581 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
1073 { .bitrate = 180, 1582 { .bitrate = 180,
1074 .hw_value = 0x80, 1583 .hw_value = CONF_HW_BIT_RATE_18MBPS,
1075 .hw_value_short = 0x80, }, 1584 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
1076 { .bitrate = 240, 1585 { .bitrate = 240,
1077 .hw_value = 0x200, 1586 .hw_value = CONF_HW_BIT_RATE_24MBPS,
1078 .hw_value_short = 0x200, }, 1587 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
1079 { .bitrate = 360, 1588 { .bitrate = 360,
1080 .hw_value = 0x400, 1589 .hw_value = CONF_HW_BIT_RATE_36MBPS,
1081 .hw_value_short = 0x400, }, 1590 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
1082 { .bitrate = 480, 1591 { .bitrate = 480,
1083 .hw_value = 0x800, 1592 .hw_value = CONF_HW_BIT_RATE_48MBPS,
1084 .hw_value_short = 0x800, }, 1593 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
1085 { .bitrate = 540, 1594 { .bitrate = 540,
1086 .hw_value = 0x1000, 1595 .hw_value = CONF_HW_BIT_RATE_54MBPS,
1087 .hw_value_short = 0x1000, }, 1596 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
1088}; 1597};
1089 1598
1090/* can't be const, mac80211 writes to this */ 1599/* can't be const, mac80211 writes to this */
@@ -1112,6 +1621,88 @@ static struct ieee80211_supported_band wl1271_band_2ghz = {
1112 .n_bitrates = ARRAY_SIZE(wl1271_rates), 1621 .n_bitrates = ARRAY_SIZE(wl1271_rates),
1113}; 1622};
1114 1623
1624/* 5 GHz data rates for WL1273 */
1625static struct ieee80211_rate wl1271_rates_5ghz[] = {
1626 { .bitrate = 60,
1627 .hw_value = CONF_HW_BIT_RATE_6MBPS,
1628 .hw_value_short = CONF_HW_BIT_RATE_6MBPS, },
1629 { .bitrate = 90,
1630 .hw_value = CONF_HW_BIT_RATE_9MBPS,
1631 .hw_value_short = CONF_HW_BIT_RATE_9MBPS, },
1632 { .bitrate = 120,
1633 .hw_value = CONF_HW_BIT_RATE_12MBPS,
1634 .hw_value_short = CONF_HW_BIT_RATE_12MBPS, },
1635 { .bitrate = 180,
1636 .hw_value = CONF_HW_BIT_RATE_18MBPS,
1637 .hw_value_short = CONF_HW_BIT_RATE_18MBPS, },
1638 { .bitrate = 240,
1639 .hw_value = CONF_HW_BIT_RATE_24MBPS,
1640 .hw_value_short = CONF_HW_BIT_RATE_24MBPS, },
1641 { .bitrate = 360,
1642 .hw_value = CONF_HW_BIT_RATE_36MBPS,
1643 .hw_value_short = CONF_HW_BIT_RATE_36MBPS, },
1644 { .bitrate = 480,
1645 .hw_value = CONF_HW_BIT_RATE_48MBPS,
1646 .hw_value_short = CONF_HW_BIT_RATE_48MBPS, },
1647 { .bitrate = 540,
1648 .hw_value = CONF_HW_BIT_RATE_54MBPS,
1649 .hw_value_short = CONF_HW_BIT_RATE_54MBPS, },
1650};
1651
1652/* 5 GHz band channels for WL1273 */
1653static struct ieee80211_channel wl1271_channels_5ghz[] = {
1654 { .hw_value = 183, .center_freq = 4915},
1655 { .hw_value = 184, .center_freq = 4920},
1656 { .hw_value = 185, .center_freq = 4925},
1657 { .hw_value = 187, .center_freq = 4935},
1658 { .hw_value = 188, .center_freq = 4940},
1659 { .hw_value = 189, .center_freq = 4945},
1660 { .hw_value = 192, .center_freq = 4960},
1661 { .hw_value = 196, .center_freq = 4980},
1662 { .hw_value = 7, .center_freq = 5035},
1663 { .hw_value = 8, .center_freq = 5040},
1664 { .hw_value = 9, .center_freq = 5045},
1665 { .hw_value = 11, .center_freq = 5055},
1666 { .hw_value = 12, .center_freq = 5060},
1667 { .hw_value = 16, .center_freq = 5080},
1668 { .hw_value = 34, .center_freq = 5170},
1669 { .hw_value = 36, .center_freq = 5180},
1670 { .hw_value = 38, .center_freq = 5190},
1671 { .hw_value = 40, .center_freq = 5200},
1672 { .hw_value = 42, .center_freq = 5210},
1673 { .hw_value = 44, .center_freq = 5220},
1674 { .hw_value = 46, .center_freq = 5230},
1675 { .hw_value = 48, .center_freq = 5240},
1676 { .hw_value = 52, .center_freq = 5260},
1677 { .hw_value = 56, .center_freq = 5280},
1678 { .hw_value = 60, .center_freq = 5300},
1679 { .hw_value = 64, .center_freq = 5320},
1680 { .hw_value = 100, .center_freq = 5500},
1681 { .hw_value = 104, .center_freq = 5520},
1682 { .hw_value = 108, .center_freq = 5540},
1683 { .hw_value = 112, .center_freq = 5560},
1684 { .hw_value = 116, .center_freq = 5580},
1685 { .hw_value = 120, .center_freq = 5600},
1686 { .hw_value = 124, .center_freq = 5620},
1687 { .hw_value = 128, .center_freq = 5640},
1688 { .hw_value = 132, .center_freq = 5660},
1689 { .hw_value = 136, .center_freq = 5680},
1690 { .hw_value = 140, .center_freq = 5700},
1691 { .hw_value = 149, .center_freq = 5745},
1692 { .hw_value = 153, .center_freq = 5765},
1693 { .hw_value = 157, .center_freq = 5785},
1694 { .hw_value = 161, .center_freq = 5805},
1695 { .hw_value = 165, .center_freq = 5825},
1696};
1697
1698
1699static struct ieee80211_supported_band wl1271_band_5ghz = {
1700 .channels = wl1271_channels_5ghz,
1701 .n_channels = ARRAY_SIZE(wl1271_channels_5ghz),
1702 .bitrates = wl1271_rates_5ghz,
1703 .n_bitrates = ARRAY_SIZE(wl1271_rates_5ghz),
1704};
1705
1115static const struct ieee80211_ops wl1271_ops = { 1706static const struct ieee80211_ops wl1271_ops = {
1116 .start = wl1271_op_start, 1707 .start = wl1271_op_start,
1117 .stop = wl1271_op_stop, 1708 .stop = wl1271_op_stop,
@@ -1119,6 +1710,7 @@ static const struct ieee80211_ops wl1271_ops = {
1119 .remove_interface = wl1271_op_remove_interface, 1710 .remove_interface = wl1271_op_remove_interface,
1120 .config = wl1271_op_config, 1711 .config = wl1271_op_config,
1121/* .config_interface = wl1271_op_config_interface, */ 1712/* .config_interface = wl1271_op_config_interface, */
1713 .prepare_multicast = wl1271_op_prepare_multicast,
1122 .configure_filter = wl1271_op_configure_filter, 1714 .configure_filter = wl1271_op_configure_filter,
1123 .tx = wl1271_op_tx, 1715 .tx = wl1271_op_tx,
1124 .set_key = wl1271_op_set_key, 1716 .set_key = wl1271_op_set_key,
@@ -1151,24 +1743,25 @@ static int wl1271_register_hw(struct wl1271 *wl)
1151 1743
1152static int wl1271_init_ieee80211(struct wl1271 *wl) 1744static int wl1271_init_ieee80211(struct wl1271 *wl)
1153{ 1745{
1154 /* 1746 /* The tx descriptor buffer and the TKIP space. */
1155 * The tx descriptor buffer and the TKIP space. 1747 wl->hw->extra_tx_headroom = WL1271_TKIP_IV_SPACE +
1156 * 1748 sizeof(struct wl1271_tx_hw_descr);
1157 * FIXME: add correct 1271 descriptor size
1158 */
1159 wl->hw->extra_tx_headroom = WL1271_TKIP_IV_SPACE;
1160 1749
1161 /* unit us */ 1750 /* unit us */
1162 /* FIXME: find a proper value */ 1751 /* FIXME: find a proper value */
1163 wl->hw->channel_change_time = 10000; 1752 wl->hw->channel_change_time = 10000;
1164 1753
1165 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM | 1754 wl->hw->flags = IEEE80211_HW_SIGNAL_DBM |
1166 IEEE80211_HW_NOISE_DBM; 1755 IEEE80211_HW_NOISE_DBM |
1756 IEEE80211_HW_BEACON_FILTER;
1167 1757
1168 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION); 1758 wl->hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION);
1169 wl->hw->wiphy->max_scan_ssids = 1; 1759 wl->hw->wiphy->max_scan_ssids = 1;
1170 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz; 1760 wl->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = &wl1271_band_2ghz;
1171 1761
1762 if (wl1271_11a_enabled())
1763 wl->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = &wl1271_band_5ghz;
1764
1172 SET_IEEE80211_DEV(wl->hw, &wl->spi->dev); 1765 SET_IEEE80211_DEV(wl->hw, &wl->spi->dev);
1173 1766
1174 return 0; 1767 return 0;
@@ -1213,29 +1806,33 @@ static int __devinit wl1271_probe(struct spi_device *spi)
1213 wl = hw->priv; 1806 wl = hw->priv;
1214 memset(wl, 0, sizeof(*wl)); 1807 memset(wl, 0, sizeof(*wl));
1215 1808
1809 INIT_LIST_HEAD(&wl->list);
1810
1216 wl->hw = hw; 1811 wl->hw = hw;
1217 dev_set_drvdata(&spi->dev, wl); 1812 dev_set_drvdata(&spi->dev, wl);
1218 wl->spi = spi; 1813 wl->spi = spi;
1219 1814
1220 skb_queue_head_init(&wl->tx_queue); 1815 skb_queue_head_init(&wl->tx_queue);
1221 1816
1222 INIT_WORK(&wl->filter_work, wl1271_filter_work); 1817 INIT_DELAYED_WORK(&wl->elp_work, wl1271_elp_work);
1223 wl->channel = WL1271_DEFAULT_CHANNEL; 1818 wl->channel = WL1271_DEFAULT_CHANNEL;
1224 wl->scanning = false; 1819 wl->scanning = false;
1225 wl->default_key = 0; 1820 wl->default_key = 0;
1226 wl->listen_int = 1;
1227 wl->rx_counter = 0; 1821 wl->rx_counter = 0;
1228 wl->rx_config = WL1271_DEFAULT_RX_CONFIG; 1822 wl->rx_config = WL1271_DEFAULT_RX_CONFIG;
1229 wl->rx_filter = WL1271_DEFAULT_RX_FILTER; 1823 wl->rx_filter = WL1271_DEFAULT_RX_FILTER;
1230 wl->elp = false; 1824 wl->elp = false;
1231 wl->psm = 0; 1825 wl->psm = 0;
1232 wl->psm_requested = false; 1826 wl->psm_requested = false;
1827 wl->psm_entry_retry = 0;
1233 wl->tx_queue_stopped = false; 1828 wl->tx_queue_stopped = false;
1234 wl->power_level = WL1271_DEFAULT_POWER_LEVEL; 1829 wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
1830 wl->basic_rate_set = WL1271_DEFAULT_BASIC_RATE_SET;
1831 wl->band = IEEE80211_BAND_2GHZ;
1832 wl->vif = NULL;
1833 wl->joined = false;
1235 1834
1236 /* We use the default power on sleep time until we know which chip 1835 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
1237 * we're using */
1238 for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
1239 wl->tx_frames[i] = NULL; 1836 wl->tx_frames[i] = NULL;
1240 1837
1241 spin_lock_init(&wl->wl_lock); 1838 spin_lock_init(&wl->wl_lock);
@@ -1250,13 +1847,6 @@ static int __devinit wl1271_probe(struct spi_device *spi)
1250 wl->state = WL1271_STATE_OFF; 1847 wl->state = WL1271_STATE_OFF;
1251 mutex_init(&wl->mutex); 1848 mutex_init(&wl->mutex);
1252 1849
1253 wl->rx_descriptor = kmalloc(sizeof(*wl->rx_descriptor), GFP_KERNEL);
1254 if (!wl->rx_descriptor) {
1255 wl1271_error("could not allocate memory for rx descriptor");
1256 ret = -ENOMEM;
1257 goto out_free;
1258 }
1259
1260 /* This is the only SPI value that we need to set here, the rest 1850 /* This is the only SPI value that we need to set here, the rest
1261 * comes from the board-peripherals file */ 1851 * comes from the board-peripherals file */
1262 spi->bits_per_word = 32; 1852 spi->bits_per_word = 32;
@@ -1298,6 +1888,9 @@ static int __devinit wl1271_probe(struct spi_device *spi)
1298 } 1888 }
1299 dev_set_drvdata(&wl1271_device.dev, wl); 1889 dev_set_drvdata(&wl1271_device.dev, wl);
1300 1890
1891 /* Apply default driver configuration. */
1892 wl1271_conf_init(wl);
1893
1301 ret = wl1271_init_ieee80211(wl); 1894 ret = wl1271_init_ieee80211(wl);
1302 if (ret) 1895 if (ret)
1303 goto out_platform; 1896 goto out_platform;
@@ -1319,9 +1912,6 @@ static int __devinit wl1271_probe(struct spi_device *spi)
1319 free_irq(wl->irq, wl); 1912 free_irq(wl->irq, wl);
1320 1913
1321 out_free: 1914 out_free:
1322 kfree(wl->rx_descriptor);
1323 wl->rx_descriptor = NULL;
1324
1325 ieee80211_free_hw(hw); 1915 ieee80211_free_hw(hw);
1326 1916
1327 return ret; 1917 return ret;
@@ -1337,14 +1927,11 @@ static int __devexit wl1271_remove(struct spi_device *spi)
1337 platform_device_unregister(&wl1271_device); 1927 platform_device_unregister(&wl1271_device);
1338 free_irq(wl->irq, wl); 1928 free_irq(wl->irq, wl);
1339 kfree(wl->target_mem_map); 1929 kfree(wl->target_mem_map);
1340 kfree(wl->fw); 1930 vfree(wl->fw);
1341 wl->fw = NULL; 1931 wl->fw = NULL;
1342 kfree(wl->nvs); 1932 kfree(wl->nvs);
1343 wl->nvs = NULL; 1933 wl->nvs = NULL;
1344 1934
1345 kfree(wl->rx_descriptor);
1346 wl->rx_descriptor = NULL;
1347
1348 kfree(wl->fw_status); 1935 kfree(wl->fw_status);
1349 kfree(wl->tx_res_if); 1936 kfree(wl->tx_res_if);
1350 1937
@@ -1391,3 +1978,4 @@ module_exit(wl1271_exit);
1391 1978
1392MODULE_LICENSE("GPL"); 1979MODULE_LICENSE("GPL");
1393MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>"); 1980MODULE_AUTHOR("Luciano Coelho <luciano.coelho@nokia.com>");
1981MODULE_AUTHOR("Juuso Oikarinen <juuso.oikarinen@nokia.com>");
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.c b/drivers/net/wireless/wl12xx/wl1271_ps.c
index 1dc74b0c7736..507cd91d7eed 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.c
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.c
@@ -27,25 +27,38 @@
27 27
28#define WL1271_WAKEUP_TIMEOUT 500 28#define WL1271_WAKEUP_TIMEOUT 500
29 29
30void wl1271_elp_work(struct work_struct *work)
31{
32 struct delayed_work *dwork;
33 struct wl1271 *wl;
34
35 dwork = container_of(work, struct delayed_work, work);
36 wl = container_of(dwork, struct wl1271, elp_work);
37
38 wl1271_debug(DEBUG_PSM, "elp work");
39
40 mutex_lock(&wl->mutex);
41
42 if (wl->elp || !wl->psm)
43 goto out;
44
45 wl1271_debug(DEBUG_PSM, "chip to elp");
46 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
47 wl->elp = true;
48
49out:
50 mutex_unlock(&wl->mutex);
51}
52
53#define ELP_ENTRY_DELAY 5
54
30/* Routines to toggle sleep mode while in ELP */ 55/* Routines to toggle sleep mode while in ELP */
31void wl1271_ps_elp_sleep(struct wl1271 *wl) 56void wl1271_ps_elp_sleep(struct wl1271 *wl)
32{ 57{
33 /* 58 if (wl->psm) {
34 * FIXME: due to a problem in the firmware (causing a firmware 59 cancel_delayed_work(&wl->elp_work);
35 * crash), ELP entry is prevented below. Remove the "true" to 60 ieee80211_queue_delayed_work(wl->hw, &wl->elp_work,
36 * re-enable ELP entry. 61 msecs_to_jiffies(ELP_ENTRY_DELAY));
37 */
38 if (true || wl->elp || !wl->psm)
39 return;
40
41 /*
42 * Go to ELP unless there is work already pending - pending work
43 * will immediately wakeup the chipset anyway.
44 */
45 if (!work_pending(&wl->irq_work) && !work_pending(&wl->tx_work)) {
46 wl1271_debug(DEBUG_PSM, "chip to elp");
47 wl1271_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_SLEEP);
48 wl->elp = true;
49 } 62 }
50} 63}
51 64
@@ -73,7 +86,7 @@ int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake)
73 wl->elp_compl = &compl; 86 wl->elp_compl = &compl;
74 spin_unlock_irqrestore(&wl->wl_lock, flags); 87 spin_unlock_irqrestore(&wl->wl_lock, flags);
75 88
76 wl1271_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP); 89 wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, ELPCTRL_WAKE_UP);
77 90
78 if (!pending) { 91 if (!pending) {
79 ret = wait_for_completion_timeout( 92 ret = wait_for_completion_timeout(
@@ -111,6 +124,17 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
111 switch (mode) { 124 switch (mode) {
112 case STATION_POWER_SAVE_MODE: 125 case STATION_POWER_SAVE_MODE:
113 wl1271_debug(DEBUG_PSM, "entering psm"); 126 wl1271_debug(DEBUG_PSM, "entering psm");
127
128 /* enable beacon filtering */
129 ret = wl1271_acx_beacon_filter_opt(wl, true);
130 if (ret < 0)
131 return ret;
132
133 /* enable beacon early termination */
134 ret = wl1271_acx_bet_enable(wl, true);
135 if (ret < 0)
136 return ret;
137
114 ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE); 138 ret = wl1271_cmd_ps_mode(wl, STATION_POWER_SAVE_MODE);
115 if (ret < 0) 139 if (ret < 0)
116 return ret; 140 return ret;
@@ -128,6 +152,16 @@ int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode)
128 if (ret < 0) 152 if (ret < 0)
129 return ret; 153 return ret;
130 154
155 /* disable beacon early termination */
156 ret = wl1271_acx_bet_enable(wl, false);
157 if (ret < 0)
158 return ret;
159
160 /* disable beacon filtering */
161 ret = wl1271_acx_beacon_filter_opt(wl, false);
162 if (ret < 0)
163 return ret;
164
131 ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE); 165 ret = wl1271_cmd_ps_mode(wl, STATION_ACTIVE_MODE);
132 if (ret < 0) 166 if (ret < 0)
133 return ret; 167 return ret;
diff --git a/drivers/net/wireless/wl12xx/wl1271_ps.h b/drivers/net/wireless/wl12xx/wl1271_ps.h
index de2bd3c7dc9c..779653d0ae85 100644
--- a/drivers/net/wireless/wl12xx/wl1271_ps.h
+++ b/drivers/net/wireless/wl12xx/wl1271_ps.h
@@ -30,6 +30,6 @@
30int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode); 30int wl1271_ps_set_mode(struct wl1271 *wl, enum wl1271_cmd_ps_mode mode);
31void wl1271_ps_elp_sleep(struct wl1271 *wl); 31void wl1271_ps_elp_sleep(struct wl1271 *wl);
32int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake); 32int wl1271_ps_elp_wakeup(struct wl1271 *wl, bool chip_awake);
33 33void wl1271_elp_work(struct work_struct *work);
34 34
35#endif /* __WL1271_PS_H__ */ 35#endif /* __WL1271_PS_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_reg.h b/drivers/net/wireless/wl12xx/wl1271_reg.h
index f8ed4a4fc691..1f237389d1c7 100644
--- a/drivers/net/wireless/wl12xx/wl1271_reg.h
+++ b/drivers/net/wireless/wl12xx/wl1271_reg.h
@@ -34,7 +34,7 @@
34#define REGISTERS_WORK_SIZE 0x0000b000 34#define REGISTERS_WORK_SIZE 0x0000b000
35 35
36#define HW_ACCESS_ELP_CTRL_REG_ADDR 0x1FFFC 36#define HW_ACCESS_ELP_CTRL_REG_ADDR 0x1FFFC
37#define STATUS_MEM_ADDRESS 0x40400 37#define FW_STATUS_ADDR (0x14FC0 + 0xA000)
38 38
39/* ELP register commands */ 39/* ELP register commands */
40#define ELPCTRL_WAKE_UP 0x1 40#define ELPCTRL_WAKE_UP 0x1
@@ -213,7 +213,6 @@
213==============================================*/ 213==============================================*/
214#define ACX_REG_INTERRUPT_ACK (REGISTERS_BASE + 0x04F0) 214#define ACX_REG_INTERRUPT_ACK (REGISTERS_BASE + 0x04F0)
215 215
216#define RX_DRIVER_DUMMY_WRITE_ADDRESS (REGISTERS_BASE + 0x0534)
217#define RX_DRIVER_COUNTER_ADDRESS (REGISTERS_BASE + 0x0538) 216#define RX_DRIVER_COUNTER_ADDRESS (REGISTERS_BASE + 0x0538)
218 217
219/* Device Configuration registers*/ 218/* Device Configuration registers*/
@@ -614,50 +613,6 @@ enum {
614 MAX_RADIO_BANDS = 0xFF 613 MAX_RADIO_BANDS = 0xFF
615}; 614};
616 615
617enum {
618 NO_RATE = 0,
619 RATE_1MBPS = 0x0A,
620 RATE_2MBPS = 0x14,
621 RATE_5_5MBPS = 0x37,
622 RATE_6MBPS = 0x0B,
623 RATE_9MBPS = 0x0F,
624 RATE_11MBPS = 0x6E,
625 RATE_12MBPS = 0x0A,
626 RATE_18MBPS = 0x0E,
627 RATE_22MBPS = 0xDC,
628 RATE_24MBPS = 0x09,
629 RATE_36MBPS = 0x0D,
630 RATE_48MBPS = 0x08,
631 RATE_54MBPS = 0x0C
632};
633
634enum {
635 RATE_INDEX_1MBPS = 0,
636 RATE_INDEX_2MBPS = 1,
637 RATE_INDEX_5_5MBPS = 2,
638 RATE_INDEX_6MBPS = 3,
639 RATE_INDEX_9MBPS = 4,
640 RATE_INDEX_11MBPS = 5,
641 RATE_INDEX_12MBPS = 6,
642 RATE_INDEX_18MBPS = 7,
643 RATE_INDEX_22MBPS = 8,
644 RATE_INDEX_24MBPS = 9,
645 RATE_INDEX_36MBPS = 10,
646 RATE_INDEX_48MBPS = 11,
647 RATE_INDEX_54MBPS = 12,
648 RATE_INDEX_MAX = RATE_INDEX_54MBPS,
649 MAX_RATE_INDEX,
650 INVALID_RATE_INDEX = MAX_RATE_INDEX,
651 RATE_INDEX_ENUM_MAX_SIZE = 0x7FFFFFFF
652};
653
654enum {
655 RATE_MASK_1MBPS = 0x1,
656 RATE_MASK_2MBPS = 0x2,
657 RATE_MASK_5_5MBPS = 0x4,
658 RATE_MASK_11MBPS = 0x20,
659};
660
661#define SHORT_PREAMBLE_BIT BIT(0) /* CCK or Barker depending on the rate */ 616#define SHORT_PREAMBLE_BIT BIT(0) /* CCK or Barker depending on the rate */
662#define OFDM_RATE_BIT BIT(6) 617#define OFDM_RATE_BIT BIT(6)
663#define PBCC_RATE_BIT BIT(7) 618#define PBCC_RATE_BIT BIT(7)
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.c b/drivers/net/wireless/wl12xx/wl1271_rx.c
index ad8b6904c5eb..ca645f38109b 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.c
@@ -30,14 +30,15 @@
30static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status, 30static u8 wl1271_rx_get_mem_block(struct wl1271_fw_status *status,
31 u32 drv_rx_counter) 31 u32 drv_rx_counter)
32{ 32{
33 return status->rx_pkt_descs[drv_rx_counter] & RX_MEM_BLOCK_MASK; 33 return le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
34 RX_MEM_BLOCK_MASK;
34} 35}
35 36
36static u32 wl1271_rx_get_buf_size(struct wl1271_fw_status *status, 37static u32 wl1271_rx_get_buf_size(struct wl1271_fw_status *status,
37 u32 drv_rx_counter) 38 u32 drv_rx_counter)
38{ 39{
39 return (status->rx_pkt_descs[drv_rx_counter] & RX_BUF_SIZE_MASK) >> 40 return (le32_to_cpu(status->rx_pkt_descs[drv_rx_counter]) &
40 RX_BUF_SIZE_SHIFT_DIV; 41 RX_BUF_SIZE_MASK) >> RX_BUF_SIZE_SHIFT_DIV;
41} 42}
42 43
43/* The values of this table must match the wl1271_rates[] array */ 44/* The values of this table must match the wl1271_rates[] array */
@@ -70,6 +71,36 @@ static u8 wl1271_rx_rate_to_idx[] = {
70 0 /* WL1271_RATE_1 */ 71 0 /* WL1271_RATE_1 */
71}; 72};
72 73
74/* The values of this table must match the wl1271_rates[] array */
75static u8 wl1271_5_ghz_rx_rate_to_idx[] = {
76 /* MCS rates are used only with 11n */
77 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS7 */
78 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS6 */
79 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS5 */
80 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS4 */
81 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS3 */
82 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS2 */
83 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS1 */
84 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_MCS0 */
85
86 7, /* WL1271_RATE_54 */
87 6, /* WL1271_RATE_48 */
88 5, /* WL1271_RATE_36 */
89 4, /* WL1271_RATE_24 */
90
91 /* TI-specific rate */
92 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_22 */
93
94 3, /* WL1271_RATE_18 */
95 2, /* WL1271_RATE_12 */
96 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_11 */
97 1, /* WL1271_RATE_9 */
98 0, /* WL1271_RATE_6 */
99 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_5_5 */
100 WL1271_RX_RATE_UNSUPPORTED, /* WL1271_RATE_2 */
101 WL1271_RX_RATE_UNSUPPORTED /* WL1271_RATE_1 */
102};
103
73static void wl1271_rx_status(struct wl1271 *wl, 104static void wl1271_rx_status(struct wl1271 *wl,
74 struct wl1271_rx_descriptor *desc, 105 struct wl1271_rx_descriptor *desc,
75 struct ieee80211_rx_status *status, 106 struct ieee80211_rx_status *status,
@@ -77,12 +108,21 @@ static void wl1271_rx_status(struct wl1271 *wl,
77{ 108{
78 memset(status, 0, sizeof(struct ieee80211_rx_status)); 109 memset(status, 0, sizeof(struct ieee80211_rx_status));
79 110
80 if ((desc->flags & WL1271_RX_DESC_BAND_MASK) == WL1271_RX_DESC_BAND_BG) 111 if ((desc->flags & WL1271_RX_DESC_BAND_MASK) ==
112 WL1271_RX_DESC_BAND_BG) {
81 status->band = IEEE80211_BAND_2GHZ; 113 status->band = IEEE80211_BAND_2GHZ;
82 else 114 status->rate_idx = wl1271_rx_rate_to_idx[desc->rate];
115 } else if ((desc->flags & WL1271_RX_DESC_BAND_MASK) ==
116 WL1271_RX_DESC_BAND_A) {
117 status->band = IEEE80211_BAND_5GHZ;
118 status->rate_idx = wl1271_5_ghz_rx_rate_to_idx[desc->rate];
119 } else
83 wl1271_warning("unsupported band 0x%x", 120 wl1271_warning("unsupported band 0x%x",
84 desc->flags & WL1271_RX_DESC_BAND_MASK); 121 desc->flags & WL1271_RX_DESC_BAND_MASK);
85 122
123 if (unlikely(status->rate_idx == WL1271_RX_RATE_UNSUPPORTED))
124 wl1271_warning("unsupported rate");
125
86 /* 126 /*
87 * FIXME: Add mactime handling. For IBSS (ad-hoc) we need to get the 127 * FIXME: Add mactime handling. For IBSS (ad-hoc) we need to get the
88 * timestamp from the beacon (acx_tsf_info). In BSS mode (infra) we 128 * timestamp from the beacon (acx_tsf_info). In BSS mode (infra) we
@@ -91,12 +131,6 @@ static void wl1271_rx_status(struct wl1271 *wl,
91 */ 131 */
92 status->signal = desc->rssi; 132 status->signal = desc->rssi;
93 133
94 /* FIXME: Should this be optimized? */
95 status->qual = (desc->rssi - WL1271_RX_MIN_RSSI) * 100 /
96 (WL1271_RX_MAX_RSSI - WL1271_RX_MIN_RSSI);
97 status->qual = min(status->qual, 100);
98 status->qual = max(status->qual, 0);
99
100 /* 134 /*
101 * FIXME: In wl1251, the SNR should be divided by two. In wl1271 we 135 * FIXME: In wl1251, the SNR should be divided by two. In wl1271 we
102 * need to divide by two for now, but TI has been discussing about 136 * need to divide by two for now, but TI has been discussing about
@@ -109,17 +143,11 @@ static void wl1271_rx_status(struct wl1271 *wl,
109 if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) { 143 if (desc->flags & WL1271_RX_DESC_ENCRYPT_MASK) {
110 status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED; 144 status->flag |= RX_FLAG_IV_STRIPPED | RX_FLAG_MMIC_STRIPPED;
111 145
112 if (likely(!(desc->flags & WL1271_RX_DESC_DECRYPT_FAIL))) 146 if (likely(!(desc->status & WL1271_RX_DESC_DECRYPT_FAIL)))
113 status->flag |= RX_FLAG_DECRYPTED; 147 status->flag |= RX_FLAG_DECRYPTED;
114 148 if (unlikely(desc->status & WL1271_RX_DESC_MIC_FAIL))
115 if (unlikely(desc->flags & WL1271_RX_DESC_MIC_FAIL))
116 status->flag |= RX_FLAG_MMIC_ERROR; 149 status->flag |= RX_FLAG_MMIC_ERROR;
117 } 150 }
118
119 status->rate_idx = wl1271_rx_rate_to_idx[desc->rate];
120
121 if (status->rate_idx == WL1271_RX_RATE_UNSUPPORTED)
122 wl1271_warning("unsupported rate");
123} 151}
124 152
125static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length) 153static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
@@ -131,14 +159,14 @@ static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
131 u8 *buf; 159 u8 *buf;
132 u8 beacon = 0; 160 u8 beacon = 0;
133 161
134 skb = dev_alloc_skb(length); 162 skb = __dev_alloc_skb(length, GFP_KERNEL);
135 if (!skb) { 163 if (!skb) {
136 wl1271_error("Couldn't allocate RX frame"); 164 wl1271_error("Couldn't allocate RX frame");
137 return; 165 return;
138 } 166 }
139 167
140 buf = skb_put(skb, length); 168 buf = skb_put(skb, length);
141 wl1271_spi_reg_read(wl, WL1271_SLV_MEM_DATA, buf, length, true); 169 wl1271_spi_read(wl, WL1271_SLV_MEM_DATA, buf, length, true);
142 170
143 /* the data read starts with the descriptor */ 171 /* the data read starts with the descriptor */
144 desc = (struct wl1271_rx_descriptor *) buf; 172 desc = (struct wl1271_rx_descriptor *) buf;
@@ -156,7 +184,7 @@ static void wl1271_rx_handle_data(struct wl1271 *wl, u32 length)
156 beacon ? "beacon" : ""); 184 beacon ? "beacon" : "");
157 185
158 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status)); 186 memcpy(IEEE80211_SKB_RXCB(skb), &rx_status, sizeof(rx_status));
159 ieee80211_rx(wl->hw, skb); 187 ieee80211_rx_ni(wl->hw, skb);
160} 188}
161 189
162void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status) 190void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
@@ -176,15 +204,15 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
176 break; 204 break;
177 } 205 }
178 206
179 wl->rx_mem_pool_addr.addr = 207 wl->rx_mem_pool_addr.addr = (mem_block << 8) +
180 (mem_block << 8) + wl_mem_map->packet_memory_pool_start; 208 le32_to_cpu(wl_mem_map->packet_memory_pool_start);
181 wl->rx_mem_pool_addr.addr_extra = 209 wl->rx_mem_pool_addr.addr_extra =
182 wl->rx_mem_pool_addr.addr + 4; 210 wl->rx_mem_pool_addr.addr + 4;
183 211
184 /* Choose the block we want to read */ 212 /* Choose the block we want to read */
185 wl1271_spi_reg_write(wl, WL1271_SLV_REG_DATA, 213 wl1271_spi_write(wl, WL1271_SLV_REG_DATA,
186 &wl->rx_mem_pool_addr, 214 &wl->rx_mem_pool_addr,
187 sizeof(wl->rx_mem_pool_addr), false); 215 sizeof(wl->rx_mem_pool_addr), false);
188 216
189 wl1271_rx_handle_data(wl, buf_size); 217 wl1271_rx_handle_data(wl, buf_size);
190 218
@@ -192,9 +220,5 @@ void wl1271_rx(struct wl1271 *wl, struct wl1271_fw_status *status)
192 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK; 220 drv_rx_counter = wl->rx_counter & NUM_RX_PKT_DESC_MOD_MASK;
193 } 221 }
194 222
195 wl1271_reg_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter); 223 wl1271_spi_write32(wl, RX_DRIVER_COUNTER_ADDRESS, wl->rx_counter);
196
197 /* This is a workaround for some problems in the chip */
198 wl1271_reg_write32(wl, RX_DRIVER_DUMMY_WRITE_ADDRESS, 0x1);
199
200} 224}
diff --git a/drivers/net/wireless/wl12xx/wl1271_rx.h b/drivers/net/wireless/wl12xx/wl1271_rx.h
index d1ca60e43a25..1ae6d1783ed4 100644
--- a/drivers/net/wireless/wl12xx/wl1271_rx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_rx.h
@@ -102,14 +102,14 @@
102#define RX_BUF_SIZE_SHIFT_DIV 6 102#define RX_BUF_SIZE_SHIFT_DIV 6
103 103
104struct wl1271_rx_descriptor { 104struct wl1271_rx_descriptor {
105 u16 length; 105 __le16 length;
106 u8 status; 106 u8 status;
107 u8 flags; 107 u8 flags;
108 u8 rate; 108 u8 rate;
109 u8 channel; 109 u8 channel;
110 s8 rssi; 110 s8 rssi;
111 u8 snr; 111 u8 snr;
112 u32 timestamp; 112 __le32 timestamp;
113 u8 packet_class; 113 u8 packet_class;
114 u8 process_id; 114 u8 process_id;
115 u8 pad_len; 115 u8 pad_len;
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.c b/drivers/net/wireless/wl12xx/wl1271_spi.c
index 4a12880c16a8..02978a16e732 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.c
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.c
@@ -30,17 +30,29 @@
30#include "wl12xx_80211.h" 30#include "wl12xx_80211.h"
31#include "wl1271_spi.h" 31#include "wl1271_spi.h"
32 32
33static int wl1271_translate_reg_addr(struct wl1271 *wl, int addr) 33static int wl1271_translate_addr(struct wl1271 *wl, int addr)
34{ 34{
35 return addr - wl->physical_reg_addr + wl->virtual_reg_addr; 35 /*
36} 36 * To translate, first check to which window of addresses the
37 37 * particular address belongs. Then subtract the starting address
38static int wl1271_translate_mem_addr(struct wl1271 *wl, int addr) 38 * of that window from the address. Then, add offset of the
39{ 39 * translated region.
40 return addr - wl->physical_mem_addr + wl->virtual_mem_addr; 40 *
41 * The translated regions occur next to each other in physical device
42 * memory, so just add the sizes of the preceeding address regions to
43 * get the offset to the new region.
44 *
45 * Currently, only the two first regions are addressed, and the
46 * assumption is that all addresses will fall into either of those
47 * two.
48 */
49 if ((addr >= wl->part.reg.start) &&
50 (addr < wl->part.reg.start + wl->part.reg.size))
51 return addr - wl->part.reg.start + wl->part.mem.size;
52 else
53 return addr - wl->part.mem.start;
41} 54}
42 55
43
44void wl1271_spi_reset(struct wl1271 *wl) 56void wl1271_spi_reset(struct wl1271 *wl)
45{ 57{
46 u8 *cmd; 58 u8 *cmd;
@@ -123,133 +135,137 @@ void wl1271_spi_init(struct wl1271 *wl)
123 135
124/* Set the SPI partitions to access the chip addresses 136/* Set the SPI partitions to access the chip addresses
125 * 137 *
126 * There are two VIRTUAL (SPI) partitions (the memory partition and the 138 * To simplify driver code, a fixed (virtual) memory map is defined for
127 * registers partition), which are mapped to two different areas of the 139 * register and memory addresses. Because in the chipset, in different stages
128 * PHYSICAL (hardware) memory. This function also makes other checks to 140 * of operation, those addresses will move around, an address translation
129 * ensure that the partitions are not overlapping. In the diagram below, the 141 * mechanism is required.
130 * memory partition comes before the register partition, but the opposite is
131 * also supported.
132 * 142 *
133 * PHYSICAL address 143 * There are four partitions (three memory and one register partition),
144 * which are mapped to two different areas of the hardware memory.
145 *
146 * Virtual address
134 * space 147 * space
135 * 148 *
136 * | | 149 * | |
137 * ...+----+--> mem_start 150 * ...+----+--> mem.start
138 * VIRTUAL address ... | | 151 * Physical address ... | |
139 * space ... | | [PART_0] 152 * space ... | | [PART_0]
140 * ... | | 153 * ... | |
141 * 0x00000000 <--+----+... ...+----+--> mem_start + mem_size 154 * 00000000 <--+----+... ...+----+--> mem.start + mem.size
142 * | | ... | | 155 * | | ... | |
143 * |MEM | ... | | 156 * |MEM | ... | |
144 * | | ... | | 157 * | | ... | |
145 * part_size <--+----+... | | {unused area) 158 * mem.size <--+----+... | | {unused area)
146 * | | ... | | 159 * | | ... | |
147 * |REG | ... | | 160 * |REG | ... | |
148 * part_size | | ... | | 161 * mem.size | | ... | |
149 * + <--+----+... ...+----+--> reg_start 162 * + <--+----+... ...+----+--> reg.start
150 * reg_size ... | | 163 * reg.size | | ... | |
151 * ... | | [PART_1] 164 * |MEM2| ... | | [PART_1]
152 * ... | | 165 * | | ... | |
153 * ...+----+--> reg_start + reg_size 166 * ...+----+--> reg.start + reg.size
154 * | | 167 * | |
155 * 168 *
156 */ 169 */
157int wl1271_set_partition(struct wl1271 *wl, 170int wl1271_set_partition(struct wl1271 *wl,
158 u32 mem_start, u32 mem_size, 171 struct wl1271_partition_set *p)
159 u32 reg_start, u32 reg_size)
160{ 172{
161 struct wl1271_partition *partition; 173 /* copy partition info */
162 struct spi_transfer t; 174 memcpy(&wl->part, p, sizeof(*p));
163 struct spi_message m;
164 size_t len, cmd_len;
165 u32 *cmd;
166 int addr;
167
168 cmd_len = sizeof(u32) + 2 * sizeof(struct wl1271_partition);
169 cmd = kzalloc(cmd_len, GFP_KERNEL);
170 if (!cmd)
171 return -ENOMEM;
172
173 spi_message_init(&m);
174 memset(&t, 0, sizeof(t));
175
176 partition = (struct wl1271_partition *) (cmd + 1);
177 addr = HW_ACCESS_PART0_SIZE_ADDR;
178 len = 2 * sizeof(struct wl1271_partition);
179
180 *cmd |= WSPI_CMD_WRITE;
181 *cmd |= (len << WSPI_CMD_BYTE_LENGTH_OFFSET) & WSPI_CMD_BYTE_LENGTH;
182 *cmd |= addr & WSPI_CMD_BYTE_ADDR;
183 175
184 wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X", 176 wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
185 mem_start, mem_size); 177 p->mem.start, p->mem.size);
186 wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X", 178 wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
187 reg_start, reg_size); 179 p->reg.start, p->reg.size);
188 180 wl1271_debug(DEBUG_SPI, "mem2_start %08X mem2_size %08X",
189 /* Make sure that the two partitions together don't exceed the 181 p->mem2.start, p->mem2.size);
190 * address range */ 182 wl1271_debug(DEBUG_SPI, "mem3_start %08X mem3_size %08X",
191 if ((mem_size + reg_size) > HW_ACCESS_MEMORY_MAX_RANGE) { 183 p->mem3.start, p->mem3.size);
192 wl1271_debug(DEBUG_SPI, "Total size exceeds maximum virtual" 184
193 " address range. Truncating partition[0]."); 185 /* write partition info to the chipset */
194 mem_size = HW_ACCESS_MEMORY_MAX_RANGE - reg_size; 186 wl1271_raw_write32(wl, HW_PART0_START_ADDR, p->mem.start);
195 wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X", 187 wl1271_raw_write32(wl, HW_PART0_SIZE_ADDR, p->mem.size);
196 mem_start, mem_size); 188 wl1271_raw_write32(wl, HW_PART1_START_ADDR, p->reg.start);
197 wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X", 189 wl1271_raw_write32(wl, HW_PART1_SIZE_ADDR, p->reg.size);
198 reg_start, reg_size); 190 wl1271_raw_write32(wl, HW_PART2_START_ADDR, p->mem2.start);
199 } 191 wl1271_raw_write32(wl, HW_PART2_SIZE_ADDR, p->mem2.size);
192 wl1271_raw_write32(wl, HW_PART3_START_ADDR, p->mem3.start);
200 193
201 if ((mem_start < reg_start) && 194 return 0;
202 ((mem_start + mem_size) > reg_start)) { 195}
203 /* Guarantee that the memory partition doesn't overlap the
204 * registers partition */
205 wl1271_debug(DEBUG_SPI, "End of partition[0] is "
206 "overlapping partition[1]. Adjusted.");
207 mem_size = reg_start - mem_start;
208 wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
209 mem_start, mem_size);
210 wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
211 reg_start, reg_size);
212 } else if ((reg_start < mem_start) &&
213 ((reg_start + reg_size) > mem_start)) {
214 /* Guarantee that the register partition doesn't overlap the
215 * memory partition */
216 wl1271_debug(DEBUG_SPI, "End of partition[1] is"
217 " overlapping partition[0]. Adjusted.");
218 reg_size = mem_start - reg_start;
219 wl1271_debug(DEBUG_SPI, "mem_start %08X mem_size %08X",
220 mem_start, mem_size);
221 wl1271_debug(DEBUG_SPI, "reg_start %08X reg_size %08X",
222 reg_start, reg_size);
223 }
224 196
225 partition[0].start = mem_start; 197#define WL1271_BUSY_WORD_TIMEOUT 1000
226 partition[0].size = mem_size;
227 partition[1].start = reg_start;
228 partition[1].size = reg_size;
229 198
230 wl->physical_mem_addr = mem_start; 199/* FIXME: Check busy words, removed due to SPI bug */
231 wl->physical_reg_addr = reg_start; 200#if 0
201static void wl1271_spi_read_busy(struct wl1271 *wl, void *buf, size_t len)
202{
203 struct spi_transfer t[1];
204 struct spi_message m;
205 u32 *busy_buf;
206 int num_busy_bytes = 0;
232 207
233 wl->virtual_mem_addr = 0; 208 wl1271_info("spi read BUSY!");
234 wl->virtual_reg_addr = mem_size;
235 209
236 t.tx_buf = cmd; 210 /*
237 t.len = cmd_len; 211 * Look for the non-busy word in the read buffer, and if found,
238 spi_message_add_tail(&t, &m); 212 * read in the remaining data into the buffer.
213 */
214 busy_buf = (u32 *)buf;
215 for (; (u32)busy_buf < (u32)buf + len; busy_buf++) {
216 num_busy_bytes += sizeof(u32);
217 if (*busy_buf & 0x1) {
218 spi_message_init(&m);
219 memset(t, 0, sizeof(t));
220 memmove(buf, busy_buf, len - num_busy_bytes);
221 t[0].rx_buf = buf + (len - num_busy_bytes);
222 t[0].len = num_busy_bytes;
223 spi_message_add_tail(&t[0], &m);
224 spi_sync(wl->spi, &m);
225 return;
226 }
227 }
239 228
240 spi_sync(wl->spi, &m); 229 /*
230 * Read further busy words from SPI until a non-busy word is
231 * encountered, then read the data itself into the buffer.
232 */
233 wl1271_info("spi read BUSY-polling needed!");
241 234
242 kfree(cmd); 235 num_busy_bytes = WL1271_BUSY_WORD_TIMEOUT;
236 busy_buf = wl->buffer_busyword;
237 while (num_busy_bytes) {
238 num_busy_bytes--;
239 spi_message_init(&m);
240 memset(t, 0, sizeof(t));
241 t[0].rx_buf = busy_buf;
242 t[0].len = sizeof(u32);
243 spi_message_add_tail(&t[0], &m);
244 spi_sync(wl->spi, &m);
245
246 if (*busy_buf & 0x1) {
247 spi_message_init(&m);
248 memset(t, 0, sizeof(t));
249 t[0].rx_buf = buf;
250 t[0].len = len;
251 spi_message_add_tail(&t[0], &m);
252 spi_sync(wl->spi, &m);
253 return;
254 }
255 }
243 256
244 return 0; 257 /* The SPI bus is unresponsive, the read failed. */
258 memset(buf, 0, len);
259 wl1271_error("SPI read busy-word timeout!\n");
245} 260}
261#endif
246 262
247void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, 263void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
248 size_t len, bool fixed) 264 size_t len, bool fixed)
249{ 265{
250 struct spi_transfer t[3]; 266 struct spi_transfer t[3];
251 struct spi_message m; 267 struct spi_message m;
252 u8 *busy_buf; 268 u32 *busy_buf;
253 u32 *cmd; 269 u32 *cmd;
254 270
255 cmd = &wl->buffer_cmd; 271 cmd = &wl->buffer_cmd;
@@ -281,14 +297,16 @@ void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf,
281 297
282 spi_sync(wl->spi, &m); 298 spi_sync(wl->spi, &m);
283 299
284 /* FIXME: check busy words */ 300 /* FIXME: Check busy words, removed due to SPI bug */
301 /* if (!(busy_buf[WL1271_BUSY_WORD_CNT - 1] & 0x1))
302 wl1271_spi_read_busy(wl, buf, len); */
285 303
286 wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd)); 304 wl1271_dump(DEBUG_SPI, "spi_read cmd -> ", cmd, sizeof(*cmd));
287 wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, len); 305 wl1271_dump(DEBUG_SPI, "spi_read buf <- ", buf, len);
288} 306}
289 307
290void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, 308void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
291 size_t len, bool fixed) 309 size_t len, bool fixed)
292{ 310{
293 struct spi_transfer t[2]; 311 struct spi_transfer t[2];
294 struct spi_message m; 312 struct spi_message m;
@@ -321,62 +339,77 @@ void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf,
321 wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, len); 339 wl1271_dump(DEBUG_SPI, "spi_write buf -> ", buf, len);
322} 340}
323 341
324void wl1271_spi_mem_read(struct wl1271 *wl, int addr, void *buf, 342void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, size_t len,
325 size_t len) 343 bool fixed)
326{ 344{
327 int physical; 345 int physical;
328 346
329 physical = wl1271_translate_mem_addr(wl, addr); 347 physical = wl1271_translate_addr(wl, addr);
330 348
331 wl1271_spi_read(wl, physical, buf, len, false); 349 wl1271_spi_raw_read(wl, physical, buf, len, fixed);
332} 350}
333 351
334void wl1271_spi_mem_write(struct wl1271 *wl, int addr, void *buf, 352void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, size_t len,
335 size_t len) 353 bool fixed)
336{ 354{
337 int physical; 355 int physical;
338 356
339 physical = wl1271_translate_mem_addr(wl, addr); 357 physical = wl1271_translate_addr(wl, addr);
340 358
341 wl1271_spi_write(wl, physical, buf, len, false); 359 wl1271_spi_raw_write(wl, physical, buf, len, fixed);
342} 360}
343 361
344void wl1271_spi_reg_read(struct wl1271 *wl, int addr, void *buf, size_t len, 362u32 wl1271_spi_read32(struct wl1271 *wl, int addr)
345 bool fixed)
346{ 363{
347 int physical; 364 return wl1271_raw_read32(wl, wl1271_translate_addr(wl, addr));
348 365}
349 physical = wl1271_translate_reg_addr(wl, addr);
350 366
351 wl1271_spi_read(wl, physical, buf, len, fixed); 367void wl1271_spi_write32(struct wl1271 *wl, int addr, u32 val)
368{
369 wl1271_raw_write32(wl, wl1271_translate_addr(wl, addr), val);
352} 370}
353 371
354void wl1271_spi_reg_write(struct wl1271 *wl, int addr, void *buf, size_t len, 372void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val)
355 bool fixed)
356{ 373{
357 int physical; 374 /* write address >> 1 + 0x30000 to OCP_POR_CTR */
375 addr = (addr >> 1) + 0x30000;
376 wl1271_spi_write32(wl, OCP_POR_CTR, addr);
358 377
359 physical = wl1271_translate_reg_addr(wl, addr); 378 /* write value to OCP_POR_WDATA */
379 wl1271_spi_write32(wl, OCP_DATA_WRITE, val);
360 380
361 wl1271_spi_write(wl, physical, buf, len, fixed); 381 /* write 1 to OCP_CMD */
382 wl1271_spi_write32(wl, OCP_CMD, OCP_CMD_WRITE);
362} 383}
363 384
364u32 wl1271_mem_read32(struct wl1271 *wl, int addr) 385u16 wl1271_top_reg_read(struct wl1271 *wl, int addr)
365{ 386{
366 return wl1271_read32(wl, wl1271_translate_mem_addr(wl, addr)); 387 u32 val;
367} 388 int timeout = OCP_CMD_LOOP;
368 389
369void wl1271_mem_write32(struct wl1271 *wl, int addr, u32 val) 390 /* write address >> 1 + 0x30000 to OCP_POR_CTR */
370{ 391 addr = (addr >> 1) + 0x30000;
371 wl1271_write32(wl, wl1271_translate_mem_addr(wl, addr), val); 392 wl1271_spi_write32(wl, OCP_POR_CTR, addr);
372}
373 393
374u32 wl1271_reg_read32(struct wl1271 *wl, int addr) 394 /* write 2 to OCP_CMD */
375{ 395 wl1271_spi_write32(wl, OCP_CMD, OCP_CMD_READ);
376 return wl1271_read32(wl, wl1271_translate_reg_addr(wl, addr));
377}
378 396
379void wl1271_reg_write32(struct wl1271 *wl, int addr, u32 val) 397 /* poll for data ready */
380{ 398 do {
381 wl1271_write32(wl, wl1271_translate_reg_addr(wl, addr), val); 399 val = wl1271_spi_read32(wl, OCP_DATA_READ);
400 timeout--;
401 } while (!(val & OCP_READY_MASK) && timeout);
402
403 if (!timeout) {
404 wl1271_warning("Top register access timed out.");
405 return 0xffff;
406 }
407
408 /* check data status and return if OK */
409 if ((val & OCP_STATUS_MASK) == OCP_STATUS_OK)
410 return val & 0xffff;
411 else {
412 wl1271_warning("Top register access returned error.");
413 return 0xffff;
414 }
382} 415}
diff --git a/drivers/net/wireless/wl12xx/wl1271_spi.h b/drivers/net/wireless/wl12xx/wl1271_spi.h
index 2c9968458646..cb7df1c56314 100644
--- a/drivers/net/wireless/wl12xx/wl1271_spi.h
+++ b/drivers/net/wireless/wl12xx/wl1271_spi.h
@@ -29,10 +29,14 @@
29 29
30#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0 30#define HW_ACCESS_MEMORY_MAX_RANGE 0x1FFC0
31 31
32#define HW_ACCESS_PART0_SIZE_ADDR 0x1FFC0 32#define HW_PARTITION_REGISTERS_ADDR 0x1ffc0
33#define HW_ACCESS_PART0_START_ADDR 0x1FFC4 33#define HW_PART0_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR)
34#define HW_ACCESS_PART1_SIZE_ADDR 0x1FFC8 34#define HW_PART0_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 4)
35#define HW_ACCESS_PART1_START_ADDR 0x1FFCC 35#define HW_PART1_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 8)
36#define HW_PART1_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 12)
37#define HW_PART2_SIZE_ADDR (HW_PARTITION_REGISTERS_ADDR + 16)
38#define HW_PART2_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 20)
39#define HW_PART3_START_ADDR (HW_PARTITION_REGISTERS_ADDR + 24)
36 40
37#define HW_ACCESS_REGISTER_SIZE 4 41#define HW_ACCESS_REGISTER_SIZE 4
38 42
@@ -67,47 +71,56 @@
67 ((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32)) 71 ((WL1271_BUSY_WORD_LEN - 4) / sizeof(u32))
68#define HW_ACCESS_WSPI_INIT_CMD_MASK 0 72#define HW_ACCESS_WSPI_INIT_CMD_MASK 0
69 73
74#define OCP_CMD_LOOP 32
75
76#define OCP_CMD_WRITE 0x1
77#define OCP_CMD_READ 0x2
78
79#define OCP_READY_MASK BIT(18)
80#define OCP_STATUS_MASK (BIT(16) | BIT(17))
81
82#define OCP_STATUS_NO_RESP 0x00000
83#define OCP_STATUS_OK 0x10000
84#define OCP_STATUS_REQ_FAILED 0x20000
85#define OCP_STATUS_RESP_ERROR 0x30000
70 86
71/* Raw target IO, address is not translated */ 87/* Raw target IO, address is not translated */
72void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, 88void wl1271_spi_raw_write(struct wl1271 *wl, int addr, void *buf,
73 size_t len, bool fixed); 89 size_t len, bool fixed);
74void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, 90void wl1271_spi_raw_read(struct wl1271 *wl, int addr, void *buf,
75 size_t len, bool fixed); 91 size_t len, bool fixed);
76 92
77/* Memory target IO, address is tranlated to partition 0 */ 93/* Translated target IO */
78void wl1271_spi_mem_read(struct wl1271 *wl, int addr, void *buf, size_t len); 94void wl1271_spi_read(struct wl1271 *wl, int addr, void *buf, size_t len,
79void wl1271_spi_mem_write(struct wl1271 *wl, int addr, void *buf, size_t len); 95 bool fixed);
80u32 wl1271_mem_read32(struct wl1271 *wl, int addr); 96void wl1271_spi_write(struct wl1271 *wl, int addr, void *buf, size_t len,
81void wl1271_mem_write32(struct wl1271 *wl, int addr, u32 val); 97 bool fixed);
98u32 wl1271_spi_read32(struct wl1271 *wl, int addr);
99void wl1271_spi_write32(struct wl1271 *wl, int addr, u32 val);
82 100
83/* Registers IO */ 101/* Top Register IO */
84void wl1271_spi_reg_read(struct wl1271 *wl, int addr, void *buf, size_t len, 102void wl1271_top_reg_write(struct wl1271 *wl, int addr, u16 val);
85 bool fixed); 103u16 wl1271_top_reg_read(struct wl1271 *wl, int addr);
86void wl1271_spi_reg_write(struct wl1271 *wl, int addr, void *buf, size_t len,
87 bool fixed);
88u32 wl1271_reg_read32(struct wl1271 *wl, int addr);
89void wl1271_reg_write32(struct wl1271 *wl, int addr, u32 val);
90 104
91/* INIT and RESET words */ 105/* INIT and RESET words */
92void wl1271_spi_reset(struct wl1271 *wl); 106void wl1271_spi_reset(struct wl1271 *wl);
93void wl1271_spi_init(struct wl1271 *wl); 107void wl1271_spi_init(struct wl1271 *wl);
94int wl1271_set_partition(struct wl1271 *wl, 108int wl1271_set_partition(struct wl1271 *wl,
95 u32 part_start, u32 part_size, 109 struct wl1271_partition_set *p);
96 u32 reg_start, u32 reg_size);
97 110
98static inline u32 wl1271_read32(struct wl1271 *wl, int addr) 111static inline u32 wl1271_raw_read32(struct wl1271 *wl, int addr)
99{ 112{
100 wl1271_spi_read(wl, addr, &wl->buffer_32, 113 wl1271_spi_raw_read(wl, addr, &wl->buffer_32,
101 sizeof(wl->buffer_32), false); 114 sizeof(wl->buffer_32), false);
102 115
103 return wl->buffer_32; 116 return wl->buffer_32;
104} 117}
105 118
106static inline void wl1271_write32(struct wl1271 *wl, int addr, u32 val) 119static inline void wl1271_raw_write32(struct wl1271 *wl, int addr, u32 val)
107{ 120{
108 wl->buffer_32 = val; 121 wl->buffer_32 = val;
109 wl1271_spi_write(wl, addr, &wl->buffer_32, 122 wl1271_spi_raw_write(wl, addr, &wl->buffer_32,
110 sizeof(wl->buffer_32), false); 123 sizeof(wl->buffer_32), false);
111} 124}
112 125
113#endif /* __WL1271_SPI_H__ */ 126#endif /* __WL1271_SPI_H__ */
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.c b/drivers/net/wireless/wl12xx/wl1271_tx.c
index ff221258b941..00af065c77c2 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.c
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.c
@@ -33,8 +33,7 @@
33static int wl1271_tx_id(struct wl1271 *wl, struct sk_buff *skb) 33static int wl1271_tx_id(struct wl1271 *wl, struct sk_buff *skb)
34{ 34{
35 int i; 35 int i;
36 36 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
37 for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++)
38 if (wl->tx_frames[i] == NULL) { 37 if (wl->tx_frames[i] == NULL) {
39 wl->tx_frames[i] = skb; 38 wl->tx_frames[i] = skb;
40 return i; 39 return i;
@@ -58,8 +57,8 @@ static int wl1271_tx_allocate(struct wl1271 *wl, struct sk_buff *skb, u32 extra)
58 /* approximate the number of blocks required for this packet 57 /* approximate the number of blocks required for this packet
59 in the firmware */ 58 in the firmware */
60 /* FIXME: try to figure out what is done here and make it cleaner */ 59 /* FIXME: try to figure out what is done here and make it cleaner */
61 total_blocks = (skb->len) >> TX_HW_BLOCK_SHIFT_DIV; 60 total_blocks = (total_len + 20) >> TX_HW_BLOCK_SHIFT_DIV;
62 excluded = (total_blocks << 2) + (skb->len & 0xff) + 34; 61 excluded = (total_blocks << 2) + ((total_len + 20) & 0xff) + 34;
63 total_blocks += (excluded > 252) ? 2 : 1; 62 total_blocks += (excluded > 252) ? 2 : 1;
64 total_blocks += TX_HW_BLOCK_SPARE; 63 total_blocks += TX_HW_BLOCK_SPARE;
65 64
@@ -89,15 +88,25 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
89{ 88{
90 struct wl1271_tx_hw_descr *desc; 89 struct wl1271_tx_hw_descr *desc;
91 int pad; 90 int pad;
91 u16 tx_attr;
92 92
93 desc = (struct wl1271_tx_hw_descr *) skb->data; 93 desc = (struct wl1271_tx_hw_descr *) skb->data;
94 94
95 /* relocate space for security header */
96 if (extra) {
97 void *framestart = skb->data + sizeof(*desc);
98 u16 fc = *(u16 *)(framestart + extra);
99 int hdrlen = ieee80211_hdrlen(cpu_to_le16(fc));
100 memmove(framestart, framestart + extra, hdrlen);
101 }
102
95 /* configure packet life time */ 103 /* configure packet life time */
96 desc->start_time = jiffies_to_usecs(jiffies) - wl->time_offset; 104 desc->start_time = cpu_to_le32(jiffies_to_usecs(jiffies) -
97 desc->life_time = TX_HW_MGMT_PKT_LIFETIME_TU; 105 wl->time_offset);
106 desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
98 107
99 /* configure the tx attributes */ 108 /* configure the tx attributes */
100 desc->tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER; 109 tx_attr = wl->session_counter << TX_HW_ATTR_OFST_SESSION_COUNTER;
101 /* FIXME: do we know the packet priority? can we identify mgmt 110 /* FIXME: do we know the packet priority? can we identify mgmt
102 packets, and use max prio for them at least? */ 111 packets, and use max prio for them at least? */
103 desc->tid = 0; 112 desc->tid = 0;
@@ -106,11 +115,13 @@ static int wl1271_tx_fill_hdr(struct wl1271 *wl, struct sk_buff *skb,
106 115
107 /* align the length (and store in terms of words) */ 116 /* align the length (and store in terms of words) */
108 pad = WL1271_TX_ALIGN(skb->len); 117 pad = WL1271_TX_ALIGN(skb->len);
109 desc->length = pad >> 2; 118 desc->length = cpu_to_le16(pad >> 2);
110 119
111 /* calculate number of padding bytes */ 120 /* calculate number of padding bytes */
112 pad = pad - skb->len; 121 pad = pad - skb->len;
113 desc->tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD; 122 tx_attr |= pad << TX_HW_ATTR_OFST_LAST_WORD_PAD;
123
124 desc->tx_attr = cpu_to_le16(tx_attr);
114 125
115 wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad); 126 wl1271_debug(DEBUG_TX, "tx_fill_hdr: pad: %d", pad);
116 return 0; 127 return 0;
@@ -147,11 +158,11 @@ static int wl1271_tx_send_packet(struct wl1271 *wl, struct sk_buff *skb,
147 len = WL1271_TX_ALIGN(skb->len); 158 len = WL1271_TX_ALIGN(skb->len);
148 159
149 /* perform a fixed address block write with the packet */ 160 /* perform a fixed address block write with the packet */
150 wl1271_spi_reg_write(wl, WL1271_SLV_MEM_DATA, skb->data, len, true); 161 wl1271_spi_write(wl, WL1271_SLV_MEM_DATA, skb->data, len, true);
151 162
152 /* write packet new counter into the write access register */ 163 /* write packet new counter into the write access register */
153 wl->tx_packets_count++; 164 wl->tx_packets_count++;
154 wl1271_reg_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count); 165 wl1271_spi_write32(wl, WL1271_HOST_WR_ACCESS, wl->tx_packets_count);
155 166
156 desc = (struct wl1271_tx_hw_descr *) skb->data; 167 desc = (struct wl1271_tx_hw_descr *) skb->data;
157 wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)", 168 wl1271_debug(DEBUG_TX, "tx id %u skb 0x%p payload %u (%u words)",
@@ -254,14 +265,13 @@ out:
254static void wl1271_tx_complete_packet(struct wl1271 *wl, 265static void wl1271_tx_complete_packet(struct wl1271 *wl,
255 struct wl1271_tx_hw_res_descr *result) 266 struct wl1271_tx_hw_res_descr *result)
256{ 267{
257
258 struct ieee80211_tx_info *info; 268 struct ieee80211_tx_info *info;
259 struct sk_buff *skb; 269 struct sk_buff *skb;
260 u32 header_len; 270 u16 seq;
261 int id = result->id; 271 int id = result->id;
262 272
263 /* check for id legality */ 273 /* check for id legality */
264 if (id >= TX_HW_RESULT_QUEUE_LEN || wl->tx_frames[id] == NULL) { 274 if (id >= ACX_TX_DESCRIPTORS || wl->tx_frames[id] == NULL) {
265 wl1271_warning("TX result illegal id: %d", id); 275 wl1271_warning("TX result illegal id: %d", id);
266 return; 276 return;
267 } 277 }
@@ -284,22 +294,32 @@ static void wl1271_tx_complete_packet(struct wl1271 *wl,
284 /* info->status.retry_count = result->ack_failures; */ 294 /* info->status.retry_count = result->ack_failures; */
285 wl->stats.retry_count += result->ack_failures; 295 wl->stats.retry_count += result->ack_failures;
286 296
287 /* get header len */ 297 /* update security sequence number */
298 seq = wl->tx_security_seq_16 +
299 (result->lsb_security_sequence_number -
300 wl->tx_security_last_seq);
301 wl->tx_security_last_seq = result->lsb_security_sequence_number;
302
303 if (seq < wl->tx_security_seq_16)
304 wl->tx_security_seq_32++;
305 wl->tx_security_seq_16 = seq;
306
307 /* remove private header from packet */
308 skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
309
310 /* remove TKIP header space if present */
288 if (info->control.hw_key && 311 if (info->control.hw_key &&
289 info->control.hw_key->alg == ALG_TKIP) 312 info->control.hw_key->alg == ALG_TKIP) {
290 header_len = WL1271_TKIP_IV_SPACE + 313 int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
291 sizeof(struct wl1271_tx_hw_descr); 314 memmove(skb->data + WL1271_TKIP_IV_SPACE, skb->data, hdrlen);
292 else 315 skb_pull(skb, WL1271_TKIP_IV_SPACE);
293 header_len = sizeof(struct wl1271_tx_hw_descr); 316 }
294 317
295 wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x" 318 wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
296 " status 0x%x", 319 " status 0x%x",
297 result->id, skb, result->ack_failures, 320 result->id, skb, result->ack_failures,
298 result->rate_class_index, result->status); 321 result->rate_class_index, result->status);
299 322
300 /* remove private header from packet */
301 skb_pull(skb, header_len);
302
303 /* return the packet to the stack */ 323 /* return the packet to the stack */
304 ieee80211_tx_status(wl->hw, skb); 324 ieee80211_tx_status(wl->hw, skb);
305 wl->tx_frames[result->id] = NULL; 325 wl->tx_frames[result->id] = NULL;
@@ -315,8 +335,8 @@ void wl1271_tx_complete(struct wl1271 *wl, u32 count)
315 wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count); 335 wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
316 336
317 /* read the tx results from the chipset */ 337 /* read the tx results from the chipset */
318 wl1271_spi_mem_read(wl, memmap->tx_result, 338 wl1271_spi_read(wl, le32_to_cpu(memmap->tx_result),
319 wl->tx_res_if, sizeof(*wl->tx_res_if)); 339 wl->tx_res_if, sizeof(*wl->tx_res_if), false);
320 340
321 /* verify that the result buffer is not getting overrun */ 341 /* verify that the result buffer is not getting overrun */
322 if (count > TX_HW_RESULT_QUEUE_LEN) { 342 if (count > TX_HW_RESULT_QUEUE_LEN) {
@@ -337,10 +357,10 @@ void wl1271_tx_complete(struct wl1271 *wl, u32 count)
337 } 357 }
338 358
339 /* write host counter to chipset (to ack) */ 359 /* write host counter to chipset (to ack) */
340 wl1271_mem_write32(wl, memmap->tx_result + 360 wl1271_spi_write32(wl, le32_to_cpu(memmap->tx_result) +
341 offsetof(struct wl1271_tx_hw_res_if, 361 offsetof(struct wl1271_tx_hw_res_if,
342 tx_result_host_counter), 362 tx_result_host_counter),
343 wl->tx_res_if->tx_result_fw_counter); 363 le32_to_cpu(wl->tx_res_if->tx_result_fw_counter));
344} 364}
345 365
346/* caller must hold wl->mutex */ 366/* caller must hold wl->mutex */
@@ -364,7 +384,7 @@ void wl1271_tx_flush(struct wl1271 *wl)
364 ieee80211_tx_status(wl->hw, skb); 384 ieee80211_tx_status(wl->hw, skb);
365 } 385 }
366 386
367 for (i = 0; i < FW_TX_CMPLT_BLOCK_SIZE; i++) 387 for (i = 0; i < ACX_TX_DESCRIPTORS; i++)
368 if (wl->tx_frames[i] != NULL) { 388 if (wl->tx_frames[i] != NULL) {
369 skb = wl->tx_frames[i]; 389 skb = wl->tx_frames[i];
370 info = IEEE80211_SKB_CB(skb); 390 info = IEEE80211_SKB_CB(skb);
diff --git a/drivers/net/wireless/wl12xx/wl1271_tx.h b/drivers/net/wireless/wl12xx/wl1271_tx.h
index 4a614067ddba..416396caf0a0 100644
--- a/drivers/net/wireless/wl12xx/wl1271_tx.h
+++ b/drivers/net/wireless/wl12xx/wl1271_tx.h
@@ -58,7 +58,7 @@
58 58
59struct wl1271_tx_hw_descr { 59struct wl1271_tx_hw_descr {
60 /* Length of packet in words, including descriptor+header+data */ 60 /* Length of packet in words, including descriptor+header+data */
61 u16 length; 61 __le16 length;
62 /* Number of extra memory blocks to allocate for this packet in 62 /* Number of extra memory blocks to allocate for this packet in
63 addition to the number of blocks derived from the packet length */ 63 addition to the number of blocks derived from the packet length */
64 u8 extra_mem_blocks; 64 u8 extra_mem_blocks;
@@ -67,12 +67,12 @@ struct wl1271_tx_hw_descr {
67 HW!! */ 67 HW!! */
68 u8 total_mem_blocks; 68 u8 total_mem_blocks;
69 /* Device time (in us) when the packet arrived to the driver */ 69 /* Device time (in us) when the packet arrived to the driver */
70 u32 start_time; 70 __le32 start_time;
71 /* Max delay in TUs until transmission. The last device time the 71 /* Max delay in TUs until transmission. The last device time the
72 packet can be transmitted is: startTime+(1024*LifeTime) */ 72 packet can be transmitted is: startTime+(1024*LifeTime) */
73 u16 life_time; 73 __le16 life_time;
74 /* Bitwise fields - see TX_ATTR... definitions above. */ 74 /* Bitwise fields - see TX_ATTR... definitions above. */
75 u16 tx_attr; 75 __le16 tx_attr;
76 /* Packet identifier used also in the Tx-Result. */ 76 /* Packet identifier used also in the Tx-Result. */
77 u8 id; 77 u8 id;
78 /* The packet TID value (as User-Priority) */ 78 /* The packet TID value (as User-Priority) */
@@ -100,12 +100,12 @@ struct wl1271_tx_hw_res_descr {
100 several possible reasons for failure. */ 100 several possible reasons for failure. */
101 u8 status; 101 u8 status;
102 /* Total air access duration including all retrys and overheads.*/ 102 /* Total air access duration including all retrys and overheads.*/
103 u16 medium_usage; 103 __le16 medium_usage;
104 /* The time passed from host xfer to Tx-complete.*/ 104 /* The time passed from host xfer to Tx-complete.*/
105 u32 fw_handling_time; 105 __le32 fw_handling_time;
106 /* Total media delay 106 /* Total media delay
107 (from 1st EDCA AIFS counter until TX Complete). */ 107 (from 1st EDCA AIFS counter until TX Complete). */
108 u32 medium_delay; 108 __le32 medium_delay;
109 /* LS-byte of last TKIP seq-num (saved per AC for recovery). */ 109 /* LS-byte of last TKIP seq-num (saved per AC for recovery). */
110 u8 lsb_security_sequence_number; 110 u8 lsb_security_sequence_number;
111 /* Retry count - number of transmissions without successful ACK.*/ 111 /* Retry count - number of transmissions without successful ACK.*/
@@ -118,8 +118,8 @@ struct wl1271_tx_hw_res_descr {
118} __attribute__ ((packed)); 118} __attribute__ ((packed));
119 119
120struct wl1271_tx_hw_res_if { 120struct wl1271_tx_hw_res_if {
121 u32 tx_result_fw_counter; 121 __le32 tx_result_fw_counter;
122 u32 tx_result_host_counter; 122 __le32 tx_result_host_counter;
123 struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN]; 123 struct wl1271_tx_hw_res_descr tx_results_queue[TX_HW_RESULT_QUEUE_LEN];
124} __attribute__ ((packed)); 124} __attribute__ ((packed));
125 125
diff --git a/drivers/net/wireless/wl12xx/wl12xx_80211.h b/drivers/net/wireless/wl12xx/wl12xx_80211.h
index 657c2dbcb7d3..055d7bc6f592 100644
--- a/drivers/net/wireless/wl12xx/wl12xx_80211.h
+++ b/drivers/net/wireless/wl12xx/wl12xx_80211.h
@@ -122,8 +122,8 @@ struct wl12xx_null_data_template {
122} __attribute__ ((packed)); 122} __attribute__ ((packed));
123 123
124struct wl12xx_ps_poll_template { 124struct wl12xx_ps_poll_template {
125 u16 fc; 125 __le16 fc;
126 u16 aid; 126 __le16 aid;
127 u8 bssid[ETH_ALEN]; 127 u8 bssid[ETH_ALEN];
128 u8 ta[ETH_ALEN]; 128 u8 ta[ETH_ALEN];
129} __attribute__ ((packed)); 129} __attribute__ ((packed));
diff --git a/drivers/net/wireless/zd1211rw/Kconfig b/drivers/net/wireless/zd1211rw/Kconfig
index 74b31eafe72d..5f809695f71a 100644
--- a/drivers/net/wireless/zd1211rw/Kconfig
+++ b/drivers/net/wireless/zd1211rw/Kconfig
@@ -1,6 +1,6 @@
1config ZD1211RW 1config ZD1211RW
2 tristate "ZyDAS ZD1211/ZD1211B USB-wireless support" 2 tristate "ZyDAS ZD1211/ZD1211B USB-wireless support"
3 depends on USB && MAC80211 && WLAN_80211 && EXPERIMENTAL 3 depends on USB && MAC80211 && EXPERIMENTAL
4 select FW_LOADER 4 select FW_LOADER
5 ---help--- 5 ---help---
6 This is an experimental driver for the ZyDAS ZD1211/ZD1211B wireless 6 This is an experimental driver for the ZyDAS ZD1211/ZD1211B wireless
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.c b/drivers/net/wireless/zd1211rw/zd_chip.c
index 4e79a9800134..dfa1b9bc22c8 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.c
+++ b/drivers/net/wireless/zd1211rw/zd_chip.c
@@ -755,7 +755,7 @@ static int hw_reset_phy(struct zd_chip *chip)
755static int zd1211_hw_init_hmac(struct zd_chip *chip) 755static int zd1211_hw_init_hmac(struct zd_chip *chip)
756{ 756{
757 static const struct zd_ioreq32 ioreqs[] = { 757 static const struct zd_ioreq32 ioreqs[] = {
758 { CR_ZD1211_RETRY_MAX, 0x2 }, 758 { CR_ZD1211_RETRY_MAX, ZD1211_RETRY_COUNT },
759 { CR_RX_THRESHOLD, 0x000c0640 }, 759 { CR_RX_THRESHOLD, 0x000c0640 },
760 }; 760 };
761 761
@@ -767,7 +767,7 @@ static int zd1211_hw_init_hmac(struct zd_chip *chip)
767static int zd1211b_hw_init_hmac(struct zd_chip *chip) 767static int zd1211b_hw_init_hmac(struct zd_chip *chip)
768{ 768{
769 static const struct zd_ioreq32 ioreqs[] = { 769 static const struct zd_ioreq32 ioreqs[] = {
770 { CR_ZD1211B_RETRY_MAX, 0x02020202 }, 770 { CR_ZD1211B_RETRY_MAX, ZD1211B_RETRY_COUNT },
771 { CR_ZD1211B_CWIN_MAX_MIN_AC0, 0x007f003f }, 771 { CR_ZD1211B_CWIN_MAX_MIN_AC0, 0x007f003f },
772 { CR_ZD1211B_CWIN_MAX_MIN_AC1, 0x007f003f }, 772 { CR_ZD1211B_CWIN_MAX_MIN_AC1, 0x007f003f },
773 { CR_ZD1211B_CWIN_MAX_MIN_AC2, 0x003f001f }, 773 { CR_ZD1211B_CWIN_MAX_MIN_AC2, 0x003f001f },
diff --git a/drivers/net/wireless/zd1211rw/zd_chip.h b/drivers/net/wireless/zd1211rw/zd_chip.h
index 678c139a840c..9fd8f3508d66 100644
--- a/drivers/net/wireless/zd1211rw/zd_chip.h
+++ b/drivers/net/wireless/zd1211rw/zd_chip.h
@@ -642,13 +642,29 @@ enum {
642#define CR_ZD1211B_TXOP CTL_REG(0x0b20) 642#define CR_ZD1211B_TXOP CTL_REG(0x0b20)
643#define CR_ZD1211B_RETRY_MAX CTL_REG(0x0b28) 643#define CR_ZD1211B_RETRY_MAX CTL_REG(0x0b28)
644 644
645/* Value for CR_ZD1211_RETRY_MAX & CR_ZD1211B_RETRY_MAX. Vendor driver uses 2,
646 * we use 0. The first rate is tried (count+2), then all next rates are tried
647 * twice, until 1 Mbits is tried. */
648#define ZD1211_RETRY_COUNT 0
649#define ZD1211B_RETRY_COUNT \
650 (ZD1211_RETRY_COUNT << 0)| \
651 (ZD1211_RETRY_COUNT << 8)| \
652 (ZD1211_RETRY_COUNT << 16)| \
653 (ZD1211_RETRY_COUNT << 24)
654
645/* Used to detect PLL lock */ 655/* Used to detect PLL lock */
646#define UW2453_INTR_REG ((zd_addr_t)0x85c1) 656#define UW2453_INTR_REG ((zd_addr_t)0x85c1)
647 657
648#define CWIN_SIZE 0x007f043f 658#define CWIN_SIZE 0x007f043f
649 659
650 660
651#define HWINT_ENABLED 0x004f0000 661#define HWINT_ENABLED \
662 (INT_TX_COMPLETE_EN| \
663 INT_RX_COMPLETE_EN| \
664 INT_RETRY_FAIL_EN| \
665 INT_WAKEUP_EN| \
666 INT_CFG_NEXT_BCN_EN)
667
652#define HWINT_DISABLED 0 668#define HWINT_DISABLED 0
653 669
654#define E2P_PWR_INT_GUARD 8 670#define E2P_PWR_INT_GUARD 8
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.c b/drivers/net/wireless/zd1211rw/zd_mac.c
index 6d666359a42f..8a243732c519 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.c
+++ b/drivers/net/wireless/zd1211rw/zd_mac.c
@@ -88,6 +88,34 @@ static const struct ieee80211_rate zd_rates[] = {
88 .flags = 0 }, 88 .flags = 0 },
89}; 89};
90 90
91/*
92 * Zydas retry rates table. Each line is listed in the same order as
93 * in zd_rates[] and contains all the rate used when a packet is sent
94 * starting with a given rates. Let's consider an example :
95 *
96 * "11 Mbits : 4, 3, 2, 1, 0" means :
97 * - packet is sent using 4 different rates
98 * - 1st rate is index 3 (ie 11 Mbits)
99 * - 2nd rate is index 2 (ie 5.5 Mbits)
100 * - 3rd rate is index 1 (ie 2 Mbits)
101 * - 4th rate is index 0 (ie 1 Mbits)
102 */
103
104static const struct tx_retry_rate zd_retry_rates[] = {
105 { /* 1 Mbits */ 1, { 0 }},
106 { /* 2 Mbits */ 2, { 1, 0 }},
107 { /* 5.5 Mbits */ 3, { 2, 1, 0 }},
108 { /* 11 Mbits */ 4, { 3, 2, 1, 0 }},
109 { /* 6 Mbits */ 5, { 4, 3, 2, 1, 0 }},
110 { /* 9 Mbits */ 6, { 5, 4, 3, 2, 1, 0}},
111 { /* 12 Mbits */ 5, { 6, 3, 2, 1, 0 }},
112 { /* 18 Mbits */ 6, { 7, 6, 3, 2, 1, 0 }},
113 { /* 24 Mbits */ 6, { 8, 6, 3, 2, 1, 0 }},
114 { /* 36 Mbits */ 7, { 9, 8, 6, 3, 2, 1, 0 }},
115 { /* 48 Mbits */ 8, {10, 9, 8, 6, 3, 2, 1, 0 }},
116 { /* 54 Mbits */ 9, {11, 10, 9, 8, 6, 3, 2, 1, 0 }}
117};
118
91static const struct ieee80211_channel zd_channels[] = { 119static const struct ieee80211_channel zd_channels[] = {
92 { .center_freq = 2412, .hw_value = 1 }, 120 { .center_freq = 2412, .hw_value = 1 },
93 { .center_freq = 2417, .hw_value = 2 }, 121 { .center_freq = 2417, .hw_value = 2 },
@@ -282,7 +310,7 @@ static void zd_op_stop(struct ieee80211_hw *hw)
282} 310}
283 311
284/** 312/**
285 * tx_status - reports tx status of a packet if required 313 * zd_mac_tx_status - reports tx status of a packet if required
286 * @hw - a &struct ieee80211_hw pointer 314 * @hw - a &struct ieee80211_hw pointer
287 * @skb - a sk-buffer 315 * @skb - a sk-buffer
288 * @flags: extra flags to set in the TX status info 316 * @flags: extra flags to set in the TX status info
@@ -295,15 +323,49 @@ static void zd_op_stop(struct ieee80211_hw *hw)
295 * 323 *
296 * If no status information has been requested, the skb is freed. 324 * If no status information has been requested, the skb is freed.
297 */ 325 */
298static void tx_status(struct ieee80211_hw *hw, struct sk_buff *skb, 326static void zd_mac_tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
299 int ackssi, bool success) 327 int ackssi, struct tx_status *tx_status)
300{ 328{
301 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 329 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
330 int i;
331 int success = 1, retry = 1;
332 int first_idx;
333 const struct tx_retry_rate *retries;
302 334
303 ieee80211_tx_info_clear_status(info); 335 ieee80211_tx_info_clear_status(info);
304 336
305 if (success) 337 if (tx_status) {
338 success = !tx_status->failure;
339 retry = tx_status->retry + success;
340 }
341
342 if (success) {
343 /* success */
306 info->flags |= IEEE80211_TX_STAT_ACK; 344 info->flags |= IEEE80211_TX_STAT_ACK;
345 } else {
346 /* failure */
347 info->flags &= ~IEEE80211_TX_STAT_ACK;
348 }
349
350 first_idx = info->status.rates[0].idx;
351 ZD_ASSERT(0<=first_idx && first_idx<ARRAY_SIZE(zd_retry_rates));
352 retries = &zd_retry_rates[first_idx];
353 ZD_ASSERT(0<=retry && retry<=retries->count);
354
355 info->status.rates[0].idx = retries->rate[0];
356 info->status.rates[0].count = 1; // (retry > 1 ? 2 : 1);
357
358 for (i=1; i<IEEE80211_TX_MAX_RATES-1 && i<retry; i++) {
359 info->status.rates[i].idx = retries->rate[i];
360 info->status.rates[i].count = 1; // ((i==retry-1) && success ? 1:2);
361 }
362 for (; i<IEEE80211_TX_MAX_RATES && i<retry; i++) {
363 info->status.rates[i].idx = retries->rate[retry-1];
364 info->status.rates[i].count = 1; // (success ? 1:2);
365 }
366 if (i<IEEE80211_TX_MAX_RATES)
367 info->status.rates[i].idx = -1; /* terminate */
368
307 info->status.ack_signal = ackssi; 369 info->status.ack_signal = ackssi;
308 ieee80211_tx_status_irqsafe(hw, skb); 370 ieee80211_tx_status_irqsafe(hw, skb);
309} 371}
@@ -316,16 +378,79 @@ static void tx_status(struct ieee80211_hw *hw, struct sk_buff *skb,
316 * transferred. The first frame from the tx queue, will be selected and 378 * transferred. The first frame from the tx queue, will be selected and
317 * reported as error to the upper layers. 379 * reported as error to the upper layers.
318 */ 380 */
319void zd_mac_tx_failed(struct ieee80211_hw *hw) 381void zd_mac_tx_failed(struct urb *urb)
320{ 382{
321 struct sk_buff_head *q = &zd_hw_mac(hw)->ack_wait_queue; 383 struct ieee80211_hw * hw = zd_usb_to_hw(urb->context);
384 struct zd_mac *mac = zd_hw_mac(hw);
385 struct sk_buff_head *q = &mac->ack_wait_queue;
322 struct sk_buff *skb; 386 struct sk_buff *skb;
387 struct tx_status *tx_status = (struct tx_status *)urb->transfer_buffer;
388 unsigned long flags;
389 int success = !tx_status->failure;
390 int retry = tx_status->retry + success;
391 int found = 0;
392 int i, position = 0;
323 393
324 skb = skb_dequeue(q); 394 q = &mac->ack_wait_queue;
325 if (skb == NULL) 395 spin_lock_irqsave(&q->lock, flags);
326 return; 396
397 skb_queue_walk(q, skb) {
398 struct ieee80211_hdr *tx_hdr;
399 struct ieee80211_tx_info *info;
400 int first_idx, final_idx;
401 const struct tx_retry_rate *retries;
402 u8 final_rate;
403
404 position ++;
405
406 /* if the hardware reports a failure and we had a 802.11 ACK
407 * pending, then we skip the first skb when searching for a
408 * matching frame */
409 if (tx_status->failure && mac->ack_pending &&
410 skb_queue_is_first(q, skb)) {
411 continue;
412 }
413
414 tx_hdr = (struct ieee80211_hdr *)skb->data;
415
416 /* we skip all frames not matching the reported destination */
417 if (unlikely(memcmp(tx_hdr->addr1, tx_status->mac, ETH_ALEN))) {
418 continue;
419 }
420
421 /* we skip all frames not matching the reported final rate */
327 422
328 tx_status(hw, skb, 0, 0); 423 info = IEEE80211_SKB_CB(skb);
424 first_idx = info->status.rates[0].idx;
425 ZD_ASSERT(0<=first_idx && first_idx<ARRAY_SIZE(zd_retry_rates));
426 retries = &zd_retry_rates[first_idx];
427 if (retry < 0 || retry > retries->count) {
428 continue;
429 }
430
431 ZD_ASSERT(0<=retry && retry<=retries->count);
432 final_idx = retries->rate[retry-1];
433 final_rate = zd_rates[final_idx].hw_value;
434
435 if (final_rate != tx_status->rate) {
436 continue;
437 }
438
439 found = 1;
440 break;
441 }
442
443 if (found) {
444 for (i=1; i<=position; i++) {
445 skb = __skb_dequeue(q);
446 zd_mac_tx_status(hw, skb,
447 mac->ack_pending ? mac->ack_signal : 0,
448 i == position ? tx_status : NULL);
449 mac->ack_pending = 0;
450 }
451 }
452
453 spin_unlock_irqrestore(&q->lock, flags);
329} 454}
330 455
331/** 456/**
@@ -342,18 +467,27 @@ void zd_mac_tx_to_dev(struct sk_buff *skb, int error)
342{ 467{
343 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 468 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
344 struct ieee80211_hw *hw = info->rate_driver_data[0]; 469 struct ieee80211_hw *hw = info->rate_driver_data[0];
470 struct zd_mac *mac = zd_hw_mac(hw);
471
472 ieee80211_tx_info_clear_status(info);
345 473
346 skb_pull(skb, sizeof(struct zd_ctrlset)); 474 skb_pull(skb, sizeof(struct zd_ctrlset));
347 if (unlikely(error || 475 if (unlikely(error ||
348 (info->flags & IEEE80211_TX_CTL_NO_ACK))) { 476 (info->flags & IEEE80211_TX_CTL_NO_ACK))) {
349 tx_status(hw, skb, 0, !error); 477 /*
478 * FIXME : do we need to fill in anything ?
479 */
480 ieee80211_tx_status_irqsafe(hw, skb);
350 } else { 481 } else {
351 struct sk_buff_head *q = 482 struct sk_buff_head *q = &mac->ack_wait_queue;
352 &zd_hw_mac(hw)->ack_wait_queue;
353 483
354 skb_queue_tail(q, skb); 484 skb_queue_tail(q, skb);
355 while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS) 485 while (skb_queue_len(q) > ZD_MAC_MAX_ACK_WAITERS) {
356 zd_mac_tx_failed(hw); 486 zd_mac_tx_status(hw, skb_dequeue(q),
487 mac->ack_pending ? mac->ack_signal : 0,
488 NULL);
489 mac->ack_pending = 0;
490 }
357 } 491 }
358} 492}
359 493
@@ -606,27 +740,47 @@ fail:
606static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr, 740static int filter_ack(struct ieee80211_hw *hw, struct ieee80211_hdr *rx_hdr,
607 struct ieee80211_rx_status *stats) 741 struct ieee80211_rx_status *stats)
608{ 742{
743 struct zd_mac *mac = zd_hw_mac(hw);
609 struct sk_buff *skb; 744 struct sk_buff *skb;
610 struct sk_buff_head *q; 745 struct sk_buff_head *q;
611 unsigned long flags; 746 unsigned long flags;
747 int found = 0;
748 int i, position = 0;
612 749
613 if (!ieee80211_is_ack(rx_hdr->frame_control)) 750 if (!ieee80211_is_ack(rx_hdr->frame_control))
614 return 0; 751 return 0;
615 752
616 q = &zd_hw_mac(hw)->ack_wait_queue; 753 q = &mac->ack_wait_queue;
617 spin_lock_irqsave(&q->lock, flags); 754 spin_lock_irqsave(&q->lock, flags);
618 skb_queue_walk(q, skb) { 755 skb_queue_walk(q, skb) {
619 struct ieee80211_hdr *tx_hdr; 756 struct ieee80211_hdr *tx_hdr;
620 757
758 position ++;
759
760 if (mac->ack_pending && skb_queue_is_first(q, skb))
761 continue;
762
621 tx_hdr = (struct ieee80211_hdr *)skb->data; 763 tx_hdr = (struct ieee80211_hdr *)skb->data;
622 if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN))) 764 if (likely(!memcmp(tx_hdr->addr2, rx_hdr->addr1, ETH_ALEN)))
623 { 765 {
624 __skb_unlink(skb, q); 766 found = 1;
625 tx_status(hw, skb, stats->signal, 1); 767 break;
626 goto out;
627 } 768 }
628 } 769 }
629out: 770
771 if (found) {
772 for (i=1; i<position; i++) {
773 skb = __skb_dequeue(q);
774 zd_mac_tx_status(hw, skb,
775 mac->ack_pending ? mac->ack_signal : 0,
776 NULL);
777 mac->ack_pending = 0;
778 }
779
780 mac->ack_pending = 1;
781 mac->ack_signal = stats->signal;
782 }
783
630 spin_unlock_irqrestore(&q->lock, flags); 784 spin_unlock_irqrestore(&q->lock, flags);
631 return 1; 785 return 1;
632} 786}
@@ -709,6 +863,7 @@ int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length)
709 skb_reserve(skb, 2); 863 skb_reserve(skb, 2);
710 } 864 }
711 865
866 /* FIXME : could we avoid this big memcpy ? */
712 memcpy(skb_put(skb, length), buffer, length); 867 memcpy(skb_put(skb, length), buffer, length);
713 868
714 memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats)); 869 memcpy(IEEE80211_SKB_RXCB(skb), &stats, sizeof(stats));
@@ -999,7 +1154,14 @@ struct ieee80211_hw *zd_mac_alloc_hw(struct usb_interface *intf)
999 hw->queues = 1; 1154 hw->queues = 1;
1000 hw->extra_tx_headroom = sizeof(struct zd_ctrlset); 1155 hw->extra_tx_headroom = sizeof(struct zd_ctrlset);
1001 1156
1157 /*
1158 * Tell mac80211 that we support multi rate retries
1159 */
1160 hw->max_rates = IEEE80211_TX_MAX_RATES;
1161 hw->max_rate_tries = 18; /* 9 rates * 2 retries/rate */
1162
1002 skb_queue_head_init(&mac->ack_wait_queue); 1163 skb_queue_head_init(&mac->ack_wait_queue);
1164 mac->ack_pending = 0;
1003 1165
1004 zd_chip_init(&mac->chip, hw, intf); 1166 zd_chip_init(&mac->chip, hw, intf);
1005 housekeeping_init(mac); 1167 housekeeping_init(mac);
diff --git a/drivers/net/wireless/zd1211rw/zd_mac.h b/drivers/net/wireless/zd1211rw/zd_mac.h
index 7c2759118d13..630c298a730e 100644
--- a/drivers/net/wireless/zd1211rw/zd_mac.h
+++ b/drivers/net/wireless/zd1211rw/zd_mac.h
@@ -140,6 +140,21 @@ struct rx_status {
140#define ZD_RX_CRC16_ERROR 0x40 140#define ZD_RX_CRC16_ERROR 0x40
141#define ZD_RX_ERROR 0x80 141#define ZD_RX_ERROR 0x80
142 142
143struct tx_retry_rate {
144 int count; /* number of valid element in rate[] array */
145 int rate[10]; /* retry rates, described by an index in zd_rates[] */
146};
147
148struct tx_status {
149 u8 type; /* must always be 0x01 : USB_INT_TYPE */
150 u8 id; /* must always be 0xa0 : USB_INT_ID_RETRY_FAILED */
151 u8 rate;
152 u8 pad;
153 u8 mac[ETH_ALEN];
154 u8 retry;
155 u8 failure;
156} __attribute__((packed));
157
143enum mac_flags { 158enum mac_flags {
144 MAC_FIXED_CHANNEL = 0x01, 159 MAC_FIXED_CHANNEL = 0x01,
145}; 160};
@@ -150,7 +165,7 @@ struct housekeeping {
150 165
151#define ZD_MAC_STATS_BUFFER_SIZE 16 166#define ZD_MAC_STATS_BUFFER_SIZE 16
152 167
153#define ZD_MAC_MAX_ACK_WAITERS 10 168#define ZD_MAC_MAX_ACK_WAITERS 50
154 169
155struct zd_mac { 170struct zd_mac {
156 struct zd_chip chip; 171 struct zd_chip chip;
@@ -184,6 +199,12 @@ struct zd_mac {
184 199
185 /* whether to pass control frames to stack */ 200 /* whether to pass control frames to stack */
186 unsigned int pass_ctrl:1; 201 unsigned int pass_ctrl:1;
202
203 /* whether we have received a 802.11 ACK that is pending */
204 unsigned int ack_pending:1;
205
206 /* signal strength of the last 802.11 ACK received */
207 int ack_signal;
187}; 208};
188 209
189#define ZD_REGDOMAIN_FCC 0x10 210#define ZD_REGDOMAIN_FCC 0x10
@@ -279,7 +300,7 @@ int zd_mac_preinit_hw(struct ieee80211_hw *hw);
279int zd_mac_init_hw(struct ieee80211_hw *hw); 300int zd_mac_init_hw(struct ieee80211_hw *hw);
280 301
281int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length); 302int zd_mac_rx(struct ieee80211_hw *hw, const u8 *buffer, unsigned int length);
282void zd_mac_tx_failed(struct ieee80211_hw *hw); 303void zd_mac_tx_failed(struct urb *urb);
283void zd_mac_tx_to_dev(struct sk_buff *skb, int error); 304void zd_mac_tx_to_dev(struct sk_buff *skb, int error);
284 305
285#ifdef DEBUG 306#ifdef DEBUG
diff --git a/drivers/net/wireless/zd1211rw/zd_usb.c b/drivers/net/wireless/zd1211rw/zd_usb.c
index 23a6a6d4863b..d46f20a57b7d 100644
--- a/drivers/net/wireless/zd1211rw/zd_usb.c
+++ b/drivers/net/wireless/zd1211rw/zd_usb.c
@@ -419,7 +419,7 @@ static void int_urb_complete(struct urb *urb)
419 handle_regs_int(urb); 419 handle_regs_int(urb);
420 break; 420 break;
421 case USB_INT_ID_RETRY_FAILED: 421 case USB_INT_ID_RETRY_FAILED:
422 zd_mac_tx_failed(zd_usb_to_hw(urb->context)); 422 zd_mac_tx_failed(urb);
423 break; 423 break;
424 default: 424 default:
425 dev_dbg_f(urb_dev(urb), "error: urb %p unknown id %x\n", urb, 425 dev_dbg_f(urb_dev(urb), "error: urb %p unknown id %x\n", urb,
@@ -553,6 +553,8 @@ static void handle_rx_packet(struct zd_usb *usb, const u8 *buffer,
553 553
554 if (length < sizeof(struct rx_length_info)) { 554 if (length < sizeof(struct rx_length_info)) {
555 /* It's not a complete packet anyhow. */ 555 /* It's not a complete packet anyhow. */
556 printk("%s: invalid, small RX packet : %d\n",
557 __func__, length);
556 return; 558 return;
557 } 559 }
558 length_info = (struct rx_length_info *) 560 length_info = (struct rx_length_info *)
diff --git a/drivers/parisc/led.c b/drivers/parisc/led.c
index 9581d3619450..79caf1ca4a29 100644
--- a/drivers/parisc/led.c
+++ b/drivers/parisc/led.c
@@ -352,11 +352,9 @@ static __inline__ int led_get_net_activity(void)
352 352
353 rx_total = tx_total = 0; 353 rx_total = tx_total = 0;
354 354
355 /* we are running as a workqueue task, so locking dev_base 355 /* we are running as a workqueue task, so we can use an RCU lookup */
356 * for reading should be OK */
357 read_lock(&dev_base_lock);
358 rcu_read_lock(); 356 rcu_read_lock();
359 for_each_netdev(&init_net, dev) { 357 for_each_netdev_rcu(&init_net, dev) {
360 const struct net_device_stats *stats; 358 const struct net_device_stats *stats;
361 struct in_device *in_dev = __in_dev_get_rcu(dev); 359 struct in_device *in_dev = __in_dev_get_rcu(dev);
362 if (!in_dev || !in_dev->ifa_list) 360 if (!in_dev || !in_dev->ifa_list)
@@ -368,7 +366,6 @@ static __inline__ int led_get_net_activity(void)
368 tx_total += stats->tx_packets; 366 tx_total += stats->tx_packets;
369 } 367 }
370 rcu_read_unlock(); 368 rcu_read_unlock();
371 read_unlock(&dev_base_lock);
372 369
373 retval = 0; 370 retval = 0;
374 371
diff --git a/drivers/pci/hotplug/acpiphp_ibm.c b/drivers/pci/hotplug/acpiphp_ibm.c
index a9d926b7d805..e7be66dbac21 100644
--- a/drivers/pci/hotplug/acpiphp_ibm.c
+++ b/drivers/pci/hotplug/acpiphp_ibm.c
@@ -406,7 +406,6 @@ static acpi_status __init ibm_find_acpi_device(acpi_handle handle,
406 __func__, status); 406 __func__, status);
407 return retval; 407 return retval;
408 } 408 }
409 info->hardware_id.string[sizeof(info->hardware_id.length) - 1] = '\0';
410 409
411 if (info->current_status && (info->valid & ACPI_VALID_HID) && 410 if (info->current_status && (info->valid & ACPI_VALID_HID) &&
412 (!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) || 411 (!strcmp(info->hardware_id.string, IBM_HARDWARE_ID1) ||
diff --git a/drivers/pcmcia/Kconfig b/drivers/pcmcia/Kconfig
index fbf965b31c14..17f38a781d47 100644
--- a/drivers/pcmcia/Kconfig
+++ b/drivers/pcmcia/Kconfig
@@ -192,6 +192,10 @@ config PCMCIA_AU1X00
192 tristate "Au1x00 pcmcia support" 192 tristate "Au1x00 pcmcia support"
193 depends on SOC_AU1X00 && PCMCIA 193 depends on SOC_AU1X00 && PCMCIA
194 194
195config PCMCIA_BCM63XX
196 tristate "bcm63xx pcmcia support"
197 depends on BCM63XX && PCMCIA
198
195config PCMCIA_SA1100 199config PCMCIA_SA1100
196 tristate "SA1100 support" 200 tristate "SA1100 support"
197 depends on ARM && ARCH_SA1100 && PCMCIA 201 depends on ARM && ARCH_SA1100 && PCMCIA
diff --git a/drivers/pcmcia/Makefile b/drivers/pcmcia/Makefile
index 047394d98ac2..a03a38acd77d 100644
--- a/drivers/pcmcia/Makefile
+++ b/drivers/pcmcia/Makefile
@@ -27,6 +27,7 @@ obj-$(CONFIG_PCMCIA_SA1111) += sa11xx_core.o sa1111_cs.o
27obj-$(CONFIG_M32R_PCC) += m32r_pcc.o 27obj-$(CONFIG_M32R_PCC) += m32r_pcc.o
28obj-$(CONFIG_M32R_CFC) += m32r_cfc.o 28obj-$(CONFIG_M32R_CFC) += m32r_cfc.o
29obj-$(CONFIG_PCMCIA_AU1X00) += au1x00_ss.o 29obj-$(CONFIG_PCMCIA_AU1X00) += au1x00_ss.o
30obj-$(CONFIG_PCMCIA_BCM63XX) += bcm63xx_pcmcia.o
30obj-$(CONFIG_PCMCIA_VRC4171) += vrc4171_card.o 31obj-$(CONFIG_PCMCIA_VRC4171) += vrc4171_card.o
31obj-$(CONFIG_PCMCIA_VRC4173) += vrc4173_cardu.o 32obj-$(CONFIG_PCMCIA_VRC4173) += vrc4173_cardu.o
32obj-$(CONFIG_OMAP_CF) += omap_cf.o 33obj-$(CONFIG_OMAP_CF) += omap_cf.o
@@ -71,6 +72,7 @@ pxa2xx-obj-$(CONFIG_MACH_ARMCORE) += pxa2xx_cm_x2xx_cs.o
71pxa2xx-obj-$(CONFIG_ARCH_VIPER) += pxa2xx_viper.o 72pxa2xx-obj-$(CONFIG_ARCH_VIPER) += pxa2xx_viper.o
72pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps4.o 73pxa2xx-obj-$(CONFIG_TRIZEPS_PCMCIA) += pxa2xx_trizeps4.o
73pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o 74pxa2xx-obj-$(CONFIG_MACH_PALMTX) += pxa2xx_palmtx.o
75pxa2xx-obj-$(CONFIG_MACH_PALMTC) += pxa2xx_palmtc.o
74pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o 76pxa2xx-obj-$(CONFIG_MACH_PALMLD) += pxa2xx_palmld.o
75pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o 77pxa2xx-obj-$(CONFIG_MACH_E740) += pxa2xx_e740.o
76pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o 78pxa2xx-obj-$(CONFIG_MACH_STARGATE2) += pxa2xx_stargate2.o
diff --git a/drivers/pcmcia/at91_cf.c b/drivers/pcmcia/at91_cf.c
index 9e1140f085fd..e1dccedc5960 100644
--- a/drivers/pcmcia/at91_cf.c
+++ b/drivers/pcmcia/at91_cf.c
@@ -363,7 +363,7 @@ static int at91_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
363 struct at91_cf_socket *cf = platform_get_drvdata(pdev); 363 struct at91_cf_socket *cf = platform_get_drvdata(pdev);
364 struct at91_cf_data *board = cf->board; 364 struct at91_cf_data *board = cf->board;
365 365
366 pcmcia_socket_dev_suspend(&pdev->dev, mesg); 366 pcmcia_socket_dev_suspend(&pdev->dev);
367 if (device_may_wakeup(&pdev->dev)) { 367 if (device_may_wakeup(&pdev->dev)) {
368 enable_irq_wake(board->det_pin); 368 enable_irq_wake(board->det_pin);
369 if (board->irq_pin) 369 if (board->irq_pin)
diff --git a/drivers/pcmcia/au1000_generic.c b/drivers/pcmcia/au1000_generic.c
index 90013341cd5f..02088704ac2c 100644
--- a/drivers/pcmcia/au1000_generic.c
+++ b/drivers/pcmcia/au1000_generic.c
@@ -515,7 +515,7 @@ static int au1x00_drv_pcmcia_probe(struct platform_device *dev)
515static int au1x00_drv_pcmcia_suspend(struct platform_device *dev, 515static int au1x00_drv_pcmcia_suspend(struct platform_device *dev,
516 pm_message_t state) 516 pm_message_t state)
517{ 517{
518 return pcmcia_socket_dev_suspend(&dev->dev, state); 518 return pcmcia_socket_dev_suspend(&dev->dev);
519} 519}
520 520
521static int au1x00_drv_pcmcia_resume(struct platform_device *dev) 521static int au1x00_drv_pcmcia_resume(struct platform_device *dev)
diff --git a/drivers/pcmcia/bcm63xx_pcmcia.c b/drivers/pcmcia/bcm63xx_pcmcia.c
new file mode 100644
index 000000000000..bc88a3b19bb3
--- /dev/null
+++ b/drivers/pcmcia/bcm63xx_pcmcia.c
@@ -0,0 +1,536 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
7 */
8
9#include <linux/kernel.h>
10#include <linux/module.h>
11#include <linux/ioport.h>
12#include <linux/timer.h>
13#include <linux/platform_device.h>
14#include <linux/delay.h>
15#include <linux/pci.h>
16#include <linux/gpio.h>
17
18#include <bcm63xx_regs.h>
19#include <bcm63xx_io.h>
20#include "bcm63xx_pcmcia.h"
21
22#define PFX "bcm63xx_pcmcia: "
23
24#ifdef CONFIG_CARDBUS
25/* if cardbus is used, platform device needs reference to actual pci
26 * device */
27static struct pci_dev *bcm63xx_cb_dev;
28#endif
29
30/*
31 * read/write helper for pcmcia regs
32 */
33static inline u32 pcmcia_readl(struct bcm63xx_pcmcia_socket *skt, u32 off)
34{
35 return bcm_readl(skt->base + off);
36}
37
38static inline void pcmcia_writel(struct bcm63xx_pcmcia_socket *skt,
39 u32 val, u32 off)
40{
41 bcm_writel(val, skt->base + off);
42}
43
44/*
45 * This callback should (re-)initialise the socket, turn on status
46 * interrupts and PCMCIA bus, and wait for power to stabilise so that
47 * the card status signals report correctly.
48 *
49 * Hardware cannot do that.
50 */
51static int bcm63xx_pcmcia_sock_init(struct pcmcia_socket *sock)
52{
53 return 0;
54}
55
56/*
57 * This callback should remove power on the socket, disable IRQs from
58 * the card, turn off status interrupts, and disable the PCMCIA bus.
59 *
60 * Hardware cannot do that.
61 */
62static int bcm63xx_pcmcia_suspend(struct pcmcia_socket *sock)
63{
64 return 0;
65}
66
67/*
68 * Implements the set_socket() operation for the in-kernel PCMCIA
69 * service (formerly SS_SetSocket in Card Services). We more or
70 * less punt all of this work and let the kernel handle the details
71 * of power configuration, reset, &c. We also record the value of
72 * `state' in order to regurgitate it to the PCMCIA core later.
73 */
74static int bcm63xx_pcmcia_set_socket(struct pcmcia_socket *sock,
75 socket_state_t *state)
76{
77 struct bcm63xx_pcmcia_socket *skt;
78 unsigned long flags;
79 u32 val;
80
81 skt = sock->driver_data;
82
83 spin_lock_irqsave(&skt->lock, flags);
84
85 /* note: hardware cannot control socket power, so we will
86 * always report SS_POWERON */
87
88 /* apply socket reset */
89 val = pcmcia_readl(skt, PCMCIA_C1_REG);
90 if (state->flags & SS_RESET)
91 val |= PCMCIA_C1_RESET_MASK;
92 else
93 val &= ~PCMCIA_C1_RESET_MASK;
94
95 /* reverse reset logic for cardbus card */
96 if (skt->card_detected && (skt->card_type & CARD_CARDBUS))
97 val ^= PCMCIA_C1_RESET_MASK;
98
99 pcmcia_writel(skt, val, PCMCIA_C1_REG);
100
101 /* keep requested state for event reporting */
102 skt->requested_state = *state;
103
104 spin_unlock_irqrestore(&skt->lock, flags);
105
106 return 0;
107}
108
109/*
110 * identity cardtype from VS[12] input, CD[12] input while only VS2 is
111 * floating, and CD[12] input while only VS1 is floating
112 */
113enum {
114 IN_VS1 = (1 << 0),
115 IN_VS2 = (1 << 1),
116 IN_CD1_VS2H = (1 << 2),
117 IN_CD2_VS2H = (1 << 3),
118 IN_CD1_VS1H = (1 << 4),
119 IN_CD2_VS1H = (1 << 5),
120};
121
122static const u8 vscd_to_cardtype[] = {
123
124 /* VS1 float, VS2 float */
125 [IN_VS1 | IN_VS2] = (CARD_PCCARD | CARD_5V),
126
127 /* VS1 grounded, VS2 float */
128 [IN_VS2] = (CARD_PCCARD | CARD_5V | CARD_3V),
129
130 /* VS1 grounded, VS2 grounded */
131 [0] = (CARD_PCCARD | CARD_5V | CARD_3V | CARD_XV),
132
133 /* VS1 tied to CD1, VS2 float */
134 [IN_VS1 | IN_VS2 | IN_CD1_VS1H] = (CARD_CARDBUS | CARD_3V),
135
136 /* VS1 grounded, VS2 tied to CD2 */
137 [IN_VS2 | IN_CD2_VS2H] = (CARD_CARDBUS | CARD_3V | CARD_XV),
138
139 /* VS1 tied to CD2, VS2 grounded */
140 [IN_VS1 | IN_CD2_VS1H] = (CARD_CARDBUS | CARD_3V | CARD_XV | CARD_YV),
141
142 /* VS1 float, VS2 grounded */
143 [IN_VS1] = (CARD_PCCARD | CARD_XV),
144
145 /* VS1 float, VS2 tied to CD2 */
146 [IN_VS1 | IN_VS2 | IN_CD2_VS2H] = (CARD_CARDBUS | CARD_3V),
147
148 /* VS1 float, VS2 tied to CD1 */
149 [IN_VS1 | IN_VS2 | IN_CD1_VS2H] = (CARD_CARDBUS | CARD_XV | CARD_YV),
150
151 /* VS1 tied to CD2, VS2 float */
152 [IN_VS1 | IN_VS2 | IN_CD2_VS1H] = (CARD_CARDBUS | CARD_YV),
153
154 /* VS2 grounded, VS1 is tied to CD1, CD2 is grounded */
155 [IN_VS1 | IN_CD1_VS1H] = 0, /* ignore cardbay */
156};
157
158/*
159 * poll hardware to check card insertion status
160 */
161static unsigned int __get_socket_status(struct bcm63xx_pcmcia_socket *skt)
162{
163 unsigned int stat;
164 u32 val;
165
166 stat = 0;
167
168 /* check CD for card presence */
169 val = pcmcia_readl(skt, PCMCIA_C1_REG);
170
171 if (!(val & PCMCIA_C1_CD1_MASK) && !(val & PCMCIA_C1_CD2_MASK))
172 stat |= SS_DETECT;
173
174 /* if new insertion, detect cardtype */
175 if ((stat & SS_DETECT) && !skt->card_detected) {
176 unsigned int stat = 0;
177
178 /* float VS1, float VS2 */
179 val |= PCMCIA_C1_VS1OE_MASK;
180 val |= PCMCIA_C1_VS2OE_MASK;
181 pcmcia_writel(skt, val, PCMCIA_C1_REG);
182
183 /* wait for output to stabilize and read VS[12] */
184 udelay(10);
185 val = pcmcia_readl(skt, PCMCIA_C1_REG);
186 stat |= (val & PCMCIA_C1_VS1_MASK) ? IN_VS1 : 0;
187 stat |= (val & PCMCIA_C1_VS2_MASK) ? IN_VS2 : 0;
188
189 /* drive VS1 low, float VS2 */
190 val &= ~PCMCIA_C1_VS1OE_MASK;
191 val |= PCMCIA_C1_VS2OE_MASK;
192 pcmcia_writel(skt, val, PCMCIA_C1_REG);
193
194 /* wait for output to stabilize and read CD[12] */
195 udelay(10);
196 val = pcmcia_readl(skt, PCMCIA_C1_REG);
197 stat |= (val & PCMCIA_C1_CD1_MASK) ? IN_CD1_VS2H : 0;
198 stat |= (val & PCMCIA_C1_CD2_MASK) ? IN_CD2_VS2H : 0;
199
200 /* float VS1, drive VS2 low */
201 val |= PCMCIA_C1_VS1OE_MASK;
202 val &= ~PCMCIA_C1_VS2OE_MASK;
203 pcmcia_writel(skt, val, PCMCIA_C1_REG);
204
205 /* wait for output to stabilize and read CD[12] */
206 udelay(10);
207 val = pcmcia_readl(skt, PCMCIA_C1_REG);
208 stat |= (val & PCMCIA_C1_CD1_MASK) ? IN_CD1_VS1H : 0;
209 stat |= (val & PCMCIA_C1_CD2_MASK) ? IN_CD2_VS1H : 0;
210
211 /* guess cardtype from all this */
212 skt->card_type = vscd_to_cardtype[stat];
213 if (!skt->card_type)
214 dev_err(&skt->socket.dev, "unsupported card type\n");
215
216 /* drive both VS pin to 0 again */
217 val &= ~(PCMCIA_C1_VS1OE_MASK | PCMCIA_C1_VS2OE_MASK);
218
219 /* enable correct logic */
220 val &= ~(PCMCIA_C1_EN_PCMCIA_MASK | PCMCIA_C1_EN_CARDBUS_MASK);
221 if (skt->card_type & CARD_PCCARD)
222 val |= PCMCIA_C1_EN_PCMCIA_MASK;
223 else
224 val |= PCMCIA_C1_EN_CARDBUS_MASK;
225
226 pcmcia_writel(skt, val, PCMCIA_C1_REG);
227 }
228 skt->card_detected = (stat & SS_DETECT) ? 1 : 0;
229
230 /* report card type/voltage */
231 if (skt->card_type & CARD_CARDBUS)
232 stat |= SS_CARDBUS;
233 if (skt->card_type & CARD_3V)
234 stat |= SS_3VCARD;
235 if (skt->card_type & CARD_XV)
236 stat |= SS_XVCARD;
237 stat |= SS_POWERON;
238
239 if (gpio_get_value(skt->pd->ready_gpio))
240 stat |= SS_READY;
241
242 return stat;
243}
244
245/*
246 * core request to get current socket status
247 */
248static int bcm63xx_pcmcia_get_status(struct pcmcia_socket *sock,
249 unsigned int *status)
250{
251 struct bcm63xx_pcmcia_socket *skt;
252
253 skt = sock->driver_data;
254
255 spin_lock_bh(&skt->lock);
256 *status = __get_socket_status(skt);
257 spin_unlock_bh(&skt->lock);
258
259 return 0;
260}
261
262/*
263 * socket polling timer callback
264 */
265static void bcm63xx_pcmcia_poll(unsigned long data)
266{
267 struct bcm63xx_pcmcia_socket *skt;
268 unsigned int stat, events;
269
270 skt = (struct bcm63xx_pcmcia_socket *)data;
271
272 spin_lock_bh(&skt->lock);
273
274 stat = __get_socket_status(skt);
275
276 /* keep only changed bits, and mask with required one from the
277 * core */
278 events = (stat ^ skt->old_status) & skt->requested_state.csc_mask;
279 skt->old_status = stat;
280 spin_unlock_bh(&skt->lock);
281
282 if (events)
283 pcmcia_parse_events(&skt->socket, events);
284
285 mod_timer(&skt->timer,
286 jiffies + msecs_to_jiffies(BCM63XX_PCMCIA_POLL_RATE));
287}
288
289static int bcm63xx_pcmcia_set_io_map(struct pcmcia_socket *sock,
290 struct pccard_io_map *map)
291{
292 /* this doesn't seem to be called by pcmcia layer if static
293 * mapping is used */
294 return 0;
295}
296
297static int bcm63xx_pcmcia_set_mem_map(struct pcmcia_socket *sock,
298 struct pccard_mem_map *map)
299{
300 struct bcm63xx_pcmcia_socket *skt;
301 struct resource *res;
302
303 skt = sock->driver_data;
304 if (map->flags & MAP_ATTRIB)
305 res = skt->attr_res;
306 else
307 res = skt->common_res;
308
309 map->static_start = res->start + map->card_start;
310 return 0;
311}
312
313static struct pccard_operations bcm63xx_pcmcia_operations = {
314 .init = bcm63xx_pcmcia_sock_init,
315 .suspend = bcm63xx_pcmcia_suspend,
316 .get_status = bcm63xx_pcmcia_get_status,
317 .set_socket = bcm63xx_pcmcia_set_socket,
318 .set_io_map = bcm63xx_pcmcia_set_io_map,
319 .set_mem_map = bcm63xx_pcmcia_set_mem_map,
320};
321
322/*
323 * register pcmcia socket to core
324 */
325static int __devinit bcm63xx_drv_pcmcia_probe(struct platform_device *pdev)
326{
327 struct bcm63xx_pcmcia_socket *skt;
328 struct pcmcia_socket *sock;
329 struct resource *res, *irq_res;
330 unsigned int regmem_size = 0, iomem_size = 0;
331 u32 val;
332 int ret;
333
334 skt = kzalloc(sizeof(*skt), GFP_KERNEL);
335 if (!skt)
336 return -ENOMEM;
337 spin_lock_init(&skt->lock);
338 sock = &skt->socket;
339 sock->driver_data = skt;
340
341 /* make sure we have all resources we need */
342 skt->common_res = platform_get_resource(pdev, IORESOURCE_MEM, 1);
343 skt->attr_res = platform_get_resource(pdev, IORESOURCE_MEM, 2);
344 irq_res = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
345 skt->pd = pdev->dev.platform_data;
346 if (!skt->common_res || !skt->attr_res || !irq_res || !skt->pd) {
347 ret = -EINVAL;
348 goto err;
349 }
350
351 /* remap pcmcia registers */
352 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
353 regmem_size = resource_size(res);
354 if (!request_mem_region(res->start, regmem_size, "bcm63xx_pcmcia")) {
355 ret = -EINVAL;
356 goto err;
357 }
358 skt->reg_res = res;
359
360 skt->base = ioremap(res->start, regmem_size);
361 if (!skt->base) {
362 ret = -ENOMEM;
363 goto err;
364 }
365
366 /* remap io registers */
367 res = platform_get_resource(pdev, IORESOURCE_MEM, 3);
368 iomem_size = resource_size(res);
369 skt->io_base = ioremap(res->start, iomem_size);
370 if (!skt->io_base) {
371 ret = -ENOMEM;
372 goto err;
373 }
374
375 /* resources are static */
376 sock->resource_ops = &pccard_static_ops;
377 sock->ops = &bcm63xx_pcmcia_operations;
378 sock->owner = THIS_MODULE;
379 sock->dev.parent = &pdev->dev;
380 sock->features = SS_CAP_STATIC_MAP | SS_CAP_PCCARD;
381 sock->io_offset = (unsigned long)skt->io_base;
382 sock->pci_irq = irq_res->start;
383
384#ifdef CONFIG_CARDBUS
385 sock->cb_dev = bcm63xx_cb_dev;
386 if (bcm63xx_cb_dev)
387 sock->features |= SS_CAP_CARDBUS;
388#endif
389
390 /* assume common & attribute memory have the same size */
391 sock->map_size = resource_size(skt->common_res);
392
393 /* initialize polling timer */
394 setup_timer(&skt->timer, bcm63xx_pcmcia_poll, (unsigned long)skt);
395
396 /* initialize pcmcia control register, drive VS[12] to 0,
397 * leave CB IDSEL to the old value since it is set by the PCI
398 * layer */
399 val = pcmcia_readl(skt, PCMCIA_C1_REG);
400 val &= PCMCIA_C1_CBIDSEL_MASK;
401 val |= PCMCIA_C1_EN_PCMCIA_GPIO_MASK;
402 pcmcia_writel(skt, val, PCMCIA_C1_REG);
403
404 /*
405 * Hardware has only one set of timings registers, not one for
406 * each memory access type, so we configure them for the
407 * slowest one: attribute memory.
408 */
409 val = PCMCIA_C2_DATA16_MASK;
410 val |= 10 << PCMCIA_C2_RWCOUNT_SHIFT;
411 val |= 6 << PCMCIA_C2_INACTIVE_SHIFT;
412 val |= 3 << PCMCIA_C2_SETUP_SHIFT;
413 val |= 3 << PCMCIA_C2_HOLD_SHIFT;
414 pcmcia_writel(skt, val, PCMCIA_C2_REG);
415
416 ret = pcmcia_register_socket(sock);
417 if (ret)
418 goto err;
419
420 /* start polling socket */
421 mod_timer(&skt->timer,
422 jiffies + msecs_to_jiffies(BCM63XX_PCMCIA_POLL_RATE));
423
424 platform_set_drvdata(pdev, skt);
425 return 0;
426
427err:
428 if (skt->io_base)
429 iounmap(skt->io_base);
430 if (skt->base)
431 iounmap(skt->base);
432 if (skt->reg_res)
433 release_mem_region(skt->reg_res->start, regmem_size);
434 kfree(skt);
435 return ret;
436}
437
438static int __devexit bcm63xx_drv_pcmcia_remove(struct platform_device *pdev)
439{
440 struct bcm63xx_pcmcia_socket *skt;
441 struct resource *res;
442
443 skt = platform_get_drvdata(pdev);
444 del_timer_sync(&skt->timer);
445 iounmap(skt->base);
446 iounmap(skt->io_base);
447 res = skt->reg_res;
448 release_mem_region(res->start, resource_size(res));
449 kfree(skt);
450 return 0;
451}
452
453struct platform_driver bcm63xx_pcmcia_driver = {
454 .probe = bcm63xx_drv_pcmcia_probe,
455 .remove = __devexit_p(bcm63xx_drv_pcmcia_remove),
456 .driver = {
457 .name = "bcm63xx_pcmcia",
458 .owner = THIS_MODULE,
459 },
460};
461
462#ifdef CONFIG_CARDBUS
463static int __devinit bcm63xx_cb_probe(struct pci_dev *dev,
464 const struct pci_device_id *id)
465{
466 /* keep pci device */
467 bcm63xx_cb_dev = dev;
468 return platform_driver_register(&bcm63xx_pcmcia_driver);
469}
470
471static void __devexit bcm63xx_cb_exit(struct pci_dev *dev)
472{
473 platform_driver_unregister(&bcm63xx_pcmcia_driver);
474 bcm63xx_cb_dev = NULL;
475}
476
477static struct pci_device_id bcm63xx_cb_table[] = {
478 {
479 .vendor = PCI_VENDOR_ID_BROADCOM,
480 .device = BCM6348_CPU_ID,
481 .subvendor = PCI_VENDOR_ID_BROADCOM,
482 .subdevice = PCI_ANY_ID,
483 .class = PCI_CLASS_BRIDGE_CARDBUS << 8,
484 .class_mask = ~0,
485 },
486
487 {
488 .vendor = PCI_VENDOR_ID_BROADCOM,
489 .device = BCM6358_CPU_ID,
490 .subvendor = PCI_VENDOR_ID_BROADCOM,
491 .subdevice = PCI_ANY_ID,
492 .class = PCI_CLASS_BRIDGE_CARDBUS << 8,
493 .class_mask = ~0,
494 },
495
496 { },
497};
498
499MODULE_DEVICE_TABLE(pci, bcm63xx_cb_table);
500
501static struct pci_driver bcm63xx_cardbus_driver = {
502 .name = "bcm63xx_cardbus",
503 .id_table = bcm63xx_cb_table,
504 .probe = bcm63xx_cb_probe,
505 .remove = __devexit_p(bcm63xx_cb_exit),
506};
507#endif
508
509/*
510 * if cardbus support is enabled, register our platform device after
511 * our fake cardbus bridge has been registered
512 */
513static int __init bcm63xx_pcmcia_init(void)
514{
515#ifdef CONFIG_CARDBUS
516 return pci_register_driver(&bcm63xx_cardbus_driver);
517#else
518 return platform_driver_register(&bcm63xx_pcmcia_driver);
519#endif
520}
521
522static void __exit bcm63xx_pcmcia_exit(void)
523{
524#ifdef CONFIG_CARDBUS
525 return pci_unregister_driver(&bcm63xx_cardbus_driver);
526#else
527 platform_driver_unregister(&bcm63xx_pcmcia_driver);
528#endif
529}
530
531module_init(bcm63xx_pcmcia_init);
532module_exit(bcm63xx_pcmcia_exit);
533
534MODULE_LICENSE("GPL");
535MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
536MODULE_DESCRIPTION("Linux PCMCIA Card Services: bcm63xx Socket Controller");
diff --git a/drivers/pcmcia/bcm63xx_pcmcia.h b/drivers/pcmcia/bcm63xx_pcmcia.h
new file mode 100644
index 000000000000..ed957399d863
--- /dev/null
+++ b/drivers/pcmcia/bcm63xx_pcmcia.h
@@ -0,0 +1,60 @@
1#ifndef BCM63XX_PCMCIA_H_
2#define BCM63XX_PCMCIA_H_
3
4#include <linux/types.h>
5#include <linux/timer.h>
6#include <pcmcia/ss.h>
7#include <bcm63xx_dev_pcmcia.h>
8
9/* socket polling rate in ms */
10#define BCM63XX_PCMCIA_POLL_RATE 500
11
12enum {
13 CARD_CARDBUS = (1 << 0),
14 CARD_PCCARD = (1 << 1),
15 CARD_5V = (1 << 2),
16 CARD_3V = (1 << 3),
17 CARD_XV = (1 << 4),
18 CARD_YV = (1 << 5),
19};
20
21struct bcm63xx_pcmcia_socket {
22 struct pcmcia_socket socket;
23
24 /* platform specific data */
25 struct bcm63xx_pcmcia_platform_data *pd;
26
27 /* all regs access are protected by this spinlock */
28 spinlock_t lock;
29
30 /* pcmcia registers resource */
31 struct resource *reg_res;
32
33 /* base remapped address of registers */
34 void __iomem *base;
35
36 /* whether a card is detected at the moment */
37 int card_detected;
38
39 /* type of detected card (mask of above enum) */
40 u8 card_type;
41
42 /* keep last socket status to implement event reporting */
43 unsigned int old_status;
44
45 /* backup of requested socket state */
46 socket_state_t requested_state;
47
48 /* timer used for socket status polling */
49 struct timer_list timer;
50
51 /* attribute/common memory resources */
52 struct resource *attr_res;
53 struct resource *common_res;
54 struct resource *io_res;
55
56 /* base address of io memory */
57 void __iomem *io_base;
58};
59
60#endif /* BCM63XX_PCMCIA_H_ */
diff --git a/drivers/pcmcia/bfin_cf_pcmcia.c b/drivers/pcmcia/bfin_cf_pcmcia.c
index b59d4115d20f..300b368605c9 100644
--- a/drivers/pcmcia/bfin_cf_pcmcia.c
+++ b/drivers/pcmcia/bfin_cf_pcmcia.c
@@ -302,7 +302,7 @@ static int __devexit bfin_cf_remove(struct platform_device *pdev)
302 302
303static int bfin_cf_suspend(struct platform_device *pdev, pm_message_t mesg) 303static int bfin_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
304{ 304{
305 return pcmcia_socket_dev_suspend(&pdev->dev, mesg); 305 return pcmcia_socket_dev_suspend(&pdev->dev);
306} 306}
307 307
308static int bfin_cf_resume(struct platform_device *pdev) 308static int bfin_cf_resume(struct platform_device *pdev)
diff --git a/drivers/pcmcia/cs.c b/drivers/pcmcia/cs.c
index 0660ad182589..934d4bee39a0 100644
--- a/drivers/pcmcia/cs.c
+++ b/drivers/pcmcia/cs.c
@@ -101,7 +101,7 @@ EXPORT_SYMBOL(pcmcia_socket_list_rwsem);
101static int socket_resume(struct pcmcia_socket *skt); 101static int socket_resume(struct pcmcia_socket *skt);
102static int socket_suspend(struct pcmcia_socket *skt); 102static int socket_suspend(struct pcmcia_socket *skt);
103 103
104int pcmcia_socket_dev_suspend(struct device *dev, pm_message_t state) 104int pcmcia_socket_dev_suspend(struct device *dev)
105{ 105{
106 struct pcmcia_socket *socket; 106 struct pcmcia_socket *socket;
107 107
diff --git a/drivers/pcmcia/i82092.c b/drivers/pcmcia/i82092.c
index 46561face128..a04f21c8170f 100644
--- a/drivers/pcmcia/i82092.c
+++ b/drivers/pcmcia/i82092.c
@@ -42,7 +42,7 @@ MODULE_DEVICE_TABLE(pci, i82092aa_pci_ids);
42#ifdef CONFIG_PM 42#ifdef CONFIG_PM
43static int i82092aa_socket_suspend (struct pci_dev *dev, pm_message_t state) 43static int i82092aa_socket_suspend (struct pci_dev *dev, pm_message_t state)
44{ 44{
45 return pcmcia_socket_dev_suspend(&dev->dev, state); 45 return pcmcia_socket_dev_suspend(&dev->dev);
46} 46}
47 47
48static int i82092aa_socket_resume (struct pci_dev *dev) 48static int i82092aa_socket_resume (struct pci_dev *dev)
diff --git a/drivers/pcmcia/i82365.c b/drivers/pcmcia/i82365.c
index 40d4953e4b12..b906abe26ad0 100644
--- a/drivers/pcmcia/i82365.c
+++ b/drivers/pcmcia/i82365.c
@@ -1241,7 +1241,7 @@ static int pcic_init(struct pcmcia_socket *s)
1241static int i82365_drv_pcmcia_suspend(struct platform_device *dev, 1241static int i82365_drv_pcmcia_suspend(struct platform_device *dev,
1242 pm_message_t state) 1242 pm_message_t state)
1243{ 1243{
1244 return pcmcia_socket_dev_suspend(&dev->dev, state); 1244 return pcmcia_socket_dev_suspend(&dev->dev);
1245} 1245}
1246 1246
1247static int i82365_drv_pcmcia_resume(struct platform_device *dev) 1247static int i82365_drv_pcmcia_resume(struct platform_device *dev)
diff --git a/drivers/pcmcia/m32r_cfc.c b/drivers/pcmcia/m32r_cfc.c
index 62b4ecc97c46..d1d89c4491ad 100644
--- a/drivers/pcmcia/m32r_cfc.c
+++ b/drivers/pcmcia/m32r_cfc.c
@@ -699,7 +699,7 @@ static struct pccard_operations pcc_operations = {
699static int cfc_drv_pcmcia_suspend(struct platform_device *dev, 699static int cfc_drv_pcmcia_suspend(struct platform_device *dev,
700 pm_message_t state) 700 pm_message_t state)
701{ 701{
702 return pcmcia_socket_dev_suspend(&dev->dev, state); 702 return pcmcia_socket_dev_suspend(&dev->dev);
703} 703}
704 704
705static int cfc_drv_pcmcia_resume(struct platform_device *dev) 705static int cfc_drv_pcmcia_resume(struct platform_device *dev)
diff --git a/drivers/pcmcia/m32r_pcc.c b/drivers/pcmcia/m32r_pcc.c
index 12034b41d196..a0655839c8d3 100644
--- a/drivers/pcmcia/m32r_pcc.c
+++ b/drivers/pcmcia/m32r_pcc.c
@@ -675,7 +675,7 @@ static struct pccard_operations pcc_operations = {
675static int pcc_drv_pcmcia_suspend(struct platform_device *dev, 675static int pcc_drv_pcmcia_suspend(struct platform_device *dev,
676 pm_message_t state) 676 pm_message_t state)
677{ 677{
678 return pcmcia_socket_dev_suspend(&dev->dev, state); 678 return pcmcia_socket_dev_suspend(&dev->dev);
679} 679}
680 680
681static int pcc_drv_pcmcia_resume(struct platform_device *dev) 681static int pcc_drv_pcmcia_resume(struct platform_device *dev)
diff --git a/drivers/pcmcia/m8xx_pcmcia.c b/drivers/pcmcia/m8xx_pcmcia.c
index d1ad0966392d..c69f2c4fe520 100644
--- a/drivers/pcmcia/m8xx_pcmcia.c
+++ b/drivers/pcmcia/m8xx_pcmcia.c
@@ -1296,7 +1296,7 @@ static int m8xx_remove(struct of_device *ofdev)
1296#ifdef CONFIG_PM 1296#ifdef CONFIG_PM
1297static int m8xx_suspend(struct platform_device *pdev, pm_message_t state) 1297static int m8xx_suspend(struct platform_device *pdev, pm_message_t state)
1298{ 1298{
1299 return pcmcia_socket_dev_suspend(&pdev->dev, state); 1299 return pcmcia_socket_dev_suspend(&pdev->dev);
1300} 1300}
1301 1301
1302static int m8xx_resume(struct platform_device *pdev) 1302static int m8xx_resume(struct platform_device *pdev)
diff --git a/drivers/pcmcia/omap_cf.c b/drivers/pcmcia/omap_cf.c
index f3736398900e..68570bc3ac86 100644
--- a/drivers/pcmcia/omap_cf.c
+++ b/drivers/pcmcia/omap_cf.c
@@ -334,7 +334,7 @@ static int __exit omap_cf_remove(struct platform_device *pdev)
334 334
335static int omap_cf_suspend(struct platform_device *pdev, pm_message_t mesg) 335static int omap_cf_suspend(struct platform_device *pdev, pm_message_t mesg)
336{ 336{
337 return pcmcia_socket_dev_suspend(&pdev->dev, mesg); 337 return pcmcia_socket_dev_suspend(&pdev->dev);
338} 338}
339 339
340static int omap_cf_resume(struct platform_device *pdev) 340static int omap_cf_resume(struct platform_device *pdev)
diff --git a/drivers/pcmcia/pd6729.c b/drivers/pcmcia/pd6729.c
index 8bed1dab9039..1c39d3438f20 100644
--- a/drivers/pcmcia/pd6729.c
+++ b/drivers/pcmcia/pd6729.c
@@ -758,7 +758,7 @@ static void __devexit pd6729_pci_remove(struct pci_dev *dev)
758#ifdef CONFIG_PM 758#ifdef CONFIG_PM
759static int pd6729_socket_suspend(struct pci_dev *dev, pm_message_t state) 759static int pd6729_socket_suspend(struct pci_dev *dev, pm_message_t state)
760{ 760{
761 return pcmcia_socket_dev_suspend(&dev->dev, state); 761 return pcmcia_socket_dev_suspend(&dev->dev);
762} 762}
763 763
764static int pd6729_socket_resume(struct pci_dev *dev) 764static int pd6729_socket_resume(struct pci_dev *dev)
diff --git a/drivers/pcmcia/pxa2xx_base.c b/drivers/pcmcia/pxa2xx_base.c
index c49a7269f6d1..0e35acb1366b 100644
--- a/drivers/pcmcia/pxa2xx_base.c
+++ b/drivers/pcmcia/pxa2xx_base.c
@@ -300,25 +300,29 @@ static int pxa2xx_drv_pcmcia_remove(struct platform_device *dev)
300 return soc_common_drv_pcmcia_remove(&dev->dev); 300 return soc_common_drv_pcmcia_remove(&dev->dev);
301} 301}
302 302
303static int pxa2xx_drv_pcmcia_suspend(struct platform_device *dev, pm_message_t state) 303static int pxa2xx_drv_pcmcia_suspend(struct device *dev)
304{ 304{
305 return pcmcia_socket_dev_suspend(&dev->dev, state); 305 return pcmcia_socket_dev_suspend(dev);
306} 306}
307 307
308static int pxa2xx_drv_pcmcia_resume(struct platform_device *dev) 308static int pxa2xx_drv_pcmcia_resume(struct device *dev)
309{ 309{
310 pxa2xx_configure_sockets(&dev->dev); 310 pxa2xx_configure_sockets(dev);
311 return pcmcia_socket_dev_resume(&dev->dev); 311 return pcmcia_socket_dev_resume(dev);
312} 312}
313 313
314static struct dev_pm_ops pxa2xx_drv_pcmcia_pm_ops = {
315 .suspend = pxa2xx_drv_pcmcia_suspend,
316 .resume = pxa2xx_drv_pcmcia_resume,
317};
318
314static struct platform_driver pxa2xx_pcmcia_driver = { 319static struct platform_driver pxa2xx_pcmcia_driver = {
315 .probe = pxa2xx_drv_pcmcia_probe, 320 .probe = pxa2xx_drv_pcmcia_probe,
316 .remove = pxa2xx_drv_pcmcia_remove, 321 .remove = pxa2xx_drv_pcmcia_remove,
317 .suspend = pxa2xx_drv_pcmcia_suspend,
318 .resume = pxa2xx_drv_pcmcia_resume,
319 .driver = { 322 .driver = {
320 .name = "pxa2xx-pcmcia", 323 .name = "pxa2xx-pcmcia",
321 .owner = THIS_MODULE, 324 .owner = THIS_MODULE,
325 .pm = &pxa2xx_drv_pcmcia_pm_ops,
322 }, 326 },
323}; 327};
324 328
diff --git a/drivers/pcmcia/pxa2xx_palmtc.c b/drivers/pcmcia/pxa2xx_palmtc.c
new file mode 100644
index 000000000000..3a8993ed5621
--- /dev/null
+++ b/drivers/pcmcia/pxa2xx_palmtc.c
@@ -0,0 +1,230 @@
1/*
2 * linux/drivers/pcmcia/pxa2xx_palmtc.c
3 *
4 * Driver for Palm Tungsten|C PCMCIA
5 *
6 * Copyright (C) 2008 Alex Osborne <ato@meshy.org>
7 * Copyright (C) 2009 Marek Vasut <marek.vasut@gmail.com>
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License version 2 as
11 * published by the Free Software Foundation.
12 *
13 */
14
15#include <linux/module.h>
16#include <linux/platform_device.h>
17#include <linux/gpio.h>
18#include <linux/delay.h>
19
20#include <asm/mach-types.h>
21#include <mach/palmtc.h>
22#include "soc_common.h"
23
24static int palmtc_pcmcia_hw_init(struct soc_pcmcia_socket *skt)
25{
26 int ret;
27
28 ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_POWER1, "PCMCIA PWR1");
29 if (ret)
30 goto err1;
31 ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_POWER1, 0);
32 if (ret)
33 goto err2;
34
35 ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_POWER2, "PCMCIA PWR2");
36 if (ret)
37 goto err2;
38 ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_POWER2, 0);
39 if (ret)
40 goto err3;
41
42 ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_POWER3, "PCMCIA PWR3");
43 if (ret)
44 goto err3;
45 ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_POWER3, 0);
46 if (ret)
47 goto err4;
48
49 ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_RESET, "PCMCIA RST");
50 if (ret)
51 goto err4;
52 ret = gpio_direction_output(GPIO_NR_PALMTC_PCMCIA_RESET, 1);
53 if (ret)
54 goto err5;
55
56 ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_READY, "PCMCIA RDY");
57 if (ret)
58 goto err5;
59 ret = gpio_direction_input(GPIO_NR_PALMTC_PCMCIA_READY);
60 if (ret)
61 goto err6;
62
63 ret = gpio_request(GPIO_NR_PALMTC_PCMCIA_PWRREADY, "PCMCIA PWRRDY");
64 if (ret)
65 goto err6;
66 ret = gpio_direction_input(GPIO_NR_PALMTC_PCMCIA_PWRREADY);
67 if (ret)
68 goto err7;
69
70 skt->irq = IRQ_GPIO(GPIO_NR_PALMTC_PCMCIA_READY);
71 return 0;
72
73err7:
74 gpio_free(GPIO_NR_PALMTC_PCMCIA_PWRREADY);
75err6:
76 gpio_free(GPIO_NR_PALMTC_PCMCIA_READY);
77err5:
78 gpio_free(GPIO_NR_PALMTC_PCMCIA_RESET);
79err4:
80 gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER3);
81err3:
82 gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER2);
83err2:
84 gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER1);
85err1:
86 return ret;
87}
88
89static void palmtc_pcmcia_hw_shutdown(struct soc_pcmcia_socket *skt)
90{
91 gpio_free(GPIO_NR_PALMTC_PCMCIA_PWRREADY);
92 gpio_free(GPIO_NR_PALMTC_PCMCIA_READY);
93 gpio_free(GPIO_NR_PALMTC_PCMCIA_RESET);
94 gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER3);
95 gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER2);
96 gpio_free(GPIO_NR_PALMTC_PCMCIA_POWER1);
97}
98
99static void palmtc_pcmcia_socket_state(struct soc_pcmcia_socket *skt,
100 struct pcmcia_state *state)
101{
102 state->detect = 1; /* always inserted */
103 state->ready = !!gpio_get_value(GPIO_NR_PALMTC_PCMCIA_READY);
104 state->bvd1 = 1;
105 state->bvd2 = 1;
106 state->wrprot = 0;
107 state->vs_3v = 1;
108 state->vs_Xv = 0;
109}
110
111static int palmtc_wifi_powerdown(void)
112{
113 gpio_set_value(GPIO_NR_PALMTC_PCMCIA_RESET, 1);
114 gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER2, 0);
115 mdelay(40);
116 gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER1, 0);
117 return 0;
118}
119
120static int palmtc_wifi_powerup(void)
121{
122 int timeout = 50;
123
124 gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER3, 1);
125 mdelay(50);
126
127 /* Power up the card, 1.8V first, after a while 3.3V */
128 gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER1, 1);
129 mdelay(100);
130 gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER2, 1);
131
132 /* Wait till the card is ready */
133 while (!gpio_get_value(GPIO_NR_PALMTC_PCMCIA_PWRREADY) &&
134 timeout) {
135 mdelay(1);
136 timeout--;
137 }
138
139 /* Power down the WiFi in case of error */
140 if (!timeout) {
141 palmtc_wifi_powerdown();
142 return 1;
143 }
144
145 /* Reset the card */
146 gpio_set_value(GPIO_NR_PALMTC_PCMCIA_RESET, 1);
147 mdelay(20);
148 gpio_set_value(GPIO_NR_PALMTC_PCMCIA_RESET, 0);
149 mdelay(25);
150
151 gpio_set_value(GPIO_NR_PALMTC_PCMCIA_POWER3, 0);
152
153 return 0;
154}
155
156static int palmtc_pcmcia_configure_socket(struct soc_pcmcia_socket *skt,
157 const socket_state_t *state)
158{
159 int ret = 1;
160
161 if (state->Vcc == 0)
162 ret = palmtc_wifi_powerdown();
163 else if (state->Vcc == 33)
164 ret = palmtc_wifi_powerup();
165
166 return ret;
167}
168
169static void palmtc_pcmcia_socket_init(struct soc_pcmcia_socket *skt)
170{
171}
172
173static void palmtc_pcmcia_socket_suspend(struct soc_pcmcia_socket *skt)
174{
175}
176
177static struct pcmcia_low_level palmtc_pcmcia_ops = {
178 .owner = THIS_MODULE,
179
180 .first = 0,
181 .nr = 1,
182
183 .hw_init = palmtc_pcmcia_hw_init,
184 .hw_shutdown = palmtc_pcmcia_hw_shutdown,
185
186 .socket_state = palmtc_pcmcia_socket_state,
187 .configure_socket = palmtc_pcmcia_configure_socket,
188
189 .socket_init = palmtc_pcmcia_socket_init,
190 .socket_suspend = palmtc_pcmcia_socket_suspend,
191};
192
193static struct platform_device *palmtc_pcmcia_device;
194
195static int __init palmtc_pcmcia_init(void)
196{
197 int ret;
198
199 if (!machine_is_palmtc())
200 return -ENODEV;
201
202 palmtc_pcmcia_device = platform_device_alloc("pxa2xx-pcmcia", -1);
203 if (!palmtc_pcmcia_device)
204 return -ENOMEM;
205
206 ret = platform_device_add_data(palmtc_pcmcia_device, &palmtc_pcmcia_ops,
207 sizeof(palmtc_pcmcia_ops));
208
209 if (!ret)
210 ret = platform_device_add(palmtc_pcmcia_device);
211
212 if (ret)
213 platform_device_put(palmtc_pcmcia_device);
214
215 return ret;
216}
217
218static void __exit palmtc_pcmcia_exit(void)
219{
220 platform_device_unregister(palmtc_pcmcia_device);
221}
222
223module_init(palmtc_pcmcia_init);
224module_exit(palmtc_pcmcia_exit);
225
226MODULE_AUTHOR("Alex Osborne <ato@meshy.org>,"
227 " Marek Vasut <marek.vasut@gmail.com>");
228MODULE_DESCRIPTION("PCMCIA support for Palm Tungsten|C");
229MODULE_ALIAS("platform:pxa2xx-pcmcia");
230MODULE_LICENSE("GPL");
diff --git a/drivers/pcmcia/sa1100_assabet.c b/drivers/pcmcia/sa1100_assabet.c
index f424146a2bc9..ac8aa09ba0da 100644
--- a/drivers/pcmcia/sa1100_assabet.c
+++ b/drivers/pcmcia/sa1100_assabet.c
@@ -130,7 +130,7 @@ static struct pcmcia_low_level assabet_pcmcia_ops = {
130 .socket_suspend = assabet_pcmcia_socket_suspend, 130 .socket_suspend = assabet_pcmcia_socket_suspend,
131}; 131};
132 132
133int __init pcmcia_assabet_init(struct device *dev) 133int pcmcia_assabet_init(struct device *dev)
134{ 134{
135 int ret = -ENODEV; 135 int ret = -ENODEV;
136 136
diff --git a/drivers/pcmcia/sa1100_generic.c b/drivers/pcmcia/sa1100_generic.c
index d8da5ac844e9..2d0e99751530 100644
--- a/drivers/pcmcia/sa1100_generic.c
+++ b/drivers/pcmcia/sa1100_generic.c
@@ -89,7 +89,7 @@ static int sa11x0_drv_pcmcia_remove(struct platform_device *dev)
89static int sa11x0_drv_pcmcia_suspend(struct platform_device *dev, 89static int sa11x0_drv_pcmcia_suspend(struct platform_device *dev,
90 pm_message_t state) 90 pm_message_t state)
91{ 91{
92 return pcmcia_socket_dev_suspend(&dev->dev, state); 92 return pcmcia_socket_dev_suspend(&dev->dev);
93} 93}
94 94
95static int sa11x0_drv_pcmcia_resume(struct platform_device *dev) 95static int sa11x0_drv_pcmcia_resume(struct platform_device *dev)
diff --git a/drivers/pcmcia/sa1100_neponset.c b/drivers/pcmcia/sa1100_neponset.c
index 4c41e86ccff9..0c76d337815b 100644
--- a/drivers/pcmcia/sa1100_neponset.c
+++ b/drivers/pcmcia/sa1100_neponset.c
@@ -123,7 +123,7 @@ static struct pcmcia_low_level neponset_pcmcia_ops = {
123 .socket_suspend = sa1111_pcmcia_socket_suspend, 123 .socket_suspend = sa1111_pcmcia_socket_suspend,
124}; 124};
125 125
126int __init pcmcia_neponset_init(struct sa1111_dev *sadev) 126int pcmcia_neponset_init(struct sa1111_dev *sadev)
127{ 127{
128 int ret = -ENODEV; 128 int ret = -ENODEV;
129 129
diff --git a/drivers/pcmcia/sa1111_generic.c b/drivers/pcmcia/sa1111_generic.c
index 401052a21ce8..4be4e172ffa1 100644
--- a/drivers/pcmcia/sa1111_generic.c
+++ b/drivers/pcmcia/sa1111_generic.c
@@ -159,7 +159,7 @@ static int __devexit pcmcia_remove(struct sa1111_dev *dev)
159 159
160static int pcmcia_suspend(struct sa1111_dev *dev, pm_message_t state) 160static int pcmcia_suspend(struct sa1111_dev *dev, pm_message_t state)
161{ 161{
162 return pcmcia_socket_dev_suspend(&dev->dev, state); 162 return pcmcia_socket_dev_suspend(&dev->dev);
163} 163}
164 164
165static int pcmcia_resume(struct sa1111_dev *dev) 165static int pcmcia_resume(struct sa1111_dev *dev)
diff --git a/drivers/pcmcia/tcic.c b/drivers/pcmcia/tcic.c
index 8eb04230fec7..582413fcb62f 100644
--- a/drivers/pcmcia/tcic.c
+++ b/drivers/pcmcia/tcic.c
@@ -366,7 +366,7 @@ static int __init get_tcic_id(void)
366static int tcic_drv_pcmcia_suspend(struct platform_device *dev, 366static int tcic_drv_pcmcia_suspend(struct platform_device *dev,
367 pm_message_t state) 367 pm_message_t state)
368{ 368{
369 return pcmcia_socket_dev_suspend(&dev->dev, state); 369 return pcmcia_socket_dev_suspend(&dev->dev);
370} 370}
371 371
372static int tcic_drv_pcmcia_resume(struct platform_device *dev) 372static int tcic_drv_pcmcia_resume(struct platform_device *dev)
diff --git a/drivers/pcmcia/vrc4171_card.c b/drivers/pcmcia/vrc4171_card.c
index d4ad50d737b0..c9fcbdc164ea 100644
--- a/drivers/pcmcia/vrc4171_card.c
+++ b/drivers/pcmcia/vrc4171_card.c
@@ -707,7 +707,7 @@ __setup("vrc4171_card=", vrc4171_card_setup);
707static int vrc4171_card_suspend(struct platform_device *dev, 707static int vrc4171_card_suspend(struct platform_device *dev,
708 pm_message_t state) 708 pm_message_t state)
709{ 709{
710 return pcmcia_socket_dev_suspend(&dev->dev, state); 710 return pcmcia_socket_dev_suspend(&dev->dev);
711} 711}
712 712
713static int vrc4171_card_resume(struct platform_device *dev) 713static int vrc4171_card_resume(struct platform_device *dev)
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index b459e87a30ac..abe0e44c6e9e 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -1225,60 +1225,71 @@ static int __devinit yenta_probe (struct pci_dev *dev, const struct pci_device_i
1225} 1225}
1226 1226
1227#ifdef CONFIG_PM 1227#ifdef CONFIG_PM
1228static int yenta_dev_suspend (struct pci_dev *dev, pm_message_t state) 1228static int yenta_dev_suspend_noirq(struct device *dev)
1229{ 1229{
1230 struct yenta_socket *socket = pci_get_drvdata(dev); 1230 struct pci_dev *pdev = to_pci_dev(dev);
1231 struct yenta_socket *socket = pci_get_drvdata(pdev);
1231 int ret; 1232 int ret;
1232 1233
1233 ret = pcmcia_socket_dev_suspend(&dev->dev, state); 1234 ret = pcmcia_socket_dev_suspend(dev);
1234 1235
1235 if (socket) { 1236 if (!socket)
1236 if (socket->type && socket->type->save_state) 1237 return ret;
1237 socket->type->save_state(socket);
1238 1238
1239 /* FIXME: pci_save_state needs to have a better interface */ 1239 if (socket->type && socket->type->save_state)
1240 pci_save_state(dev); 1240 socket->type->save_state(socket);
1241 pci_read_config_dword(dev, 16*4, &socket->saved_state[0]);
1242 pci_read_config_dword(dev, 17*4, &socket->saved_state[1]);
1243 pci_disable_device(dev);
1244 1241
1245 /* 1242 pci_save_state(pdev);
1246 * Some laptops (IBM T22) do not like us putting the Cardbus 1243 pci_read_config_dword(pdev, 16*4, &socket->saved_state[0]);
1247 * bridge into D3. At a guess, some other laptop will 1244 pci_read_config_dword(pdev, 17*4, &socket->saved_state[1]);
1248 * probably require this, so leave it commented out for now. 1245 pci_disable_device(pdev);
1249 */ 1246
1250 /* pci_set_power_state(dev, 3); */ 1247 /*
1251 } 1248 * Some laptops (IBM T22) do not like us putting the Cardbus
1249 * bridge into D3. At a guess, some other laptop will
1250 * probably require this, so leave it commented out for now.
1251 */
1252 /* pci_set_power_state(dev, 3); */
1252 1253
1253 return ret; 1254 return ret;
1254} 1255}
1255 1256
1256 1257static int yenta_dev_resume_noirq(struct device *dev)
1257static int yenta_dev_resume (struct pci_dev *dev)
1258{ 1258{
1259 struct yenta_socket *socket = pci_get_drvdata(dev); 1259 struct pci_dev *pdev = to_pci_dev(dev);
1260 struct yenta_socket *socket = pci_get_drvdata(pdev);
1261 int ret;
1260 1262
1261 if (socket) { 1263 if (!socket)
1262 int rc; 1264 return 0;
1263 1265
1264 pci_set_power_state(dev, 0); 1266 pci_write_config_dword(pdev, 16*4, socket->saved_state[0]);
1265 /* FIXME: pci_restore_state needs to have a better interface */ 1267 pci_write_config_dword(pdev, 17*4, socket->saved_state[1]);
1266 pci_restore_state(dev);
1267 pci_write_config_dword(dev, 16*4, socket->saved_state[0]);
1268 pci_write_config_dword(dev, 17*4, socket->saved_state[1]);
1269 1268
1270 rc = pci_enable_device(dev); 1269 ret = pci_enable_device(pdev);
1271 if (rc) 1270 if (ret)
1272 return rc; 1271 return ret;
1273 1272
1274 pci_set_master(dev); 1273 pci_set_master(pdev);
1275 1274
1276 if (socket->type && socket->type->restore_state) 1275 if (socket->type && socket->type->restore_state)
1277 socket->type->restore_state(socket); 1276 socket->type->restore_state(socket);
1278 }
1279 1277
1280 return pcmcia_socket_dev_resume(&dev->dev); 1278 return pcmcia_socket_dev_resume(dev);
1281} 1279}
1280
1281static struct dev_pm_ops yenta_pm_ops = {
1282 .suspend_noirq = yenta_dev_suspend_noirq,
1283 .resume_noirq = yenta_dev_resume_noirq,
1284 .freeze_noirq = yenta_dev_suspend_noirq,
1285 .thaw_noirq = yenta_dev_resume_noirq,
1286 .poweroff_noirq = yenta_dev_suspend_noirq,
1287 .restore_noirq = yenta_dev_resume_noirq,
1288};
1289
1290#define YENTA_PM_OPS (&yenta_pm_ops)
1291#else
1292#define YENTA_PM_OPS NULL
1282#endif 1293#endif
1283 1294
1284#define CB_ID(vend,dev,type) \ 1295#define CB_ID(vend,dev,type) \
@@ -1376,10 +1387,7 @@ static struct pci_driver yenta_cardbus_driver = {
1376 .id_table = yenta_table, 1387 .id_table = yenta_table,
1377 .probe = yenta_probe, 1388 .probe = yenta_probe,
1378 .remove = __devexit_p(yenta_close), 1389 .remove = __devexit_p(yenta_close),
1379#ifdef CONFIG_PM 1390 .driver.pm = YENTA_PM_OPS,
1380 .suspend = yenta_dev_suspend,
1381 .resume = yenta_dev_resume,
1382#endif
1383}; 1391};
1384 1392
1385 1393
diff --git a/drivers/platform/x86/eeepc-laptop.c b/drivers/platform/x86/eeepc-laptop.c
index da3c08b3dcc1..749e2102b2be 100644
--- a/drivers/platform/x86/eeepc-laptop.c
+++ b/drivers/platform/x86/eeepc-laptop.c
@@ -624,7 +624,7 @@ static int notify_brn(void)
624 struct backlight_device *bd = eeepc_backlight_device; 624 struct backlight_device *bd = eeepc_backlight_device;
625 if (bd) { 625 if (bd) {
626 int old = bd->props.brightness; 626 int old = bd->props.brightness;
627 bd->props.brightness = read_brightness(bd); 627 backlight_force_update(bd, BACKLIGHT_UPDATE_HOTKEY);
628 return old; 628 return old;
629 } 629 }
630 return -1; 630 return -1;
diff --git a/drivers/platform/x86/sony-laptop.c b/drivers/platform/x86/sony-laptop.c
index afdbdaaf80cb..a2a742c8ff7e 100644
--- a/drivers/platform/x86/sony-laptop.c
+++ b/drivers/platform/x86/sony-laptop.c
@@ -1211,15 +1211,6 @@ static int sony_nc_add(struct acpi_device *device)
1211 } 1211 }
1212 } 1212 }
1213 1213
1214 /* try to _INI the device if such method exists (ACPI spec 3.0-6.5.1
1215 * should be respected as we already checked for the device presence above */
1216 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, METHOD_NAME__INI, &handle))) {
1217 dprintk("Invoking _INI\n");
1218 if (ACPI_FAILURE(acpi_evaluate_object(sony_nc_acpi_handle, METHOD_NAME__INI,
1219 NULL, NULL)))
1220 dprintk("_INI Method failed\n");
1221 }
1222
1223 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON", 1214 if (ACPI_SUCCESS(acpi_get_handle(sony_nc_acpi_handle, "ECON",
1224 &handle))) { 1215 &handle))) {
1225 if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL)) 1216 if (acpi_callsetfunc(sony_nc_acpi_handle, "ECON", 1, NULL))
@@ -1399,27 +1390,20 @@ struct sonypi_eventtypes {
1399 struct sonypi_event *events; 1390 struct sonypi_event *events;
1400}; 1391};
1401 1392
1402struct device_ctrl { 1393struct sony_pic_dev {
1394 struct acpi_device *acpi_dev;
1395 struct sony_pic_irq *cur_irq;
1396 struct sony_pic_ioport *cur_ioport;
1397 struct list_head interrupts;
1398 struct list_head ioports;
1399 struct mutex lock;
1400 struct sonypi_eventtypes *event_types;
1401 int (*handle_irq)(const u8, const u8);
1403 int model; 1402 int model;
1404 int (*handle_irq)(const u8, const u8);
1405 u16 evport_offset; 1403 u16 evport_offset;
1406 u8 has_camera; 1404 u8 camera_power;
1407 u8 has_bluetooth; 1405 u8 bluetooth_power;
1408 u8 has_wwan; 1406 u8 wwan_power;
1409 struct sonypi_eventtypes *event_types;
1410};
1411
1412struct sony_pic_dev {
1413 struct device_ctrl *control;
1414 struct acpi_device *acpi_dev;
1415 struct sony_pic_irq *cur_irq;
1416 struct sony_pic_ioport *cur_ioport;
1417 struct list_head interrupts;
1418 struct list_head ioports;
1419 struct mutex lock;
1420 u8 camera_power;
1421 u8 bluetooth_power;
1422 u8 wwan_power;
1423}; 1407};
1424 1408
1425static struct sony_pic_dev spic_dev = { 1409static struct sony_pic_dev spic_dev = {
@@ -1427,6 +1411,8 @@ static struct sony_pic_dev spic_dev = {
1427 .ioports = LIST_HEAD_INIT(spic_dev.ioports), 1411 .ioports = LIST_HEAD_INIT(spic_dev.ioports),
1428}; 1412};
1429 1413
1414static int spic_drv_registered;
1415
1430/* Event masks */ 1416/* Event masks */
1431#define SONYPI_JOGGER_MASK 0x00000001 1417#define SONYPI_JOGGER_MASK 0x00000001
1432#define SONYPI_CAPTURE_MASK 0x00000002 1418#define SONYPI_CAPTURE_MASK 0x00000002
@@ -1724,27 +1710,6 @@ static int type3_handle_irq(const u8 data_mask, const u8 ev)
1724 return 1; 1710 return 1;
1725} 1711}
1726 1712
1727static struct device_ctrl spic_types[] = {
1728 {
1729 .model = SONYPI_DEVICE_TYPE1,
1730 .handle_irq = NULL,
1731 .evport_offset = SONYPI_TYPE1_OFFSET,
1732 .event_types = type1_events,
1733 },
1734 {
1735 .model = SONYPI_DEVICE_TYPE2,
1736 .handle_irq = NULL,
1737 .evport_offset = SONYPI_TYPE2_OFFSET,
1738 .event_types = type2_events,
1739 },
1740 {
1741 .model = SONYPI_DEVICE_TYPE3,
1742 .handle_irq = type3_handle_irq,
1743 .evport_offset = SONYPI_TYPE3_OFFSET,
1744 .event_types = type3_events,
1745 },
1746};
1747
1748static void sony_pic_detect_device_type(struct sony_pic_dev *dev) 1713static void sony_pic_detect_device_type(struct sony_pic_dev *dev)
1749{ 1714{
1750 struct pci_dev *pcidev; 1715 struct pci_dev *pcidev;
@@ -1752,48 +1717,63 @@ static void sony_pic_detect_device_type(struct sony_pic_dev *dev)
1752 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, 1717 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
1753 PCI_DEVICE_ID_INTEL_82371AB_3, NULL); 1718 PCI_DEVICE_ID_INTEL_82371AB_3, NULL);
1754 if (pcidev) { 1719 if (pcidev) {
1755 dev->control = &spic_types[0]; 1720 dev->model = SONYPI_DEVICE_TYPE1;
1721 dev->evport_offset = SONYPI_TYPE1_OFFSET;
1722 dev->event_types = type1_events;
1756 goto out; 1723 goto out;
1757 } 1724 }
1758 1725
1759 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, 1726 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
1760 PCI_DEVICE_ID_INTEL_ICH6_1, NULL); 1727 PCI_DEVICE_ID_INTEL_ICH6_1, NULL);
1761 if (pcidev) { 1728 if (pcidev) {
1762 dev->control = &spic_types[2]; 1729 dev->model = SONYPI_DEVICE_TYPE2;
1730 dev->evport_offset = SONYPI_TYPE2_OFFSET;
1731 dev->event_types = type2_events;
1763 goto out; 1732 goto out;
1764 } 1733 }
1765 1734
1766 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, 1735 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
1767 PCI_DEVICE_ID_INTEL_ICH7_1, NULL); 1736 PCI_DEVICE_ID_INTEL_ICH7_1, NULL);
1768 if (pcidev) { 1737 if (pcidev) {
1769 dev->control = &spic_types[2]; 1738 dev->model = SONYPI_DEVICE_TYPE3;
1739 dev->handle_irq = type3_handle_irq;
1740 dev->evport_offset = SONYPI_TYPE3_OFFSET;
1741 dev->event_types = type3_events;
1770 goto out; 1742 goto out;
1771 } 1743 }
1772 1744
1773 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, 1745 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
1774 PCI_DEVICE_ID_INTEL_ICH8_4, NULL); 1746 PCI_DEVICE_ID_INTEL_ICH8_4, NULL);
1775 if (pcidev) { 1747 if (pcidev) {
1776 dev->control = &spic_types[2]; 1748 dev->model = SONYPI_DEVICE_TYPE3;
1749 dev->handle_irq = type3_handle_irq;
1750 dev->evport_offset = SONYPI_TYPE3_OFFSET;
1751 dev->event_types = type3_events;
1777 goto out; 1752 goto out;
1778 } 1753 }
1779 1754
1780 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL, 1755 pcidev = pci_get_device(PCI_VENDOR_ID_INTEL,
1781 PCI_DEVICE_ID_INTEL_ICH9_1, NULL); 1756 PCI_DEVICE_ID_INTEL_ICH9_1, NULL);
1782 if (pcidev) { 1757 if (pcidev) {
1783 dev->control = &spic_types[2]; 1758 dev->model = SONYPI_DEVICE_TYPE3;
1759 dev->handle_irq = type3_handle_irq;
1760 dev->evport_offset = SONYPI_TYPE3_OFFSET;
1761 dev->event_types = type3_events;
1784 goto out; 1762 goto out;
1785 } 1763 }
1786 1764
1787 /* default */ 1765 /* default */
1788 dev->control = &spic_types[1]; 1766 dev->model = SONYPI_DEVICE_TYPE2;
1767 dev->evport_offset = SONYPI_TYPE2_OFFSET;
1768 dev->event_types = type2_events;
1789 1769
1790out: 1770out:
1791 if (pcidev) 1771 if (pcidev)
1792 pci_dev_put(pcidev); 1772 pci_dev_put(pcidev);
1793 1773
1794 printk(KERN_INFO DRV_PFX "detected Type%d model\n", 1774 printk(KERN_INFO DRV_PFX "detected Type%d model\n",
1795 dev->control->model == SONYPI_DEVICE_TYPE1 ? 1 : 1775 dev->model == SONYPI_DEVICE_TYPE1 ? 1 :
1796 dev->control->model == SONYPI_DEVICE_TYPE2 ? 2 : 3); 1776 dev->model == SONYPI_DEVICE_TYPE2 ? 2 : 3);
1797} 1777}
1798 1778
1799/* camera tests and poweron/poweroff */ 1779/* camera tests and poweron/poweroff */
@@ -2566,7 +2546,7 @@ static int sony_pic_enable(struct acpi_device *device,
2566 buffer.pointer = resource; 2546 buffer.pointer = resource;
2567 2547
2568 /* setup Type 1 resources */ 2548 /* setup Type 1 resources */
2569 if (spic_dev.control->model == SONYPI_DEVICE_TYPE1) { 2549 if (spic_dev.model == SONYPI_DEVICE_TYPE1) {
2570 2550
2571 /* setup io resources */ 2551 /* setup io resources */
2572 resource->res1.type = ACPI_RESOURCE_TYPE_IO; 2552 resource->res1.type = ACPI_RESOURCE_TYPE_IO;
@@ -2649,29 +2629,28 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id)
2649 data_mask = inb_p(dev->cur_ioport->io2.minimum); 2629 data_mask = inb_p(dev->cur_ioport->io2.minimum);
2650 else 2630 else
2651 data_mask = inb_p(dev->cur_ioport->io1.minimum + 2631 data_mask = inb_p(dev->cur_ioport->io1.minimum +
2652 dev->control->evport_offset); 2632 dev->evport_offset);
2653 2633
2654 dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n", 2634 dprintk("event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
2655 ev, data_mask, dev->cur_ioport->io1.minimum, 2635 ev, data_mask, dev->cur_ioport->io1.minimum,
2656 dev->control->evport_offset); 2636 dev->evport_offset);
2657 2637
2658 if (ev == 0x00 || ev == 0xff) 2638 if (ev == 0x00 || ev == 0xff)
2659 return IRQ_HANDLED; 2639 return IRQ_HANDLED;
2660 2640
2661 for (i = 0; dev->control->event_types[i].mask; i++) { 2641 for (i = 0; dev->event_types[i].mask; i++) {
2662 2642
2663 if ((data_mask & dev->control->event_types[i].data) != 2643 if ((data_mask & dev->event_types[i].data) !=
2664 dev->control->event_types[i].data) 2644 dev->event_types[i].data)
2665 continue; 2645 continue;
2666 2646
2667 if (!(mask & dev->control->event_types[i].mask)) 2647 if (!(mask & dev->event_types[i].mask))
2668 continue; 2648 continue;
2669 2649
2670 for (j = 0; dev->control->event_types[i].events[j].event; j++) { 2650 for (j = 0; dev->event_types[i].events[j].event; j++) {
2671 if (ev == dev->control->event_types[i].events[j].data) { 2651 if (ev == dev->event_types[i].events[j].data) {
2672 device_event = 2652 device_event =
2673 dev->control-> 2653 dev->event_types[i].events[j].event;
2674 event_types[i].events[j].event;
2675 goto found; 2654 goto found;
2676 } 2655 }
2677 } 2656 }
@@ -2679,13 +2658,12 @@ static irqreturn_t sony_pic_irq(int irq, void *dev_id)
2679 /* Still not able to decode the event try to pass 2658 /* Still not able to decode the event try to pass
2680 * it over to the minidriver 2659 * it over to the minidriver
2681 */ 2660 */
2682 if (dev->control->handle_irq && 2661 if (dev->handle_irq && dev->handle_irq(data_mask, ev) == 0)
2683 dev->control->handle_irq(data_mask, ev) == 0)
2684 return IRQ_HANDLED; 2662 return IRQ_HANDLED;
2685 2663
2686 dprintk("unknown event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n", 2664 dprintk("unknown event ([%.2x] [%.2x]) at port 0x%.4x(+0x%.2x)\n",
2687 ev, data_mask, dev->cur_ioport->io1.minimum, 2665 ev, data_mask, dev->cur_ioport->io1.minimum,
2688 dev->control->evport_offset); 2666 dev->evport_offset);
2689 return IRQ_HANDLED; 2667 return IRQ_HANDLED;
2690 2668
2691found: 2669found:
@@ -2816,7 +2794,7 @@ static int sony_pic_add(struct acpi_device *device)
2816 /* request IRQ */ 2794 /* request IRQ */
2817 list_for_each_entry_reverse(irq, &spic_dev.interrupts, list) { 2795 list_for_each_entry_reverse(irq, &spic_dev.interrupts, list) {
2818 if (!request_irq(irq->irq.interrupts[0], sony_pic_irq, 2796 if (!request_irq(irq->irq.interrupts[0], sony_pic_irq,
2819 IRQF_SHARED, "sony-laptop", &spic_dev)) { 2797 IRQF_DISABLED, "sony-laptop", &spic_dev)) {
2820 dprintk("IRQ: %d - triggering: %d - " 2798 dprintk("IRQ: %d - triggering: %d - "
2821 "polarity: %d - shr: %d\n", 2799 "polarity: %d - shr: %d\n",
2822 irq->irq.interrupts[0], 2800 irq->irq.interrupts[0],
@@ -2949,6 +2927,7 @@ static int __init sony_laptop_init(void)
2949 "Unable to register SPIC driver."); 2927 "Unable to register SPIC driver.");
2950 goto out; 2928 goto out;
2951 } 2929 }
2930 spic_drv_registered = 1;
2952 } 2931 }
2953 2932
2954 result = acpi_bus_register_driver(&sony_nc_driver); 2933 result = acpi_bus_register_driver(&sony_nc_driver);
@@ -2960,7 +2939,7 @@ static int __init sony_laptop_init(void)
2960 return 0; 2939 return 0;
2961 2940
2962out_unregister_pic: 2941out_unregister_pic:
2963 if (!no_spic) 2942 if (spic_drv_registered)
2964 acpi_bus_unregister_driver(&sony_pic_driver); 2943 acpi_bus_unregister_driver(&sony_pic_driver);
2965out: 2944out:
2966 return result; 2945 return result;
@@ -2969,7 +2948,7 @@ out:
2969static void __exit sony_laptop_exit(void) 2948static void __exit sony_laptop_exit(void)
2970{ 2949{
2971 acpi_bus_unregister_driver(&sony_nc_driver); 2950 acpi_bus_unregister_driver(&sony_nc_driver);
2972 if (!no_spic) 2951 if (spic_drv_registered)
2973 acpi_bus_unregister_driver(&sony_pic_driver); 2952 acpi_bus_unregister_driver(&sony_pic_driver);
2974} 2953}
2975 2954
diff --git a/drivers/platform/x86/thinkpad_acpi.c b/drivers/platform/x86/thinkpad_acpi.c
index f78d27503925..d93108d148fc 100644
--- a/drivers/platform/x86/thinkpad_acpi.c
+++ b/drivers/platform/x86/thinkpad_acpi.c
@@ -22,7 +22,7 @@
22 */ 22 */
23 23
24#define TPACPI_VERSION "0.23" 24#define TPACPI_VERSION "0.23"
25#define TPACPI_SYSFS_VERSION 0x020400 25#define TPACPI_SYSFS_VERSION 0x020500
26 26
27/* 27/*
28 * Changelog: 28 * Changelog:
@@ -145,6 +145,51 @@ enum {
145 TP_ACPI_WGSV_STATE_UWBPWR = 0x0020, /* UWB radio enabled */ 145 TP_ACPI_WGSV_STATE_UWBPWR = 0x0020, /* UWB radio enabled */
146}; 146};
147 147
148/* HKEY events */
149enum tpacpi_hkey_event_t {
150 /* Hotkey-related */
151 TP_HKEY_EV_HOTKEY_BASE = 0x1001, /* first hotkey (FN+F1) */
152 TP_HKEY_EV_BRGHT_UP = 0x1010, /* Brightness up */
153 TP_HKEY_EV_BRGHT_DOWN = 0x1011, /* Brightness down */
154 TP_HKEY_EV_VOL_UP = 0x1015, /* Volume up or unmute */
155 TP_HKEY_EV_VOL_DOWN = 0x1016, /* Volume down or unmute */
156 TP_HKEY_EV_VOL_MUTE = 0x1017, /* Mixer output mute */
157
158 /* Reasons for waking up from S3/S4 */
159 TP_HKEY_EV_WKUP_S3_UNDOCK = 0x2304, /* undock requested, S3 */
160 TP_HKEY_EV_WKUP_S4_UNDOCK = 0x2404, /* undock requested, S4 */
161 TP_HKEY_EV_WKUP_S3_BAYEJ = 0x2305, /* bay ejection req, S3 */
162 TP_HKEY_EV_WKUP_S4_BAYEJ = 0x2405, /* bay ejection req, S4 */
163 TP_HKEY_EV_WKUP_S3_BATLOW = 0x2313, /* battery empty, S3 */
164 TP_HKEY_EV_WKUP_S4_BATLOW = 0x2413, /* battery empty, S4 */
165
166 /* Auto-sleep after eject request */
167 TP_HKEY_EV_BAYEJ_ACK = 0x3003, /* bay ejection complete */
168 TP_HKEY_EV_UNDOCK_ACK = 0x4003, /* undock complete */
169
170 /* Misc bay events */
171 TP_HKEY_EV_OPTDRV_EJ = 0x3006, /* opt. drive tray ejected */
172
173 /* User-interface events */
174 TP_HKEY_EV_LID_CLOSE = 0x5001, /* laptop lid closed */
175 TP_HKEY_EV_LID_OPEN = 0x5002, /* laptop lid opened */
176 TP_HKEY_EV_TABLET_TABLET = 0x5009, /* tablet swivel up */
177 TP_HKEY_EV_TABLET_NOTEBOOK = 0x500a, /* tablet swivel down */
178 TP_HKEY_EV_PEN_INSERTED = 0x500b, /* tablet pen inserted */
179 TP_HKEY_EV_PEN_REMOVED = 0x500c, /* tablet pen removed */
180 TP_HKEY_EV_BRGHT_CHANGED = 0x5010, /* backlight control event */
181
182 /* Thermal events */
183 TP_HKEY_EV_ALARM_BAT_HOT = 0x6011, /* battery too hot */
184 TP_HKEY_EV_ALARM_BAT_XHOT = 0x6012, /* battery critically hot */
185 TP_HKEY_EV_ALARM_SENSOR_HOT = 0x6021, /* sensor too hot */
186 TP_HKEY_EV_ALARM_SENSOR_XHOT = 0x6022, /* sensor critically hot */
187 TP_HKEY_EV_THM_TABLE_CHANGED = 0x6030, /* thermal table changed */
188
189 /* Misc */
190 TP_HKEY_EV_RFKILL_CHANGED = 0x7000, /* rfkill switch changed */
191};
192
148/**************************************************************************** 193/****************************************************************************
149 * Main driver 194 * Main driver
150 */ 195 */
@@ -1848,6 +1893,27 @@ static struct ibm_struct thinkpad_acpi_driver_data = {
1848 * Hotkey subdriver 1893 * Hotkey subdriver
1849 */ 1894 */
1850 1895
1896/*
1897 * ThinkPad firmware event model
1898 *
1899 * The ThinkPad firmware has two main event interfaces: normal ACPI
1900 * notifications (which follow the ACPI standard), and a private event
1901 * interface.
1902 *
1903 * The private event interface also issues events for the hotkeys. As
1904 * the driver gained features, the event handling code ended up being
1905 * built around the hotkey subdriver. This will need to be refactored
1906 * to a more formal event API eventually.
1907 *
1908 * Some "hotkeys" are actually supposed to be used as event reports,
1909 * such as "brightness has changed", "volume has changed", depending on
1910 * the ThinkPad model and how the firmware is operating.
1911 *
1912 * Unlike other classes, hotkey-class events have mask/unmask control on
1913 * non-ancient firmware. However, how it behaves changes a lot with the
1914 * firmware model and version.
1915 */
1916
1851enum { /* hot key scan codes (derived from ACPI DSDT) */ 1917enum { /* hot key scan codes (derived from ACPI DSDT) */
1852 TP_ACPI_HOTKEYSCAN_FNF1 = 0, 1918 TP_ACPI_HOTKEYSCAN_FNF1 = 0,
1853 TP_ACPI_HOTKEYSCAN_FNF2, 1919 TP_ACPI_HOTKEYSCAN_FNF2,
@@ -1875,7 +1941,7 @@ enum { /* hot key scan codes (derived from ACPI DSDT) */
1875 TP_ACPI_HOTKEYSCAN_THINKPAD, 1941 TP_ACPI_HOTKEYSCAN_THINKPAD,
1876}; 1942};
1877 1943
1878enum { /* Keys available through NVRAM polling */ 1944enum { /* Keys/events available through NVRAM polling */
1879 TPACPI_HKEY_NVRAM_KNOWN_MASK = 0x00fb88c0U, 1945 TPACPI_HKEY_NVRAM_KNOWN_MASK = 0x00fb88c0U,
1880 TPACPI_HKEY_NVRAM_GOOD_MASK = 0x00fb8000U, 1946 TPACPI_HKEY_NVRAM_GOOD_MASK = 0x00fb8000U,
1881}; 1947};
@@ -1930,8 +1996,11 @@ static struct task_struct *tpacpi_hotkey_task;
1930static struct mutex hotkey_thread_mutex; 1996static struct mutex hotkey_thread_mutex;
1931 1997
1932/* 1998/*
1933 * Acquire mutex to write poller control variables. 1999 * Acquire mutex to write poller control variables as an
1934 * Increment hotkey_config_change when changing them. 2000 * atomic block.
2001 *
2002 * Increment hotkey_config_change when changing them if you
2003 * want the kthread to forget old state.
1935 * 2004 *
1936 * See HOTKEY_CONFIG_CRITICAL_START/HOTKEY_CONFIG_CRITICAL_END 2005 * See HOTKEY_CONFIG_CRITICAL_START/HOTKEY_CONFIG_CRITICAL_END
1937 */ 2006 */
@@ -1942,6 +2011,11 @@ static unsigned int hotkey_config_change;
1942 * hotkey poller control variables 2011 * hotkey poller control variables
1943 * 2012 *
1944 * Must be atomic or readers will also need to acquire mutex 2013 * Must be atomic or readers will also need to acquire mutex
2014 *
2015 * HOTKEY_CONFIG_CRITICAL_START/HOTKEY_CONFIG_CRITICAL_END
2016 * should be used only when the changes need to be taken as
2017 * a block, OR when one needs to force the kthread to forget
2018 * old state.
1945 */ 2019 */
1946static u32 hotkey_source_mask; /* bit mask 0=ACPI,1=NVRAM */ 2020static u32 hotkey_source_mask; /* bit mask 0=ACPI,1=NVRAM */
1947static unsigned int hotkey_poll_freq = 10; /* Hz */ 2021static unsigned int hotkey_poll_freq = 10; /* Hz */
@@ -1972,10 +2046,12 @@ static enum { /* Reasons for waking up */
1972 2046
1973static int hotkey_autosleep_ack; 2047static int hotkey_autosleep_ack;
1974 2048
1975static u32 hotkey_orig_mask; 2049static u32 hotkey_orig_mask; /* events the BIOS had enabled */
1976static u32 hotkey_all_mask; 2050static u32 hotkey_all_mask; /* all events supported in fw */
1977static u32 hotkey_reserved_mask; 2051static u32 hotkey_reserved_mask; /* events better left disabled */
1978static u32 hotkey_mask; 2052static u32 hotkey_driver_mask; /* events needed by the driver */
2053static u32 hotkey_user_mask; /* events visible to userspace */
2054static u32 hotkey_acpi_mask; /* events enabled in firmware */
1979 2055
1980static unsigned int hotkey_report_mode; 2056static unsigned int hotkey_report_mode;
1981 2057
@@ -1983,6 +2059,9 @@ static u16 *hotkey_keycode_map;
1983 2059
1984static struct attribute_set *hotkey_dev_attributes; 2060static struct attribute_set *hotkey_dev_attributes;
1985 2061
2062static void tpacpi_driver_event(const unsigned int hkey_event);
2063static void hotkey_driver_event(const unsigned int scancode);
2064
1986/* HKEY.MHKG() return bits */ 2065/* HKEY.MHKG() return bits */
1987#define TP_HOTKEY_TABLET_MASK (1 << 3) 2066#define TP_HOTKEY_TABLET_MASK (1 << 3)
1988 2067
@@ -2017,24 +2096,53 @@ static int hotkey_get_tablet_mode(int *status)
2017} 2096}
2018 2097
2019/* 2098/*
2099 * Reads current event mask from firmware, and updates
2100 * hotkey_acpi_mask accordingly. Also resets any bits
2101 * from hotkey_user_mask that are unavailable to be
2102 * delivered (shadow requirement of the userspace ABI).
2103 *
2020 * Call with hotkey_mutex held 2104 * Call with hotkey_mutex held
2021 */ 2105 */
2022static int hotkey_mask_get(void) 2106static int hotkey_mask_get(void)
2023{ 2107{
2024 u32 m = 0;
2025
2026 if (tp_features.hotkey_mask) { 2108 if (tp_features.hotkey_mask) {
2109 u32 m = 0;
2110
2027 if (!acpi_evalf(hkey_handle, &m, "DHKN", "d")) 2111 if (!acpi_evalf(hkey_handle, &m, "DHKN", "d"))
2028 return -EIO; 2112 return -EIO;
2113
2114 hotkey_acpi_mask = m;
2115 } else {
2116 /* no mask support doesn't mean no event support... */
2117 hotkey_acpi_mask = hotkey_all_mask;
2029 } 2118 }
2030 HOTKEY_CONFIG_CRITICAL_START 2119
2031 hotkey_mask = m | (hotkey_source_mask & hotkey_mask); 2120 /* sync userspace-visible mask */
2032 HOTKEY_CONFIG_CRITICAL_END 2121 hotkey_user_mask &= (hotkey_acpi_mask | hotkey_source_mask);
2033 2122
2034 return 0; 2123 return 0;
2035} 2124}
2036 2125
2126void static hotkey_mask_warn_incomplete_mask(void)
2127{
2128 /* log only what the user can fix... */
2129 const u32 wantedmask = hotkey_driver_mask &
2130 ~(hotkey_acpi_mask | hotkey_source_mask) &
2131 (hotkey_all_mask | TPACPI_HKEY_NVRAM_KNOWN_MASK);
2132
2133 if (wantedmask)
2134 printk(TPACPI_NOTICE
2135 "required events 0x%08x not enabled!\n",
2136 wantedmask);
2137}
2138
2037/* 2139/*
2140 * Set the firmware mask when supported
2141 *
2142 * Also calls hotkey_mask_get to update hotkey_acpi_mask.
2143 *
2144 * NOTE: does not set bits in hotkey_user_mask, but may reset them.
2145 *
2038 * Call with hotkey_mutex held 2146 * Call with hotkey_mutex held
2039 */ 2147 */
2040static int hotkey_mask_set(u32 mask) 2148static int hotkey_mask_set(u32 mask)
@@ -2042,66 +2150,100 @@ static int hotkey_mask_set(u32 mask)
2042 int i; 2150 int i;
2043 int rc = 0; 2151 int rc = 0;
2044 2152
2045 if (tp_features.hotkey_mask) { 2153 const u32 fwmask = mask & ~hotkey_source_mask;
2046 if (!tp_warned.hotkey_mask_ff &&
2047 (mask == 0xffff || mask == 0xffffff ||
2048 mask == 0xffffffff)) {
2049 tp_warned.hotkey_mask_ff = 1;
2050 printk(TPACPI_NOTICE
2051 "setting the hotkey mask to 0x%08x is likely "
2052 "not the best way to go about it\n", mask);
2053 printk(TPACPI_NOTICE
2054 "please consider using the driver defaults, "
2055 "and refer to up-to-date thinkpad-acpi "
2056 "documentation\n");
2057 }
2058 2154
2059 HOTKEY_CONFIG_CRITICAL_START 2155 if (tp_features.hotkey_mask) {
2060 for (i = 0; i < 32; i++) { 2156 for (i = 0; i < 32; i++) {
2061 u32 m = 1 << i;
2062 /* enable in firmware mask only keys not in NVRAM
2063 * mode, but enable the key in the cached hotkey_mask
2064 * regardless of mode, or the key will end up
2065 * disabled by hotkey_mask_get() */
2066 if (!acpi_evalf(hkey_handle, 2157 if (!acpi_evalf(hkey_handle,
2067 NULL, "MHKM", "vdd", i + 1, 2158 NULL, "MHKM", "vdd", i + 1,
2068 !!((mask & ~hotkey_source_mask) & m))) { 2159 !!(mask & (1 << i)))) {
2069 rc = -EIO; 2160 rc = -EIO;
2070 break; 2161 break;
2071 } else {
2072 hotkey_mask = (hotkey_mask & ~m) | (mask & m);
2073 } 2162 }
2074 } 2163 }
2075 HOTKEY_CONFIG_CRITICAL_END 2164 }
2076 2165
2077 /* hotkey_mask_get must be called unconditionally below */ 2166 /*
2078 if (!hotkey_mask_get() && !rc && 2167 * We *must* make an inconditional call to hotkey_mask_get to
2079 (hotkey_mask & ~hotkey_source_mask) != 2168 * refresh hotkey_acpi_mask and update hotkey_user_mask
2080 (mask & ~hotkey_source_mask)) { 2169 *
2081 printk(TPACPI_NOTICE 2170 * Take the opportunity to also log when we cannot _enable_
2082 "requested hot key mask 0x%08x, but " 2171 * a given event.
2083 "firmware forced it to 0x%08x\n", 2172 */
2084 mask, hotkey_mask); 2173 if (!hotkey_mask_get() && !rc && (fwmask & ~hotkey_acpi_mask)) {
2085 } 2174 printk(TPACPI_NOTICE
2086 } else { 2175 "asked for hotkey mask 0x%08x, but "
2087#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL 2176 "firmware forced it to 0x%08x\n",
2088 HOTKEY_CONFIG_CRITICAL_START 2177 fwmask, hotkey_acpi_mask);
2089 hotkey_mask = mask & hotkey_source_mask;
2090 HOTKEY_CONFIG_CRITICAL_END
2091 hotkey_mask_get();
2092 if (hotkey_mask != mask) {
2093 printk(TPACPI_NOTICE
2094 "requested hot key mask 0x%08x, "
2095 "forced to 0x%08x (NVRAM poll mask is "
2096 "0x%08x): no firmware mask support\n",
2097 mask, hotkey_mask, hotkey_source_mask);
2098 }
2099#else
2100 hotkey_mask_get();
2101 rc = -ENXIO;
2102#endif /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
2103 } 2178 }
2104 2179
2180 hotkey_mask_warn_incomplete_mask();
2181
2182 return rc;
2183}
2184
2185/*
2186 * Sets hotkey_user_mask and tries to set the firmware mask
2187 *
2188 * Call with hotkey_mutex held
2189 */
2190static int hotkey_user_mask_set(const u32 mask)
2191{
2192 int rc;
2193
2194 /* Give people a chance to notice they are doing something that
2195 * is bound to go boom on their users sooner or later */
2196 if (!tp_warned.hotkey_mask_ff &&
2197 (mask == 0xffff || mask == 0xffffff ||
2198 mask == 0xffffffff)) {
2199 tp_warned.hotkey_mask_ff = 1;
2200 printk(TPACPI_NOTICE
2201 "setting the hotkey mask to 0x%08x is likely "
2202 "not the best way to go about it\n", mask);
2203 printk(TPACPI_NOTICE
2204 "please consider using the driver defaults, "
2205 "and refer to up-to-date thinkpad-acpi "
2206 "documentation\n");
2207 }
2208
2209 /* Try to enable what the user asked for, plus whatever we need.
2210 * this syncs everything but won't enable bits in hotkey_user_mask */
2211 rc = hotkey_mask_set((mask | hotkey_driver_mask) & ~hotkey_source_mask);
2212
2213 /* Enable the available bits in hotkey_user_mask */
2214 hotkey_user_mask = mask & (hotkey_acpi_mask | hotkey_source_mask);
2215
2216 return rc;
2217}
2218
2219/*
2220 * Sets the driver hotkey mask.
2221 *
2222 * Can be called even if the hotkey subdriver is inactive
2223 */
2224static int tpacpi_hotkey_driver_mask_set(const u32 mask)
2225{
2226 int rc;
2227
2228 /* Do the right thing if hotkey_init has not been called yet */
2229 if (!tp_features.hotkey) {
2230 hotkey_driver_mask = mask;
2231 return 0;
2232 }
2233
2234 mutex_lock(&hotkey_mutex);
2235
2236 HOTKEY_CONFIG_CRITICAL_START
2237 hotkey_driver_mask = mask;
2238#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
2239 hotkey_source_mask |= (mask & ~hotkey_all_mask);
2240#endif
2241 HOTKEY_CONFIG_CRITICAL_END
2242
2243 rc = hotkey_mask_set((hotkey_acpi_mask | hotkey_driver_mask) &
2244 ~hotkey_source_mask);
2245 mutex_unlock(&hotkey_mutex);
2246
2105 return rc; 2247 return rc;
2106} 2248}
2107 2249
@@ -2137,11 +2279,10 @@ static void tpacpi_input_send_tabletsw(void)
2137 } 2279 }
2138} 2280}
2139 2281
2140static void tpacpi_input_send_key(unsigned int scancode) 2282/* Do NOT call without validating scancode first */
2283static void tpacpi_input_send_key(const unsigned int scancode)
2141{ 2284{
2142 unsigned int keycode; 2285 const unsigned int keycode = hotkey_keycode_map[scancode];
2143
2144 keycode = hotkey_keycode_map[scancode];
2145 2286
2146 if (keycode != KEY_RESERVED) { 2287 if (keycode != KEY_RESERVED) {
2147 mutex_lock(&tpacpi_inputdev_send_mutex); 2288 mutex_lock(&tpacpi_inputdev_send_mutex);
@@ -2162,19 +2303,28 @@ static void tpacpi_input_send_key(unsigned int scancode)
2162 } 2303 }
2163} 2304}
2164 2305
2306/* Do NOT call without validating scancode first */
2307static void tpacpi_input_send_key_masked(const unsigned int scancode)
2308{
2309 hotkey_driver_event(scancode);
2310 if (hotkey_user_mask & (1 << scancode))
2311 tpacpi_input_send_key(scancode);
2312}
2313
2165#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL 2314#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
2166static struct tp_acpi_drv_struct ibm_hotkey_acpidriver; 2315static struct tp_acpi_drv_struct ibm_hotkey_acpidriver;
2167 2316
2317/* Do NOT call without validating scancode first */
2168static void tpacpi_hotkey_send_key(unsigned int scancode) 2318static void tpacpi_hotkey_send_key(unsigned int scancode)
2169{ 2319{
2170 tpacpi_input_send_key(scancode); 2320 tpacpi_input_send_key_masked(scancode);
2171 if (hotkey_report_mode < 2) { 2321 if (hotkey_report_mode < 2) {
2172 acpi_bus_generate_proc_event(ibm_hotkey_acpidriver.device, 2322 acpi_bus_generate_proc_event(ibm_hotkey_acpidriver.device,
2173 0x80, 0x1001 + scancode); 2323 0x80, TP_HKEY_EV_HOTKEY_BASE + scancode);
2174 } 2324 }
2175} 2325}
2176 2326
2177static void hotkey_read_nvram(struct tp_nvram_state *n, u32 m) 2327static void hotkey_read_nvram(struct tp_nvram_state *n, const u32 m)
2178{ 2328{
2179 u8 d; 2329 u8 d;
2180 2330
@@ -2210,21 +2360,24 @@ static void hotkey_read_nvram(struct tp_nvram_state *n, u32 m)
2210 } 2360 }
2211} 2361}
2212 2362
2363static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
2364 struct tp_nvram_state *newn,
2365 const u32 event_mask)
2366{
2367
2213#define TPACPI_COMPARE_KEY(__scancode, __member) \ 2368#define TPACPI_COMPARE_KEY(__scancode, __member) \
2214 do { \ 2369 do { \
2215 if ((mask & (1 << __scancode)) && \ 2370 if ((event_mask & (1 << __scancode)) && \
2216 oldn->__member != newn->__member) \ 2371 oldn->__member != newn->__member) \
2217 tpacpi_hotkey_send_key(__scancode); \ 2372 tpacpi_hotkey_send_key(__scancode); \
2218 } while (0) 2373 } while (0)
2219 2374
2220#define TPACPI_MAY_SEND_KEY(__scancode) \ 2375#define TPACPI_MAY_SEND_KEY(__scancode) \
2221 do { if (mask & (1 << __scancode)) \ 2376 do { \
2222 tpacpi_hotkey_send_key(__scancode); } while (0) 2377 if (event_mask & (1 << __scancode)) \
2378 tpacpi_hotkey_send_key(__scancode); \
2379 } while (0)
2223 2380
2224static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
2225 struct tp_nvram_state *newn,
2226 u32 mask)
2227{
2228 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle); 2381 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_THINKPAD, thinkpad_toggle);
2229 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle); 2382 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNSPACE, zoom_toggle);
2230 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle); 2383 TPACPI_COMPARE_KEY(TP_ACPI_HOTKEYSCAN_FNF7, display_toggle);
@@ -2270,15 +2423,22 @@ static void hotkey_compare_and_issue_event(struct tp_nvram_state *oldn,
2270 } 2423 }
2271 } 2424 }
2272 } 2425 }
2273}
2274 2426
2275#undef TPACPI_COMPARE_KEY 2427#undef TPACPI_COMPARE_KEY
2276#undef TPACPI_MAY_SEND_KEY 2428#undef TPACPI_MAY_SEND_KEY
2429}
2277 2430
2431/*
2432 * Polling driver
2433 *
2434 * We track all events in hotkey_source_mask all the time, since
2435 * most of them are edge-based. We only issue those requested by
2436 * hotkey_user_mask or hotkey_driver_mask, though.
2437 */
2278static int hotkey_kthread(void *data) 2438static int hotkey_kthread(void *data)
2279{ 2439{
2280 struct tp_nvram_state s[2]; 2440 struct tp_nvram_state s[2];
2281 u32 mask; 2441 u32 poll_mask, event_mask;
2282 unsigned int si, so; 2442 unsigned int si, so;
2283 unsigned long t; 2443 unsigned long t;
2284 unsigned int change_detector, must_reset; 2444 unsigned int change_detector, must_reset;
@@ -2298,10 +2458,12 @@ static int hotkey_kthread(void *data)
2298 /* Initial state for compares */ 2458 /* Initial state for compares */
2299 mutex_lock(&hotkey_thread_data_mutex); 2459 mutex_lock(&hotkey_thread_data_mutex);
2300 change_detector = hotkey_config_change; 2460 change_detector = hotkey_config_change;
2301 mask = hotkey_source_mask & hotkey_mask; 2461 poll_mask = hotkey_source_mask;
2462 event_mask = hotkey_source_mask &
2463 (hotkey_driver_mask | hotkey_user_mask);
2302 poll_freq = hotkey_poll_freq; 2464 poll_freq = hotkey_poll_freq;
2303 mutex_unlock(&hotkey_thread_data_mutex); 2465 mutex_unlock(&hotkey_thread_data_mutex);
2304 hotkey_read_nvram(&s[so], mask); 2466 hotkey_read_nvram(&s[so], poll_mask);
2305 2467
2306 while (!kthread_should_stop()) { 2468 while (!kthread_should_stop()) {
2307 if (t == 0) { 2469 if (t == 0) {
@@ -2324,15 +2486,17 @@ static int hotkey_kthread(void *data)
2324 t = 0; 2486 t = 0;
2325 change_detector = hotkey_config_change; 2487 change_detector = hotkey_config_change;
2326 } 2488 }
2327 mask = hotkey_source_mask & hotkey_mask; 2489 poll_mask = hotkey_source_mask;
2490 event_mask = hotkey_source_mask &
2491 (hotkey_driver_mask | hotkey_user_mask);
2328 poll_freq = hotkey_poll_freq; 2492 poll_freq = hotkey_poll_freq;
2329 mutex_unlock(&hotkey_thread_data_mutex); 2493 mutex_unlock(&hotkey_thread_data_mutex);
2330 2494
2331 if (likely(mask)) { 2495 if (likely(poll_mask)) {
2332 hotkey_read_nvram(&s[si], mask); 2496 hotkey_read_nvram(&s[si], poll_mask);
2333 if (likely(si != so)) { 2497 if (likely(si != so)) {
2334 hotkey_compare_and_issue_event(&s[so], &s[si], 2498 hotkey_compare_and_issue_event(&s[so], &s[si],
2335 mask); 2499 event_mask);
2336 } 2500 }
2337 } 2501 }
2338 2502
@@ -2364,10 +2528,12 @@ static void hotkey_poll_stop_sync(void)
2364/* call with hotkey_mutex held */ 2528/* call with hotkey_mutex held */
2365static void hotkey_poll_setup(bool may_warn) 2529static void hotkey_poll_setup(bool may_warn)
2366{ 2530{
2367 u32 hotkeys_to_poll = hotkey_source_mask & hotkey_mask; 2531 const u32 poll_driver_mask = hotkey_driver_mask & hotkey_source_mask;
2532 const u32 poll_user_mask = hotkey_user_mask & hotkey_source_mask;
2368 2533
2369 if (hotkeys_to_poll != 0 && hotkey_poll_freq > 0 && 2534 if (hotkey_poll_freq > 0 &&
2370 (tpacpi_inputdev->users > 0 || hotkey_report_mode < 2)) { 2535 (poll_driver_mask ||
2536 (poll_user_mask && tpacpi_inputdev->users > 0))) {
2371 if (!tpacpi_hotkey_task) { 2537 if (!tpacpi_hotkey_task) {
2372 tpacpi_hotkey_task = kthread_run(hotkey_kthread, 2538 tpacpi_hotkey_task = kthread_run(hotkey_kthread,
2373 NULL, TPACPI_NVRAM_KTHREAD_NAME); 2539 NULL, TPACPI_NVRAM_KTHREAD_NAME);
@@ -2380,12 +2546,13 @@ static void hotkey_poll_setup(bool may_warn)
2380 } 2546 }
2381 } else { 2547 } else {
2382 hotkey_poll_stop_sync(); 2548 hotkey_poll_stop_sync();
2383 if (may_warn && hotkeys_to_poll != 0 && 2549 if (may_warn && (poll_driver_mask || poll_user_mask) &&
2384 hotkey_poll_freq == 0) { 2550 hotkey_poll_freq == 0) {
2385 printk(TPACPI_NOTICE 2551 printk(TPACPI_NOTICE
2386 "hot keys 0x%08x require polling, " 2552 "hot keys 0x%08x and/or events 0x%08x "
2387 "which is currently disabled\n", 2553 "require polling, which is currently "
2388 hotkeys_to_poll); 2554 "disabled\n",
2555 poll_user_mask, poll_driver_mask);
2389 } 2556 }
2390 } 2557 }
2391} 2558}
@@ -2403,9 +2570,7 @@ static void hotkey_poll_set_freq(unsigned int freq)
2403 if (!freq) 2570 if (!freq)
2404 hotkey_poll_stop_sync(); 2571 hotkey_poll_stop_sync();
2405 2572
2406 HOTKEY_CONFIG_CRITICAL_START
2407 hotkey_poll_freq = freq; 2573 hotkey_poll_freq = freq;
2408 HOTKEY_CONFIG_CRITICAL_END
2409} 2574}
2410 2575
2411#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */ 2576#else /* CONFIG_THINKPAD_ACPI_HOTKEY_POLL */
@@ -2440,7 +2605,8 @@ static int hotkey_inputdev_open(struct input_dev *dev)
2440static void hotkey_inputdev_close(struct input_dev *dev) 2605static void hotkey_inputdev_close(struct input_dev *dev)
2441{ 2606{
2442 /* disable hotkey polling when possible */ 2607 /* disable hotkey polling when possible */
2443 if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING) 2608 if (tpacpi_lifecycle == TPACPI_LIFE_RUNNING &&
2609 !(hotkey_source_mask & hotkey_driver_mask))
2444 hotkey_poll_setup_safe(false); 2610 hotkey_poll_setup_safe(false);
2445} 2611}
2446 2612
@@ -2488,15 +2654,7 @@ static ssize_t hotkey_mask_show(struct device *dev,
2488 struct device_attribute *attr, 2654 struct device_attribute *attr,
2489 char *buf) 2655 char *buf)
2490{ 2656{
2491 int res; 2657 return snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_user_mask);
2492
2493 if (mutex_lock_killable(&hotkey_mutex))
2494 return -ERESTARTSYS;
2495 res = hotkey_mask_get();
2496 mutex_unlock(&hotkey_mutex);
2497
2498 return (res)?
2499 res : snprintf(buf, PAGE_SIZE, "0x%08x\n", hotkey_mask);
2500} 2658}
2501 2659
2502static ssize_t hotkey_mask_store(struct device *dev, 2660static ssize_t hotkey_mask_store(struct device *dev,
@@ -2512,7 +2670,7 @@ static ssize_t hotkey_mask_store(struct device *dev,
2512 if (mutex_lock_killable(&hotkey_mutex)) 2670 if (mutex_lock_killable(&hotkey_mutex))
2513 return -ERESTARTSYS; 2671 return -ERESTARTSYS;
2514 2672
2515 res = hotkey_mask_set(t); 2673 res = hotkey_user_mask_set(t);
2516 2674
2517#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL 2675#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
2518 hotkey_poll_setup(true); 2676 hotkey_poll_setup(true);
@@ -2594,6 +2752,8 @@ static ssize_t hotkey_source_mask_store(struct device *dev,
2594 const char *buf, size_t count) 2752 const char *buf, size_t count)
2595{ 2753{
2596 unsigned long t; 2754 unsigned long t;
2755 u32 r_ev;
2756 int rc;
2597 2757
2598 if (parse_strtoul(buf, 0xffffffffUL, &t) || 2758 if (parse_strtoul(buf, 0xffffffffUL, &t) ||
2599 ((t & ~TPACPI_HKEY_NVRAM_KNOWN_MASK) != 0)) 2759 ((t & ~TPACPI_HKEY_NVRAM_KNOWN_MASK) != 0))
@@ -2606,14 +2766,28 @@ static ssize_t hotkey_source_mask_store(struct device *dev,
2606 hotkey_source_mask = t; 2766 hotkey_source_mask = t;
2607 HOTKEY_CONFIG_CRITICAL_END 2767 HOTKEY_CONFIG_CRITICAL_END
2608 2768
2769 rc = hotkey_mask_set((hotkey_user_mask | hotkey_driver_mask) &
2770 ~hotkey_source_mask);
2609 hotkey_poll_setup(true); 2771 hotkey_poll_setup(true);
2610 hotkey_mask_set(hotkey_mask); 2772
2773 /* check if events needed by the driver got disabled */
2774 r_ev = hotkey_driver_mask & ~(hotkey_acpi_mask & hotkey_all_mask)
2775 & ~hotkey_source_mask & TPACPI_HKEY_NVRAM_KNOWN_MASK;
2611 2776
2612 mutex_unlock(&hotkey_mutex); 2777 mutex_unlock(&hotkey_mutex);
2613 2778
2779 if (rc < 0)
2780 printk(TPACPI_ERR "hotkey_source_mask: failed to update the"
2781 "firmware event mask!\n");
2782
2783 if (r_ev)
2784 printk(TPACPI_NOTICE "hotkey_source_mask: "
2785 "some important events were disabled: "
2786 "0x%04x\n", r_ev);
2787
2614 tpacpi_disclose_usertask("hotkey_source_mask", "set to 0x%08lx\n", t); 2788 tpacpi_disclose_usertask("hotkey_source_mask", "set to 0x%08lx\n", t);
2615 2789
2616 return count; 2790 return (rc < 0) ? rc : count;
2617} 2791}
2618 2792
2619static struct device_attribute dev_attr_hotkey_source_mask = 2793static struct device_attribute dev_attr_hotkey_source_mask =
@@ -2731,9 +2905,8 @@ static struct device_attribute dev_attr_hotkey_wakeup_reason =
2731 2905
2732static void hotkey_wakeup_reason_notify_change(void) 2906static void hotkey_wakeup_reason_notify_change(void)
2733{ 2907{
2734 if (tp_features.hotkey_mask) 2908 sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
2735 sysfs_notify(&tpacpi_pdev->dev.kobj, NULL, 2909 "wakeup_reason");
2736 "wakeup_reason");
2737} 2910}
2738 2911
2739/* sysfs wakeup hotunplug_complete (pollable) -------------------------- */ 2912/* sysfs wakeup hotunplug_complete (pollable) -------------------------- */
@@ -2750,9 +2923,8 @@ static struct device_attribute dev_attr_hotkey_wakeup_hotunplug_complete =
2750 2923
2751static void hotkey_wakeup_hotunplug_complete_notify_change(void) 2924static void hotkey_wakeup_hotunplug_complete_notify_change(void)
2752{ 2925{
2753 if (tp_features.hotkey_mask) 2926 sysfs_notify(&tpacpi_pdev->dev.kobj, NULL,
2754 sysfs_notify(&tpacpi_pdev->dev.kobj, NULL, 2927 "wakeup_hotunplug_complete");
2755 "wakeup_hotunplug_complete");
2756} 2928}
2757 2929
2758/* --------------------------------------------------------------------- */ 2930/* --------------------------------------------------------------------- */
@@ -2760,27 +2932,19 @@ static void hotkey_wakeup_hotunplug_complete_notify_change(void)
2760static struct attribute *hotkey_attributes[] __initdata = { 2932static struct attribute *hotkey_attributes[] __initdata = {
2761 &dev_attr_hotkey_enable.attr, 2933 &dev_attr_hotkey_enable.attr,
2762 &dev_attr_hotkey_bios_enabled.attr, 2934 &dev_attr_hotkey_bios_enabled.attr,
2935 &dev_attr_hotkey_bios_mask.attr,
2763 &dev_attr_hotkey_report_mode.attr, 2936 &dev_attr_hotkey_report_mode.attr,
2764#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL 2937 &dev_attr_hotkey_wakeup_reason.attr,
2938 &dev_attr_hotkey_wakeup_hotunplug_complete.attr,
2765 &dev_attr_hotkey_mask.attr, 2939 &dev_attr_hotkey_mask.attr,
2766 &dev_attr_hotkey_all_mask.attr, 2940 &dev_attr_hotkey_all_mask.attr,
2767 &dev_attr_hotkey_recommended_mask.attr, 2941 &dev_attr_hotkey_recommended_mask.attr,
2942#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
2768 &dev_attr_hotkey_source_mask.attr, 2943 &dev_attr_hotkey_source_mask.attr,
2769 &dev_attr_hotkey_poll_freq.attr, 2944 &dev_attr_hotkey_poll_freq.attr,
2770#endif 2945#endif
2771}; 2946};
2772 2947
2773static struct attribute *hotkey_mask_attributes[] __initdata = {
2774 &dev_attr_hotkey_bios_mask.attr,
2775#ifndef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
2776 &dev_attr_hotkey_mask.attr,
2777 &dev_attr_hotkey_all_mask.attr,
2778 &dev_attr_hotkey_recommended_mask.attr,
2779#endif
2780 &dev_attr_hotkey_wakeup_reason.attr,
2781 &dev_attr_hotkey_wakeup_hotunplug_complete.attr,
2782};
2783
2784/* 2948/*
2785 * Sync both the hw and sw blocking state of all switches 2949 * Sync both the hw and sw blocking state of all switches
2786 */ 2950 */
@@ -2843,16 +3007,16 @@ static void hotkey_exit(void)
2843 3007
2844 kfree(hotkey_keycode_map); 3008 kfree(hotkey_keycode_map);
2845 3009
2846 if (tp_features.hotkey) { 3010 dbg_printk(TPACPI_DBG_EXIT | TPACPI_DBG_HKEY,
2847 dbg_printk(TPACPI_DBG_EXIT | TPACPI_DBG_HKEY, 3011 "restoring original HKEY status and mask\n");
2848 "restoring original hot key mask\n"); 3012 /* yes, there is a bitwise or below, we want the
2849 /* no short-circuit boolean operator below! */ 3013 * functions to be called even if one of them fail */
2850 if ((hotkey_mask_set(hotkey_orig_mask) | 3014 if (((tp_features.hotkey_mask &&
2851 hotkey_status_set(false)) != 0) 3015 hotkey_mask_set(hotkey_orig_mask)) |
2852 printk(TPACPI_ERR 3016 hotkey_status_set(false)) != 0)
2853 "failed to restore hot key mask " 3017 printk(TPACPI_ERR
2854 "to BIOS defaults\n"); 3018 "failed to restore hot key mask "
2855 } 3019 "to BIOS defaults\n");
2856} 3020}
2857 3021
2858static void __init hotkey_unmap(const unsigned int scancode) 3022static void __init hotkey_unmap(const unsigned int scancode)
@@ -2864,6 +3028,35 @@ static void __init hotkey_unmap(const unsigned int scancode)
2864 } 3028 }
2865} 3029}
2866 3030
3031/*
3032 * HKEY quirks:
3033 * TPACPI_HK_Q_INIMASK: Supports FN+F3,FN+F4,FN+F12
3034 */
3035
3036#define TPACPI_HK_Q_INIMASK 0x0001
3037
3038static const struct tpacpi_quirk tpacpi_hotkey_qtable[] __initconst = {
3039 TPACPI_Q_IBM('I', 'H', TPACPI_HK_Q_INIMASK), /* 600E */
3040 TPACPI_Q_IBM('I', 'N', TPACPI_HK_Q_INIMASK), /* 600E */
3041 TPACPI_Q_IBM('I', 'D', TPACPI_HK_Q_INIMASK), /* 770, 770E, 770ED */
3042 TPACPI_Q_IBM('I', 'W', TPACPI_HK_Q_INIMASK), /* A20m */
3043 TPACPI_Q_IBM('I', 'V', TPACPI_HK_Q_INIMASK), /* A20p */
3044 TPACPI_Q_IBM('1', '0', TPACPI_HK_Q_INIMASK), /* A21e, A22e */
3045 TPACPI_Q_IBM('K', 'U', TPACPI_HK_Q_INIMASK), /* A21e */
3046 TPACPI_Q_IBM('K', 'X', TPACPI_HK_Q_INIMASK), /* A21m, A22m */
3047 TPACPI_Q_IBM('K', 'Y', TPACPI_HK_Q_INIMASK), /* A21p, A22p */
3048 TPACPI_Q_IBM('1', 'B', TPACPI_HK_Q_INIMASK), /* A22e */
3049 TPACPI_Q_IBM('1', '3', TPACPI_HK_Q_INIMASK), /* A22m */
3050 TPACPI_Q_IBM('1', 'E', TPACPI_HK_Q_INIMASK), /* A30/p (0) */
3051 TPACPI_Q_IBM('1', 'C', TPACPI_HK_Q_INIMASK), /* R30 */
3052 TPACPI_Q_IBM('1', 'F', TPACPI_HK_Q_INIMASK), /* R31 */
3053 TPACPI_Q_IBM('I', 'Y', TPACPI_HK_Q_INIMASK), /* T20 */
3054 TPACPI_Q_IBM('K', 'Z', TPACPI_HK_Q_INIMASK), /* T21 */
3055 TPACPI_Q_IBM('1', '6', TPACPI_HK_Q_INIMASK), /* T22 */
3056 TPACPI_Q_IBM('I', 'Z', TPACPI_HK_Q_INIMASK), /* X20, X21 */
3057 TPACPI_Q_IBM('1', 'D', TPACPI_HK_Q_INIMASK), /* X22, X23, X24 */
3058};
3059
2867static int __init hotkey_init(struct ibm_init_struct *iibm) 3060static int __init hotkey_init(struct ibm_init_struct *iibm)
2868{ 3061{
2869 /* Requirements for changing the default keymaps: 3062 /* Requirements for changing the default keymaps:
@@ -2906,9 +3099,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
2906 KEY_UNKNOWN, /* 0x0D: FN+INSERT */ 3099 KEY_UNKNOWN, /* 0x0D: FN+INSERT */
2907 KEY_UNKNOWN, /* 0x0E: FN+DELETE */ 3100 KEY_UNKNOWN, /* 0x0E: FN+DELETE */
2908 3101
2909 /* brightness: firmware always reacts to them, unless 3102 /* brightness: firmware always reacts to them */
2910 * X.org did some tricks in the radeon BIOS scratch
2911 * registers of *some* models */
2912 KEY_RESERVED, /* 0x0F: FN+HOME (brightness up) */ 3103 KEY_RESERVED, /* 0x0F: FN+HOME (brightness up) */
2913 KEY_RESERVED, /* 0x10: FN+END (brightness down) */ 3104 KEY_RESERVED, /* 0x10: FN+END (brightness down) */
2914 3105
@@ -2983,6 +3174,8 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
2983 int status; 3174 int status;
2984 int hkeyv; 3175 int hkeyv;
2985 3176
3177 unsigned long quirks;
3178
2986 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, 3179 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
2987 "initializing hotkey subdriver\n"); 3180 "initializing hotkey subdriver\n");
2988 3181
@@ -3008,9 +3201,16 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3008 if (!tp_features.hotkey) 3201 if (!tp_features.hotkey)
3009 return 1; 3202 return 1;
3010 3203
3204 quirks = tpacpi_check_quirks(tpacpi_hotkey_qtable,
3205 ARRAY_SIZE(tpacpi_hotkey_qtable));
3206
3011 tpacpi_disable_brightness_delay(); 3207 tpacpi_disable_brightness_delay();
3012 3208
3013 hotkey_dev_attributes = create_attr_set(13, NULL); 3209 /* MUST have enough space for all attributes to be added to
3210 * hotkey_dev_attributes */
3211 hotkey_dev_attributes = create_attr_set(
3212 ARRAY_SIZE(hotkey_attributes) + 2,
3213 NULL);
3014 if (!hotkey_dev_attributes) 3214 if (!hotkey_dev_attributes)
3015 return -ENOMEM; 3215 return -ENOMEM;
3016 res = add_many_to_attr_set(hotkey_dev_attributes, 3216 res = add_many_to_attr_set(hotkey_dev_attributes,
@@ -3019,7 +3219,7 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3019 if (res) 3219 if (res)
3020 goto err_exit; 3220 goto err_exit;
3021 3221
3022 /* mask not supported on 570, 600e/x, 770e, 770x, A21e, A2xm/p, 3222 /* mask not supported on 600e/x, 770e, 770x, A21e, A2xm/p,
3023 A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking 3223 A30, R30, R31, T20-22, X20-21, X22-24. Detected by checking
3024 for HKEY interface version 0x100 */ 3224 for HKEY interface version 0x100 */
3025 if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) { 3225 if (acpi_evalf(hkey_handle, &hkeyv, "MHKV", "qd")) {
@@ -3033,10 +3233,22 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3033 * MHKV 0x100 in A31, R40, R40e, 3233 * MHKV 0x100 in A31, R40, R40e,
3034 * T4x, X31, and later 3234 * T4x, X31, and later
3035 */ 3235 */
3036 tp_features.hotkey_mask = 1;
3037 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, 3236 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
3038 "firmware HKEY interface version: 0x%x\n", 3237 "firmware HKEY interface version: 0x%x\n",
3039 hkeyv); 3238 hkeyv);
3239
3240 /* Paranoia check AND init hotkey_all_mask */
3241 if (!acpi_evalf(hkey_handle, &hotkey_all_mask,
3242 "MHKA", "qd")) {
3243 printk(TPACPI_ERR
3244 "missing MHKA handler, "
3245 "please report this to %s\n",
3246 TPACPI_MAIL);
3247 /* Fallback: pre-init for FN+F3,F4,F12 */
3248 hotkey_all_mask = 0x080cU;
3249 } else {
3250 tp_features.hotkey_mask = 1;
3251 }
3040 } 3252 }
3041 } 3253 }
3042 3254
@@ -3044,32 +3256,23 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3044 "hotkey masks are %s\n", 3256 "hotkey masks are %s\n",
3045 str_supported(tp_features.hotkey_mask)); 3257 str_supported(tp_features.hotkey_mask));
3046 3258
3047 if (tp_features.hotkey_mask) { 3259 /* Init hotkey_all_mask if not initialized yet */
3048 if (!acpi_evalf(hkey_handle, &hotkey_all_mask, 3260 if (!tp_features.hotkey_mask && !hotkey_all_mask &&
3049 "MHKA", "qd")) { 3261 (quirks & TPACPI_HK_Q_INIMASK))
3050 printk(TPACPI_ERR 3262 hotkey_all_mask = 0x080cU; /* FN+F12, FN+F4, FN+F3 */
3051 "missing MHKA handler, "
3052 "please report this to %s\n",
3053 TPACPI_MAIL);
3054 /* FN+F12, FN+F4, FN+F3 */
3055 hotkey_all_mask = 0x080cU;
3056 }
3057 }
3058 3263
3059 /* hotkey_source_mask *must* be zero for 3264 /* Init hotkey_acpi_mask and hotkey_orig_mask */
3060 * the first hotkey_mask_get */
3061 if (tp_features.hotkey_mask) { 3265 if (tp_features.hotkey_mask) {
3266 /* hotkey_source_mask *must* be zero for
3267 * the first hotkey_mask_get to return hotkey_orig_mask */
3062 res = hotkey_mask_get(); 3268 res = hotkey_mask_get();
3063 if (res) 3269 if (res)
3064 goto err_exit; 3270 goto err_exit;
3065 3271
3066 hotkey_orig_mask = hotkey_mask; 3272 hotkey_orig_mask = hotkey_acpi_mask;
3067 res = add_many_to_attr_set( 3273 } else {
3068 hotkey_dev_attributes, 3274 hotkey_orig_mask = hotkey_all_mask;
3069 hotkey_mask_attributes, 3275 hotkey_acpi_mask = hotkey_all_mask;
3070 ARRAY_SIZE(hotkey_mask_attributes));
3071 if (res)
3072 goto err_exit;
3073 } 3276 }
3074 3277
3075#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES 3278#ifdef CONFIG_THINKPAD_ACPI_DEBUGFACILITIES
@@ -3183,14 +3386,9 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3183 } 3386 }
3184 3387
3185#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL 3388#ifdef CONFIG_THINKPAD_ACPI_HOTKEY_POLL
3186 if (tp_features.hotkey_mask) { 3389 hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK
3187 hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK 3390 & ~hotkey_all_mask
3188 & ~hotkey_all_mask 3391 & ~hotkey_reserved_mask;
3189 & ~hotkey_reserved_mask;
3190 } else {
3191 hotkey_source_mask = TPACPI_HKEY_NVRAM_GOOD_MASK
3192 & ~hotkey_reserved_mask;
3193 }
3194 3392
3195 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, 3393 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
3196 "hotkey source mask 0x%08x, polling freq %u\n", 3394 "hotkey source mask 0x%08x, polling freq %u\n",
@@ -3204,13 +3402,18 @@ static int __init hotkey_init(struct ibm_init_struct *iibm)
3204 hotkey_exit(); 3402 hotkey_exit();
3205 return res; 3403 return res;
3206 } 3404 }
3207 res = hotkey_mask_set(((hotkey_all_mask | hotkey_source_mask) 3405 res = hotkey_mask_set(((hotkey_all_mask & ~hotkey_reserved_mask)
3208 & ~hotkey_reserved_mask) 3406 | hotkey_driver_mask)
3209 | hotkey_orig_mask); 3407 & ~hotkey_source_mask);
3210 if (res < 0 && res != -ENXIO) { 3408 if (res < 0 && res != -ENXIO) {
3211 hotkey_exit(); 3409 hotkey_exit();
3212 return res; 3410 return res;
3213 } 3411 }
3412 hotkey_user_mask = (hotkey_acpi_mask | hotkey_source_mask)
3413 & ~hotkey_reserved_mask;
3414 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
3415 "initial masks: user=0x%08x, fw=0x%08x, poll=0x%08x\n",
3416 hotkey_user_mask, hotkey_acpi_mask, hotkey_source_mask);
3214 3417
3215 dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY, 3418 dbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_HKEY,
3216 "legacy ibm/hotkey event reporting over procfs %s\n", 3419 "legacy ibm/hotkey event reporting over procfs %s\n",
@@ -3245,7 +3448,7 @@ static bool hotkey_notify_hotkey(const u32 hkey,
3245 if (scancode > 0 && scancode < 0x21) { 3448 if (scancode > 0 && scancode < 0x21) {
3246 scancode--; 3449 scancode--;
3247 if (!(hotkey_source_mask & (1 << scancode))) { 3450 if (!(hotkey_source_mask & (1 << scancode))) {
3248 tpacpi_input_send_key(scancode); 3451 tpacpi_input_send_key_masked(scancode);
3249 *send_acpi_ev = false; 3452 *send_acpi_ev = false;
3250 } else { 3453 } else {
3251 *ignore_acpi_ev = true; 3454 *ignore_acpi_ev = true;
@@ -3264,20 +3467,20 @@ static bool hotkey_notify_wakeup(const u32 hkey,
3264 *ignore_acpi_ev = false; 3467 *ignore_acpi_ev = false;
3265 3468
3266 switch (hkey) { 3469 switch (hkey) {
3267 case 0x2304: /* suspend, undock */ 3470 case TP_HKEY_EV_WKUP_S3_UNDOCK: /* suspend, undock */
3268 case 0x2404: /* hibernation, undock */ 3471 case TP_HKEY_EV_WKUP_S4_UNDOCK: /* hibernation, undock */
3269 hotkey_wakeup_reason = TP_ACPI_WAKEUP_UNDOCK; 3472 hotkey_wakeup_reason = TP_ACPI_WAKEUP_UNDOCK;
3270 *ignore_acpi_ev = true; 3473 *ignore_acpi_ev = true;
3271 break; 3474 break;
3272 3475
3273 case 0x2305: /* suspend, bay eject */ 3476 case TP_HKEY_EV_WKUP_S3_BAYEJ: /* suspend, bay eject */
3274 case 0x2405: /* hibernation, bay eject */ 3477 case TP_HKEY_EV_WKUP_S4_BAYEJ: /* hibernation, bay eject */
3275 hotkey_wakeup_reason = TP_ACPI_WAKEUP_BAYEJ; 3478 hotkey_wakeup_reason = TP_ACPI_WAKEUP_BAYEJ;
3276 *ignore_acpi_ev = true; 3479 *ignore_acpi_ev = true;
3277 break; 3480 break;
3278 3481
3279 case 0x2313: /* Battery on critical low level (S3) */ 3482 case TP_HKEY_EV_WKUP_S3_BATLOW: /* Battery on critical low level/S3 */
3280 case 0x2413: /* Battery on critical low level (S4) */ 3483 case TP_HKEY_EV_WKUP_S4_BATLOW: /* Battery on critical low level/S4 */
3281 printk(TPACPI_ALERT 3484 printk(TPACPI_ALERT
3282 "EMERGENCY WAKEUP: battery almost empty\n"); 3485 "EMERGENCY WAKEUP: battery almost empty\n");
3283 /* how to auto-heal: */ 3486 /* how to auto-heal: */
@@ -3307,21 +3510,21 @@ static bool hotkey_notify_usrevent(const u32 hkey,
3307 *ignore_acpi_ev = false; 3510 *ignore_acpi_ev = false;
3308 3511
3309 switch (hkey) { 3512 switch (hkey) {
3310 case 0x5010: /* Lenovo new BIOS: brightness changed */ 3513 case TP_HKEY_EV_PEN_INSERTED: /* X61t: tablet pen inserted into bay */
3311 case 0x500b: /* X61t: tablet pen inserted into bay */ 3514 case TP_HKEY_EV_PEN_REMOVED: /* X61t: tablet pen removed from bay */
3312 case 0x500c: /* X61t: tablet pen removed from bay */
3313 return true; 3515 return true;
3314 3516
3315 case 0x5009: /* X41t-X61t: swivel up (tablet mode) */ 3517 case TP_HKEY_EV_TABLET_TABLET: /* X41t-X61t: tablet mode */
3316 case 0x500a: /* X41t-X61t: swivel down (normal mode) */ 3518 case TP_HKEY_EV_TABLET_NOTEBOOK: /* X41t-X61t: normal mode */
3317 tpacpi_input_send_tabletsw(); 3519 tpacpi_input_send_tabletsw();
3318 hotkey_tablet_mode_notify_change(); 3520 hotkey_tablet_mode_notify_change();
3319 *send_acpi_ev = false; 3521 *send_acpi_ev = false;
3320 return true; 3522 return true;
3321 3523
3322 case 0x5001: 3524 case TP_HKEY_EV_LID_CLOSE: /* Lid closed */
3323 case 0x5002: 3525 case TP_HKEY_EV_LID_OPEN: /* Lid opened */
3324 /* LID switch events. Do not propagate */ 3526 case TP_HKEY_EV_BRGHT_CHANGED: /* brightness changed */
3527 /* do not propagate these events */
3325 *ignore_acpi_ev = true; 3528 *ignore_acpi_ev = true;
3326 return true; 3529 return true;
3327 3530
@@ -3339,30 +3542,30 @@ static bool hotkey_notify_thermal(const u32 hkey,
3339 *ignore_acpi_ev = false; 3542 *ignore_acpi_ev = false;
3340 3543
3341 switch (hkey) { 3544 switch (hkey) {
3342 case 0x6011: 3545 case TP_HKEY_EV_ALARM_BAT_HOT:
3343 printk(TPACPI_CRIT 3546 printk(TPACPI_CRIT
3344 "THERMAL ALARM: battery is too hot!\n"); 3547 "THERMAL ALARM: battery is too hot!\n");
3345 /* recommended action: warn user through gui */ 3548 /* recommended action: warn user through gui */
3346 return true; 3549 return true;
3347 case 0x6012: 3550 case TP_HKEY_EV_ALARM_BAT_XHOT:
3348 printk(TPACPI_ALERT 3551 printk(TPACPI_ALERT
3349 "THERMAL EMERGENCY: battery is extremely hot!\n"); 3552 "THERMAL EMERGENCY: battery is extremely hot!\n");
3350 /* recommended action: immediate sleep/hibernate */ 3553 /* recommended action: immediate sleep/hibernate */
3351 return true; 3554 return true;
3352 case 0x6021: 3555 case TP_HKEY_EV_ALARM_SENSOR_HOT:
3353 printk(TPACPI_CRIT 3556 printk(TPACPI_CRIT
3354 "THERMAL ALARM: " 3557 "THERMAL ALARM: "
3355 "a sensor reports something is too hot!\n"); 3558 "a sensor reports something is too hot!\n");
3356 /* recommended action: warn user through gui, that */ 3559 /* recommended action: warn user through gui, that */
3357 /* some internal component is too hot */ 3560 /* some internal component is too hot */
3358 return true; 3561 return true;
3359 case 0x6022: 3562 case TP_HKEY_EV_ALARM_SENSOR_XHOT:
3360 printk(TPACPI_ALERT 3563 printk(TPACPI_ALERT
3361 "THERMAL EMERGENCY: " 3564 "THERMAL EMERGENCY: "
3362 "a sensor reports something is extremely hot!\n"); 3565 "a sensor reports something is extremely hot!\n");
3363 /* recommended action: immediate sleep/hibernate */ 3566 /* recommended action: immediate sleep/hibernate */
3364 return true; 3567 return true;
3365 case 0x6030: 3568 case TP_HKEY_EV_THM_TABLE_CHANGED:
3366 printk(TPACPI_INFO 3569 printk(TPACPI_INFO
3367 "EC reports that Thermal Table has changed\n"); 3570 "EC reports that Thermal Table has changed\n");
3368 /* recommended action: do nothing, we don't have 3571 /* recommended action: do nothing, we don't have
@@ -3420,7 +3623,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
3420 break; 3623 break;
3421 case 3: 3624 case 3:
3422 /* 0x3000-0x3FFF: bay-related wakeups */ 3625 /* 0x3000-0x3FFF: bay-related wakeups */
3423 if (hkey == 0x3003) { 3626 if (hkey == TP_HKEY_EV_BAYEJ_ACK) {
3424 hotkey_autosleep_ack = 1; 3627 hotkey_autosleep_ack = 1;
3425 printk(TPACPI_INFO 3628 printk(TPACPI_INFO
3426 "bay ejected\n"); 3629 "bay ejected\n");
@@ -3432,7 +3635,7 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
3432 break; 3635 break;
3433 case 4: 3636 case 4:
3434 /* 0x4000-0x4FFF: dock-related wakeups */ 3637 /* 0x4000-0x4FFF: dock-related wakeups */
3435 if (hkey == 0x4003) { 3638 if (hkey == TP_HKEY_EV_UNDOCK_ACK) {
3436 hotkey_autosleep_ack = 1; 3639 hotkey_autosleep_ack = 1;
3437 printk(TPACPI_INFO 3640 printk(TPACPI_INFO
3438 "undocked\n"); 3641 "undocked\n");
@@ -3454,7 +3657,8 @@ static void hotkey_notify(struct ibm_struct *ibm, u32 event)
3454 break; 3657 break;
3455 case 7: 3658 case 7:
3456 /* 0x7000-0x7FFF: misc */ 3659 /* 0x7000-0x7FFF: misc */
3457 if (tp_features.hotkey_wlsw && hkey == 0x7000) { 3660 if (tp_features.hotkey_wlsw &&
3661 hkey == TP_HKEY_EV_RFKILL_CHANGED) {
3458 tpacpi_send_radiosw_update(); 3662 tpacpi_send_radiosw_update();
3459 send_acpi_ev = 0; 3663 send_acpi_ev = 0;
3460 known_ev = true; 3664 known_ev = true;
@@ -3500,10 +3704,12 @@ static void hotkey_resume(void)
3500{ 3704{
3501 tpacpi_disable_brightness_delay(); 3705 tpacpi_disable_brightness_delay();
3502 3706
3503 if (hotkey_mask_get()) 3707 if (hotkey_status_set(true) < 0 ||
3708 hotkey_mask_set(hotkey_acpi_mask) < 0)
3504 printk(TPACPI_ERR 3709 printk(TPACPI_ERR
3505 "error while trying to read hot key mask " 3710 "error while attempting to reset the event "
3506 "from firmware\n"); 3711 "firmware interface\n");
3712
3507 tpacpi_send_radiosw_update(); 3713 tpacpi_send_radiosw_update();
3508 hotkey_tablet_mode_notify_change(); 3714 hotkey_tablet_mode_notify_change();
3509 hotkey_wakeup_reason_notify_change(); 3715 hotkey_wakeup_reason_notify_change();
@@ -3532,8 +3738,8 @@ static int hotkey_read(char *p)
3532 return res; 3738 return res;
3533 3739
3534 len += sprintf(p + len, "status:\t\t%s\n", enabled(status, 0)); 3740 len += sprintf(p + len, "status:\t\t%s\n", enabled(status, 0));
3535 if (tp_features.hotkey_mask) { 3741 if (hotkey_all_mask) {
3536 len += sprintf(p + len, "mask:\t\t0x%08x\n", hotkey_mask); 3742 len += sprintf(p + len, "mask:\t\t0x%08x\n", hotkey_user_mask);
3537 len += sprintf(p + len, 3743 len += sprintf(p + len,
3538 "commands:\tenable, disable, reset, <mask>\n"); 3744 "commands:\tenable, disable, reset, <mask>\n");
3539 } else { 3745 } else {
@@ -3570,7 +3776,7 @@ static int hotkey_write(char *buf)
3570 if (mutex_lock_killable(&hotkey_mutex)) 3776 if (mutex_lock_killable(&hotkey_mutex))
3571 return -ERESTARTSYS; 3777 return -ERESTARTSYS;
3572 3778
3573 mask = hotkey_mask; 3779 mask = hotkey_user_mask;
3574 3780
3575 res = 0; 3781 res = 0;
3576 while ((cmd = next_cmd(&buf))) { 3782 while ((cmd = next_cmd(&buf))) {
@@ -3592,12 +3798,11 @@ static int hotkey_write(char *buf)
3592 } 3798 }
3593 } 3799 }
3594 3800
3595 if (!res) 3801 if (!res) {
3596 tpacpi_disclose_usertask("procfs hotkey", 3802 tpacpi_disclose_usertask("procfs hotkey",
3597 "set mask to 0x%08x\n", mask); 3803 "set mask to 0x%08x\n", mask);
3598 3804 res = hotkey_user_mask_set(mask);
3599 if (!res && mask != hotkey_mask) 3805 }
3600 res = hotkey_mask_set(mask);
3601 3806
3602errexit: 3807errexit:
3603 mutex_unlock(&hotkey_mutex); 3808 mutex_unlock(&hotkey_mutex);
@@ -6010,8 +6215,10 @@ static int __init brightness_init(struct ibm_init_struct *iibm)
6010 TPACPI_BACKLIGHT_DEV_NAME, NULL, NULL, 6215 TPACPI_BACKLIGHT_DEV_NAME, NULL, NULL,
6011 &ibm_backlight_data); 6216 &ibm_backlight_data);
6012 if (IS_ERR(ibm_backlight_device)) { 6217 if (IS_ERR(ibm_backlight_device)) {
6218 int rc = PTR_ERR(ibm_backlight_device);
6219 ibm_backlight_device = NULL;
6013 printk(TPACPI_ERR "Could not register backlight device\n"); 6220 printk(TPACPI_ERR "Could not register backlight device\n");
6014 return PTR_ERR(ibm_backlight_device); 6221 return rc;
6015 } 6222 }
6016 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT, 6223 vdbg_printk(TPACPI_DBG_INIT | TPACPI_DBG_BRGHT,
6017 "brightness is supported\n"); 6224 "brightness is supported\n");
@@ -7499,6 +7706,21 @@ static struct ibm_struct fan_driver_data = {
7499 **************************************************************************** 7706 ****************************************************************************
7500 ****************************************************************************/ 7707 ****************************************************************************/
7501 7708
7709/*
7710 * HKEY event callout for other subdrivers go here
7711 * (yes, it is ugly, but it is quick, safe, and gets the job done
7712 */
7713static void tpacpi_driver_event(const unsigned int hkey_event)
7714{
7715}
7716
7717
7718
7719static void hotkey_driver_event(const unsigned int scancode)
7720{
7721 tpacpi_driver_event(TP_HKEY_EV_HOTKEY_BASE + scancode);
7722}
7723
7502/* sysfs name ---------------------------------------------------------- */ 7724/* sysfs name ---------------------------------------------------------- */
7503static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev, 7725static ssize_t thinkpad_acpi_pdev_name_show(struct device *dev,
7504 struct device_attribute *attr, 7726 struct device_attribute *attr,
diff --git a/drivers/pnp/pnpacpi/core.c b/drivers/pnp/pnpacpi/core.c
index c07fdb94d665..83b8b5ac49c9 100644
--- a/drivers/pnp/pnpacpi/core.c
+++ b/drivers/pnp/pnpacpi/core.c
@@ -153,6 +153,7 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
153 acpi_handle temp = NULL; 153 acpi_handle temp = NULL;
154 acpi_status status; 154 acpi_status status;
155 struct pnp_dev *dev; 155 struct pnp_dev *dev;
156 struct acpi_hardware_id *id;
156 157
157 /* 158 /*
158 * If a PnPacpi device is not present , the device 159 * If a PnPacpi device is not present , the device
@@ -193,15 +194,12 @@ static int __init pnpacpi_add_device(struct acpi_device *device)
193 if (dev->capabilities & PNP_CONFIGURABLE) 194 if (dev->capabilities & PNP_CONFIGURABLE)
194 pnpacpi_parse_resource_option_data(dev); 195 pnpacpi_parse_resource_option_data(dev);
195 196
196 if (device->flags.compatible_ids) { 197 list_for_each_entry(id, &device->pnp.ids, list) {
197 struct acpica_device_id_list *cid_list = device->pnp.cid_list; 198 if (!strcmp(id->id, acpi_device_hid(device)))
198 int i; 199 continue;
199 200 if (!ispnpidacpi(id->id))
200 for (i = 0; i < cid_list->count; i++) { 201 continue;
201 if (!ispnpidacpi(cid_list->ids[i].string)) 202 pnp_add_id(dev, id->id);
202 continue;
203 pnp_add_id(dev, cid_list->ids[i].string);
204 }
205 } 203 }
206 204
207 /* clear out the damaged flags */ 205 /* clear out the damaged flags */
@@ -232,9 +230,8 @@ static int __init acpi_pnp_match(struct device *dev, void *_pnp)
232 struct pnp_dev *pnp = _pnp; 230 struct pnp_dev *pnp = _pnp;
233 231
234 /* true means it matched */ 232 /* true means it matched */
235 return acpi->flags.hardware_id 233 return !acpi_get_physical_device(acpi->handle)
236 && !acpi_get_physical_device(acpi->handle) 234 && compare_pnp_id(pnp->id, acpi_device_hid(acpi));
237 && compare_pnp_id(pnp->id, acpi->pnp.hardware_id);
238} 235}
239 236
240static int __init acpi_pnp_find_device(struct device *dev, acpi_handle * handle) 237static int __init acpi_pnp_find_device(struct device *dev, acpi_handle * handle)
diff --git a/drivers/rtc/rtc-pxa.c b/drivers/rtc/rtc-pxa.c
index bb8cc05605ac..747ca194fad4 100644
--- a/drivers/rtc/rtc-pxa.c
+++ b/drivers/rtc/rtc-pxa.c
@@ -438,34 +438,37 @@ static int __exit pxa_rtc_remove(struct platform_device *pdev)
438} 438}
439 439
440#ifdef CONFIG_PM 440#ifdef CONFIG_PM
441static int pxa_rtc_suspend(struct platform_device *pdev, pm_message_t state) 441static int pxa_rtc_suspend(struct device *dev)
442{ 442{
443 struct pxa_rtc *pxa_rtc = platform_get_drvdata(pdev); 443 struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
444 444
445 if (device_may_wakeup(&pdev->dev)) 445 if (device_may_wakeup(dev))
446 enable_irq_wake(pxa_rtc->irq_Alrm); 446 enable_irq_wake(pxa_rtc->irq_Alrm);
447 return 0; 447 return 0;
448} 448}
449 449
450static int pxa_rtc_resume(struct platform_device *pdev) 450static int pxa_rtc_resume(struct device *dev)
451{ 451{
452 struct pxa_rtc *pxa_rtc = platform_get_drvdata(pdev); 452 struct pxa_rtc *pxa_rtc = dev_get_drvdata(dev);
453 453
454 if (device_may_wakeup(&pdev->dev)) 454 if (device_may_wakeup(dev))
455 disable_irq_wake(pxa_rtc->irq_Alrm); 455 disable_irq_wake(pxa_rtc->irq_Alrm);
456 return 0; 456 return 0;
457} 457}
458#else 458
459#define pxa_rtc_suspend NULL 459static struct dev_pm_ops pxa_rtc_pm_ops = {
460#define pxa_rtc_resume NULL 460 .suspend = pxa_rtc_suspend,
461 .resume = pxa_rtc_resume,
462};
461#endif 463#endif
462 464
463static struct platform_driver pxa_rtc_driver = { 465static struct platform_driver pxa_rtc_driver = {
464 .remove = __exit_p(pxa_rtc_remove), 466 .remove = __exit_p(pxa_rtc_remove),
465 .suspend = pxa_rtc_suspend,
466 .resume = pxa_rtc_resume,
467 .driver = { 467 .driver = {
468 .name = "pxa-rtc", 468 .name = "pxa-rtc",
469#ifdef CONFIG_PM
470 .pm = &pxa_rtc_pm_ops,
471#endif
469 }, 472 },
470}; 473};
471 474
diff --git a/drivers/rtc/rtc-sa1100.c b/drivers/rtc/rtc-sa1100.c
index 021b2928f0b9..29f98a70586e 100644
--- a/drivers/rtc/rtc-sa1100.c
+++ b/drivers/rtc/rtc-sa1100.c
@@ -393,31 +393,34 @@ static int sa1100_rtc_remove(struct platform_device *pdev)
393} 393}
394 394
395#ifdef CONFIG_PM 395#ifdef CONFIG_PM
396static int sa1100_rtc_suspend(struct platform_device *pdev, pm_message_t state) 396static int sa1100_rtc_suspend(struct device *dev)
397{ 397{
398 if (device_may_wakeup(&pdev->dev)) 398 if (device_may_wakeup(dev))
399 enable_irq_wake(IRQ_RTCAlrm); 399 enable_irq_wake(IRQ_RTCAlrm);
400 return 0; 400 return 0;
401} 401}
402 402
403static int sa1100_rtc_resume(struct platform_device *pdev) 403static int sa1100_rtc_resume(struct device *dev)
404{ 404{
405 if (device_may_wakeup(&pdev->dev)) 405 if (device_may_wakeup(dev))
406 disable_irq_wake(IRQ_RTCAlrm); 406 disable_irq_wake(IRQ_RTCAlrm);
407 return 0; 407 return 0;
408} 408}
409#else 409
410#define sa1100_rtc_suspend NULL 410static struct dev_pm_ops sa1100_rtc_pm_ops = {
411#define sa1100_rtc_resume NULL 411 .suspend = sa1100_rtc_suspend,
412 .resume = sa1100_rtc_resume,
413};
412#endif 414#endif
413 415
414static struct platform_driver sa1100_rtc_driver = { 416static struct platform_driver sa1100_rtc_driver = {
415 .probe = sa1100_rtc_probe, 417 .probe = sa1100_rtc_probe,
416 .remove = sa1100_rtc_remove, 418 .remove = sa1100_rtc_remove,
417 .suspend = sa1100_rtc_suspend,
418 .resume = sa1100_rtc_resume,
419 .driver = { 419 .driver = {
420 .name = "sa1100-rtc", 420 .name = "sa1100-rtc",
421#ifdef CONFIG_PM
422 .pm = &sa1100_rtc_pm_ops,
423#endif
421 }, 424 },
422}; 425};
423 426
diff --git a/drivers/s390/cio/qdio_debug.c b/drivers/s390/cio/qdio_debug.c
index 1b78f639ead3..76769978285f 100644
--- a/drivers/s390/cio/qdio_debug.c
+++ b/drivers/s390/cio/qdio_debug.c
@@ -125,7 +125,7 @@ static int qstat_seq_open(struct inode *inode, struct file *filp)
125 filp->f_path.dentry->d_inode->i_private); 125 filp->f_path.dentry->d_inode->i_private);
126} 126}
127 127
128static struct file_operations debugfs_fops = { 128static const struct file_operations debugfs_fops = {
129 .owner = THIS_MODULE, 129 .owner = THIS_MODULE,
130 .open = qstat_seq_open, 130 .open = qstat_seq_open,
131 .read = seq_read, 131 .read = seq_read,
diff --git a/drivers/s390/cio/qdio_perf.c b/drivers/s390/cio/qdio_perf.c
index eff943923c6f..968e3c7c2632 100644
--- a/drivers/s390/cio/qdio_perf.c
+++ b/drivers/s390/cio/qdio_perf.c
@@ -84,7 +84,7 @@ static int qdio_perf_seq_open(struct inode *inode, struct file *filp)
84 return single_open(filp, qdio_perf_proc_show, NULL); 84 return single_open(filp, qdio_perf_proc_show, NULL);
85} 85}
86 86
87static struct file_operations qdio_perf_proc_fops = { 87static const struct file_operations qdio_perf_proc_fops = {
88 .owner = THIS_MODULE, 88 .owner = THIS_MODULE,
89 .open = qdio_perf_seq_open, 89 .open = qdio_perf_seq_open,
90 .read = seq_read, 90 .read = seq_read,
diff --git a/drivers/s390/net/Makefile b/drivers/s390/net/Makefile
index 96eddb3b1d08..6cab5a62f99e 100644
--- a/drivers/s390/net/Makefile
+++ b/drivers/s390/net/Makefile
@@ -3,11 +3,11 @@
3# 3#
4 4
5ctcm-y += ctcm_main.o ctcm_fsms.o ctcm_mpc.o ctcm_sysfs.o ctcm_dbug.o 5ctcm-y += ctcm_main.o ctcm_fsms.o ctcm_mpc.o ctcm_sysfs.o ctcm_dbug.o
6obj-$(CONFIG_CTCM) += ctcm.o fsm.o cu3088.o 6obj-$(CONFIG_CTCM) += ctcm.o fsm.o
7obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o 7obj-$(CONFIG_NETIUCV) += netiucv.o fsm.o
8obj-$(CONFIG_SMSGIUCV) += smsgiucv.o 8obj-$(CONFIG_SMSGIUCV) += smsgiucv.o
9obj-$(CONFIG_LCS) += lcs.o cu3088.o 9obj-$(CONFIG_LCS) += lcs.o
10obj-$(CONFIG_CLAW) += claw.o cu3088.o 10obj-$(CONFIG_CLAW) += claw.o
11qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o 11qeth-y += qeth_core_sys.o qeth_core_main.o qeth_core_mpc.o
12obj-$(CONFIG_QETH) += qeth.o 12obj-$(CONFIG_QETH) += qeth.o
13qeth_l2-y += qeth_l2_main.o 13qeth_l2-y += qeth_l2_main.o
diff --git a/drivers/s390/net/claw.c b/drivers/s390/net/claw.c
index c63babefb698..cf283e3d2763 100644
--- a/drivers/s390/net/claw.c
+++ b/drivers/s390/net/claw.c
@@ -90,7 +90,6 @@
90#include <linux/timer.h> 90#include <linux/timer.h>
91#include <linux/types.h> 91#include <linux/types.h>
92 92
93#include "cu3088.h"
94#include "claw.h" 93#include "claw.h"
95 94
96/* 95/*
@@ -258,6 +257,9 @@ static int claw_pm_prepare(struct ccwgroup_device *gdev)
258 return -EPERM; 257 return -EPERM;
259} 258}
260 259
260/* the root device for claw group devices */
261static struct device *claw_root_dev;
262
261/* ccwgroup table */ 263/* ccwgroup table */
262 264
263static struct ccwgroup_driver claw_group_driver = { 265static struct ccwgroup_driver claw_group_driver = {
@@ -272,6 +274,47 @@ static struct ccwgroup_driver claw_group_driver = {
272 .prepare = claw_pm_prepare, 274 .prepare = claw_pm_prepare,
273}; 275};
274 276
277static struct ccw_device_id claw_ids[] = {
278 {CCW_DEVICE(0x3088, 0x61), .driver_info = claw_channel_type_claw},
279 {},
280};
281MODULE_DEVICE_TABLE(ccw, claw_ids);
282
283static struct ccw_driver claw_ccw_driver = {
284 .owner = THIS_MODULE,
285 .name = "claw",
286 .ids = claw_ids,
287 .probe = ccwgroup_probe_ccwdev,
288 .remove = ccwgroup_remove_ccwdev,
289};
290
291static ssize_t
292claw_driver_group_store(struct device_driver *ddrv, const char *buf,
293 size_t count)
294{
295 int err;
296 err = ccwgroup_create_from_string(claw_root_dev,
297 claw_group_driver.driver_id,
298 &claw_ccw_driver, 3, buf);
299 return err ? err : count;
300}
301
302static DRIVER_ATTR(group, 0200, NULL, claw_driver_group_store);
303
304static struct attribute *claw_group_attrs[] = {
305 &driver_attr_group.attr,
306 NULL,
307};
308
309static struct attribute_group claw_group_attr_group = {
310 .attrs = claw_group_attrs,
311};
312
313static struct attribute_group *claw_group_attr_groups[] = {
314 &claw_group_attr_group,
315 NULL,
316};
317
275/* 318/*
276* Key functions 319* Key functions
277*/ 320*/
@@ -3326,7 +3369,11 @@ claw_remove_files(struct device *dev)
3326static void __exit 3369static void __exit
3327claw_cleanup(void) 3370claw_cleanup(void)
3328{ 3371{
3329 unregister_cu3088_discipline(&claw_group_driver); 3372 driver_remove_file(&claw_group_driver.driver,
3373 &driver_attr_group);
3374 ccwgroup_driver_unregister(&claw_group_driver);
3375 ccw_driver_unregister(&claw_ccw_driver);
3376 root_device_unregister(claw_root_dev);
3330 claw_unregister_debug_facility(); 3377 claw_unregister_debug_facility();
3331 pr_info("Driver unloaded\n"); 3378 pr_info("Driver unloaded\n");
3332 3379
@@ -3348,16 +3395,31 @@ claw_init(void)
3348 if (ret) { 3395 if (ret) {
3349 pr_err("Registering with the S/390 debug feature" 3396 pr_err("Registering with the S/390 debug feature"
3350 " failed with error code %d\n", ret); 3397 " failed with error code %d\n", ret);
3351 return ret; 3398 goto out_err;
3352 } 3399 }
3353 CLAW_DBF_TEXT(2, setup, "init_mod"); 3400 CLAW_DBF_TEXT(2, setup, "init_mod");
3354 ret = register_cu3088_discipline(&claw_group_driver); 3401 claw_root_dev = root_device_register("qeth");
3355 if (ret) { 3402 ret = IS_ERR(claw_root_dev) ? PTR_ERR(claw_root_dev) : 0;
3356 CLAW_DBF_TEXT(2, setup, "init_bad"); 3403 if (ret)
3357 claw_unregister_debug_facility(); 3404 goto register_err;
3358 pr_err("Registering with the cu3088 device driver failed " 3405 ret = ccw_driver_register(&claw_ccw_driver);
3359 "with error code %d\n", ret); 3406 if (ret)
3360 } 3407 goto ccw_err;
3408 claw_group_driver.driver.groups = claw_group_attr_groups;
3409 ret = ccwgroup_driver_register(&claw_group_driver);
3410 if (ret)
3411 goto ccwgroup_err;
3412 return 0;
3413
3414ccwgroup_err:
3415 ccw_driver_unregister(&claw_ccw_driver);
3416ccw_err:
3417 root_device_unregister(claw_root_dev);
3418register_err:
3419 CLAW_DBF_TEXT(2, setup, "init_bad");
3420 claw_unregister_debug_facility();
3421out_err:
3422 pr_err("Initializing the claw device driver failed\n");
3361 return ret; 3423 return ret;
3362} 3424}
3363 3425
diff --git a/drivers/s390/net/claw.h b/drivers/s390/net/claw.h
index 005072c420d3..46d59a13db12 100644
--- a/drivers/s390/net/claw.h
+++ b/drivers/s390/net/claw.h
@@ -129,6 +129,18 @@ static inline int claw_dbf_passes(debug_info_t *dbf_grp, int level)
129 } \ 129 } \
130 } while (0) 130 } while (0)
131 131
132/**
133 * Enum for classifying detected devices.
134 */
135enum claw_channel_types {
136 /* Device is not a channel */
137 claw_channel_type_none,
138
139 /* Device is a CLAW channel device */
140 claw_channel_type_claw
141};
142
143
132/******************************************************* 144/*******************************************************
133* Define Control Blocks * 145* Define Control Blocks *
134* * 146* *
diff --git a/drivers/s390/net/ctcm_fsms.c b/drivers/s390/net/ctcm_fsms.c
index 4ded9ac2c5ef..70eb7f138414 100644
--- a/drivers/s390/net/ctcm_fsms.c
+++ b/drivers/s390/net/ctcm_fsms.c
@@ -44,7 +44,6 @@
44#include <asm/idals.h> 44#include <asm/idals.h>
45 45
46#include "fsm.h" 46#include "fsm.h"
47#include "cu3088.h"
48 47
49#include "ctcm_dbug.h" 48#include "ctcm_dbug.h"
50#include "ctcm_main.h" 49#include "ctcm_main.h"
diff --git a/drivers/s390/net/ctcm_fsms.h b/drivers/s390/net/ctcm_fsms.h
index 2326aba9807a..046d077fabbb 100644
--- a/drivers/s390/net/ctcm_fsms.h
+++ b/drivers/s390/net/ctcm_fsms.h
@@ -39,7 +39,6 @@
39#include <asm/idals.h> 39#include <asm/idals.h>
40 40
41#include "fsm.h" 41#include "fsm.h"
42#include "cu3088.h"
43#include "ctcm_main.h" 42#include "ctcm_main.h"
44 43
45/* 44/*
diff --git a/drivers/s390/net/ctcm_main.c b/drivers/s390/net/ctcm_main.c
index c5b83874500c..558dc323a947 100644
--- a/drivers/s390/net/ctcm_main.c
+++ b/drivers/s390/net/ctcm_main.c
@@ -51,12 +51,16 @@
51 51
52#include <asm/idals.h> 52#include <asm/idals.h>
53 53
54#include "cu3088.h"
55#include "ctcm_fsms.h" 54#include "ctcm_fsms.h"
56#include "ctcm_main.h" 55#include "ctcm_main.h"
57 56
58/* Some common global variables */ 57/* Some common global variables */
59 58
59/**
60 * The root device for ctcm group devices
61 */
62static struct device *ctcm_root_dev;
63
60/* 64/*
61 * Linked list of all detected channels. 65 * Linked list of all detected channels.
62 */ 66 */
@@ -246,7 +250,7 @@ static void channel_remove(struct channel *ch)
246 * 250 *
247 * returns Pointer to a channel or NULL if no matching channel available. 251 * returns Pointer to a channel or NULL if no matching channel available.
248 */ 252 */
249static struct channel *channel_get(enum channel_types type, 253static struct channel *channel_get(enum ctcm_channel_types type,
250 char *id, int direction) 254 char *id, int direction)
251{ 255{
252 struct channel *ch = channels; 256 struct channel *ch = channels;
@@ -1342,7 +1346,7 @@ static int ctcm_probe_device(struct ccwgroup_device *cgdev)
1342 * 1346 *
1343 * returns 0 on success, !0 on error. 1347 * returns 0 on success, !0 on error.
1344 */ 1348 */
1345static int add_channel(struct ccw_device *cdev, enum channel_types type, 1349static int add_channel(struct ccw_device *cdev, enum ctcm_channel_types type,
1346 struct ctcm_priv *priv) 1350 struct ctcm_priv *priv)
1347{ 1351{
1348 struct channel **c = &channels; 1352 struct channel **c = &channels;
@@ -1501,13 +1505,13 @@ free_return: /* note that all channel pointers are 0 or valid */
1501/* 1505/*
1502 * Return type of a detected device. 1506 * Return type of a detected device.
1503 */ 1507 */
1504static enum channel_types get_channel_type(struct ccw_device_id *id) 1508static enum ctcm_channel_types get_channel_type(struct ccw_device_id *id)
1505{ 1509{
1506 enum channel_types type; 1510 enum ctcm_channel_types type;
1507 type = (enum channel_types)id->driver_info; 1511 type = (enum ctcm_channel_types)id->driver_info;
1508 1512
1509 if (type == channel_type_ficon) 1513 if (type == ctcm_channel_type_ficon)
1510 type = channel_type_escon; 1514 type = ctcm_channel_type_escon;
1511 1515
1512 return type; 1516 return type;
1513} 1517}
@@ -1525,16 +1529,21 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
1525 char read_id[CTCM_ID_SIZE]; 1529 char read_id[CTCM_ID_SIZE];
1526 char write_id[CTCM_ID_SIZE]; 1530 char write_id[CTCM_ID_SIZE];
1527 int direction; 1531 int direction;
1528 enum channel_types type; 1532 enum ctcm_channel_types type;
1529 struct ctcm_priv *priv; 1533 struct ctcm_priv *priv;
1530 struct net_device *dev; 1534 struct net_device *dev;
1531 struct ccw_device *cdev0; 1535 struct ccw_device *cdev0;
1532 struct ccw_device *cdev1; 1536 struct ccw_device *cdev1;
1537 struct channel *readc;
1538 struct channel *writec;
1533 int ret; 1539 int ret;
1540 int result;
1534 1541
1535 priv = dev_get_drvdata(&cgdev->dev); 1542 priv = dev_get_drvdata(&cgdev->dev);
1536 if (!priv) 1543 if (!priv) {
1537 return -ENODEV; 1544 result = -ENODEV;
1545 goto out_err_result;
1546 }
1538 1547
1539 cdev0 = cgdev->cdev[0]; 1548 cdev0 = cgdev->cdev[0];
1540 cdev1 = cgdev->cdev[1]; 1549 cdev1 = cgdev->cdev[1];
@@ -1545,31 +1554,40 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
1545 snprintf(write_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev1->dev)); 1554 snprintf(write_id, CTCM_ID_SIZE, "ch-%s", dev_name(&cdev1->dev));
1546 1555
1547 ret = add_channel(cdev0, type, priv); 1556 ret = add_channel(cdev0, type, priv);
1548 if (ret) 1557 if (ret) {
1549 return ret; 1558 result = ret;
1559 goto out_err_result;
1560 }
1550 ret = add_channel(cdev1, type, priv); 1561 ret = add_channel(cdev1, type, priv);
1551 if (ret) 1562 if (ret) {
1552 return ret; 1563 result = ret;
1564 goto out_remove_channel1;
1565 }
1553 1566
1554 ret = ccw_device_set_online(cdev0); 1567 ret = ccw_device_set_online(cdev0);
1555 if (ret != 0) { 1568 if (ret != 0) {
1556 /* may be ok to fail now - can be done later */
1557 CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, 1569 CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
1558 "%s(%s) set_online rc=%d", 1570 "%s(%s) set_online rc=%d",
1559 CTCM_FUNTAIL, read_id, ret); 1571 CTCM_FUNTAIL, read_id, ret);
1572 result = -EIO;
1573 goto out_remove_channel2;
1560 } 1574 }
1561 1575
1562 ret = ccw_device_set_online(cdev1); 1576 ret = ccw_device_set_online(cdev1);
1563 if (ret != 0) { 1577 if (ret != 0) {
1564 /* may be ok to fail now - can be done later */
1565 CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE, 1578 CTCM_DBF_TEXT_(TRACE, CTC_DBF_NOTICE,
1566 "%s(%s) set_online rc=%d", 1579 "%s(%s) set_online rc=%d",
1567 CTCM_FUNTAIL, write_id, ret); 1580 CTCM_FUNTAIL, write_id, ret);
1581
1582 result = -EIO;
1583 goto out_ccw1;
1568 } 1584 }
1569 1585
1570 dev = ctcm_init_netdevice(priv); 1586 dev = ctcm_init_netdevice(priv);
1571 if (dev == NULL) 1587 if (dev == NULL) {
1572 goto out; 1588 result = -ENODEV;
1589 goto out_ccw2;
1590 }
1573 1591
1574 for (direction = READ; direction <= WRITE; direction++) { 1592 for (direction = READ; direction <= WRITE; direction++) {
1575 priv->channel[direction] = 1593 priv->channel[direction] =
@@ -1587,12 +1605,14 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
1587 /* sysfs magic */ 1605 /* sysfs magic */
1588 SET_NETDEV_DEV(dev, &cgdev->dev); 1606 SET_NETDEV_DEV(dev, &cgdev->dev);
1589 1607
1590 if (register_netdev(dev)) 1608 if (register_netdev(dev)) {
1591 goto out_dev; 1609 result = -ENODEV;
1610 goto out_dev;
1611 }
1592 1612
1593 if (ctcm_add_attributes(&cgdev->dev)) { 1613 if (ctcm_add_attributes(&cgdev->dev)) {
1594 unregister_netdev(dev); 1614 result = -ENODEV;
1595 goto out_dev; 1615 goto out_unregister;
1596 } 1616 }
1597 1617
1598 strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name)); 1618 strlcpy(priv->fsm->name, dev->name, sizeof(priv->fsm->name));
@@ -1608,13 +1628,22 @@ static int ctcm_new_device(struct ccwgroup_device *cgdev)
1608 priv->channel[WRITE]->id, priv->protocol); 1628 priv->channel[WRITE]->id, priv->protocol);
1609 1629
1610 return 0; 1630 return 0;
1631out_unregister:
1632 unregister_netdev(dev);
1611out_dev: 1633out_dev:
1612 ctcm_free_netdevice(dev); 1634 ctcm_free_netdevice(dev);
1613out: 1635out_ccw2:
1614 ccw_device_set_offline(cgdev->cdev[1]); 1636 ccw_device_set_offline(cgdev->cdev[1]);
1637out_ccw1:
1615 ccw_device_set_offline(cgdev->cdev[0]); 1638 ccw_device_set_offline(cgdev->cdev[0]);
1616 1639out_remove_channel2:
1617 return -ENODEV; 1640 readc = channel_get(type, read_id, READ);
1641 channel_remove(readc);
1642out_remove_channel1:
1643 writec = channel_get(type, write_id, WRITE);
1644 channel_remove(writec);
1645out_err_result:
1646 return result;
1618} 1647}
1619 1648
1620/** 1649/**
@@ -1695,6 +1724,11 @@ static int ctcm_pm_suspend(struct ccwgroup_device *gdev)
1695 return 0; 1724 return 0;
1696 netif_device_detach(priv->channel[READ]->netdev); 1725 netif_device_detach(priv->channel[READ]->netdev);
1697 ctcm_close(priv->channel[READ]->netdev); 1726 ctcm_close(priv->channel[READ]->netdev);
1727 if (!wait_event_timeout(priv->fsm->wait_q,
1728 fsm_getstate(priv->fsm) == DEV_STATE_STOPPED, CTCM_TIME_5_SEC)) {
1729 netif_device_attach(priv->channel[READ]->netdev);
1730 return -EBUSY;
1731 }
1698 ccw_device_set_offline(gdev->cdev[1]); 1732 ccw_device_set_offline(gdev->cdev[1]);
1699 ccw_device_set_offline(gdev->cdev[0]); 1733 ccw_device_set_offline(gdev->cdev[0]);
1700 return 0; 1734 return 0;
@@ -1719,6 +1753,22 @@ err_out:
1719 return rc; 1753 return rc;
1720} 1754}
1721 1755
1756static struct ccw_device_id ctcm_ids[] = {
1757 {CCW_DEVICE(0x3088, 0x08), .driver_info = ctcm_channel_type_parallel},
1758 {CCW_DEVICE(0x3088, 0x1e), .driver_info = ctcm_channel_type_ficon},
1759 {CCW_DEVICE(0x3088, 0x1f), .driver_info = ctcm_channel_type_escon},
1760 {},
1761};
1762MODULE_DEVICE_TABLE(ccw, ctcm_ids);
1763
1764static struct ccw_driver ctcm_ccw_driver = {
1765 .owner = THIS_MODULE,
1766 .name = "ctcm",
1767 .ids = ctcm_ids,
1768 .probe = ccwgroup_probe_ccwdev,
1769 .remove = ccwgroup_remove_ccwdev,
1770};
1771
1722static struct ccwgroup_driver ctcm_group_driver = { 1772static struct ccwgroup_driver ctcm_group_driver = {
1723 .owner = THIS_MODULE, 1773 .owner = THIS_MODULE,
1724 .name = CTC_DRIVER_NAME, 1774 .name = CTC_DRIVER_NAME,
@@ -1733,6 +1783,33 @@ static struct ccwgroup_driver ctcm_group_driver = {
1733 .restore = ctcm_pm_resume, 1783 .restore = ctcm_pm_resume,
1734}; 1784};
1735 1785
1786static ssize_t
1787ctcm_driver_group_store(struct device_driver *ddrv, const char *buf,
1788 size_t count)
1789{
1790 int err;
1791
1792 err = ccwgroup_create_from_string(ctcm_root_dev,
1793 ctcm_group_driver.driver_id,
1794 &ctcm_ccw_driver, 2, buf);
1795 return err ? err : count;
1796}
1797
1798static DRIVER_ATTR(group, 0200, NULL, ctcm_driver_group_store);
1799
1800static struct attribute *ctcm_group_attrs[] = {
1801 &driver_attr_group.attr,
1802 NULL,
1803};
1804
1805static struct attribute_group ctcm_group_attr_group = {
1806 .attrs = ctcm_group_attrs,
1807};
1808
1809static struct attribute_group *ctcm_group_attr_groups[] = {
1810 &ctcm_group_attr_group,
1811 NULL,
1812};
1736 1813
1737/* 1814/*
1738 * Module related routines 1815 * Module related routines
@@ -1746,7 +1823,10 @@ static struct ccwgroup_driver ctcm_group_driver = {
1746 */ 1823 */
1747static void __exit ctcm_exit(void) 1824static void __exit ctcm_exit(void)
1748{ 1825{
1749 unregister_cu3088_discipline(&ctcm_group_driver); 1826 driver_remove_file(&ctcm_group_driver.driver, &driver_attr_group);
1827 ccwgroup_driver_unregister(&ctcm_group_driver);
1828 ccw_driver_unregister(&ctcm_ccw_driver);
1829 root_device_unregister(ctcm_root_dev);
1750 ctcm_unregister_dbf_views(); 1830 ctcm_unregister_dbf_views();
1751 pr_info("CTCM driver unloaded\n"); 1831 pr_info("CTCM driver unloaded\n");
1752} 1832}
@@ -1772,17 +1852,31 @@ static int __init ctcm_init(void)
1772 channels = NULL; 1852 channels = NULL;
1773 1853
1774 ret = ctcm_register_dbf_views(); 1854 ret = ctcm_register_dbf_views();
1775 if (ret) { 1855 if (ret)
1776 return ret; 1856 goto out_err;
1777 } 1857 ctcm_root_dev = root_device_register("ctcm");
1778 ret = register_cu3088_discipline(&ctcm_group_driver); 1858 ret = IS_ERR(ctcm_root_dev) ? PTR_ERR(ctcm_root_dev) : 0;
1779 if (ret) { 1859 if (ret)
1780 ctcm_unregister_dbf_views(); 1860 goto register_err;
1781 pr_err("%s / register_cu3088_discipline failed, ret = %d\n", 1861 ret = ccw_driver_register(&ctcm_ccw_driver);
1782 __func__, ret); 1862 if (ret)
1783 return ret; 1863 goto ccw_err;
1784 } 1864 ctcm_group_driver.driver.groups = ctcm_group_attr_groups;
1865 ret = ccwgroup_driver_register(&ctcm_group_driver);
1866 if (ret)
1867 goto ccwgroup_err;
1785 print_banner(); 1868 print_banner();
1869 return 0;
1870
1871ccwgroup_err:
1872 ccw_driver_unregister(&ctcm_ccw_driver);
1873ccw_err:
1874 root_device_unregister(ctcm_root_dev);
1875register_err:
1876 ctcm_unregister_dbf_views();
1877out_err:
1878 pr_err("%s / Initializing the ctcm device driver failed, ret = %d\n",
1879 __func__, ret);
1786 return ret; 1880 return ret;
1787} 1881}
1788 1882
diff --git a/drivers/s390/net/ctcm_main.h b/drivers/s390/net/ctcm_main.h
index d925e732b7d8..d34fa14f44e7 100644
--- a/drivers/s390/net/ctcm_main.h
+++ b/drivers/s390/net/ctcm_main.h
@@ -16,7 +16,6 @@
16#include <linux/netdevice.h> 16#include <linux/netdevice.h>
17 17
18#include "fsm.h" 18#include "fsm.h"
19#include "cu3088.h"
20#include "ctcm_dbug.h" 19#include "ctcm_dbug.h"
21#include "ctcm_mpc.h" 20#include "ctcm_mpc.h"
22 21
@@ -66,6 +65,23 @@
66 ctcmpc_dumpit(buf, len); \ 65 ctcmpc_dumpit(buf, len); \
67 } while (0) 66 } while (0)
68 67
68/**
69 * Enum for classifying detected devices
70 */
71enum ctcm_channel_types {
72 /* Device is not a channel */
73 ctcm_channel_type_none,
74
75 /* Device is a CTC/A */
76 ctcm_channel_type_parallel,
77
78 /* Device is a FICON channel */
79 ctcm_channel_type_ficon,
80
81 /* Device is a ESCON channel */
82 ctcm_channel_type_escon
83};
84
69/* 85/*
70 * CCW commands, used in this driver. 86 * CCW commands, used in this driver.
71 */ 87 */
@@ -121,7 +137,7 @@ struct channel {
121 * Type of this channel. 137 * Type of this channel.
122 * CTC/A or Escon for valid channels. 138 * CTC/A or Escon for valid channels.
123 */ 139 */
124 enum channel_types type; 140 enum ctcm_channel_types type;
125 /* 141 /*
126 * Misc. flags. See CHANNEL_FLAGS_... below 142 * Misc. flags. See CHANNEL_FLAGS_... below
127 */ 143 */
diff --git a/drivers/s390/net/ctcm_mpc.c b/drivers/s390/net/ctcm_mpc.c
index 781e18be7e8f..5978b390153f 100644
--- a/drivers/s390/net/ctcm_mpc.c
+++ b/drivers/s390/net/ctcm_mpc.c
@@ -53,7 +53,6 @@
53#include <linux/moduleparam.h> 53#include <linux/moduleparam.h>
54#include <asm/idals.h> 54#include <asm/idals.h>
55 55
56#include "cu3088.h"
57#include "ctcm_mpc.h" 56#include "ctcm_mpc.h"
58#include "ctcm_main.h" 57#include "ctcm_main.h"
59#include "ctcm_fsms.h" 58#include "ctcm_fsms.h"
diff --git a/drivers/s390/net/ctcm_sysfs.c b/drivers/s390/net/ctcm_sysfs.c
index 8452bb052d68..738ad26c74a7 100644
--- a/drivers/s390/net/ctcm_sysfs.c
+++ b/drivers/s390/net/ctcm_sysfs.c
@@ -158,6 +158,15 @@ static ssize_t ctcm_proto_store(struct device *dev,
158 return count; 158 return count;
159} 159}
160 160
161const char *ctcm_type[] = {
162 "not a channel",
163 "CTC/A",
164 "FICON channel",
165 "ESCON channel",
166 "unknown channel type",
167 "unsupported channel type",
168};
169
161static ssize_t ctcm_type_show(struct device *dev, 170static ssize_t ctcm_type_show(struct device *dev,
162 struct device_attribute *attr, char *buf) 171 struct device_attribute *attr, char *buf)
163{ 172{
@@ -168,7 +177,7 @@ static ssize_t ctcm_type_show(struct device *dev,
168 return -ENODEV; 177 return -ENODEV;
169 178
170 return sprintf(buf, "%s\n", 179 return sprintf(buf, "%s\n",
171 cu3088_type[cgdev->cdev[0]->id.driver_info]); 180 ctcm_type[cgdev->cdev[0]->id.driver_info]);
172} 181}
173 182
174static DEVICE_ATTR(buffer, 0644, ctcm_buffer_show, ctcm_buffer_write); 183static DEVICE_ATTR(buffer, 0644, ctcm_buffer_show, ctcm_buffer_write);
diff --git a/drivers/s390/net/cu3088.c b/drivers/s390/net/cu3088.c
deleted file mode 100644
index 48383459e99b..000000000000
--- a/drivers/s390/net/cu3088.c
+++ /dev/null
@@ -1,148 +0,0 @@
1/*
2 * CTC / LCS ccw_device driver
3 *
4 * Copyright (C) 2002 IBM Deutschland Entwicklung GmbH, IBM Corporation
5 * Author(s): Arnd Bergmann <arndb@de.ibm.com>
6 * Cornelia Huck <cornelia.huck@de.ibm.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2, or (at your option)
11 * any later version.
12 *
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
21 *
22 */
23
24#include <linux/init.h>
25#include <linux/module.h>
26#include <linux/err.h>
27
28#include <asm/ccwdev.h>
29#include <asm/ccwgroup.h>
30
31#include "cu3088.h"
32
33const char *cu3088_type[] = {
34 "not a channel",
35 "CTC/A",
36 "ESCON channel",
37 "FICON channel",
38 "OSA LCS card",
39 "CLAW channel device",
40 "unknown channel type",
41 "unsupported channel type",
42};
43
44/* static definitions */
45
46static struct ccw_device_id cu3088_ids[] = {
47 { CCW_DEVICE(0x3088, 0x08), .driver_info = channel_type_parallel },
48 { CCW_DEVICE(0x3088, 0x1f), .driver_info = channel_type_escon },
49 { CCW_DEVICE(0x3088, 0x1e), .driver_info = channel_type_ficon },
50 { CCW_DEVICE(0x3088, 0x60), .driver_info = channel_type_osa2 },
51 { CCW_DEVICE(0x3088, 0x61), .driver_info = channel_type_claw },
52 { /* end of list */ }
53};
54
55static struct ccw_driver cu3088_driver;
56
57static struct device *cu3088_root_dev;
58
59static ssize_t
60group_write(struct device_driver *drv, const char *buf, size_t count)
61{
62 int ret;
63 struct ccwgroup_driver *cdrv;
64
65 cdrv = to_ccwgroupdrv(drv);
66 if (!cdrv)
67 return -EINVAL;
68 ret = ccwgroup_create_from_string(cu3088_root_dev, cdrv->driver_id,
69 &cu3088_driver, 2, buf);
70
71 return (ret == 0) ? count : ret;
72}
73
74static DRIVER_ATTR(group, 0200, NULL, group_write);
75
76/* Register-unregister for ctc&lcs */
77int
78register_cu3088_discipline(struct ccwgroup_driver *dcp)
79{
80 int rc;
81
82 if (!dcp)
83 return -EINVAL;
84
85 /* Register discipline.*/
86 rc = ccwgroup_driver_register(dcp);
87 if (rc)
88 return rc;
89
90 rc = driver_create_file(&dcp->driver, &driver_attr_group);
91 if (rc)
92 ccwgroup_driver_unregister(dcp);
93
94 return rc;
95
96}
97
98void
99unregister_cu3088_discipline(struct ccwgroup_driver *dcp)
100{
101 if (!dcp)
102 return;
103
104 driver_remove_file(&dcp->driver, &driver_attr_group);
105 ccwgroup_driver_unregister(dcp);
106}
107
108static struct ccw_driver cu3088_driver = {
109 .owner = THIS_MODULE,
110 .ids = cu3088_ids,
111 .name = "cu3088",
112 .probe = ccwgroup_probe_ccwdev,
113 .remove = ccwgroup_remove_ccwdev,
114};
115
116/* module setup */
117static int __init
118cu3088_init (void)
119{
120 int rc;
121
122 cu3088_root_dev = root_device_register("cu3088");
123 if (IS_ERR(cu3088_root_dev))
124 return PTR_ERR(cu3088_root_dev);
125 rc = ccw_driver_register(&cu3088_driver);
126 if (rc)
127 root_device_unregister(cu3088_root_dev);
128
129 return rc;
130}
131
132static void __exit
133cu3088_exit (void)
134{
135 ccw_driver_unregister(&cu3088_driver);
136 root_device_unregister(cu3088_root_dev);
137}
138
139MODULE_DEVICE_TABLE(ccw,cu3088_ids);
140MODULE_AUTHOR("Arnd Bergmann <arndb@de.ibm.com>");
141MODULE_LICENSE("GPL");
142
143module_init(cu3088_init);
144module_exit(cu3088_exit);
145
146EXPORT_SYMBOL_GPL(cu3088_type);
147EXPORT_SYMBOL_GPL(register_cu3088_discipline);
148EXPORT_SYMBOL_GPL(unregister_cu3088_discipline);
diff --git a/drivers/s390/net/cu3088.h b/drivers/s390/net/cu3088.h
deleted file mode 100644
index d8558a7105a5..000000000000
--- a/drivers/s390/net/cu3088.h
+++ /dev/null
@@ -1,41 +0,0 @@
1#ifndef _CU3088_H
2#define _CU3088_H
3
4/**
5 * Enum for classifying detected devices.
6 */
7enum channel_types {
8 /* Device is not a channel */
9 channel_type_none,
10
11 /* Device is a CTC/A */
12 channel_type_parallel,
13
14 /* Device is a ESCON channel */
15 channel_type_escon,
16
17 /* Device is a FICON channel */
18 channel_type_ficon,
19
20 /* Device is a OSA2 card */
21 channel_type_osa2,
22
23 /* Device is a CLAW channel device */
24 channel_type_claw,
25
26 /* Device is a channel, but we don't know
27 * anything about it */
28 channel_type_unknown,
29
30 /* Device is an unsupported model */
31 channel_type_unsupported,
32
33 /* number of type entries */
34 num_channel_types
35};
36
37extern const char *cu3088_type[num_channel_types];
38extern int register_cu3088_discipline(struct ccwgroup_driver *);
39extern void unregister_cu3088_discipline(struct ccwgroup_driver *);
40
41#endif
diff --git a/drivers/s390/net/fsm.c b/drivers/s390/net/fsm.c
index 2c1db8036b7c..cae48cbc5e96 100644
--- a/drivers/s390/net/fsm.c
+++ b/drivers/s390/net/fsm.c
@@ -27,6 +27,7 @@ init_fsm(char *name, const char **state_names, const char **event_names, int nr_
27 return NULL; 27 return NULL;
28 } 28 }
29 strlcpy(this->name, name, sizeof(this->name)); 29 strlcpy(this->name, name, sizeof(this->name));
30 init_waitqueue_head(&this->wait_q);
30 31
31 f = kzalloc(sizeof(fsm), order); 32 f = kzalloc(sizeof(fsm), order);
32 if (f == NULL) { 33 if (f == NULL) {
diff --git a/drivers/s390/net/fsm.h b/drivers/s390/net/fsm.h
index af679c10f1bd..1e8b235d95b5 100644
--- a/drivers/s390/net/fsm.h
+++ b/drivers/s390/net/fsm.h
@@ -66,6 +66,7 @@ typedef struct fsm_instance_t {
66 char name[16]; 66 char name[16];
67 void *userdata; 67 void *userdata;
68 int userint; 68 int userint;
69 wait_queue_head_t wait_q;
69#if FSM_DEBUG_HISTORY 70#if FSM_DEBUG_HISTORY
70 int history_index; 71 int history_index;
71 int history_size; 72 int history_size;
@@ -197,6 +198,7 @@ fsm_newstate(fsm_instance *fi, int newstate)
197 printk(KERN_DEBUG "fsm(%s): New state %s\n", fi->name, 198 printk(KERN_DEBUG "fsm(%s): New state %s\n", fi->name,
198 fi->f->state_names[newstate]); 199 fi->f->state_names[newstate]);
199#endif 200#endif
201 wake_up(&fi->wait_q);
200} 202}
201 203
202/** 204/**
diff --git a/drivers/s390/net/lcs.c b/drivers/s390/net/lcs.c
index a70de9b4bf29..1d43d23f5ea3 100644
--- a/drivers/s390/net/lcs.c
+++ b/drivers/s390/net/lcs.c
@@ -47,7 +47,6 @@
47#include <asm/ccwgroup.h> 47#include <asm/ccwgroup.h>
48 48
49#include "lcs.h" 49#include "lcs.h"
50#include "cu3088.h"
51 50
52 51
53#if !defined(CONFIG_NET_ETHERNET) && \ 52#if !defined(CONFIG_NET_ETHERNET) && \
@@ -60,7 +59,11 @@
60 */ 59 */
61 60
62static char version[] __initdata = "LCS driver"; 61static char version[] __initdata = "LCS driver";
63static char debug_buffer[255]; 62
63/**
64 * the root device for lcs group devices
65 */
66static struct device *lcs_root_dev;
64 67
65/** 68/**
66 * Some prototypes. 69 * Some prototypes.
@@ -76,6 +79,7 @@ static int lcs_recovery(void *ptr);
76/** 79/**
77 * Debug Facility Stuff 80 * Debug Facility Stuff
78 */ 81 */
82static char debug_buffer[255];
79static debug_info_t *lcs_dbf_setup; 83static debug_info_t *lcs_dbf_setup;
80static debug_info_t *lcs_dbf_trace; 84static debug_info_t *lcs_dbf_trace;
81 85
@@ -889,7 +893,7 @@ lcs_send_lancmd(struct lcs_card *card, struct lcs_buffer *buffer,
889 rc = lcs_ready_buffer(&card->write, buffer); 893 rc = lcs_ready_buffer(&card->write, buffer);
890 if (rc) 894 if (rc)
891 return rc; 895 return rc;
892 init_timer(&timer); 896 init_timer_on_stack(&timer);
893 timer.function = lcs_lancmd_timeout; 897 timer.function = lcs_lancmd_timeout;
894 timer.data = (unsigned long) reply; 898 timer.data = (unsigned long) reply;
895 timer.expires = jiffies + HZ*card->lancmd_timeout; 899 timer.expires = jiffies + HZ*card->lancmd_timeout;
@@ -1968,6 +1972,15 @@ lcs_portno_store (struct device *dev, struct device_attribute *attr, const char
1968 1972
1969static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store); 1973static DEVICE_ATTR(portno, 0644, lcs_portno_show, lcs_portno_store);
1970 1974
1975const char *lcs_type[] = {
1976 "not a channel",
1977 "2216 parallel",
1978 "2216 channel",
1979 "OSA LCS card",
1980 "unknown channel type",
1981 "unsupported channel type",
1982};
1983
1971static ssize_t 1984static ssize_t
1972lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf) 1985lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
1973{ 1986{
@@ -1977,7 +1990,7 @@ lcs_type_show(struct device *dev, struct device_attribute *attr, char *buf)
1977 if (!cgdev) 1990 if (!cgdev)
1978 return -ENODEV; 1991 return -ENODEV;
1979 1992
1980 return sprintf(buf, "%s\n", cu3088_type[cgdev->cdev[0]->id.driver_info]); 1993 return sprintf(buf, "%s\n", lcs_type[cgdev->cdev[0]->id.driver_info]);
1981} 1994}
1982 1995
1983static DEVICE_ATTR(type, 0444, lcs_type_show, NULL); 1996static DEVICE_ATTR(type, 0444, lcs_type_show, NULL);
@@ -2130,8 +2143,12 @@ lcs_new_device(struct ccwgroup_device *ccwgdev)
2130 card->write.ccwdev = ccwgdev->cdev[1]; 2143 card->write.ccwdev = ccwgdev->cdev[1];
2131 2144
2132 recover_state = card->state; 2145 recover_state = card->state;
2133 ccw_device_set_online(card->read.ccwdev); 2146 rc = ccw_device_set_online(card->read.ccwdev);
2134 ccw_device_set_online(card->write.ccwdev); 2147 if (rc)
2148 goto out_err;
2149 rc = ccw_device_set_online(card->write.ccwdev);
2150 if (rc)
2151 goto out_werr;
2135 2152
2136 LCS_DBF_TEXT(3, setup, "lcsnewdv"); 2153 LCS_DBF_TEXT(3, setup, "lcsnewdv");
2137 2154
@@ -2210,8 +2227,10 @@ netdev_out:
2210 return 0; 2227 return 0;
2211out: 2228out:
2212 2229
2213 ccw_device_set_offline(card->read.ccwdev);
2214 ccw_device_set_offline(card->write.ccwdev); 2230 ccw_device_set_offline(card->write.ccwdev);
2231out_werr:
2232 ccw_device_set_offline(card->read.ccwdev);
2233out_err:
2215 return -ENODEV; 2234 return -ENODEV;
2216} 2235}
2217 2236
@@ -2364,6 +2383,22 @@ static int lcs_restore(struct ccwgroup_device *gdev)
2364 return lcs_pm_resume(card); 2383 return lcs_pm_resume(card);
2365} 2384}
2366 2385
2386static struct ccw_device_id lcs_ids[] = {
2387 {CCW_DEVICE(0x3088, 0x08), .driver_info = lcs_channel_type_parallel},
2388 {CCW_DEVICE(0x3088, 0x1f), .driver_info = lcs_channel_type_2216},
2389 {CCW_DEVICE(0x3088, 0x60), .driver_info = lcs_channel_type_osa2},
2390 {},
2391};
2392MODULE_DEVICE_TABLE(ccw, lcs_ids);
2393
2394static struct ccw_driver lcs_ccw_driver = {
2395 .owner = THIS_MODULE,
2396 .name = "lcs",
2397 .ids = lcs_ids,
2398 .probe = ccwgroup_probe_ccwdev,
2399 .remove = ccwgroup_remove_ccwdev,
2400};
2401
2367/** 2402/**
2368 * LCS ccwgroup driver registration 2403 * LCS ccwgroup driver registration
2369 */ 2404 */
@@ -2383,6 +2418,33 @@ static struct ccwgroup_driver lcs_group_driver = {
2383 .restore = lcs_restore, 2418 .restore = lcs_restore,
2384}; 2419};
2385 2420
2421static ssize_t
2422lcs_driver_group_store(struct device_driver *ddrv, const char *buf,
2423 size_t count)
2424{
2425 int err;
2426 err = ccwgroup_create_from_string(lcs_root_dev,
2427 lcs_group_driver.driver_id,
2428 &lcs_ccw_driver, 2, buf);
2429 return err ? err : count;
2430}
2431
2432static DRIVER_ATTR(group, 0200, NULL, lcs_driver_group_store);
2433
2434static struct attribute *lcs_group_attrs[] = {
2435 &driver_attr_group.attr,
2436 NULL,
2437};
2438
2439static struct attribute_group lcs_group_attr_group = {
2440 .attrs = lcs_group_attrs,
2441};
2442
2443static struct attribute_group *lcs_group_attr_groups[] = {
2444 &lcs_group_attr_group,
2445 NULL,
2446};
2447
2386/** 2448/**
2387 * LCS Module/Kernel initialization function 2449 * LCS Module/Kernel initialization function
2388 */ 2450 */
@@ -2394,17 +2456,30 @@ __init lcs_init_module(void)
2394 pr_info("Loading %s\n", version); 2456 pr_info("Loading %s\n", version);
2395 rc = lcs_register_debug_facility(); 2457 rc = lcs_register_debug_facility();
2396 LCS_DBF_TEXT(0, setup, "lcsinit"); 2458 LCS_DBF_TEXT(0, setup, "lcsinit");
2397 if (rc) { 2459 if (rc)
2398 pr_err("Initialization failed\n"); 2460 goto out_err;
2399 return rc; 2461 lcs_root_dev = root_device_register("lcs");
2400 } 2462 rc = IS_ERR(lcs_root_dev) ? PTR_ERR(lcs_root_dev) : 0;
2401 2463 if (rc)
2402 rc = register_cu3088_discipline(&lcs_group_driver); 2464 goto register_err;
2403 if (rc) { 2465 rc = ccw_driver_register(&lcs_ccw_driver);
2404 pr_err("Initialization failed\n"); 2466 if (rc)
2405 return rc; 2467 goto ccw_err;
2406 } 2468 lcs_group_driver.driver.groups = lcs_group_attr_groups;
2469 rc = ccwgroup_driver_register(&lcs_group_driver);
2470 if (rc)
2471 goto ccwgroup_err;
2407 return 0; 2472 return 0;
2473
2474ccwgroup_err:
2475 ccw_driver_unregister(&lcs_ccw_driver);
2476ccw_err:
2477 root_device_unregister(lcs_root_dev);
2478register_err:
2479 lcs_unregister_debug_facility();
2480out_err:
2481 pr_err("Initializing the lcs device driver failed\n");
2482 return rc;
2408} 2483}
2409 2484
2410 2485
@@ -2416,7 +2491,11 @@ __exit lcs_cleanup_module(void)
2416{ 2491{
2417 pr_info("Terminating lcs module.\n"); 2492 pr_info("Terminating lcs module.\n");
2418 LCS_DBF_TEXT(0, trace, "cleanup"); 2493 LCS_DBF_TEXT(0, trace, "cleanup");
2419 unregister_cu3088_discipline(&lcs_group_driver); 2494 driver_remove_file(&lcs_group_driver.driver,
2495 &driver_attr_group);
2496 ccwgroup_driver_unregister(&lcs_group_driver);
2497 ccw_driver_unregister(&lcs_ccw_driver);
2498 root_device_unregister(lcs_root_dev);
2420 lcs_unregister_debug_facility(); 2499 lcs_unregister_debug_facility();
2421} 2500}
2422 2501
diff --git a/drivers/s390/net/lcs.h b/drivers/s390/net/lcs.h
index 6d668642af27..8c03392ac833 100644
--- a/drivers/s390/net/lcs.h
+++ b/drivers/s390/net/lcs.h
@@ -36,6 +36,24 @@ static inline int lcs_dbf_passes(debug_info_t *dbf_grp, int level)
36#define CARD_FROM_DEV(cdev) \ 36#define CARD_FROM_DEV(cdev) \
37 (struct lcs_card *) dev_get_drvdata( \ 37 (struct lcs_card *) dev_get_drvdata( \
38 &((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev); 38 &((struct ccwgroup_device *)dev_get_drvdata(&cdev->dev))->dev);
39
40/**
41 * Enum for classifying detected devices.
42 */
43enum lcs_channel_types {
44 /* Device is not a channel */
45 lcs_channel_type_none,
46
47 /* Device is a 2216 channel */
48 lcs_channel_type_parallel,
49
50 /* Device is a 2216 channel */
51 lcs_channel_type_2216,
52
53 /* Device is a OSA2 card */
54 lcs_channel_type_osa2
55};
56
39/** 57/**
40 * CCW commands used in this driver 58 * CCW commands used in this driver
41 */ 59 */
diff --git a/drivers/s390/net/netiucv.c b/drivers/s390/net/netiucv.c
index c84eadd3602a..395c04c2b00f 100644
--- a/drivers/s390/net/netiucv.c
+++ b/drivers/s390/net/netiucv.c
@@ -741,13 +741,13 @@ static void conn_action_txdone(fsm_instance *fi, int event, void *arg)
741 if (single_flag) { 741 if (single_flag) {
742 if ((skb = skb_dequeue(&conn->commit_queue))) { 742 if ((skb = skb_dequeue(&conn->commit_queue))) {
743 atomic_dec(&skb->users); 743 atomic_dec(&skb->users);
744 dev_kfree_skb_any(skb);
745 if (privptr) { 744 if (privptr) {
746 privptr->stats.tx_packets++; 745 privptr->stats.tx_packets++;
747 privptr->stats.tx_bytes += 746 privptr->stats.tx_bytes +=
748 (skb->len - NETIUCV_HDRLEN 747 (skb->len - NETIUCV_HDRLEN
749 - NETIUCV_HDRLEN); 748 - NETIUCV_HDRLEN);
750 } 749 }
750 dev_kfree_skb_any(skb);
751 } 751 }
752 } 752 }
753 conn->tx_buff->data = conn->tx_buff->head; 753 conn->tx_buff->data = conn->tx_buff->head;
diff --git a/drivers/s390/net/qeth_core.h b/drivers/s390/net/qeth_core.h
index 31a2b4e502ce..b232693378cd 100644
--- a/drivers/s390/net/qeth_core.h
+++ b/drivers/s390/net/qeth_core.h
@@ -122,7 +122,6 @@ struct qeth_perf_stats {
122 __u64 outbound_do_qdio_start_time; 122 __u64 outbound_do_qdio_start_time;
123 unsigned int outbound_do_qdio_cnt; 123 unsigned int outbound_do_qdio_cnt;
124 unsigned int outbound_do_qdio_time; 124 unsigned int outbound_do_qdio_time;
125 /* eddp data */
126 unsigned int large_send_bytes; 125 unsigned int large_send_bytes;
127 unsigned int large_send_cnt; 126 unsigned int large_send_cnt;
128 unsigned int sg_skbs_sent; 127 unsigned int sg_skbs_sent;
@@ -135,6 +134,7 @@ struct qeth_perf_stats {
135 unsigned int sg_frags_rx; 134 unsigned int sg_frags_rx;
136 unsigned int sg_alloc_page_rx; 135 unsigned int sg_alloc_page_rx;
137 unsigned int tx_csum; 136 unsigned int tx_csum;
137 unsigned int tx_lin;
138}; 138};
139 139
140/* Routing stuff */ 140/* Routing stuff */
@@ -648,6 +648,7 @@ struct qeth_card_options {
648 enum qeth_large_send_types large_send; 648 enum qeth_large_send_types large_send;
649 int performance_stats; 649 int performance_stats;
650 int rx_sg_cb; 650 int rx_sg_cb;
651 enum qeth_ipa_isolation_modes isolation;
651}; 652};
652 653
653/* 654/*
@@ -776,7 +777,6 @@ static inline void qeth_put_buffer_pool_entry(struct qeth_card *card,
776 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list); 777 list_add_tail(&entry->list, &card->qdio.in_buf_pool.entry_list);
777} 778}
778 779
779struct qeth_eddp_context;
780extern struct ccwgroup_driver qeth_l2_ccwgroup_driver; 780extern struct ccwgroup_driver qeth_l2_ccwgroup_driver;
781extern struct ccwgroup_driver qeth_l3_ccwgroup_driver; 781extern struct ccwgroup_driver qeth_l3_ccwgroup_driver;
782const char *qeth_get_cardname_short(struct qeth_card *); 782const char *qeth_get_cardname_short(struct qeth_card *);
@@ -836,7 +836,6 @@ void qeth_prepare_ipa_cmd(struct qeth_card *, struct qeth_cmd_buffer *, char);
836struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *); 836struct qeth_cmd_buffer *qeth_wait_for_buffer(struct qeth_channel *);
837int qeth_mdio_read(struct net_device *, int, int); 837int qeth_mdio_read(struct net_device *, int, int);
838int qeth_snmp_command(struct qeth_card *, char __user *); 838int qeth_snmp_command(struct qeth_card *, char __user *);
839int qeth_set_large_send(struct qeth_card *, enum qeth_large_send_types);
840struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *, __u32, __u32); 839struct qeth_cmd_buffer *qeth_get_adapter_cmd(struct qeth_card *, __u32, __u32);
841int qeth_default_setadapterparms_cb(struct qeth_card *, struct qeth_reply *, 840int qeth_default_setadapterparms_cb(struct qeth_card *, struct qeth_reply *,
842 unsigned long); 841 unsigned long);
@@ -849,13 +848,14 @@ int qeth_do_send_packet_fast(struct qeth_card *, struct qeth_qdio_out_q *,
849 struct sk_buff *, struct qeth_hdr *, int, int, int); 848 struct sk_buff *, struct qeth_hdr *, int, int, int);
850int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *, 849int qeth_do_send_packet(struct qeth_card *, struct qeth_qdio_out_q *,
851 struct sk_buff *, struct qeth_hdr *, int); 850 struct sk_buff *, struct qeth_hdr *, int);
852int qeth_core_get_stats_count(struct net_device *); 851int qeth_core_get_sset_count(struct net_device *, int);
853void qeth_core_get_ethtool_stats(struct net_device *, 852void qeth_core_get_ethtool_stats(struct net_device *,
854 struct ethtool_stats *, u64 *); 853 struct ethtool_stats *, u64 *);
855void qeth_core_get_strings(struct net_device *, u32, u8 *); 854void qeth_core_get_strings(struct net_device *, u32, u8 *);
856void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *); 855void qeth_core_get_drvinfo(struct net_device *, struct ethtool_drvinfo *);
857void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...); 856void qeth_dbf_longtext(enum qeth_dbf_names dbf_nix, int level, char *text, ...);
858int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *); 857int qeth_core_ethtool_get_settings(struct net_device *, struct ethtool_cmd *);
858int qeth_set_access_ctrl_online(struct qeth_card *card);
859 859
860/* exports for OSN */ 860/* exports for OSN */
861int qeth_osn_assist(struct net_device *, void *, int); 861int qeth_osn_assist(struct net_device *, void *, int);
diff --git a/drivers/s390/net/qeth_core_main.c b/drivers/s390/net/qeth_core_main.c
index c4a42d970158..d34804d5ece1 100644
--- a/drivers/s390/net/qeth_core_main.c
+++ b/drivers/s390/net/qeth_core_main.c
@@ -270,41 +270,6 @@ int qeth_realloc_buffer_pool(struct qeth_card *card, int bufcnt)
270 return qeth_alloc_buffer_pool(card); 270 return qeth_alloc_buffer_pool(card);
271} 271}
272 272
273int qeth_set_large_send(struct qeth_card *card,
274 enum qeth_large_send_types type)
275{
276 int rc = 0;
277
278 if (card->dev == NULL) {
279 card->options.large_send = type;
280 return 0;
281 }
282 if (card->state == CARD_STATE_UP)
283 netif_tx_disable(card->dev);
284 card->options.large_send = type;
285 switch (card->options.large_send) {
286 case QETH_LARGE_SEND_TSO:
287 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
288 card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
289 NETIF_F_HW_CSUM;
290 } else {
291 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
292 NETIF_F_HW_CSUM);
293 card->options.large_send = QETH_LARGE_SEND_NO;
294 rc = -EOPNOTSUPP;
295 }
296 break;
297 default: /* includes QETH_LARGE_SEND_NO */
298 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
299 NETIF_F_HW_CSUM);
300 break;
301 }
302 if (card->state == CARD_STATE_UP)
303 netif_wake_queue(card->dev);
304 return rc;
305}
306EXPORT_SYMBOL_GPL(qeth_set_large_send);
307
308static int qeth_issue_next_read(struct qeth_card *card) 273static int qeth_issue_next_read(struct qeth_card *card)
309{ 274{
310 int rc; 275 int rc;
@@ -1079,6 +1044,7 @@ static void qeth_set_intial_options(struct qeth_card *card)
1079 card->options.add_hhlen = DEFAULT_ADD_HHLEN; 1044 card->options.add_hhlen = DEFAULT_ADD_HHLEN;
1080 card->options.performance_stats = 0; 1045 card->options.performance_stats = 0;
1081 card->options.rx_sg_cb = QETH_RX_SG_CB; 1046 card->options.rx_sg_cb = QETH_RX_SG_CB;
1047 card->options.isolation = ISOLATION_MODE_NONE;
1082} 1048}
1083 1049
1084static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread) 1050static int qeth_do_start_thread(struct qeth_card *card, unsigned long thread)
@@ -3389,6 +3355,156 @@ int qeth_setadpparms_change_macaddr(struct qeth_card *card)
3389} 3355}
3390EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr); 3356EXPORT_SYMBOL_GPL(qeth_setadpparms_change_macaddr);
3391 3357
3358static int qeth_setadpparms_set_access_ctrl_cb(struct qeth_card *card,
3359 struct qeth_reply *reply, unsigned long data)
3360{
3361 struct qeth_ipa_cmd *cmd;
3362 struct qeth_set_access_ctrl *access_ctrl_req;
3363 int rc;
3364
3365 QETH_DBF_TEXT(TRACE, 4, "setaccb");
3366
3367 cmd = (struct qeth_ipa_cmd *) data;
3368 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
3369 QETH_DBF_TEXT_(SETUP, 2, "setaccb");
3370 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
3371 QETH_DBF_TEXT_(SETUP, 2, "rc=%d",
3372 cmd->data.setadapterparms.hdr.return_code);
3373 switch (cmd->data.setadapterparms.hdr.return_code) {
3374 case SET_ACCESS_CTRL_RC_SUCCESS:
3375 case SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED:
3376 case SET_ACCESS_CTRL_RC_ALREADY_ISOLATED:
3377 {
3378 card->options.isolation = access_ctrl_req->subcmd_code;
3379 if (card->options.isolation == ISOLATION_MODE_NONE) {
3380 dev_info(&card->gdev->dev,
3381 "QDIO data connection isolation is deactivated\n");
3382 } else {
3383 dev_info(&card->gdev->dev,
3384 "QDIO data connection isolation is activated\n");
3385 }
3386 QETH_DBF_MESSAGE(3, "OK:SET_ACCESS_CTRL(%s, %d)==%d\n",
3387 card->gdev->dev.kobj.name,
3388 access_ctrl_req->subcmd_code,
3389 cmd->data.setadapterparms.hdr.return_code);
3390 rc = 0;
3391 break;
3392 }
3393 case SET_ACCESS_CTRL_RC_NOT_SUPPORTED:
3394 {
3395 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_CTRL(%s,%d)==%d\n",
3396 card->gdev->dev.kobj.name,
3397 access_ctrl_req->subcmd_code,
3398 cmd->data.setadapterparms.hdr.return_code);
3399 dev_err(&card->gdev->dev, "Adapter does not "
3400 "support QDIO data connection isolation\n");
3401
3402 /* ensure isolation mode is "none" */
3403 card->options.isolation = ISOLATION_MODE_NONE;
3404 rc = -EOPNOTSUPP;
3405 break;
3406 }
3407 case SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER:
3408 {
3409 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
3410 card->gdev->dev.kobj.name,
3411 access_ctrl_req->subcmd_code,
3412 cmd->data.setadapterparms.hdr.return_code);
3413 dev_err(&card->gdev->dev,
3414 "Adapter is dedicated. "
3415 "QDIO data connection isolation not supported\n");
3416
3417 /* ensure isolation mode is "none" */
3418 card->options.isolation = ISOLATION_MODE_NONE;
3419 rc = -EOPNOTSUPP;
3420 break;
3421 }
3422 case SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF:
3423 {
3424 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d\n",
3425 card->gdev->dev.kobj.name,
3426 access_ctrl_req->subcmd_code,
3427 cmd->data.setadapterparms.hdr.return_code);
3428 dev_err(&card->gdev->dev,
3429 "TSO does not permit QDIO data connection isolation\n");
3430
3431 /* ensure isolation mode is "none" */
3432 card->options.isolation = ISOLATION_MODE_NONE;
3433 rc = -EPERM;
3434 break;
3435 }
3436 default:
3437 {
3438 /* this should never happen */
3439 QETH_DBF_MESSAGE(3, "ERR:SET_ACCESS_MODE(%s,%d)==%d"
3440 "==UNKNOWN\n",
3441 card->gdev->dev.kobj.name,
3442 access_ctrl_req->subcmd_code,
3443 cmd->data.setadapterparms.hdr.return_code);
3444
3445 /* ensure isolation mode is "none" */
3446 card->options.isolation = ISOLATION_MODE_NONE;
3447 rc = 0;
3448 break;
3449 }
3450 }
3451 qeth_default_setadapterparms_cb(card, reply, (unsigned long) cmd);
3452 return rc;
3453}
3454
3455static int qeth_setadpparms_set_access_ctrl(struct qeth_card *card,
3456 enum qeth_ipa_isolation_modes isolation)
3457{
3458 int rc;
3459 struct qeth_cmd_buffer *iob;
3460 struct qeth_ipa_cmd *cmd;
3461 struct qeth_set_access_ctrl *access_ctrl_req;
3462
3463 QETH_DBF_TEXT(TRACE, 4, "setacctl");
3464
3465 QETH_DBF_TEXT_(SETUP, 2, "setacctl");
3466 QETH_DBF_TEXT_(SETUP, 2, "%s", card->gdev->dev.kobj.name);
3467
3468 iob = qeth_get_adapter_cmd(card, IPA_SETADP_SET_ACCESS_CONTROL,
3469 sizeof(struct qeth_ipacmd_setadpparms_hdr) +
3470 sizeof(struct qeth_set_access_ctrl));
3471 cmd = (struct qeth_ipa_cmd *)(iob->data+IPA_PDU_HEADER_SIZE);
3472 access_ctrl_req = &cmd->data.setadapterparms.data.set_access_ctrl;
3473 access_ctrl_req->subcmd_code = isolation;
3474
3475 rc = qeth_send_ipa_cmd(card, iob, qeth_setadpparms_set_access_ctrl_cb,
3476 NULL);
3477 QETH_DBF_TEXT_(SETUP, 2, "rc=%d", rc);
3478 return rc;
3479}
3480
3481int qeth_set_access_ctrl_online(struct qeth_card *card)
3482{
3483 int rc = 0;
3484
3485 QETH_DBF_TEXT(TRACE, 4, "setactlo");
3486
3487 if (card->info.type == QETH_CARD_TYPE_OSAE &&
3488 qeth_adp_supported(card, IPA_SETADP_SET_ACCESS_CONTROL)) {
3489 rc = qeth_setadpparms_set_access_ctrl(card,
3490 card->options.isolation);
3491 if (rc) {
3492 QETH_DBF_MESSAGE(3,
3493 "IPA(SET_ACCESS_CTRL,%s,%d) sent failed",
3494 card->gdev->dev.kobj.name,
3495 rc);
3496 }
3497 } else if (card->options.isolation != ISOLATION_MODE_NONE) {
3498 card->options.isolation = ISOLATION_MODE_NONE;
3499
3500 dev_err(&card->gdev->dev, "Adapter does not "
3501 "support QDIO data connection isolation\n");
3502 rc = -EOPNOTSUPP;
3503 }
3504 return rc;
3505}
3506EXPORT_SYMBOL_GPL(qeth_set_access_ctrl_online);
3507
3392void qeth_tx_timeout(struct net_device *dev) 3508void qeth_tx_timeout(struct net_device *dev)
3393{ 3509{
3394 struct qeth_card *card; 3510 struct qeth_card *card;
@@ -3732,30 +3848,36 @@ static int qeth_core_driver_group(const char *buf, struct device *root_dev,
3732int qeth_core_hardsetup_card(struct qeth_card *card) 3848int qeth_core_hardsetup_card(struct qeth_card *card)
3733{ 3849{
3734 struct qdio_ssqd_desc *ssqd; 3850 struct qdio_ssqd_desc *ssqd;
3735 int retries = 3; 3851 int retries = 0;
3736 int mpno = 0; 3852 int mpno = 0;
3737 int rc; 3853 int rc;
3738 3854
3739 QETH_DBF_TEXT(SETUP, 2, "hrdsetup"); 3855 QETH_DBF_TEXT(SETUP, 2, "hrdsetup");
3740 atomic_set(&card->force_alloc_skb, 0); 3856 atomic_set(&card->force_alloc_skb, 0);
3741retry: 3857retry:
3742 if (retries < 3) { 3858 if (retries)
3743 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n", 3859 QETH_DBF_MESSAGE(2, "%s Retrying to do IDX activates.\n",
3744 dev_name(&card->gdev->dev)); 3860 dev_name(&card->gdev->dev));
3745 ccw_device_set_offline(CARD_DDEV(card)); 3861 ccw_device_set_offline(CARD_DDEV(card));
3746 ccw_device_set_offline(CARD_WDEV(card)); 3862 ccw_device_set_offline(CARD_WDEV(card));
3747 ccw_device_set_offline(CARD_RDEV(card)); 3863 ccw_device_set_offline(CARD_RDEV(card));
3748 ccw_device_set_online(CARD_RDEV(card)); 3864 rc = ccw_device_set_online(CARD_RDEV(card));
3749 ccw_device_set_online(CARD_WDEV(card)); 3865 if (rc)
3750 ccw_device_set_online(CARD_DDEV(card)); 3866 goto retriable;
3751 } 3867 rc = ccw_device_set_online(CARD_WDEV(card));
3868 if (rc)
3869 goto retriable;
3870 rc = ccw_device_set_online(CARD_DDEV(card));
3871 if (rc)
3872 goto retriable;
3752 rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD); 3873 rc = qeth_qdio_clear_card(card, card->info.type != QETH_CARD_TYPE_IQD);
3874retriable:
3753 if (rc == -ERESTARTSYS) { 3875 if (rc == -ERESTARTSYS) {
3754 QETH_DBF_TEXT(SETUP, 2, "break1"); 3876 QETH_DBF_TEXT(SETUP, 2, "break1");
3755 return rc; 3877 return rc;
3756 } else if (rc) { 3878 } else if (rc) {
3757 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc); 3879 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3758 if (--retries < 0) 3880 if (++retries > 3)
3759 goto out; 3881 goto out;
3760 else 3882 else
3761 goto retry; 3883 goto retry;
@@ -4303,13 +4425,19 @@ static struct {
4303 {"tx do_QDIO time"}, 4425 {"tx do_QDIO time"},
4304 {"tx do_QDIO count"}, 4426 {"tx do_QDIO count"},
4305 {"tx csum"}, 4427 {"tx csum"},
4428 {"tx lin"},
4306}; 4429};
4307 4430
4308int qeth_core_get_stats_count(struct net_device *dev) 4431int qeth_core_get_sset_count(struct net_device *dev, int stringset)
4309{ 4432{
4310 return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN); 4433 switch (stringset) {
4434 case ETH_SS_STATS:
4435 return (sizeof(qeth_ethtool_stats_keys) / ETH_GSTRING_LEN);
4436 default:
4437 return -EINVAL;
4438 }
4311} 4439}
4312EXPORT_SYMBOL_GPL(qeth_core_get_stats_count); 4440EXPORT_SYMBOL_GPL(qeth_core_get_sset_count);
4313 4441
4314void qeth_core_get_ethtool_stats(struct net_device *dev, 4442void qeth_core_get_ethtool_stats(struct net_device *dev,
4315 struct ethtool_stats *stats, u64 *data) 4443 struct ethtool_stats *stats, u64 *data)
@@ -4355,6 +4483,7 @@ void qeth_core_get_ethtool_stats(struct net_device *dev,
4355 data[31] = card->perf_stats.outbound_do_qdio_time; 4483 data[31] = card->perf_stats.outbound_do_qdio_time;
4356 data[32] = card->perf_stats.outbound_do_qdio_cnt; 4484 data[32] = card->perf_stats.outbound_do_qdio_cnt;
4357 data[33] = card->perf_stats.tx_csum; 4485 data[33] = card->perf_stats.tx_csum;
4486 data[34] = card->perf_stats.tx_lin;
4358} 4487}
4359EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats); 4488EXPORT_SYMBOL_GPL(qeth_core_get_ethtool_stats);
4360 4489
diff --git a/drivers/s390/net/qeth_core_mpc.h b/drivers/s390/net/qeth_core_mpc.h
index eecb2ee62e85..52c03438dbec 100644
--- a/drivers/s390/net/qeth_core_mpc.h
+++ b/drivers/s390/net/qeth_core_mpc.h
@@ -234,18 +234,19 @@ enum qeth_ipa_setdelip_flags {
234 234
235/* SETADAPTER IPA Command: ****************************************************/ 235/* SETADAPTER IPA Command: ****************************************************/
236enum qeth_ipa_setadp_cmd { 236enum qeth_ipa_setadp_cmd {
237 IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x0001, 237 IPA_SETADP_QUERY_COMMANDS_SUPPORTED = 0x00000001L,
238 IPA_SETADP_ALTER_MAC_ADDRESS = 0x0002, 238 IPA_SETADP_ALTER_MAC_ADDRESS = 0x00000002L,
239 IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x0004, 239 IPA_SETADP_ADD_DELETE_GROUP_ADDRESS = 0x00000004L,
240 IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x0008, 240 IPA_SETADP_ADD_DELETE_FUNCTIONAL_ADDR = 0x00000008L,
241 IPA_SETADP_SET_ADDRESSING_MODE = 0x0010, 241 IPA_SETADP_SET_ADDRESSING_MODE = 0x00000010L,
242 IPA_SETADP_SET_CONFIG_PARMS = 0x0020, 242 IPA_SETADP_SET_CONFIG_PARMS = 0x00000020L,
243 IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x0040, 243 IPA_SETADP_SET_CONFIG_PARMS_EXTENDED = 0x00000040L,
244 IPA_SETADP_SET_BROADCAST_MODE = 0x0080, 244 IPA_SETADP_SET_BROADCAST_MODE = 0x00000080L,
245 IPA_SETADP_SEND_OSA_MESSAGE = 0x0100, 245 IPA_SETADP_SEND_OSA_MESSAGE = 0x00000100L,
246 IPA_SETADP_SET_SNMP_CONTROL = 0x0200, 246 IPA_SETADP_SET_SNMP_CONTROL = 0x00000200L,
247 IPA_SETADP_QUERY_CARD_INFO = 0x0400, 247 IPA_SETADP_QUERY_CARD_INFO = 0x00000400L,
248 IPA_SETADP_SET_PROMISC_MODE = 0x0800, 248 IPA_SETADP_SET_PROMISC_MODE = 0x00000800L,
249 IPA_SETADP_SET_ACCESS_CONTROL = 0x00010000L,
249}; 250};
250enum qeth_ipa_mac_ops { 251enum qeth_ipa_mac_ops {
251 CHANGE_ADDR_READ_MAC = 0, 252 CHANGE_ADDR_READ_MAC = 0,
@@ -264,6 +265,20 @@ enum qeth_ipa_promisc_modes {
264 SET_PROMISC_MODE_OFF = 0, 265 SET_PROMISC_MODE_OFF = 0,
265 SET_PROMISC_MODE_ON = 1, 266 SET_PROMISC_MODE_ON = 1,
266}; 267};
268enum qeth_ipa_isolation_modes {
269 ISOLATION_MODE_NONE = 0x00000000L,
270 ISOLATION_MODE_FWD = 0x00000001L,
271 ISOLATION_MODE_DROP = 0x00000002L,
272};
273enum qeth_ipa_set_access_mode_rc {
274 SET_ACCESS_CTRL_RC_SUCCESS = 0x0000,
275 SET_ACCESS_CTRL_RC_NOT_SUPPORTED = 0x0004,
276 SET_ACCESS_CTRL_RC_ALREADY_NOT_ISOLATED = 0x0008,
277 SET_ACCESS_CTRL_RC_ALREADY_ISOLATED = 0x0010,
278 SET_ACCESS_CTRL_RC_NONE_SHARED_ADAPTER = 0x0014,
279 SET_ACCESS_CTRL_RC_ACTIVE_CHECKSUM_OFF = 0x0018,
280};
281
267 282
268/* (SET)DELIP(M) IPA stuff ***************************************************/ 283/* (SET)DELIP(M) IPA stuff ***************************************************/
269struct qeth_ipacmd_setdelip4 { 284struct qeth_ipacmd_setdelip4 {
@@ -376,6 +391,11 @@ struct qeth_snmp_ureq {
376 struct qeth_snmp_cmd cmd; 391 struct qeth_snmp_cmd cmd;
377} __attribute__((packed)); 392} __attribute__((packed));
378 393
394/* SET_ACCESS_CONTROL: same format for request and reply */
395struct qeth_set_access_ctrl {
396 __u32 subcmd_code;
397} __attribute__((packed));
398
379struct qeth_ipacmd_setadpparms_hdr { 399struct qeth_ipacmd_setadpparms_hdr {
380 __u32 supp_hw_cmds; 400 __u32 supp_hw_cmds;
381 __u32 reserved1; 401 __u32 reserved1;
@@ -394,6 +414,7 @@ struct qeth_ipacmd_setadpparms {
394 struct qeth_query_cmds_supp query_cmds_supp; 414 struct qeth_query_cmds_supp query_cmds_supp;
395 struct qeth_change_addr change_addr; 415 struct qeth_change_addr change_addr;
396 struct qeth_snmp_cmd snmp; 416 struct qeth_snmp_cmd snmp;
417 struct qeth_set_access_ctrl set_access_ctrl;
397 __u32 mode; 418 __u32 mode;
398 } data; 419 } data;
399} __attribute__ ((packed)); 420} __attribute__ ((packed));
diff --git a/drivers/s390/net/qeth_core_sys.c b/drivers/s390/net/qeth_core_sys.c
index 33505c2a0e3a..9ff2b36fdc43 100644
--- a/drivers/s390/net/qeth_core_sys.c
+++ b/drivers/s390/net/qeth_core_sys.c
@@ -416,7 +416,11 @@ static ssize_t qeth_dev_layer2_store(struct device *dev,
416static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show, 416static DEVICE_ATTR(layer2, 0644, qeth_dev_layer2_show,
417 qeth_dev_layer2_store); 417 qeth_dev_layer2_store);
418 418
419static ssize_t qeth_dev_large_send_show(struct device *dev, 419#define ATTR_QETH_ISOLATION_NONE ("none")
420#define ATTR_QETH_ISOLATION_FWD ("forward")
421#define ATTR_QETH_ISOLATION_DROP ("drop")
422
423static ssize_t qeth_dev_isolation_show(struct device *dev,
420 struct device_attribute *attr, char *buf) 424 struct device_attribute *attr, char *buf)
421{ 425{
422 struct qeth_card *card = dev_get_drvdata(dev); 426 struct qeth_card *card = dev_get_drvdata(dev);
@@ -424,44 +428,69 @@ static ssize_t qeth_dev_large_send_show(struct device *dev,
424 if (!card) 428 if (!card)
425 return -EINVAL; 429 return -EINVAL;
426 430
427 switch (card->options.large_send) { 431 switch (card->options.isolation) {
428 case QETH_LARGE_SEND_NO: 432 case ISOLATION_MODE_NONE:
429 return sprintf(buf, "%s\n", "no"); 433 return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_NONE);
430 case QETH_LARGE_SEND_TSO: 434 case ISOLATION_MODE_FWD:
431 return sprintf(buf, "%s\n", "TSO"); 435 return snprintf(buf, 9, "%s\n", ATTR_QETH_ISOLATION_FWD);
436 case ISOLATION_MODE_DROP:
437 return snprintf(buf, 6, "%s\n", ATTR_QETH_ISOLATION_DROP);
432 default: 438 default:
433 return sprintf(buf, "%s\n", "N/A"); 439 return snprintf(buf, 5, "%s\n", "N/A");
434 } 440 }
435} 441}
436 442
437static ssize_t qeth_dev_large_send_store(struct device *dev, 443static ssize_t qeth_dev_isolation_store(struct device *dev,
438 struct device_attribute *attr, const char *buf, size_t count) 444 struct device_attribute *attr, const char *buf, size_t count)
439{ 445{
440 struct qeth_card *card = dev_get_drvdata(dev); 446 struct qeth_card *card = dev_get_drvdata(dev);
441 enum qeth_large_send_types type; 447 enum qeth_ipa_isolation_modes isolation;
442 int rc = 0; 448 int rc = 0;
443 char *tmp; 449 char *tmp, *curtoken;
450 curtoken = (char *) buf;
444 451
445 if (!card) 452 if (!card) {
446 return -EINVAL; 453 rc = -EINVAL;
447 tmp = strsep((char **) &buf, "\n"); 454 goto out;
448 if (!strcmp(tmp, "no")) { 455 }
449 type = QETH_LARGE_SEND_NO; 456
450 } else if (!strcmp(tmp, "TSO")) { 457 /* check for unknown, too, in case we do not yet know who we are */
451 type = QETH_LARGE_SEND_TSO; 458 if (card->info.type != QETH_CARD_TYPE_OSAE &&
459 card->info.type != QETH_CARD_TYPE_UNKNOWN) {
460 rc = -EOPNOTSUPP;
461 dev_err(&card->gdev->dev, "Adapter does not "
462 "support QDIO data connection isolation\n");
463 goto out;
464 }
465
466 /* parse input into isolation mode */
467 tmp = strsep(&curtoken, "\n");
468 if (!strcmp(tmp, ATTR_QETH_ISOLATION_NONE)) {
469 isolation = ISOLATION_MODE_NONE;
470 } else if (!strcmp(tmp, ATTR_QETH_ISOLATION_FWD)) {
471 isolation = ISOLATION_MODE_FWD;
472 } else if (!strcmp(tmp, ATTR_QETH_ISOLATION_DROP)) {
473 isolation = ISOLATION_MODE_DROP;
452 } else { 474 } else {
453 return -EINVAL; 475 rc = -EINVAL;
476 goto out;
454 } 477 }
455 if (card->options.large_send == type) 478 rc = count;
456 return count; 479
457 rc = qeth_set_large_send(card, type); 480 /* defer IP assist if device is offline (until discipline->set_online)*/
458 if (rc) 481 card->options.isolation = isolation;
459 return rc; 482 if (card->state == CARD_STATE_SOFTSETUP ||
460 return count; 483 card->state == CARD_STATE_UP) {
484 int ipa_rc = qeth_set_access_ctrl_online(card);
485 if (ipa_rc != 0)
486 rc = ipa_rc;
487 }
488out:
489 return rc;
461} 490}
462 491
463static DEVICE_ATTR(large_send, 0644, qeth_dev_large_send_show, 492static DEVICE_ATTR(isolation, 0644, qeth_dev_isolation_show,
464 qeth_dev_large_send_store); 493 qeth_dev_isolation_store);
465 494
466static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value) 495static ssize_t qeth_dev_blkt_show(char *buf, struct qeth_card *card, int value)
467{ 496{
@@ -582,7 +611,7 @@ static struct attribute *qeth_device_attrs[] = {
582 &dev_attr_recover.attr, 611 &dev_attr_recover.attr,
583 &dev_attr_performance_stats.attr, 612 &dev_attr_performance_stats.attr,
584 &dev_attr_layer2.attr, 613 &dev_attr_layer2.attr,
585 &dev_attr_large_send.attr, 614 &dev_attr_isolation.attr,
586 NULL, 615 NULL,
587}; 616};
588 617
diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c
index f4f3ca1393b2..0b763396d5d1 100644
--- a/drivers/s390/net/qeth_l2_main.c
+++ b/drivers/s390/net/qeth_l2_main.c
@@ -866,7 +866,7 @@ static const struct ethtool_ops qeth_l2_ethtool_ops = {
866 .get_link = ethtool_op_get_link, 866 .get_link = ethtool_op_get_link,
867 .get_strings = qeth_core_get_strings, 867 .get_strings = qeth_core_get_strings,
868 .get_ethtool_stats = qeth_core_get_ethtool_stats, 868 .get_ethtool_stats = qeth_core_get_ethtool_stats,
869 .get_stats_count = qeth_core_get_stats_count, 869 .get_sset_count = qeth_core_get_sset_count,
870 .get_drvinfo = qeth_core_get_drvinfo, 870 .get_drvinfo = qeth_core_get_drvinfo,
871 .get_settings = qeth_core_ethtool_get_settings, 871 .get_settings = qeth_core_ethtool_get_settings,
872}; 872};
@@ -874,7 +874,7 @@ static const struct ethtool_ops qeth_l2_ethtool_ops = {
874static const struct ethtool_ops qeth_l2_osn_ops = { 874static const struct ethtool_ops qeth_l2_osn_ops = {
875 .get_strings = qeth_core_get_strings, 875 .get_strings = qeth_core_get_strings,
876 .get_ethtool_stats = qeth_core_get_ethtool_stats, 876 .get_ethtool_stats = qeth_core_get_ethtool_stats,
877 .get_stats_count = qeth_core_get_stats_count, 877 .get_sset_count = qeth_core_get_sset_count,
878 .get_drvinfo = qeth_core_get_drvinfo, 878 .get_drvinfo = qeth_core_get_drvinfo,
879}; 879};
880 880
@@ -940,30 +940,17 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
940 940
941 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1); 941 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
942 recover_flag = card->state; 942 recover_flag = card->state;
943 rc = ccw_device_set_online(CARD_RDEV(card));
944 if (rc) {
945 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
946 return -EIO;
947 }
948 rc = ccw_device_set_online(CARD_WDEV(card));
949 if (rc) {
950 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
951 return -EIO;
952 }
953 rc = ccw_device_set_online(CARD_DDEV(card));
954 if (rc) {
955 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
956 return -EIO;
957 }
958
959 rc = qeth_core_hardsetup_card(card); 943 rc = qeth_core_hardsetup_card(card);
960 if (rc) { 944 if (rc) {
961 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 945 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
946 rc = -ENODEV;
962 goto out_remove; 947 goto out_remove;
963 } 948 }
964 949
965 if (!card->dev && qeth_l2_setup_netdev(card)) 950 if (!card->dev && qeth_l2_setup_netdev(card)) {
951 rc = -ENODEV;
966 goto out_remove; 952 goto out_remove;
953 }
967 954
968 if (card->info.type != QETH_CARD_TYPE_OSN) 955 if (card->info.type != QETH_CARD_TYPE_OSN)
969 qeth_l2_send_setmac(card, &card->dev->dev_addr[0]); 956 qeth_l2_send_setmac(card, &card->dev->dev_addr[0]);
@@ -983,12 +970,14 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
983 card->lan_online = 0; 970 card->lan_online = 0;
984 return 0; 971 return 0;
985 } 972 }
973 rc = -ENODEV;
986 goto out_remove; 974 goto out_remove;
987 } else 975 } else
988 card->lan_online = 1; 976 card->lan_online = 1;
989 977
990 if (card->info.type != QETH_CARD_TYPE_OSN) { 978 if (card->info.type != QETH_CARD_TYPE_OSN) {
991 qeth_set_large_send(card, card->options.large_send); 979 /* configure isolation level */
980 qeth_set_access_ctrl_online(card);
992 qeth_l2_process_vlans(card, 0); 981 qeth_l2_process_vlans(card, 0);
993 } 982 }
994 983
@@ -997,6 +986,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
997 rc = qeth_init_qdio_queues(card); 986 rc = qeth_init_qdio_queues(card);
998 if (rc) { 987 if (rc) {
999 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); 988 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
989 rc = -ENODEV;
1000 goto out_remove; 990 goto out_remove;
1001 } 991 }
1002 card->state = CARD_STATE_SOFTSETUP; 992 card->state = CARD_STATE_SOFTSETUP;
@@ -1018,6 +1008,7 @@ static int __qeth_l2_set_online(struct ccwgroup_device *gdev, int recovery_mode)
1018 /* let user_space know that device is online */ 1008 /* let user_space know that device is online */
1019 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE); 1009 kobject_uevent(&gdev->dev.kobj, KOBJ_CHANGE);
1020 return 0; 1010 return 0;
1011
1021out_remove: 1012out_remove:
1022 card->use_hard_stop = 1; 1013 card->use_hard_stop = 1;
1023 qeth_l2_stop_card(card, 0); 1014 qeth_l2_stop_card(card, 0);
@@ -1028,7 +1019,7 @@ out_remove:
1028 card->state = CARD_STATE_RECOVER; 1019 card->state = CARD_STATE_RECOVER;
1029 else 1020 else
1030 card->state = CARD_STATE_DOWN; 1021 card->state = CARD_STATE_DOWN;
1031 return -ENODEV; 1022 return rc;
1032} 1023}
1033 1024
1034static int qeth_l2_set_online(struct ccwgroup_device *gdev) 1025static int qeth_l2_set_online(struct ccwgroup_device *gdev)
diff --git a/drivers/s390/net/qeth_l3.h b/drivers/s390/net/qeth_l3.h
index 9f143c83bba3..321988fa9f7d 100644
--- a/drivers/s390/net/qeth_l3.h
+++ b/drivers/s390/net/qeth_l3.h
@@ -60,5 +60,7 @@ void qeth_l3_del_vipa(struct qeth_card *, enum qeth_prot_versions, const u8 *);
60int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *); 60int qeth_l3_add_rxip(struct qeth_card *, enum qeth_prot_versions, const u8 *);
61void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions, 61void qeth_l3_del_rxip(struct qeth_card *card, enum qeth_prot_versions,
62 const u8 *); 62 const u8 *);
63int qeth_l3_set_large_send(struct qeth_card *, enum qeth_large_send_types);
64int qeth_l3_set_rx_csum(struct qeth_card *, enum qeth_checksum_types);
63 65
64#endif /* __QETH_L3_H__ */ 66#endif /* __QETH_L3_H__ */
diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c
index 073b6d354915..fd1b6ed3721f 100644
--- a/drivers/s390/net/qeth_l3_main.c
+++ b/drivers/s390/net/qeth_l3_main.c
@@ -41,6 +41,32 @@ static int qeth_l3_deregister_addr_entry(struct qeth_card *,
41static int __qeth_l3_set_online(struct ccwgroup_device *, int); 41static int __qeth_l3_set_online(struct ccwgroup_device *, int);
42static int __qeth_l3_set_offline(struct ccwgroup_device *, int); 42static int __qeth_l3_set_offline(struct ccwgroup_device *, int);
43 43
44int qeth_l3_set_large_send(struct qeth_card *card,
45 enum qeth_large_send_types type)
46{
47 int rc = 0;
48
49 card->options.large_send = type;
50 if (card->dev == NULL)
51 return 0;
52
53 if (card->options.large_send == QETH_LARGE_SEND_TSO) {
54 if (qeth_is_supported(card, IPA_OUTBOUND_TSO)) {
55 card->dev->features |= NETIF_F_TSO | NETIF_F_SG |
56 NETIF_F_HW_CSUM;
57 } else {
58 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
59 NETIF_F_HW_CSUM);
60 card->options.large_send = QETH_LARGE_SEND_NO;
61 rc = -EOPNOTSUPP;
62 }
63 } else {
64 card->dev->features &= ~(NETIF_F_TSO | NETIF_F_SG |
65 NETIF_F_HW_CSUM);
66 card->options.large_send = QETH_LARGE_SEND_NO;
67 }
68 return rc;
69}
44 70
45static int qeth_l3_isxdigit(char *buf) 71static int qeth_l3_isxdigit(char *buf)
46{ 72{
@@ -1439,6 +1465,35 @@ static int qeth_l3_send_checksum_command(struct qeth_card *card)
1439 return 0; 1465 return 0;
1440} 1466}
1441 1467
1468int qeth_l3_set_rx_csum(struct qeth_card *card,
1469 enum qeth_checksum_types csum_type)
1470{
1471 int rc = 0;
1472
1473 if (card->options.checksum_type == HW_CHECKSUMMING) {
1474 if ((csum_type != HW_CHECKSUMMING) &&
1475 (card->state != CARD_STATE_DOWN)) {
1476 rc = qeth_l3_send_simple_setassparms(card,
1477 IPA_INBOUND_CHECKSUM, IPA_CMD_ASS_STOP, 0);
1478 if (rc)
1479 return -EIO;
1480 }
1481 } else {
1482 if (csum_type == HW_CHECKSUMMING) {
1483 if (card->state != CARD_STATE_DOWN) {
1484 if (!qeth_is_supported(card,
1485 IPA_INBOUND_CHECKSUM))
1486 return -EPERM;
1487 rc = qeth_l3_send_checksum_command(card);
1488 if (rc)
1489 return -EIO;
1490 }
1491 }
1492 }
1493 card->options.checksum_type = csum_type;
1494 return rc;
1495}
1496
1442static int qeth_l3_start_ipa_checksum(struct qeth_card *card) 1497static int qeth_l3_start_ipa_checksum(struct qeth_card *card)
1443{ 1498{
1444 int rc = 0; 1499 int rc = 0;
@@ -1506,6 +1561,8 @@ static int qeth_l3_start_ipa_tso(struct qeth_card *card)
1506static int qeth_l3_start_ipassists(struct qeth_card *card) 1561static int qeth_l3_start_ipassists(struct qeth_card *card)
1507{ 1562{
1508 QETH_DBF_TEXT(TRACE, 3, "strtipas"); 1563 QETH_DBF_TEXT(TRACE, 3, "strtipas");
1564
1565 qeth_set_access_ctrl_online(card); /* go on*/
1509 qeth_l3_start_ipa_arp_processing(card); /* go on*/ 1566 qeth_l3_start_ipa_arp_processing(card); /* go on*/
1510 qeth_l3_start_ipa_ip_fragmentation(card); /* go on*/ 1567 qeth_l3_start_ipa_ip_fragmentation(card); /* go on*/
1511 qeth_l3_start_ipa_source_mac(card); /* go on*/ 1568 qeth_l3_start_ipa_source_mac(card); /* go on*/
@@ -2684,6 +2741,24 @@ static void qeth_tx_csum(struct sk_buff *skb)
2684 *(__sum16 *)(skb->data + offset) = csum_fold(csum); 2741 *(__sum16 *)(skb->data + offset) = csum_fold(csum);
2685} 2742}
2686 2743
2744static inline int qeth_l3_tso_elements(struct sk_buff *skb)
2745{
2746 unsigned long tcpd = (unsigned long)tcp_hdr(skb) +
2747 tcp_hdr(skb)->doff * 4;
2748 int tcpd_len = skb->len - (tcpd - (unsigned long)skb->data);
2749 int elements = PFN_UP(tcpd + tcpd_len) - PFN_DOWN(tcpd);
2750 elements += skb_shinfo(skb)->nr_frags;
2751 return elements;
2752}
2753
2754static inline int qeth_l3_tso_check(struct sk_buff *skb)
2755{
2756 int len = ((unsigned long)tcp_hdr(skb) + tcp_hdr(skb)->doff * 4) -
2757 (unsigned long)skb->data;
2758 return (((unsigned long)skb->data & PAGE_MASK) !=
2759 (((unsigned long)skb->data + len) & PAGE_MASK));
2760}
2761
2687static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev) 2762static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2688{ 2763{
2689 int rc; 2764 int rc;
@@ -2777,16 +2852,21 @@ static int qeth_l3_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
2777 /* fix hardware limitation: as long as we do not have sbal 2852 /* fix hardware limitation: as long as we do not have sbal
2778 * chaining we can not send long frag lists 2853 * chaining we can not send long frag lists
2779 */ 2854 */
2780 if ((large_send == QETH_LARGE_SEND_TSO) && 2855 if (large_send == QETH_LARGE_SEND_TSO) {
2781 ((skb_shinfo(new_skb)->nr_frags + 2) > 16)) { 2856 if (qeth_l3_tso_elements(new_skb) + 1 > 16) {
2782 if (skb_linearize(new_skb)) 2857 if (skb_linearize(new_skb))
2783 goto tx_drop; 2858 goto tx_drop;
2859 if (card->options.performance_stats)
2860 card->perf_stats.tx_lin++;
2861 }
2784 } 2862 }
2785 2863
2786 if ((large_send == QETH_LARGE_SEND_TSO) && 2864 if ((large_send == QETH_LARGE_SEND_TSO) &&
2787 (cast_type == RTN_UNSPEC)) { 2865 (cast_type == RTN_UNSPEC)) {
2788 hdr = (struct qeth_hdr *)skb_push(new_skb, 2866 hdr = (struct qeth_hdr *)skb_push(new_skb,
2789 sizeof(struct qeth_hdr_tso)); 2867 sizeof(struct qeth_hdr_tso));
2868 if (qeth_l3_tso_check(new_skb))
2869 QETH_DBF_MESSAGE(2, "tso skb misaligned\n");
2790 memset(hdr, 0, sizeof(struct qeth_hdr_tso)); 2870 memset(hdr, 0, sizeof(struct qeth_hdr_tso));
2791 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type); 2871 qeth_l3_fill_header(card, hdr, new_skb, ipv, cast_type);
2792 qeth_tso_fill_header(card, hdr, new_skb); 2872 qeth_tso_fill_header(card, hdr, new_skb);
@@ -2903,46 +2983,28 @@ static u32 qeth_l3_ethtool_get_rx_csum(struct net_device *dev)
2903static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data) 2983static int qeth_l3_ethtool_set_rx_csum(struct net_device *dev, u32 data)
2904{ 2984{
2905 struct qeth_card *card = dev->ml_priv; 2985 struct qeth_card *card = dev->ml_priv;
2906 enum qeth_card_states old_state;
2907 enum qeth_checksum_types csum_type; 2986 enum qeth_checksum_types csum_type;
2908 2987
2909 if ((card->state != CARD_STATE_UP) &&
2910 (card->state != CARD_STATE_DOWN))
2911 return -EPERM;
2912
2913 if (data) 2988 if (data)
2914 csum_type = HW_CHECKSUMMING; 2989 csum_type = HW_CHECKSUMMING;
2915 else 2990 else
2916 csum_type = SW_CHECKSUMMING; 2991 csum_type = SW_CHECKSUMMING;
2917 2992
2918 if (card->options.checksum_type != csum_type) { 2993 return qeth_l3_set_rx_csum(card, csum_type);
2919 old_state = card->state;
2920 if (card->state == CARD_STATE_UP)
2921 __qeth_l3_set_offline(card->gdev, 1);
2922 card->options.checksum_type = csum_type;
2923 if (old_state == CARD_STATE_UP)
2924 __qeth_l3_set_online(card->gdev, 1);
2925 }
2926 return 0;
2927} 2994}
2928 2995
2929static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data) 2996static int qeth_l3_ethtool_set_tso(struct net_device *dev, u32 data)
2930{ 2997{
2931 struct qeth_card *card = dev->ml_priv; 2998 struct qeth_card *card = dev->ml_priv;
2999 int rc = 0;
2932 3000
2933 if (data) { 3001 if (data) {
2934 if (card->options.large_send == QETH_LARGE_SEND_NO) { 3002 rc = qeth_l3_set_large_send(card, QETH_LARGE_SEND_TSO);
2935 if (card->info.type == QETH_CARD_TYPE_IQD)
2936 return -EPERM;
2937 else
2938 card->options.large_send = QETH_LARGE_SEND_TSO;
2939 dev->features |= NETIF_F_TSO;
2940 }
2941 } else { 3003 } else {
2942 dev->features &= ~NETIF_F_TSO; 3004 dev->features &= ~NETIF_F_TSO;
2943 card->options.large_send = QETH_LARGE_SEND_NO; 3005 card->options.large_send = QETH_LARGE_SEND_NO;
2944 } 3006 }
2945 return 0; 3007 return rc;
2946} 3008}
2947 3009
2948static const struct ethtool_ops qeth_l3_ethtool_ops = { 3010static const struct ethtool_ops qeth_l3_ethtool_ops = {
@@ -2957,7 +3019,7 @@ static const struct ethtool_ops qeth_l3_ethtool_ops = {
2957 .set_tso = qeth_l3_ethtool_set_tso, 3019 .set_tso = qeth_l3_ethtool_set_tso,
2958 .get_strings = qeth_core_get_strings, 3020 .get_strings = qeth_core_get_strings,
2959 .get_ethtool_stats = qeth_core_get_ethtool_stats, 3021 .get_ethtool_stats = qeth_core_get_ethtool_stats,
2960 .get_stats_count = qeth_core_get_stats_count, 3022 .get_sset_count = qeth_core_get_sset_count,
2961 .get_drvinfo = qeth_core_get_drvinfo, 3023 .get_drvinfo = qeth_core_get_drvinfo,
2962 .get_settings = qeth_core_ethtool_get_settings, 3024 .get_settings = qeth_core_ethtool_get_settings,
2963}; 3025};
@@ -3058,6 +3120,7 @@ static int qeth_l3_setup_netdev(struct qeth_card *card)
3058 NETIF_F_HW_VLAN_RX | 3120 NETIF_F_HW_VLAN_RX |
3059 NETIF_F_HW_VLAN_FILTER; 3121 NETIF_F_HW_VLAN_FILTER;
3060 card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE; 3122 card->dev->priv_flags &= ~IFF_XMIT_DST_RELEASE;
3123 card->dev->gso_max_size = 15 * PAGE_SIZE;
3061 3124
3062 SET_NETDEV_DEV(card->dev, &card->gdev->dev); 3125 SET_NETDEV_DEV(card->dev, &card->gdev->dev);
3063 return register_netdev(card->dev); 3126 return register_netdev(card->dev);
@@ -3154,32 +3217,19 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3154 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1); 3217 qeth_set_allowed_threads(card, QETH_RECOVER_THREAD, 1);
3155 3218
3156 recover_flag = card->state; 3219 recover_flag = card->state;
3157 rc = ccw_device_set_online(CARD_RDEV(card));
3158 if (rc) {
3159 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3160 return -EIO;
3161 }
3162 rc = ccw_device_set_online(CARD_WDEV(card));
3163 if (rc) {
3164 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3165 return -EIO;
3166 }
3167 rc = ccw_device_set_online(CARD_DDEV(card));
3168 if (rc) {
3169 QETH_DBF_TEXT_(SETUP, 2, "1err%d", rc);
3170 return -EIO;
3171 }
3172
3173 rc = qeth_core_hardsetup_card(card); 3220 rc = qeth_core_hardsetup_card(card);
3174 if (rc) { 3221 if (rc) {
3175 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc); 3222 QETH_DBF_TEXT_(SETUP, 2, "2err%d", rc);
3223 rc = -ENODEV;
3176 goto out_remove; 3224 goto out_remove;
3177 } 3225 }
3178 3226
3179 qeth_l3_query_ipassists(card, QETH_PROT_IPV4); 3227 qeth_l3_query_ipassists(card, QETH_PROT_IPV4);
3180 3228
3181 if (!card->dev && qeth_l3_setup_netdev(card)) 3229 if (!card->dev && qeth_l3_setup_netdev(card)) {
3230 rc = -ENODEV;
3182 goto out_remove; 3231 goto out_remove;
3232 }
3183 3233
3184 card->state = CARD_STATE_HARDSETUP; 3234 card->state = CARD_STATE_HARDSETUP;
3185 qeth_print_status_message(card); 3235 qeth_print_status_message(card);
@@ -3196,10 +3246,11 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3196 card->lan_online = 0; 3246 card->lan_online = 0;
3197 return 0; 3247 return 0;
3198 } 3248 }
3249 rc = -ENODEV;
3199 goto out_remove; 3250 goto out_remove;
3200 } else 3251 } else
3201 card->lan_online = 1; 3252 card->lan_online = 1;
3202 qeth_set_large_send(card, card->options.large_send); 3253 qeth_l3_set_large_send(card, card->options.large_send);
3203 3254
3204 rc = qeth_l3_setadapter_parms(card); 3255 rc = qeth_l3_setadapter_parms(card);
3205 if (rc) 3256 if (rc)
@@ -3218,6 +3269,7 @@ static int __qeth_l3_set_online(struct ccwgroup_device *gdev, int recovery_mode)
3218 rc = qeth_init_qdio_queues(card); 3269 rc = qeth_init_qdio_queues(card);
3219 if (rc) { 3270 if (rc) {
3220 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc); 3271 QETH_DBF_TEXT_(SETUP, 2, "6err%d", rc);
3272 rc = -ENODEV;
3221 goto out_remove; 3273 goto out_remove;
3222 } 3274 }
3223 card->state = CARD_STATE_SOFTSETUP; 3275 card->state = CARD_STATE_SOFTSETUP;
@@ -3248,7 +3300,7 @@ out_remove:
3248 card->state = CARD_STATE_RECOVER; 3300 card->state = CARD_STATE_RECOVER;
3249 else 3301 else
3250 card->state = CARD_STATE_DOWN; 3302 card->state = CARD_STATE_DOWN;
3251 return -ENODEV; 3303 return rc;
3252} 3304}
3253 3305
3254static int qeth_l3_set_online(struct ccwgroup_device *gdev) 3306static int qeth_l3_set_online(struct ccwgroup_device *gdev)
diff --git a/drivers/s390/net/qeth_l3_sys.c b/drivers/s390/net/qeth_l3_sys.c
index c144b9924d52..3360b0941aa1 100644
--- a/drivers/s390/net/qeth_l3_sys.c
+++ b/drivers/s390/net/qeth_l3_sys.c
@@ -293,31 +293,79 @@ static ssize_t qeth_l3_dev_checksum_store(struct device *dev,
293 struct device_attribute *attr, const char *buf, size_t count) 293 struct device_attribute *attr, const char *buf, size_t count)
294{ 294{
295 struct qeth_card *card = dev_get_drvdata(dev); 295 struct qeth_card *card = dev_get_drvdata(dev);
296 enum qeth_checksum_types csum_type;
296 char *tmp; 297 char *tmp;
298 int rc;
297 299
298 if (!card) 300 if (!card)
299 return -EINVAL; 301 return -EINVAL;
300 302
301 if ((card->state != CARD_STATE_DOWN) &&
302 (card->state != CARD_STATE_RECOVER))
303 return -EPERM;
304
305 tmp = strsep((char **) &buf, "\n"); 303 tmp = strsep((char **) &buf, "\n");
306 if (!strcmp(tmp, "sw_checksumming")) 304 if (!strcmp(tmp, "sw_checksumming"))
307 card->options.checksum_type = SW_CHECKSUMMING; 305 csum_type = SW_CHECKSUMMING;
308 else if (!strcmp(tmp, "hw_checksumming")) 306 else if (!strcmp(tmp, "hw_checksumming"))
309 card->options.checksum_type = HW_CHECKSUMMING; 307 csum_type = HW_CHECKSUMMING;
310 else if (!strcmp(tmp, "no_checksumming")) 308 else if (!strcmp(tmp, "no_checksumming"))
311 card->options.checksum_type = NO_CHECKSUMMING; 309 csum_type = NO_CHECKSUMMING;
312 else { 310 else
313 return -EINVAL; 311 return -EINVAL;
314 } 312
313 rc = qeth_l3_set_rx_csum(card, csum_type);
314 if (rc)
315 return rc;
315 return count; 316 return count;
316} 317}
317 318
318static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show, 319static DEVICE_ATTR(checksumming, 0644, qeth_l3_dev_checksum_show,
319 qeth_l3_dev_checksum_store); 320 qeth_l3_dev_checksum_store);
320 321
322static ssize_t qeth_l3_dev_large_send_show(struct device *dev,
323 struct device_attribute *attr, char *buf)
324{
325 struct qeth_card *card = dev_get_drvdata(dev);
326
327 if (!card)
328 return -EINVAL;
329
330 switch (card->options.large_send) {
331 case QETH_LARGE_SEND_NO:
332 return sprintf(buf, "%s\n", "no");
333 case QETH_LARGE_SEND_TSO:
334 return sprintf(buf, "%s\n", "TSO");
335 default:
336 return sprintf(buf, "%s\n", "N/A");
337 }
338}
339
340static ssize_t qeth_l3_dev_large_send_store(struct device *dev,
341 struct device_attribute *attr, const char *buf, size_t count)
342{
343 struct qeth_card *card = dev_get_drvdata(dev);
344 enum qeth_large_send_types type;
345 int rc = 0;
346 char *tmp;
347
348 if (!card)
349 return -EINVAL;
350 tmp = strsep((char **) &buf, "\n");
351 if (!strcmp(tmp, "no"))
352 type = QETH_LARGE_SEND_NO;
353 else if (!strcmp(tmp, "TSO"))
354 type = QETH_LARGE_SEND_TSO;
355 else
356 return -EINVAL;
357
358 if (card->options.large_send == type)
359 return count;
360 rc = qeth_l3_set_large_send(card, type);
361 if (rc)
362 return rc;
363 return count;
364}
365
366static DEVICE_ATTR(large_send, 0644, qeth_l3_dev_large_send_show,
367 qeth_l3_dev_large_send_store);
368
321static struct attribute *qeth_l3_device_attrs[] = { 369static struct attribute *qeth_l3_device_attrs[] = {
322 &dev_attr_route4.attr, 370 &dev_attr_route4.attr,
323 &dev_attr_route6.attr, 371 &dev_attr_route6.attr,
@@ -325,6 +373,7 @@ static struct attribute *qeth_l3_device_attrs[] = {
325 &dev_attr_broadcast_mode.attr, 373 &dev_attr_broadcast_mode.attr,
326 &dev_attr_canonical_macaddr.attr, 374 &dev_attr_canonical_macaddr.attr,
327 &dev_attr_checksumming.attr, 375 &dev_attr_checksumming.attr,
376 &dev_attr_large_send.attr,
328 NULL, 377 NULL,
329}; 378};
330 379
diff --git a/drivers/scsi/pmcraid.h b/drivers/scsi/pmcraid.h
index 614b3a764fed..3441b3f90827 100644
--- a/drivers/scsi/pmcraid.h
+++ b/drivers/scsi/pmcraid.h
@@ -26,7 +26,6 @@
26#include <linux/completion.h> 26#include <linux/completion.h>
27#include <linux/list.h> 27#include <linux/list.h>
28#include <scsi/scsi.h> 28#include <scsi/scsi.h>
29#include <linux/kref.h>
30#include <scsi/scsi_cmnd.h> 29#include <scsi/scsi_cmnd.h>
31#include <linux/cdev.h> 30#include <linux/cdev.h>
32#include <net/netlink.h> 31#include <net/netlink.h>
diff --git a/drivers/scsi/sg.c b/drivers/scsi/sg.c
index 848b59466850..747a5e5c1276 100644
--- a/drivers/scsi/sg.c
+++ b/drivers/scsi/sg.c
@@ -1185,7 +1185,7 @@ sg_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1185 return VM_FAULT_SIGBUS; 1185 return VM_FAULT_SIGBUS;
1186} 1186}
1187 1187
1188static struct vm_operations_struct sg_mmap_vm_ops = { 1188static const struct vm_operations_struct sg_mmap_vm_ops = {
1189 .fault = sg_vma_fault, 1189 .fault = sg_vma_fault,
1190}; 1190};
1191 1191
@@ -1317,7 +1317,7 @@ static void sg_rq_end_io(struct request *rq, int uptodate)
1317 } 1317 }
1318} 1318}
1319 1319
1320static struct file_operations sg_fops = { 1320static const struct file_operations sg_fops = {
1321 .owner = THIS_MODULE, 1321 .owner = THIS_MODULE,
1322 .read = sg_read, 1322 .read = sg_read,
1323 .write = sg_write, 1323 .write = sg_write,
@@ -2194,9 +2194,11 @@ static int sg_proc_seq_show_int(struct seq_file *s, void *v);
2194static int sg_proc_single_open_adio(struct inode *inode, struct file *file); 2194static int sg_proc_single_open_adio(struct inode *inode, struct file *file);
2195static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer, 2195static ssize_t sg_proc_write_adio(struct file *filp, const char __user *buffer,
2196 size_t count, loff_t *off); 2196 size_t count, loff_t *off);
2197static struct file_operations adio_fops = { 2197static const struct file_operations adio_fops = {
2198 /* .owner, .read and .llseek added in sg_proc_init() */ 2198 .owner = THIS_MODULE,
2199 .open = sg_proc_single_open_adio, 2199 .open = sg_proc_single_open_adio,
2200 .read = seq_read,
2201 .llseek = seq_lseek,
2200 .write = sg_proc_write_adio, 2202 .write = sg_proc_write_adio,
2201 .release = single_release, 2203 .release = single_release,
2202}; 2204};
@@ -2204,23 +2206,32 @@ static struct file_operations adio_fops = {
2204static int sg_proc_single_open_dressz(struct inode *inode, struct file *file); 2206static int sg_proc_single_open_dressz(struct inode *inode, struct file *file);
2205static ssize_t sg_proc_write_dressz(struct file *filp, 2207static ssize_t sg_proc_write_dressz(struct file *filp,
2206 const char __user *buffer, size_t count, loff_t *off); 2208 const char __user *buffer, size_t count, loff_t *off);
2207static struct file_operations dressz_fops = { 2209static const struct file_operations dressz_fops = {
2210 .owner = THIS_MODULE,
2208 .open = sg_proc_single_open_dressz, 2211 .open = sg_proc_single_open_dressz,
2212 .read = seq_read,
2213 .llseek = seq_lseek,
2209 .write = sg_proc_write_dressz, 2214 .write = sg_proc_write_dressz,
2210 .release = single_release, 2215 .release = single_release,
2211}; 2216};
2212 2217
2213static int sg_proc_seq_show_version(struct seq_file *s, void *v); 2218static int sg_proc_seq_show_version(struct seq_file *s, void *v);
2214static int sg_proc_single_open_version(struct inode *inode, struct file *file); 2219static int sg_proc_single_open_version(struct inode *inode, struct file *file);
2215static struct file_operations version_fops = { 2220static const struct file_operations version_fops = {
2221 .owner = THIS_MODULE,
2216 .open = sg_proc_single_open_version, 2222 .open = sg_proc_single_open_version,
2223 .read = seq_read,
2224 .llseek = seq_lseek,
2217 .release = single_release, 2225 .release = single_release,
2218}; 2226};
2219 2227
2220static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v); 2228static int sg_proc_seq_show_devhdr(struct seq_file *s, void *v);
2221static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file); 2229static int sg_proc_single_open_devhdr(struct inode *inode, struct file *file);
2222static struct file_operations devhdr_fops = { 2230static const struct file_operations devhdr_fops = {
2231 .owner = THIS_MODULE,
2223 .open = sg_proc_single_open_devhdr, 2232 .open = sg_proc_single_open_devhdr,
2233 .read = seq_read,
2234 .llseek = seq_lseek,
2224 .release = single_release, 2235 .release = single_release,
2225}; 2236};
2226 2237
@@ -2229,8 +2240,11 @@ static int sg_proc_open_dev(struct inode *inode, struct file *file);
2229static void * dev_seq_start(struct seq_file *s, loff_t *pos); 2240static void * dev_seq_start(struct seq_file *s, loff_t *pos);
2230static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos); 2241static void * dev_seq_next(struct seq_file *s, void *v, loff_t *pos);
2231static void dev_seq_stop(struct seq_file *s, void *v); 2242static void dev_seq_stop(struct seq_file *s, void *v);
2232static struct file_operations dev_fops = { 2243static const struct file_operations dev_fops = {
2244 .owner = THIS_MODULE,
2233 .open = sg_proc_open_dev, 2245 .open = sg_proc_open_dev,
2246 .read = seq_read,
2247 .llseek = seq_lseek,
2234 .release = seq_release, 2248 .release = seq_release,
2235}; 2249};
2236static const struct seq_operations dev_seq_ops = { 2250static const struct seq_operations dev_seq_ops = {
@@ -2242,8 +2256,11 @@ static const struct seq_operations dev_seq_ops = {
2242 2256
2243static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v); 2257static int sg_proc_seq_show_devstrs(struct seq_file *s, void *v);
2244static int sg_proc_open_devstrs(struct inode *inode, struct file *file); 2258static int sg_proc_open_devstrs(struct inode *inode, struct file *file);
2245static struct file_operations devstrs_fops = { 2259static const struct file_operations devstrs_fops = {
2260 .owner = THIS_MODULE,
2246 .open = sg_proc_open_devstrs, 2261 .open = sg_proc_open_devstrs,
2262 .read = seq_read,
2263 .llseek = seq_lseek,
2247 .release = seq_release, 2264 .release = seq_release,
2248}; 2265};
2249static const struct seq_operations devstrs_seq_ops = { 2266static const struct seq_operations devstrs_seq_ops = {
@@ -2255,8 +2272,11 @@ static const struct seq_operations devstrs_seq_ops = {
2255 2272
2256static int sg_proc_seq_show_debug(struct seq_file *s, void *v); 2273static int sg_proc_seq_show_debug(struct seq_file *s, void *v);
2257static int sg_proc_open_debug(struct inode *inode, struct file *file); 2274static int sg_proc_open_debug(struct inode *inode, struct file *file);
2258static struct file_operations debug_fops = { 2275static const struct file_operations debug_fops = {
2276 .owner = THIS_MODULE,
2259 .open = sg_proc_open_debug, 2277 .open = sg_proc_open_debug,
2278 .read = seq_read,
2279 .llseek = seq_lseek,
2260 .release = seq_release, 2280 .release = seq_release,
2261}; 2281};
2262static const struct seq_operations debug_seq_ops = { 2282static const struct seq_operations debug_seq_ops = {
@@ -2269,7 +2289,7 @@ static const struct seq_operations debug_seq_ops = {
2269 2289
2270struct sg_proc_leaf { 2290struct sg_proc_leaf {
2271 const char * name; 2291 const char * name;
2272 struct file_operations * fops; 2292 const struct file_operations * fops;
2273}; 2293};
2274 2294
2275static struct sg_proc_leaf sg_proc_leaf_arr[] = { 2295static struct sg_proc_leaf sg_proc_leaf_arr[] = {
@@ -2295,9 +2315,6 @@ sg_proc_init(void)
2295 for (k = 0; k < num_leaves; ++k) { 2315 for (k = 0; k < num_leaves; ++k) {
2296 leaf = &sg_proc_leaf_arr[k]; 2316 leaf = &sg_proc_leaf_arr[k];
2297 mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO; 2317 mask = leaf->fops->write ? S_IRUGO | S_IWUSR : S_IRUGO;
2298 leaf->fops->owner = THIS_MODULE;
2299 leaf->fops->read = seq_read;
2300 leaf->fops->llseek = seq_lseek;
2301 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops); 2318 proc_create(leaf->name, mask, sg_proc_sgp, leaf->fops);
2302 } 2319 }
2303 return 0; 2320 return 0;
diff --git a/drivers/serial/8250.c b/drivers/serial/8250.c
index 2209620d2349..b1ae774016f1 100644
--- a/drivers/serial/8250.c
+++ b/drivers/serial/8250.c
@@ -64,6 +64,8 @@ static int serial_index(struct uart_port *port)
64 return (serial8250_reg.minor - 64) + port->line; 64 return (serial8250_reg.minor - 64) + port->line;
65} 65}
66 66
67static unsigned int skip_txen_test; /* force skip of txen test at init time */
68
67/* 69/*
68 * Debugging. 70 * Debugging.
69 */ 71 */
@@ -2108,7 +2110,7 @@ static int serial8250_startup(struct uart_port *port)
2108 is variable. So, let's just don't test if we receive 2110 is variable. So, let's just don't test if we receive
2109 TX irq. This way, we'll never enable UART_BUG_TXEN. 2111 TX irq. This way, we'll never enable UART_BUG_TXEN.
2110 */ 2112 */
2111 if (up->port.flags & UPF_NO_TXEN_TEST) 2113 if (skip_txen_test || up->port.flags & UPF_NO_TXEN_TEST)
2112 goto dont_test_tx_en; 2114 goto dont_test_tx_en;
2113 2115
2114 /* 2116 /*
@@ -3248,6 +3250,9 @@ MODULE_PARM_DESC(share_irqs, "Share IRQs with other non-8250/16x50 devices"
3248module_param(nr_uarts, uint, 0644); 3250module_param(nr_uarts, uint, 0644);
3249MODULE_PARM_DESC(nr_uarts, "Maximum number of UARTs supported. (1-" __MODULE_STRING(CONFIG_SERIAL_8250_NR_UARTS) ")"); 3251MODULE_PARM_DESC(nr_uarts, "Maximum number of UARTs supported. (1-" __MODULE_STRING(CONFIG_SERIAL_8250_NR_UARTS) ")");
3250 3252
3253module_param(skip_txen_test, uint, 0644);
3254MODULE_PARM_DESC(skip_txen_test, "Skip checking for the TXEN bug at init time");
3255
3251#ifdef CONFIG_SERIAL_8250_RSA 3256#ifdef CONFIG_SERIAL_8250_RSA
3252module_param_array(probe_rsa, ulong, &probe_rsa_count, 0444); 3257module_param_array(probe_rsa, ulong, &probe_rsa_count, 0444);
3253MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA"); 3258MODULE_PARM_DESC(probe_rsa, "Probe I/O ports for RSA");
diff --git a/drivers/serial/Kconfig b/drivers/serial/Kconfig
index 03422ce878cf..e52257257279 100644
--- a/drivers/serial/Kconfig
+++ b/drivers/serial/Kconfig
@@ -862,7 +862,7 @@ config SERIAL_IMX_CONSOLE
862 862
863config SERIAL_UARTLITE 863config SERIAL_UARTLITE
864 tristate "Xilinx uartlite serial port support" 864 tristate "Xilinx uartlite serial port support"
865 depends on PPC32 || MICROBLAZE 865 depends on PPC32 || MICROBLAZE || MFD_TIMBERDALE
866 select SERIAL_CORE 866 select SERIAL_CORE
867 help 867 help
868 Say Y here if you want to use the Xilinx uartlite serial controller. 868 Say Y here if you want to use the Xilinx uartlite serial controller.
@@ -1458,4 +1458,23 @@ config SERIAL_TIMBERDALE
1458 ---help--- 1458 ---help---
1459 Add support for UART controller on timberdale. 1459 Add support for UART controller on timberdale.
1460 1460
1461config SERIAL_BCM63XX
1462 tristate "bcm63xx serial port support"
1463 select SERIAL_CORE
1464 depends on BCM63XX
1465 help
1466 If you have a bcm63xx CPU, you can enable its onboard
1467 serial port by enabling this options.
1468
1469 To compile this driver as a module, choose M here: the
1470 module will be called bcm963xx_uart.
1471
1472config SERIAL_BCM63XX_CONSOLE
1473 bool "Console on bcm63xx serial port"
1474 depends on SERIAL_BCM63XX=y
1475 select SERIAL_CORE_CONSOLE
1476 help
1477 If you have enabled the serial port on the bcm63xx CPU
1478 you can make it the console by answering Y to this option.
1479
1461endmenu 1480endmenu
diff --git a/drivers/serial/Makefile b/drivers/serial/Makefile
index 97f6fcc8b432..d21d5dd5d048 100644
--- a/drivers/serial/Makefile
+++ b/drivers/serial/Makefile
@@ -34,6 +34,7 @@ obj-$(CONFIG_SERIAL_CLPS711X) += clps711x.o
34obj-$(CONFIG_SERIAL_PXA) += pxa.o 34obj-$(CONFIG_SERIAL_PXA) += pxa.o
35obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o 35obj-$(CONFIG_SERIAL_PNX8XXX) += pnx8xxx_uart.o
36obj-$(CONFIG_SERIAL_SA1100) += sa1100.o 36obj-$(CONFIG_SERIAL_SA1100) += sa1100.o
37obj-$(CONFIG_SERIAL_BCM63XX) += bcm63xx_uart.o
37obj-$(CONFIG_SERIAL_BFIN) += bfin_5xx.o 38obj-$(CONFIG_SERIAL_BFIN) += bfin_5xx.o
38obj-$(CONFIG_SERIAL_BFIN_SPORT) += bfin_sport_uart.o 39obj-$(CONFIG_SERIAL_BFIN_SPORT) += bfin_sport_uart.o
39obj-$(CONFIG_SERIAL_SAMSUNG) += samsung.o 40obj-$(CONFIG_SERIAL_SAMSUNG) += samsung.o
diff --git a/drivers/serial/bcm63xx_uart.c b/drivers/serial/bcm63xx_uart.c
new file mode 100644
index 000000000000..beddaa6e9069
--- /dev/null
+++ b/drivers/serial/bcm63xx_uart.c
@@ -0,0 +1,890 @@
1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * Derived from many drivers using generic_serial interface.
7 *
8 * Copyright (C) 2008 Maxime Bizon <mbizon@freebox.fr>
9 *
10 * Serial driver for BCM63xx integrated UART.
11 *
12 * Hardware flow control was _not_ tested since I only have RX/TX on
13 * my board.
14 */
15
16#if defined(CONFIG_SERIAL_BCM63XX_CONSOLE) && defined(CONFIG_MAGIC_SYSRQ)
17#define SUPPORT_SYSRQ
18#endif
19
20#include <linux/kernel.h>
21#include <linux/platform_device.h>
22#include <linux/init.h>
23#include <linux/delay.h>
24#include <linux/module.h>
25#include <linux/console.h>
26#include <linux/clk.h>
27#include <linux/tty.h>
28#include <linux/tty_flip.h>
29#include <linux/sysrq.h>
30#include <linux/serial.h>
31#include <linux/serial_core.h>
32
33#include <bcm63xx_clk.h>
34#include <bcm63xx_irq.h>
35#include <bcm63xx_regs.h>
36#include <bcm63xx_io.h>
37
38#define BCM63XX_NR_UARTS 1
39
40static struct uart_port ports[BCM63XX_NR_UARTS];
41
42/*
43 * rx interrupt mask / stat
44 *
45 * mask:
46 * - rx fifo full
47 * - rx fifo above threshold
48 * - rx fifo not empty for too long
49 */
50#define UART_RX_INT_MASK (UART_IR_MASK(UART_IR_RXOVER) | \
51 UART_IR_MASK(UART_IR_RXTHRESH) | \
52 UART_IR_MASK(UART_IR_RXTIMEOUT))
53
54#define UART_RX_INT_STAT (UART_IR_STAT(UART_IR_RXOVER) | \
55 UART_IR_STAT(UART_IR_RXTHRESH) | \
56 UART_IR_STAT(UART_IR_RXTIMEOUT))
57
58/*
59 * tx interrupt mask / stat
60 *
61 * mask:
62 * - tx fifo empty
63 * - tx fifo below threshold
64 */
65#define UART_TX_INT_MASK (UART_IR_MASK(UART_IR_TXEMPTY) | \
66 UART_IR_MASK(UART_IR_TXTRESH))
67
68#define UART_TX_INT_STAT (UART_IR_STAT(UART_IR_TXEMPTY) | \
69 UART_IR_STAT(UART_IR_TXTRESH))
70
71/*
72 * external input interrupt
73 *
74 * mask: any edge on CTS, DCD
75 */
76#define UART_EXTINP_INT_MASK (UART_EXTINP_IRMASK(UART_EXTINP_IR_CTS) | \
77 UART_EXTINP_IRMASK(UART_EXTINP_IR_DCD))
78
79/*
80 * handy uart register accessor
81 */
82static inline unsigned int bcm_uart_readl(struct uart_port *port,
83 unsigned int offset)
84{
85 return bcm_readl(port->membase + offset);
86}
87
88static inline void bcm_uart_writel(struct uart_port *port,
89 unsigned int value, unsigned int offset)
90{
91 bcm_writel(value, port->membase + offset);
92}
93
94/*
95 * serial core request to check if uart tx fifo is empty
96 */
97static unsigned int bcm_uart_tx_empty(struct uart_port *port)
98{
99 unsigned int val;
100
101 val = bcm_uart_readl(port, UART_IR_REG);
102 return (val & UART_IR_STAT(UART_IR_TXEMPTY)) ? 1 : 0;
103}
104
105/*
106 * serial core request to set RTS and DTR pin state and loopback mode
107 */
108static void bcm_uart_set_mctrl(struct uart_port *port, unsigned int mctrl)
109{
110 unsigned int val;
111
112 val = bcm_uart_readl(port, UART_MCTL_REG);
113 val &= ~(UART_MCTL_DTR_MASK | UART_MCTL_RTS_MASK);
114 /* invert of written value is reflected on the pin */
115 if (!(mctrl & TIOCM_DTR))
116 val |= UART_MCTL_DTR_MASK;
117 if (!(mctrl & TIOCM_RTS))
118 val |= UART_MCTL_RTS_MASK;
119 bcm_uart_writel(port, val, UART_MCTL_REG);
120
121 val = bcm_uart_readl(port, UART_CTL_REG);
122 if (mctrl & TIOCM_LOOP)
123 val |= UART_CTL_LOOPBACK_MASK;
124 else
125 val &= ~UART_CTL_LOOPBACK_MASK;
126 bcm_uart_writel(port, val, UART_CTL_REG);
127}
128
129/*
130 * serial core request to return RI, CTS, DCD and DSR pin state
131 */
132static unsigned int bcm_uart_get_mctrl(struct uart_port *port)
133{
134 unsigned int val, mctrl;
135
136 mctrl = 0;
137 val = bcm_uart_readl(port, UART_EXTINP_REG);
138 if (val & UART_EXTINP_RI_MASK)
139 mctrl |= TIOCM_RI;
140 if (val & UART_EXTINP_CTS_MASK)
141 mctrl |= TIOCM_CTS;
142 if (val & UART_EXTINP_DCD_MASK)
143 mctrl |= TIOCM_CD;
144 if (val & UART_EXTINP_DSR_MASK)
145 mctrl |= TIOCM_DSR;
146 return mctrl;
147}
148
149/*
150 * serial core request to disable tx ASAP (used for flow control)
151 */
152static void bcm_uart_stop_tx(struct uart_port *port)
153{
154 unsigned int val;
155
156 val = bcm_uart_readl(port, UART_CTL_REG);
157 val &= ~(UART_CTL_TXEN_MASK);
158 bcm_uart_writel(port, val, UART_CTL_REG);
159
160 val = bcm_uart_readl(port, UART_IR_REG);
161 val &= ~UART_TX_INT_MASK;
162 bcm_uart_writel(port, val, UART_IR_REG);
163}
164
165/*
166 * serial core request to (re)enable tx
167 */
168static void bcm_uart_start_tx(struct uart_port *port)
169{
170 unsigned int val;
171
172 val = bcm_uart_readl(port, UART_IR_REG);
173 val |= UART_TX_INT_MASK;
174 bcm_uart_writel(port, val, UART_IR_REG);
175
176 val = bcm_uart_readl(port, UART_CTL_REG);
177 val |= UART_CTL_TXEN_MASK;
178 bcm_uart_writel(port, val, UART_CTL_REG);
179}
180
181/*
182 * serial core request to stop rx, called before port shutdown
183 */
184static void bcm_uart_stop_rx(struct uart_port *port)
185{
186 unsigned int val;
187
188 val = bcm_uart_readl(port, UART_IR_REG);
189 val &= ~UART_RX_INT_MASK;
190 bcm_uart_writel(port, val, UART_IR_REG);
191}
192
193/*
194 * serial core request to enable modem status interrupt reporting
195 */
196static void bcm_uart_enable_ms(struct uart_port *port)
197{
198 unsigned int val;
199
200 val = bcm_uart_readl(port, UART_IR_REG);
201 val |= UART_IR_MASK(UART_IR_EXTIP);
202 bcm_uart_writel(port, val, UART_IR_REG);
203}
204
205/*
206 * serial core request to start/stop emitting break char
207 */
208static void bcm_uart_break_ctl(struct uart_port *port, int ctl)
209{
210 unsigned long flags;
211 unsigned int val;
212
213 spin_lock_irqsave(&port->lock, flags);
214
215 val = bcm_uart_readl(port, UART_CTL_REG);
216 if (ctl)
217 val |= UART_CTL_XMITBRK_MASK;
218 else
219 val &= ~UART_CTL_XMITBRK_MASK;
220 bcm_uart_writel(port, val, UART_CTL_REG);
221
222 spin_unlock_irqrestore(&port->lock, flags);
223}
224
225/*
226 * return port type in string format
227 */
228static const char *bcm_uart_type(struct uart_port *port)
229{
230 return (port->type == PORT_BCM63XX) ? "bcm63xx_uart" : NULL;
231}
232
233/*
234 * read all chars in rx fifo and send them to core
235 */
236static void bcm_uart_do_rx(struct uart_port *port)
237{
238 struct tty_struct *tty;
239 unsigned int max_count;
240
241 /* limit number of char read in interrupt, should not be
242 * higher than fifo size anyway since we're much faster than
243 * serial port */
244 max_count = 32;
245 tty = port->info->port.tty;
246 do {
247 unsigned int iestat, c, cstat;
248 char flag;
249
250 /* get overrun/fifo empty information from ier
251 * register */
252 iestat = bcm_uart_readl(port, UART_IR_REG);
253 if (!(iestat & UART_IR_STAT(UART_IR_RXNOTEMPTY)))
254 break;
255
256 cstat = c = bcm_uart_readl(port, UART_FIFO_REG);
257 port->icount.rx++;
258 flag = TTY_NORMAL;
259 c &= 0xff;
260
261 if (unlikely((cstat & UART_FIFO_ANYERR_MASK))) {
262 /* do stats first */
263 if (cstat & UART_FIFO_BRKDET_MASK) {
264 port->icount.brk++;
265 if (uart_handle_break(port))
266 continue;
267 }
268
269 if (cstat & UART_FIFO_PARERR_MASK)
270 port->icount.parity++;
271 if (cstat & UART_FIFO_FRAMEERR_MASK)
272 port->icount.frame++;
273
274 /* update flag wrt read_status_mask */
275 cstat &= port->read_status_mask;
276 if (cstat & UART_FIFO_BRKDET_MASK)
277 flag = TTY_BREAK;
278 if (cstat & UART_FIFO_FRAMEERR_MASK)
279 flag = TTY_FRAME;
280 if (cstat & UART_FIFO_PARERR_MASK)
281 flag = TTY_PARITY;
282 }
283
284 if (uart_handle_sysrq_char(port, c))
285 continue;
286
287 if (unlikely(iestat & UART_IR_STAT(UART_IR_RXOVER))) {
288 port->icount.overrun++;
289 tty_insert_flip_char(tty, 0, TTY_OVERRUN);
290 }
291
292 if ((cstat & port->ignore_status_mask) == 0)
293 tty_insert_flip_char(tty, c, flag);
294
295 } while (--max_count);
296
297 tty_flip_buffer_push(tty);
298}
299
300/*
301 * fill tx fifo with chars to send, stop when fifo is about to be full
302 * or when all chars have been sent.
303 */
304static void bcm_uart_do_tx(struct uart_port *port)
305{
306 struct circ_buf *xmit;
307 unsigned int val, max_count;
308
309 if (port->x_char) {
310 bcm_uart_writel(port, port->x_char, UART_FIFO_REG);
311 port->icount.tx++;
312 port->x_char = 0;
313 return;
314 }
315
316 if (uart_tx_stopped(port)) {
317 bcm_uart_stop_tx(port);
318 return;
319 }
320
321 xmit = &port->info->xmit;
322 if (uart_circ_empty(xmit))
323 goto txq_empty;
324
325 val = bcm_uart_readl(port, UART_MCTL_REG);
326 val = (val & UART_MCTL_TXFIFOFILL_MASK) >> UART_MCTL_TXFIFOFILL_SHIFT;
327 max_count = port->fifosize - val;
328
329 while (max_count--) {
330 unsigned int c;
331
332 c = xmit->buf[xmit->tail];
333 bcm_uart_writel(port, c, UART_FIFO_REG);
334 xmit->tail = (xmit->tail + 1) & (UART_XMIT_SIZE - 1);
335 port->icount.tx++;
336 if (uart_circ_empty(xmit))
337 break;
338 }
339
340 if (uart_circ_chars_pending(xmit) < WAKEUP_CHARS)
341 uart_write_wakeup(port);
342
343 if (uart_circ_empty(xmit))
344 goto txq_empty;
345 return;
346
347txq_empty:
348 /* nothing to send, disable transmit interrupt */
349 val = bcm_uart_readl(port, UART_IR_REG);
350 val &= ~UART_TX_INT_MASK;
351 bcm_uart_writel(port, val, UART_IR_REG);
352 return;
353}
354
355/*
356 * process uart interrupt
357 */
358static irqreturn_t bcm_uart_interrupt(int irq, void *dev_id)
359{
360 struct uart_port *port;
361 unsigned int irqstat;
362
363 port = dev_id;
364 spin_lock(&port->lock);
365
366 irqstat = bcm_uart_readl(port, UART_IR_REG);
367 if (irqstat & UART_RX_INT_STAT)
368 bcm_uart_do_rx(port);
369
370 if (irqstat & UART_TX_INT_STAT)
371 bcm_uart_do_tx(port);
372
373 if (irqstat & UART_IR_MASK(UART_IR_EXTIP)) {
374 unsigned int estat;
375
376 estat = bcm_uart_readl(port, UART_EXTINP_REG);
377 if (estat & UART_EXTINP_IRSTAT(UART_EXTINP_IR_CTS))
378 uart_handle_cts_change(port,
379 estat & UART_EXTINP_CTS_MASK);
380 if (estat & UART_EXTINP_IRSTAT(UART_EXTINP_IR_DCD))
381 uart_handle_dcd_change(port,
382 estat & UART_EXTINP_DCD_MASK);
383 }
384
385 spin_unlock(&port->lock);
386 return IRQ_HANDLED;
387}
388
389/*
390 * enable rx & tx operation on uart
391 */
392static void bcm_uart_enable(struct uart_port *port)
393{
394 unsigned int val;
395
396 val = bcm_uart_readl(port, UART_CTL_REG);
397 val |= (UART_CTL_BRGEN_MASK | UART_CTL_TXEN_MASK | UART_CTL_RXEN_MASK);
398 bcm_uart_writel(port, val, UART_CTL_REG);
399}
400
401/*
402 * disable rx & tx operation on uart
403 */
404static void bcm_uart_disable(struct uart_port *port)
405{
406 unsigned int val;
407
408 val = bcm_uart_readl(port, UART_CTL_REG);
409 val &= ~(UART_CTL_BRGEN_MASK | UART_CTL_TXEN_MASK |
410 UART_CTL_RXEN_MASK);
411 bcm_uart_writel(port, val, UART_CTL_REG);
412}
413
414/*
415 * clear all unread data in rx fifo and unsent data in tx fifo
416 */
417static void bcm_uart_flush(struct uart_port *port)
418{
419 unsigned int val;
420
421 /* empty rx and tx fifo */
422 val = bcm_uart_readl(port, UART_CTL_REG);
423 val |= UART_CTL_RSTRXFIFO_MASK | UART_CTL_RSTTXFIFO_MASK;
424 bcm_uart_writel(port, val, UART_CTL_REG);
425
426 /* read any pending char to make sure all irq status are
427 * cleared */
428 (void)bcm_uart_readl(port, UART_FIFO_REG);
429}
430
431/*
432 * serial core request to initialize uart and start rx operation
433 */
434static int bcm_uart_startup(struct uart_port *port)
435{
436 unsigned int val;
437 int ret;
438
439 /* mask all irq and flush port */
440 bcm_uart_disable(port);
441 bcm_uart_writel(port, 0, UART_IR_REG);
442 bcm_uart_flush(port);
443
444 /* clear any pending external input interrupt */
445 (void)bcm_uart_readl(port, UART_EXTINP_REG);
446
447 /* set rx/tx fifo thresh to fifo half size */
448 val = bcm_uart_readl(port, UART_MCTL_REG);
449 val &= ~(UART_MCTL_RXFIFOTHRESH_MASK | UART_MCTL_TXFIFOTHRESH_MASK);
450 val |= (port->fifosize / 2) << UART_MCTL_RXFIFOTHRESH_SHIFT;
451 val |= (port->fifosize / 2) << UART_MCTL_TXFIFOTHRESH_SHIFT;
452 bcm_uart_writel(port, val, UART_MCTL_REG);
453
454 /* set rx fifo timeout to 1 char time */
455 val = bcm_uart_readl(port, UART_CTL_REG);
456 val &= ~UART_CTL_RXTMOUTCNT_MASK;
457 val |= 1 << UART_CTL_RXTMOUTCNT_SHIFT;
458 bcm_uart_writel(port, val, UART_CTL_REG);
459
460 /* report any edge on dcd and cts */
461 val = UART_EXTINP_INT_MASK;
462 val |= UART_EXTINP_DCD_NOSENSE_MASK;
463 val |= UART_EXTINP_CTS_NOSENSE_MASK;
464 bcm_uart_writel(port, val, UART_EXTINP_REG);
465
466 /* register irq and enable rx interrupts */
467 ret = request_irq(port->irq, bcm_uart_interrupt, 0,
468 bcm_uart_type(port), port);
469 if (ret)
470 return ret;
471 bcm_uart_writel(port, UART_RX_INT_MASK, UART_IR_REG);
472 bcm_uart_enable(port);
473 return 0;
474}
475
476/*
477 * serial core request to flush & disable uart
478 */
479static void bcm_uart_shutdown(struct uart_port *port)
480{
481 unsigned long flags;
482
483 spin_lock_irqsave(&port->lock, flags);
484 bcm_uart_writel(port, 0, UART_IR_REG);
485 spin_unlock_irqrestore(&port->lock, flags);
486
487 bcm_uart_disable(port);
488 bcm_uart_flush(port);
489 free_irq(port->irq, port);
490}
491
492/*
493 * serial core request to change current uart setting
494 */
495static void bcm_uart_set_termios(struct uart_port *port,
496 struct ktermios *new,
497 struct ktermios *old)
498{
499 unsigned int ctl, baud, quot, ier;
500 unsigned long flags;
501
502 spin_lock_irqsave(&port->lock, flags);
503
504 /* disable uart while changing speed */
505 bcm_uart_disable(port);
506 bcm_uart_flush(port);
507
508 /* update Control register */
509 ctl = bcm_uart_readl(port, UART_CTL_REG);
510 ctl &= ~UART_CTL_BITSPERSYM_MASK;
511
512 switch (new->c_cflag & CSIZE) {
513 case CS5:
514 ctl |= (0 << UART_CTL_BITSPERSYM_SHIFT);
515 break;
516 case CS6:
517 ctl |= (1 << UART_CTL_BITSPERSYM_SHIFT);
518 break;
519 case CS7:
520 ctl |= (2 << UART_CTL_BITSPERSYM_SHIFT);
521 break;
522 default:
523 ctl |= (3 << UART_CTL_BITSPERSYM_SHIFT);
524 break;
525 }
526
527 ctl &= ~UART_CTL_STOPBITS_MASK;
528 if (new->c_cflag & CSTOPB)
529 ctl |= UART_CTL_STOPBITS_2;
530 else
531 ctl |= UART_CTL_STOPBITS_1;
532
533 ctl &= ~(UART_CTL_RXPAREN_MASK | UART_CTL_TXPAREN_MASK);
534 if (new->c_cflag & PARENB)
535 ctl |= (UART_CTL_RXPAREN_MASK | UART_CTL_TXPAREN_MASK);
536 ctl &= ~(UART_CTL_RXPAREVEN_MASK | UART_CTL_TXPAREVEN_MASK);
537 if (new->c_cflag & PARODD)
538 ctl |= (UART_CTL_RXPAREVEN_MASK | UART_CTL_TXPAREVEN_MASK);
539 bcm_uart_writel(port, ctl, UART_CTL_REG);
540
541 /* update Baudword register */
542 baud = uart_get_baud_rate(port, new, old, 0, port->uartclk / 16);
543 quot = uart_get_divisor(port, baud) - 1;
544 bcm_uart_writel(port, quot, UART_BAUD_REG);
545
546 /* update Interrupt register */
547 ier = bcm_uart_readl(port, UART_IR_REG);
548
549 ier &= ~UART_IR_MASK(UART_IR_EXTIP);
550 if (UART_ENABLE_MS(port, new->c_cflag))
551 ier |= UART_IR_MASK(UART_IR_EXTIP);
552
553 bcm_uart_writel(port, ier, UART_IR_REG);
554
555 /* update read/ignore mask */
556 port->read_status_mask = UART_FIFO_VALID_MASK;
557 if (new->c_iflag & INPCK) {
558 port->read_status_mask |= UART_FIFO_FRAMEERR_MASK;
559 port->read_status_mask |= UART_FIFO_PARERR_MASK;
560 }
561 if (new->c_iflag & (BRKINT))
562 port->read_status_mask |= UART_FIFO_BRKDET_MASK;
563
564 port->ignore_status_mask = 0;
565 if (new->c_iflag & IGNPAR)
566 port->ignore_status_mask |= UART_FIFO_PARERR_MASK;
567 if (new->c_iflag & IGNBRK)
568 port->ignore_status_mask |= UART_FIFO_BRKDET_MASK;
569 if (!(new->c_cflag & CREAD))
570 port->ignore_status_mask |= UART_FIFO_VALID_MASK;
571
572 uart_update_timeout(port, new->c_cflag, baud);
573 bcm_uart_enable(port);
574 spin_unlock_irqrestore(&port->lock, flags);
575}
576
577/*
578 * serial core request to claim uart iomem
579 */
580static int bcm_uart_request_port(struct uart_port *port)
581{
582 unsigned int size;
583
584 size = RSET_UART_SIZE;
585 if (!request_mem_region(port->mapbase, size, "bcm63xx")) {
586 dev_err(port->dev, "Memory region busy\n");
587 return -EBUSY;
588 }
589
590 port->membase = ioremap(port->mapbase, size);
591 if (!port->membase) {
592 dev_err(port->dev, "Unable to map registers\n");
593 release_mem_region(port->mapbase, size);
594 return -EBUSY;
595 }
596 return 0;
597}
598
599/*
600 * serial core request to release uart iomem
601 */
602static void bcm_uart_release_port(struct uart_port *port)
603{
604 release_mem_region(port->mapbase, RSET_UART_SIZE);
605 iounmap(port->membase);
606}
607
608/*
609 * serial core request to do any port required autoconfiguration
610 */
611static void bcm_uart_config_port(struct uart_port *port, int flags)
612{
613 if (flags & UART_CONFIG_TYPE) {
614 if (bcm_uart_request_port(port))
615 return;
616 port->type = PORT_BCM63XX;
617 }
618}
619
620/*
621 * serial core request to check that port information in serinfo are
622 * suitable
623 */
624static int bcm_uart_verify_port(struct uart_port *port,
625 struct serial_struct *serinfo)
626{
627 if (port->type != PORT_BCM63XX)
628 return -EINVAL;
629 if (port->irq != serinfo->irq)
630 return -EINVAL;
631 if (port->iotype != serinfo->io_type)
632 return -EINVAL;
633 if (port->mapbase != (unsigned long)serinfo->iomem_base)
634 return -EINVAL;
635 return 0;
636}
637
638/* serial core callbacks */
639static struct uart_ops bcm_uart_ops = {
640 .tx_empty = bcm_uart_tx_empty,
641 .get_mctrl = bcm_uart_get_mctrl,
642 .set_mctrl = bcm_uart_set_mctrl,
643 .start_tx = bcm_uart_start_tx,
644 .stop_tx = bcm_uart_stop_tx,
645 .stop_rx = bcm_uart_stop_rx,
646 .enable_ms = bcm_uart_enable_ms,
647 .break_ctl = bcm_uart_break_ctl,
648 .startup = bcm_uart_startup,
649 .shutdown = bcm_uart_shutdown,
650 .set_termios = bcm_uart_set_termios,
651 .type = bcm_uart_type,
652 .release_port = bcm_uart_release_port,
653 .request_port = bcm_uart_request_port,
654 .config_port = bcm_uart_config_port,
655 .verify_port = bcm_uart_verify_port,
656};
657
658
659
660#ifdef CONFIG_SERIAL_BCM63XX_CONSOLE
661static inline void wait_for_xmitr(struct uart_port *port)
662{
663 unsigned int tmout;
664
665 /* Wait up to 10ms for the character(s) to be sent. */
666 tmout = 10000;
667 while (--tmout) {
668 unsigned int val;
669
670 val = bcm_uart_readl(port, UART_IR_REG);
671 if (val & UART_IR_STAT(UART_IR_TXEMPTY))
672 break;
673 udelay(1);
674 }
675
676 /* Wait up to 1s for flow control if necessary */
677 if (port->flags & UPF_CONS_FLOW) {
678 tmout = 1000000;
679 while (--tmout) {
680 unsigned int val;
681
682 val = bcm_uart_readl(port, UART_EXTINP_REG);
683 if (val & UART_EXTINP_CTS_MASK)
684 break;
685 udelay(1);
686 }
687 }
688}
689
690/*
691 * output given char
692 */
693static void bcm_console_putchar(struct uart_port *port, int ch)
694{
695 wait_for_xmitr(port);
696 bcm_uart_writel(port, ch, UART_FIFO_REG);
697}
698
699/*
700 * console core request to output given string
701 */
702static void bcm_console_write(struct console *co, const char *s,
703 unsigned int count)
704{
705 struct uart_port *port;
706 unsigned long flags;
707 int locked;
708
709 port = &ports[co->index];
710
711 local_irq_save(flags);
712 if (port->sysrq) {
713 /* bcm_uart_interrupt() already took the lock */
714 locked = 0;
715 } else if (oops_in_progress) {
716 locked = spin_trylock(&port->lock);
717 } else {
718 spin_lock(&port->lock);
719 locked = 1;
720 }
721
722 /* call helper to deal with \r\n */
723 uart_console_write(port, s, count, bcm_console_putchar);
724
725 /* and wait for char to be transmitted */
726 wait_for_xmitr(port);
727
728 if (locked)
729 spin_unlock(&port->lock);
730 local_irq_restore(flags);
731}
732
733/*
734 * console core request to setup given console, find matching uart
735 * port and setup it.
736 */
737static int bcm_console_setup(struct console *co, char *options)
738{
739 struct uart_port *port;
740 int baud = 9600;
741 int bits = 8;
742 int parity = 'n';
743 int flow = 'n';
744
745 if (co->index < 0 || co->index >= BCM63XX_NR_UARTS)
746 return -EINVAL;
747 port = &ports[co->index];
748 if (!port->membase)
749 return -ENODEV;
750 if (options)
751 uart_parse_options(options, &baud, &parity, &bits, &flow);
752
753 return uart_set_options(port, co, baud, parity, bits, flow);
754}
755
756static struct uart_driver bcm_uart_driver;
757
758static struct console bcm63xx_console = {
759 .name = "ttyS",
760 .write = bcm_console_write,
761 .device = uart_console_device,
762 .setup = bcm_console_setup,
763 .flags = CON_PRINTBUFFER,
764 .index = -1,
765 .data = &bcm_uart_driver,
766};
767
768static int __init bcm63xx_console_init(void)
769{
770 register_console(&bcm63xx_console);
771 return 0;
772}
773
774console_initcall(bcm63xx_console_init);
775
776#define BCM63XX_CONSOLE (&bcm63xx_console)
777#else
778#define BCM63XX_CONSOLE NULL
779#endif /* CONFIG_SERIAL_BCM63XX_CONSOLE */
780
781static struct uart_driver bcm_uart_driver = {
782 .owner = THIS_MODULE,
783 .driver_name = "bcm63xx_uart",
784 .dev_name = "ttyS",
785 .major = TTY_MAJOR,
786 .minor = 64,
787 .nr = 1,
788 .cons = BCM63XX_CONSOLE,
789};
790
791/*
792 * platform driver probe/remove callback
793 */
794static int __devinit bcm_uart_probe(struct platform_device *pdev)
795{
796 struct resource *res_mem, *res_irq;
797 struct uart_port *port;
798 struct clk *clk;
799 int ret;
800
801 if (pdev->id < 0 || pdev->id >= BCM63XX_NR_UARTS)
802 return -EINVAL;
803
804 if (ports[pdev->id].membase)
805 return -EBUSY;
806
807 res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
808 if (!res_mem)
809 return -ENODEV;
810
811 res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
812 if (!res_irq)
813 return -ENODEV;
814
815 clk = clk_get(&pdev->dev, "periph");
816 if (IS_ERR(clk))
817 return -ENODEV;
818
819 port = &ports[pdev->id];
820 memset(port, 0, sizeof(*port));
821 port->iotype = UPIO_MEM;
822 port->mapbase = res_mem->start;
823 port->irq = res_irq->start;
824 port->ops = &bcm_uart_ops;
825 port->flags = UPF_BOOT_AUTOCONF;
826 port->dev = &pdev->dev;
827 port->fifosize = 16;
828 port->uartclk = clk_get_rate(clk) / 2;
829 clk_put(clk);
830
831 ret = uart_add_one_port(&bcm_uart_driver, port);
832 if (ret) {
833 kfree(port);
834 return ret;
835 }
836 platform_set_drvdata(pdev, port);
837 return 0;
838}
839
840static int __devexit bcm_uart_remove(struct platform_device *pdev)
841{
842 struct uart_port *port;
843
844 port = platform_get_drvdata(pdev);
845 uart_remove_one_port(&bcm_uart_driver, port);
846 platform_set_drvdata(pdev, NULL);
847 /* mark port as free */
848 ports[pdev->id].membase = 0;
849 return 0;
850}
851
852/*
853 * platform driver stuff
854 */
855static struct platform_driver bcm_uart_platform_driver = {
856 .probe = bcm_uart_probe,
857 .remove = __devexit_p(bcm_uart_remove),
858 .driver = {
859 .owner = THIS_MODULE,
860 .name = "bcm63xx_uart",
861 },
862};
863
864static int __init bcm_uart_init(void)
865{
866 int ret;
867
868 ret = uart_register_driver(&bcm_uart_driver);
869 if (ret)
870 return ret;
871
872 ret = platform_driver_register(&bcm_uart_platform_driver);
873 if (ret)
874 uart_unregister_driver(&bcm_uart_driver);
875
876 return ret;
877}
878
879static void __exit bcm_uart_exit(void)
880{
881 platform_driver_unregister(&bcm_uart_platform_driver);
882 uart_unregister_driver(&bcm_uart_driver);
883}
884
885module_init(bcm_uart_init);
886module_exit(bcm_uart_exit);
887
888MODULE_AUTHOR("Maxime Bizon <mbizon@freebox.fr>");
889MODULE_DESCRIPTION("Broadcom 63<xx integrated uart driver");
890MODULE_LICENSE("GPL");
diff --git a/drivers/serial/cpm_uart/cpm_uart_core.c b/drivers/serial/cpm_uart/cpm_uart_core.c
index 8d349b23249a..300cea768d74 100644
--- a/drivers/serial/cpm_uart/cpm_uart_core.c
+++ b/drivers/serial/cpm_uart/cpm_uart_core.c
@@ -649,7 +649,7 @@ static int cpm_uart_tx_pump(struct uart_port *port)
649 u8 *p; 649 u8 *p;
650 int count; 650 int count;
651 struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port; 651 struct uart_cpm_port *pinfo = (struct uart_cpm_port *)port;
652 struct circ_buf *xmit = &port->info->xmit; 652 struct circ_buf *xmit = &port->state->xmit;
653 653
654 /* Handle xon/xoff */ 654 /* Handle xon/xoff */
655 if (port->x_char) { 655 if (port->x_char) {
diff --git a/drivers/serial/crisv10.c b/drivers/serial/crisv10.c
index 7be52fe288eb..31f172397af3 100644
--- a/drivers/serial/crisv10.c
+++ b/drivers/serial/crisv10.c
@@ -18,6 +18,7 @@ static char *serial_version = "$Revision: 1.25 $";
18#include <linux/tty.h> 18#include <linux/tty.h>
19#include <linux/tty_flip.h> 19#include <linux/tty_flip.h>
20#include <linux/major.h> 20#include <linux/major.h>
21#include <linux/smp_lock.h>
21#include <linux/string.h> 22#include <linux/string.h>
22#include <linux/fcntl.h> 23#include <linux/fcntl.h>
23#include <linux/mm.h> 24#include <linux/mm.h>
diff --git a/drivers/serial/icom.c b/drivers/serial/icom.c
index 2d7feecaf492..0028b6f89ce6 100644
--- a/drivers/serial/icom.c
+++ b/drivers/serial/icom.c
@@ -307,7 +307,7 @@ static void stop_processor(struct icom_port *icom_port)
307 if (port < 4) { 307 if (port < 4) {
308 temp = readl(stop_proc[port].global_control_reg); 308 temp = readl(stop_proc[port].global_control_reg);
309 temp = 309 temp =
310 (temp & ~start_proc[port].processor_id) | stop_proc[port].processor_id; 310 (temp & ~start_proc[port].processor_id) | stop_proc[port].processor_id;
311 writel(temp, stop_proc[port].global_control_reg); 311 writel(temp, stop_proc[port].global_control_reg);
312 312
313 /* write flush */ 313 /* write flush */
@@ -336,7 +336,7 @@ static void start_processor(struct icom_port *icom_port)
336 if (port < 4) { 336 if (port < 4) {
337 temp = readl(start_proc[port].global_control_reg); 337 temp = readl(start_proc[port].global_control_reg);
338 temp = 338 temp =
339 (temp & ~stop_proc[port].processor_id) | start_proc[port].processor_id; 339 (temp & ~stop_proc[port].processor_id) | start_proc[port].processor_id;
340 writel(temp, start_proc[port].global_control_reg); 340 writel(temp, start_proc[port].global_control_reg);
341 341
342 /* write flush */ 342 /* write flush */
@@ -509,8 +509,8 @@ static void load_code(struct icom_port *icom_port)
509 dev_err(&icom_port->adapter->pci_dev->dev,"Port not opertional\n"); 509 dev_err(&icom_port->adapter->pci_dev->dev,"Port not opertional\n");
510 } 510 }
511 511
512 if (new_page != NULL) 512 if (new_page != NULL)
513 pci_free_consistent(dev, 4096, new_page, temp_pci); 513 pci_free_consistent(dev, 4096, new_page, temp_pci);
514} 514}
515 515
516static int startup(struct icom_port *icom_port) 516static int startup(struct icom_port *icom_port)
@@ -1493,15 +1493,15 @@ static int __devinit icom_probe(struct pci_dev *dev,
1493 const struct pci_device_id *ent) 1493 const struct pci_device_id *ent)
1494{ 1494{
1495 int index; 1495 int index;
1496 unsigned int command_reg; 1496 unsigned int command_reg;
1497 int retval; 1497 int retval;
1498 struct icom_adapter *icom_adapter; 1498 struct icom_adapter *icom_adapter;
1499 struct icom_port *icom_port; 1499 struct icom_port *icom_port;
1500 1500
1501 retval = pci_enable_device(dev); 1501 retval = pci_enable_device(dev);
1502 if (retval) { 1502 if (retval) {
1503 dev_err(&dev->dev, "Device enable FAILED\n"); 1503 dev_err(&dev->dev, "Device enable FAILED\n");
1504 return retval; 1504 return retval;
1505 } 1505 }
1506 1506
1507 if ( (retval = pci_request_regions(dev, "icom"))) { 1507 if ( (retval = pci_request_regions(dev, "icom"))) {
@@ -1510,23 +1510,23 @@ static int __devinit icom_probe(struct pci_dev *dev,
1510 return retval; 1510 return retval;
1511 } 1511 }
1512 1512
1513 pci_set_master(dev); 1513 pci_set_master(dev);
1514 1514
1515 if ( (retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg))) { 1515 if ( (retval = pci_read_config_dword(dev, PCI_COMMAND, &command_reg))) {
1516 dev_err(&dev->dev, "PCI Config read FAILED\n"); 1516 dev_err(&dev->dev, "PCI Config read FAILED\n");
1517 return retval; 1517 return retval;
1518 } 1518 }
1519 1519
1520 pci_write_config_dword(dev, PCI_COMMAND, 1520 pci_write_config_dword(dev, PCI_COMMAND,
1521 command_reg | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER 1521 command_reg | PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER
1522 | PCI_COMMAND_PARITY | PCI_COMMAND_SERR); 1522 | PCI_COMMAND_PARITY | PCI_COMMAND_SERR);
1523 1523
1524 if (ent->driver_data == ADAPTER_V1) { 1524 if (ent->driver_data == ADAPTER_V1) {
1525 pci_write_config_dword(dev, 0x44, 0x8300830A); 1525 pci_write_config_dword(dev, 0x44, 0x8300830A);
1526 } else { 1526 } else {
1527 pci_write_config_dword(dev, 0x44, 0x42004200); 1527 pci_write_config_dword(dev, 0x44, 0x42004200);
1528 pci_write_config_dword(dev, 0x48, 0x42004200); 1528 pci_write_config_dword(dev, 0x48, 0x42004200);
1529 } 1529 }
1530 1530
1531 1531
1532 retval = icom_alloc_adapter(&icom_adapter); 1532 retval = icom_alloc_adapter(&icom_adapter);
@@ -1536,10 +1536,10 @@ static int __devinit icom_probe(struct pci_dev *dev,
1536 goto probe_exit0; 1536 goto probe_exit0;
1537 } 1537 }
1538 1538
1539 icom_adapter->base_addr_pci = pci_resource_start(dev, 0); 1539 icom_adapter->base_addr_pci = pci_resource_start(dev, 0);
1540 icom_adapter->pci_dev = dev; 1540 icom_adapter->pci_dev = dev;
1541 icom_adapter->version = ent->driver_data; 1541 icom_adapter->version = ent->driver_data;
1542 icom_adapter->subsystem_id = ent->subdevice; 1542 icom_adapter->subsystem_id = ent->subdevice;
1543 1543
1544 1544
1545 retval = icom_init_ports(icom_adapter); 1545 retval = icom_init_ports(icom_adapter);
@@ -1548,7 +1548,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
1548 goto probe_exit1; 1548 goto probe_exit1;
1549 } 1549 }
1550 1550
1551 icom_adapter->base_addr = pci_ioremap_bar(dev, 0); 1551 icom_adapter->base_addr = pci_ioremap_bar(dev, 0);
1552 1552
1553 if (!icom_adapter->base_addr) 1553 if (!icom_adapter->base_addr)
1554 goto probe_exit1; 1554 goto probe_exit1;
@@ -1562,7 +1562,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
1562 1562
1563 retval = icom_load_ports(icom_adapter); 1563 retval = icom_load_ports(icom_adapter);
1564 1564
1565 for (index = 0; index < icom_adapter->numb_ports; index++) { 1565 for (index = 0; index < icom_adapter->numb_ports; index++) {
1566 icom_port = &icom_adapter->port_info[index]; 1566 icom_port = &icom_adapter->port_info[index];
1567 1567
1568 if (icom_port->status == ICOM_PORT_ACTIVE) { 1568 if (icom_port->status == ICOM_PORT_ACTIVE) {
@@ -1579,7 +1579,7 @@ static int __devinit icom_probe(struct pci_dev *dev,
1579 icom_port->status = ICOM_PORT_OFF; 1579 icom_port->status = ICOM_PORT_OFF;
1580 dev_err(&dev->dev, "Device add failed\n"); 1580 dev_err(&dev->dev, "Device add failed\n");
1581 } else 1581 } else
1582 dev_info(&dev->dev, "Device added\n"); 1582 dev_info(&dev->dev, "Device added\n");
1583 } 1583 }
1584 } 1584 }
1585 1585
@@ -1595,9 +1595,7 @@ probe_exit0:
1595 pci_release_regions(dev); 1595 pci_release_regions(dev);
1596 pci_disable_device(dev); 1596 pci_disable_device(dev);
1597 1597
1598 return retval; 1598 return retval;
1599
1600
1601} 1599}
1602 1600
1603static void __devexit icom_remove(struct pci_dev *dev) 1601static void __devexit icom_remove(struct pci_dev *dev)
diff --git a/drivers/serial/pxa.c b/drivers/serial/pxa.c
index 6443b7ff274a..b8629d74f6a2 100644
--- a/drivers/serial/pxa.c
+++ b/drivers/serial/pxa.c
@@ -726,9 +726,10 @@ static struct uart_driver serial_pxa_reg = {
726 .cons = PXA_CONSOLE, 726 .cons = PXA_CONSOLE,
727}; 727};
728 728
729static int serial_pxa_suspend(struct platform_device *dev, pm_message_t state) 729#ifdef CONFIG_PM
730static int serial_pxa_suspend(struct device *dev)
730{ 731{
731 struct uart_pxa_port *sport = platform_get_drvdata(dev); 732 struct uart_pxa_port *sport = dev_get_drvdata(dev);
732 733
733 if (sport) 734 if (sport)
734 uart_suspend_port(&serial_pxa_reg, &sport->port); 735 uart_suspend_port(&serial_pxa_reg, &sport->port);
@@ -736,9 +737,9 @@ static int serial_pxa_suspend(struct platform_device *dev, pm_message_t state)
736 return 0; 737 return 0;
737} 738}
738 739
739static int serial_pxa_resume(struct platform_device *dev) 740static int serial_pxa_resume(struct device *dev)
740{ 741{
741 struct uart_pxa_port *sport = platform_get_drvdata(dev); 742 struct uart_pxa_port *sport = dev_get_drvdata(dev);
742 743
743 if (sport) 744 if (sport)
744 uart_resume_port(&serial_pxa_reg, &sport->port); 745 uart_resume_port(&serial_pxa_reg, &sport->port);
@@ -746,6 +747,12 @@ static int serial_pxa_resume(struct platform_device *dev)
746 return 0; 747 return 0;
747} 748}
748 749
750static struct dev_pm_ops serial_pxa_pm_ops = {
751 .suspend = serial_pxa_suspend,
752 .resume = serial_pxa_resume,
753};
754#endif
755
749static int serial_pxa_probe(struct platform_device *dev) 756static int serial_pxa_probe(struct platform_device *dev)
750{ 757{
751 struct uart_pxa_port *sport; 758 struct uart_pxa_port *sport;
@@ -825,11 +832,12 @@ static struct platform_driver serial_pxa_driver = {
825 .probe = serial_pxa_probe, 832 .probe = serial_pxa_probe,
826 .remove = serial_pxa_remove, 833 .remove = serial_pxa_remove,
827 834
828 .suspend = serial_pxa_suspend,
829 .resume = serial_pxa_resume,
830 .driver = { 835 .driver = {
831 .name = "pxa2xx-uart", 836 .name = "pxa2xx-uart",
832 .owner = THIS_MODULE, 837 .owner = THIS_MODULE,
838#ifdef CONFIG_PM
839 .pm = &serial_pxa_pm_ops,
840#endif
833 }, 841 },
834}; 842};
835 843
diff --git a/drivers/serial/sa1100.c b/drivers/serial/sa1100.c
index 7f5e26873220..2199d819a987 100644
--- a/drivers/serial/sa1100.c
+++ b/drivers/serial/sa1100.c
@@ -638,7 +638,7 @@ static void __init sa1100_init_ports(void)
638 PPSR |= PPC_TXD1 | PPC_TXD3; 638 PPSR |= PPC_TXD1 | PPC_TXD3;
639} 639}
640 640
641void __init sa1100_register_uart_fns(struct sa1100_port_fns *fns) 641void __devinit sa1100_register_uart_fns(struct sa1100_port_fns *fns)
642{ 642{
643 if (fns->get_mctrl) 643 if (fns->get_mctrl)
644 sa1100_pops.get_mctrl = fns->get_mctrl; 644 sa1100_pops.get_mctrl = fns->get_mctrl;
diff --git a/drivers/serial/serial_txx9.c b/drivers/serial/serial_txx9.c
index 0f7cf4c453e6..c50e9fbbf743 100644
--- a/drivers/serial/serial_txx9.c
+++ b/drivers/serial/serial_txx9.c
@@ -221,21 +221,26 @@ sio_quot_set(struct uart_txx9_port *up, int quot)
221 sio_out(up, TXX9_SIBGR, 0xff | TXX9_SIBGR_BCLK_T6); 221 sio_out(up, TXX9_SIBGR, 0xff | TXX9_SIBGR_BCLK_T6);
222} 222}
223 223
224static struct uart_txx9_port *to_uart_txx9_port(struct uart_port *port)
225{
226 return container_of(port, struct uart_txx9_port, port);
227}
228
224static void serial_txx9_stop_tx(struct uart_port *port) 229static void serial_txx9_stop_tx(struct uart_port *port)
225{ 230{
226 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 231 struct uart_txx9_port *up = to_uart_txx9_port(port);
227 sio_mask(up, TXX9_SIDICR, TXX9_SIDICR_TIE); 232 sio_mask(up, TXX9_SIDICR, TXX9_SIDICR_TIE);
228} 233}
229 234
230static void serial_txx9_start_tx(struct uart_port *port) 235static void serial_txx9_start_tx(struct uart_port *port)
231{ 236{
232 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 237 struct uart_txx9_port *up = to_uart_txx9_port(port);
233 sio_set(up, TXX9_SIDICR, TXX9_SIDICR_TIE); 238 sio_set(up, TXX9_SIDICR, TXX9_SIDICR_TIE);
234} 239}
235 240
236static void serial_txx9_stop_rx(struct uart_port *port) 241static void serial_txx9_stop_rx(struct uart_port *port)
237{ 242{
238 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 243 struct uart_txx9_port *up = to_uart_txx9_port(port);
239 up->port.read_status_mask &= ~TXX9_SIDISR_RDIS; 244 up->port.read_status_mask &= ~TXX9_SIDISR_RDIS;
240} 245}
241 246
@@ -246,7 +251,7 @@ static void serial_txx9_enable_ms(struct uart_port *port)
246 251
247static void serial_txx9_initialize(struct uart_port *port) 252static void serial_txx9_initialize(struct uart_port *port)
248{ 253{
249 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 254 struct uart_txx9_port *up = to_uart_txx9_port(port);
250 unsigned int tmout = 10000; 255 unsigned int tmout = 10000;
251 256
252 sio_out(up, TXX9_SIFCR, TXX9_SIFCR_SWRST); 257 sio_out(up, TXX9_SIFCR, TXX9_SIFCR_SWRST);
@@ -414,7 +419,7 @@ static irqreturn_t serial_txx9_interrupt(int irq, void *dev_id)
414 419
415static unsigned int serial_txx9_tx_empty(struct uart_port *port) 420static unsigned int serial_txx9_tx_empty(struct uart_port *port)
416{ 421{
417 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 422 struct uart_txx9_port *up = to_uart_txx9_port(port);
418 unsigned long flags; 423 unsigned long flags;
419 unsigned int ret; 424 unsigned int ret;
420 425
@@ -427,7 +432,7 @@ static unsigned int serial_txx9_tx_empty(struct uart_port *port)
427 432
428static unsigned int serial_txx9_get_mctrl(struct uart_port *port) 433static unsigned int serial_txx9_get_mctrl(struct uart_port *port)
429{ 434{
430 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 435 struct uart_txx9_port *up = to_uart_txx9_port(port);
431 unsigned int ret; 436 unsigned int ret;
432 437
433 /* no modem control lines */ 438 /* no modem control lines */
@@ -440,7 +445,7 @@ static unsigned int serial_txx9_get_mctrl(struct uart_port *port)
440 445
441static void serial_txx9_set_mctrl(struct uart_port *port, unsigned int mctrl) 446static void serial_txx9_set_mctrl(struct uart_port *port, unsigned int mctrl)
442{ 447{
443 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 448 struct uart_txx9_port *up = to_uart_txx9_port(port);
444 449
445 if (mctrl & TIOCM_RTS) 450 if (mctrl & TIOCM_RTS)
446 sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSSC); 451 sio_mask(up, TXX9_SIFLCR, TXX9_SIFLCR_RTSSC);
@@ -450,7 +455,7 @@ static void serial_txx9_set_mctrl(struct uart_port *port, unsigned int mctrl)
450 455
451static void serial_txx9_break_ctl(struct uart_port *port, int break_state) 456static void serial_txx9_break_ctl(struct uart_port *port, int break_state)
452{ 457{
453 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 458 struct uart_txx9_port *up = to_uart_txx9_port(port);
454 unsigned long flags; 459 unsigned long flags;
455 460
456 spin_lock_irqsave(&up->port.lock, flags); 461 spin_lock_irqsave(&up->port.lock, flags);
@@ -494,7 +499,7 @@ static int serial_txx9_get_poll_char(struct uart_port *port)
494{ 499{
495 unsigned int ier; 500 unsigned int ier;
496 unsigned char c; 501 unsigned char c;
497 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 502 struct uart_txx9_port *up = to_uart_txx9_port(port);
498 503
499 /* 504 /*
500 * First save the IER then disable the interrupts 505 * First save the IER then disable the interrupts
@@ -520,7 +525,7 @@ static int serial_txx9_get_poll_char(struct uart_port *port)
520static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c) 525static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c)
521{ 526{
522 unsigned int ier; 527 unsigned int ier;
523 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 528 struct uart_txx9_port *up = to_uart_txx9_port(port);
524 529
525 /* 530 /*
526 * First save the IER then disable the interrupts 531 * First save the IER then disable the interrupts
@@ -551,7 +556,7 @@ static void serial_txx9_put_poll_char(struct uart_port *port, unsigned char c)
551 556
552static int serial_txx9_startup(struct uart_port *port) 557static int serial_txx9_startup(struct uart_port *port)
553{ 558{
554 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 559 struct uart_txx9_port *up = to_uart_txx9_port(port);
555 unsigned long flags; 560 unsigned long flags;
556 int retval; 561 int retval;
557 562
@@ -596,7 +601,7 @@ static int serial_txx9_startup(struct uart_port *port)
596 601
597static void serial_txx9_shutdown(struct uart_port *port) 602static void serial_txx9_shutdown(struct uart_port *port)
598{ 603{
599 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 604 struct uart_txx9_port *up = to_uart_txx9_port(port);
600 unsigned long flags; 605 unsigned long flags;
601 606
602 /* 607 /*
@@ -636,7 +641,7 @@ static void
636serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios, 641serial_txx9_set_termios(struct uart_port *port, struct ktermios *termios,
637 struct ktermios *old) 642 struct ktermios *old)
638{ 643{
639 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 644 struct uart_txx9_port *up = to_uart_txx9_port(port);
640 unsigned int cval, fcr = 0; 645 unsigned int cval, fcr = 0;
641 unsigned long flags; 646 unsigned long flags;
642 unsigned int baud, quot; 647 unsigned int baud, quot;
@@ -814,19 +819,19 @@ static void serial_txx9_release_resource(struct uart_txx9_port *up)
814 819
815static void serial_txx9_release_port(struct uart_port *port) 820static void serial_txx9_release_port(struct uart_port *port)
816{ 821{
817 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 822 struct uart_txx9_port *up = to_uart_txx9_port(port);
818 serial_txx9_release_resource(up); 823 serial_txx9_release_resource(up);
819} 824}
820 825
821static int serial_txx9_request_port(struct uart_port *port) 826static int serial_txx9_request_port(struct uart_port *port)
822{ 827{
823 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 828 struct uart_txx9_port *up = to_uart_txx9_port(port);
824 return serial_txx9_request_resource(up); 829 return serial_txx9_request_resource(up);
825} 830}
826 831
827static void serial_txx9_config_port(struct uart_port *port, int uflags) 832static void serial_txx9_config_port(struct uart_port *port, int uflags)
828{ 833{
829 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 834 struct uart_txx9_port *up = to_uart_txx9_port(port);
830 int ret; 835 int ret;
831 836
832 /* 837 /*
@@ -897,7 +902,7 @@ static void __init serial_txx9_register_ports(struct uart_driver *drv,
897 902
898static void serial_txx9_console_putchar(struct uart_port *port, int ch) 903static void serial_txx9_console_putchar(struct uart_port *port, int ch)
899{ 904{
900 struct uart_txx9_port *up = (struct uart_txx9_port *)port; 905 struct uart_txx9_port *up = to_uart_txx9_port(port);
901 906
902 wait_for_xmitr(up); 907 wait_for_xmitr(up);
903 sio_out(up, TXX9_SITFIFO, ch); 908 sio_out(up, TXX9_SITFIFO, ch);
diff --git a/drivers/sfi/sfi_core.c b/drivers/sfi/sfi_core.c
index d3b496800477..b204a0929139 100644
--- a/drivers/sfi/sfi_core.c
+++ b/drivers/sfi/sfi_core.c
@@ -90,7 +90,11 @@ static struct sfi_table_simple *syst_va __read_mostly;
90 */ 90 */
91static u32 sfi_use_ioremap __read_mostly; 91static u32 sfi_use_ioremap __read_mostly;
92 92
93static void __iomem *sfi_map_memory(u64 phys, u32 size) 93/*
94 * sfi_un/map_memory calls early_ioremap/iounmap which is a __init function
95 * and introduces section mismatch. So use __ref to make it calm.
96 */
97static void __iomem * __ref sfi_map_memory(u64 phys, u32 size)
94{ 98{
95 if (!phys || !size) 99 if (!phys || !size)
96 return NULL; 100 return NULL;
@@ -101,7 +105,7 @@ static void __iomem *sfi_map_memory(u64 phys, u32 size)
101 return early_ioremap(phys, size); 105 return early_ioremap(phys, size);
102} 106}
103 107
104static void sfi_unmap_memory(void __iomem *virt, u32 size) 108static void __ref sfi_unmap_memory(void __iomem *virt, u32 size)
105{ 109{
106 if (!virt || !size) 110 if (!virt || !size)
107 return; 111 return;
@@ -125,7 +129,7 @@ static void sfi_print_table_header(unsigned long long pa,
125 * sfi_verify_table() 129 * sfi_verify_table()
126 * Sanity check table lengh, calculate checksum 130 * Sanity check table lengh, calculate checksum
127 */ 131 */
128static __init int sfi_verify_table(struct sfi_table_header *table) 132static int sfi_verify_table(struct sfi_table_header *table)
129{ 133{
130 134
131 u8 checksum = 0; 135 u8 checksum = 0;
@@ -213,12 +217,17 @@ static int sfi_table_check_key(struct sfi_table_header *th,
213 * the mapped virt address will be returned, and the virt space 217 * the mapped virt address will be returned, and the virt space
214 * will be released by call sfi_put_table() later 218 * will be released by call sfi_put_table() later
215 * 219 *
220 * This two cases are from two different functions with two different
221 * sections and causes section mismatch warning. So use __ref to tell
222 * modpost not to make any noise.
223 *
216 * Return value: 224 * Return value:
217 * NULL: when can't find a table matching the key 225 * NULL: when can't find a table matching the key
218 * ERR_PTR(error): error value 226 * ERR_PTR(error): error value
219 * virt table address: when a matched table is found 227 * virt table address: when a matched table is found
220 */ 228 */
221struct sfi_table_header *sfi_check_table(u64 pa, struct sfi_table_key *key) 229struct sfi_table_header *
230 __ref sfi_check_table(u64 pa, struct sfi_table_key *key)
222{ 231{
223 struct sfi_table_header *th; 232 struct sfi_table_header *th;
224 void *ret = NULL; 233 void *ret = NULL;
diff --git a/drivers/spi/Makefile b/drivers/spi/Makefile
index 6d7a3f82c54b..21a118269cac 100644
--- a/drivers/spi/Makefile
+++ b/drivers/spi/Makefile
@@ -17,7 +17,7 @@ obj-$(CONFIG_SPI_BITBANG) += spi_bitbang.o
17obj-$(CONFIG_SPI_AU1550) += au1550_spi.o 17obj-$(CONFIG_SPI_AU1550) += au1550_spi.o
18obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o 18obj-$(CONFIG_SPI_BUTTERFLY) += spi_butterfly.o
19obj-$(CONFIG_SPI_GPIO) += spi_gpio.o 19obj-$(CONFIG_SPI_GPIO) += spi_gpio.o
20obj-$(CONFIG_SPI_IMX) += mxc_spi.o 20obj-$(CONFIG_SPI_IMX) += spi_imx.o
21obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o 21obj-$(CONFIG_SPI_LM70_LLP) += spi_lm70llp.o
22obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o 22obj-$(CONFIG_SPI_PXA2XX) += pxa2xx_spi.o
23obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o 23obj-$(CONFIG_SPI_OMAP_UWIRE) += omap_uwire.o
diff --git a/drivers/spi/amba-pl022.c b/drivers/spi/amba-pl022.c
index c0f950a7cbec..958a3ffc8987 100644
--- a/drivers/spi/amba-pl022.c
+++ b/drivers/spi/amba-pl022.c
@@ -532,7 +532,7 @@ static void restore_state(struct pl022 *pl022)
532 GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \ 532 GEN_MASK_BITS(SSP_DATA_BITS_12, SSP_CR0_MASK_DSS, 0) | \
533 GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP, 5) | \ 533 GEN_MASK_BITS(SSP_MICROWIRE_CHANNEL_FULL_DUPLEX, SSP_CR0_MASK_HALFDUP, 5) | \
534 GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \ 534 GEN_MASK_BITS(SSP_CLK_POL_IDLE_LOW, SSP_CR0_MASK_SPO, 6) | \
535 GEN_MASK_BITS(SSP_CLK_FALLING_EDGE, SSP_CR0_MASK_SPH, 7) | \ 535 GEN_MASK_BITS(SSP_CLK_SECOND_EDGE, SSP_CR0_MASK_SPH, 7) | \
536 GEN_MASK_BITS(NMDK_SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \ 536 GEN_MASK_BITS(NMDK_SSP_DEFAULT_CLKRATE, SSP_CR0_MASK_SCR, 8) | \
537 GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS, 16) | \ 537 GEN_MASK_BITS(SSP_BITS_8, SSP_CR0_MASK_CSS, 16) | \
538 GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 21) \ 538 GEN_MASK_BITS(SSP_INTERFACE_MOTOROLA_SPI, SSP_CR0_MASK_FRF, 21) \
@@ -1247,8 +1247,8 @@ static int verify_controller_parameters(struct pl022 *pl022,
1247 return -EINVAL; 1247 return -EINVAL;
1248 } 1248 }
1249 if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) { 1249 if (chip_info->iface == SSP_INTERFACE_MOTOROLA_SPI) {
1250 if ((chip_info->clk_phase != SSP_CLK_RISING_EDGE) 1250 if ((chip_info->clk_phase != SSP_CLK_FIRST_EDGE)
1251 && (chip_info->clk_phase != SSP_CLK_FALLING_EDGE)) { 1251 && (chip_info->clk_phase != SSP_CLK_SECOND_EDGE)) {
1252 dev_err(chip_info->dev, 1252 dev_err(chip_info->dev,
1253 "Clock Phase is configured incorrectly\n"); 1253 "Clock Phase is configured incorrectly\n");
1254 return -EINVAL; 1254 return -EINVAL;
@@ -1485,7 +1485,7 @@ static int pl022_setup(struct spi_device *spi)
1485 chip_info->data_size = SSP_DATA_BITS_12; 1485 chip_info->data_size = SSP_DATA_BITS_12;
1486 chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM; 1486 chip_info->rx_lev_trig = SSP_RX_1_OR_MORE_ELEM;
1487 chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC; 1487 chip_info->tx_lev_trig = SSP_TX_1_OR_MORE_EMPTY_LOC;
1488 chip_info->clk_phase = SSP_CLK_FALLING_EDGE; 1488 chip_info->clk_phase = SSP_CLK_SECOND_EDGE;
1489 chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW; 1489 chip_info->clk_pol = SSP_CLK_POL_IDLE_LOW;
1490 chip_info->ctrl_len = SSP_BITS_8; 1490 chip_info->ctrl_len = SSP_BITS_8;
1491 chip_info->wait_state = SSP_MWIRE_WAIT_ZERO; 1491 chip_info->wait_state = SSP_MWIRE_WAIT_ZERO;
diff --git a/drivers/spi/pxa2xx_spi.c b/drivers/spi/pxa2xx_spi.c
index 31dd56f0dae9..c8c2b693ffac 100644
--- a/drivers/spi/pxa2xx_spi.c
+++ b/drivers/spi/pxa2xx_spi.c
@@ -1668,10 +1668,9 @@ static void pxa2xx_spi_shutdown(struct platform_device *pdev)
1668} 1668}
1669 1669
1670#ifdef CONFIG_PM 1670#ifdef CONFIG_PM
1671 1671static int pxa2xx_spi_suspend(struct device *dev)
1672static int pxa2xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
1673{ 1672{
1674 struct driver_data *drv_data = platform_get_drvdata(pdev); 1673 struct driver_data *drv_data = dev_get_drvdata(dev);
1675 struct ssp_device *ssp = drv_data->ssp; 1674 struct ssp_device *ssp = drv_data->ssp;
1676 int status = 0; 1675 int status = 0;
1677 1676
@@ -1684,9 +1683,9 @@ static int pxa2xx_spi_suspend(struct platform_device *pdev, pm_message_t state)
1684 return 0; 1683 return 0;
1685} 1684}
1686 1685
1687static int pxa2xx_spi_resume(struct platform_device *pdev) 1686static int pxa2xx_spi_resume(struct device *dev)
1688{ 1687{
1689 struct driver_data *drv_data = platform_get_drvdata(pdev); 1688 struct driver_data *drv_data = dev_get_drvdata(dev);
1690 struct ssp_device *ssp = drv_data->ssp; 1689 struct ssp_device *ssp = drv_data->ssp;
1691 int status = 0; 1690 int status = 0;
1692 1691
@@ -1703,26 +1702,29 @@ static int pxa2xx_spi_resume(struct platform_device *pdev)
1703 /* Start the queue running */ 1702 /* Start the queue running */
1704 status = start_queue(drv_data); 1703 status = start_queue(drv_data);
1705 if (status != 0) { 1704 if (status != 0) {
1706 dev_err(&pdev->dev, "problem starting queue (%d)\n", status); 1705 dev_err(dev, "problem starting queue (%d)\n", status);
1707 return status; 1706 return status;
1708 } 1707 }
1709 1708
1710 return 0; 1709 return 0;
1711} 1710}
1712#else 1711
1713#define pxa2xx_spi_suspend NULL 1712static struct dev_pm_ops pxa2xx_spi_pm_ops = {
1714#define pxa2xx_spi_resume NULL 1713 .suspend = pxa2xx_spi_suspend,
1715#endif /* CONFIG_PM */ 1714 .resume = pxa2xx_spi_resume,
1715};
1716#endif
1716 1717
1717static struct platform_driver driver = { 1718static struct platform_driver driver = {
1718 .driver = { 1719 .driver = {
1719 .name = "pxa2xx-spi", 1720 .name = "pxa2xx-spi",
1720 .owner = THIS_MODULE, 1721 .owner = THIS_MODULE,
1722#ifdef CONFIG_PM
1723 .pm = &pxa2xx_spi_pm_ops,
1724#endif
1721 }, 1725 },
1722 .remove = pxa2xx_spi_remove, 1726 .remove = pxa2xx_spi_remove,
1723 .shutdown = pxa2xx_spi_shutdown, 1727 .shutdown = pxa2xx_spi_shutdown,
1724 .suspend = pxa2xx_spi_suspend,
1725 .resume = pxa2xx_spi_resume,
1726}; 1728};
1727 1729
1728static int __init pxa2xx_spi_init(void) 1730static int __init pxa2xx_spi_init(void)
diff --git a/drivers/spi/mxc_spi.c b/drivers/spi/spi_imx.c
index b1447236ae81..89c22efedfb0 100644
--- a/drivers/spi/mxc_spi.c
+++ b/drivers/spi/spi_imx.c
@@ -48,14 +48,14 @@
48#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */ 48#define MXC_INT_RR (1 << 0) /* Receive data ready interrupt */
49#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */ 49#define MXC_INT_TE (1 << 1) /* Transmit FIFO empty interrupt */
50 50
51struct mxc_spi_config { 51struct spi_imx_config {
52 unsigned int speed_hz; 52 unsigned int speed_hz;
53 unsigned int bpw; 53 unsigned int bpw;
54 unsigned int mode; 54 unsigned int mode;
55 int cs; 55 int cs;
56}; 56};
57 57
58struct mxc_spi_data { 58struct spi_imx_data {
59 struct spi_bitbang bitbang; 59 struct spi_bitbang bitbang;
60 60
61 struct completion xfer_done; 61 struct completion xfer_done;
@@ -66,43 +66,43 @@ struct mxc_spi_data {
66 int *chipselect; 66 int *chipselect;
67 67
68 unsigned int count; 68 unsigned int count;
69 void (*tx)(struct mxc_spi_data *); 69 void (*tx)(struct spi_imx_data *);
70 void (*rx)(struct mxc_spi_data *); 70 void (*rx)(struct spi_imx_data *);
71 void *rx_buf; 71 void *rx_buf;
72 const void *tx_buf; 72 const void *tx_buf;
73 unsigned int txfifo; /* number of words pushed in tx FIFO */ 73 unsigned int txfifo; /* number of words pushed in tx FIFO */
74 74
75 /* SoC specific functions */ 75 /* SoC specific functions */
76 void (*intctrl)(struct mxc_spi_data *, int); 76 void (*intctrl)(struct spi_imx_data *, int);
77 int (*config)(struct mxc_spi_data *, struct mxc_spi_config *); 77 int (*config)(struct spi_imx_data *, struct spi_imx_config *);
78 void (*trigger)(struct mxc_spi_data *); 78 void (*trigger)(struct spi_imx_data *);
79 int (*rx_available)(struct mxc_spi_data *); 79 int (*rx_available)(struct spi_imx_data *);
80}; 80};
81 81
82#define MXC_SPI_BUF_RX(type) \ 82#define MXC_SPI_BUF_RX(type) \
83static void mxc_spi_buf_rx_##type(struct mxc_spi_data *mxc_spi) \ 83static void spi_imx_buf_rx_##type(struct spi_imx_data *spi_imx) \
84{ \ 84{ \
85 unsigned int val = readl(mxc_spi->base + MXC_CSPIRXDATA); \ 85 unsigned int val = readl(spi_imx->base + MXC_CSPIRXDATA); \
86 \ 86 \
87 if (mxc_spi->rx_buf) { \ 87 if (spi_imx->rx_buf) { \
88 *(type *)mxc_spi->rx_buf = val; \ 88 *(type *)spi_imx->rx_buf = val; \
89 mxc_spi->rx_buf += sizeof(type); \ 89 spi_imx->rx_buf += sizeof(type); \
90 } \ 90 } \
91} 91}
92 92
93#define MXC_SPI_BUF_TX(type) \ 93#define MXC_SPI_BUF_TX(type) \
94static void mxc_spi_buf_tx_##type(struct mxc_spi_data *mxc_spi) \ 94static void spi_imx_buf_tx_##type(struct spi_imx_data *spi_imx) \
95{ \ 95{ \
96 type val = 0; \ 96 type val = 0; \
97 \ 97 \
98 if (mxc_spi->tx_buf) { \ 98 if (spi_imx->tx_buf) { \
99 val = *(type *)mxc_spi->tx_buf; \ 99 val = *(type *)spi_imx->tx_buf; \
100 mxc_spi->tx_buf += sizeof(type); \ 100 spi_imx->tx_buf += sizeof(type); \
101 } \ 101 } \
102 \ 102 \
103 mxc_spi->count -= sizeof(type); \ 103 spi_imx->count -= sizeof(type); \
104 \ 104 \
105 writel(val, mxc_spi->base + MXC_CSPITXDATA); \ 105 writel(val, spi_imx->base + MXC_CSPITXDATA); \
106} 106}
107 107
108MXC_SPI_BUF_RX(u8) 108MXC_SPI_BUF_RX(u8)
@@ -119,7 +119,7 @@ static int mxc_clkdivs[] = {0, 3, 4, 6, 8, 12, 16, 24, 32, 48, 64, 96, 128, 192,
119 256, 384, 512, 768, 1024}; 119 256, 384, 512, 768, 1024};
120 120
121/* MX21, MX27 */ 121/* MX21, MX27 */
122static unsigned int mxc_spi_clkdiv_1(unsigned int fin, 122static unsigned int spi_imx_clkdiv_1(unsigned int fin,
123 unsigned int fspi) 123 unsigned int fspi)
124{ 124{
125 int i, max; 125 int i, max;
@@ -137,7 +137,7 @@ static unsigned int mxc_spi_clkdiv_1(unsigned int fin,
137} 137}
138 138
139/* MX1, MX31, MX35 */ 139/* MX1, MX31, MX35 */
140static unsigned int mxc_spi_clkdiv_2(unsigned int fin, 140static unsigned int spi_imx_clkdiv_2(unsigned int fin,
141 unsigned int fspi) 141 unsigned int fspi)
142{ 142{
143 int i, div = 4; 143 int i, div = 4;
@@ -174,7 +174,7 @@ static unsigned int mxc_spi_clkdiv_2(unsigned int fin,
174 * the i.MX35 has a slightly different register layout for bits 174 * the i.MX35 has a slightly different register layout for bits
175 * we do not use here. 175 * we do not use here.
176 */ 176 */
177static void mx31_intctrl(struct mxc_spi_data *mxc_spi, int enable) 177static void mx31_intctrl(struct spi_imx_data *spi_imx, int enable)
178{ 178{
179 unsigned int val = 0; 179 unsigned int val = 0;
180 180
@@ -183,24 +183,24 @@ static void mx31_intctrl(struct mxc_spi_data *mxc_spi, int enable)
183 if (enable & MXC_INT_RR) 183 if (enable & MXC_INT_RR)
184 val |= MX31_INTREG_RREN; 184 val |= MX31_INTREG_RREN;
185 185
186 writel(val, mxc_spi->base + MXC_CSPIINT); 186 writel(val, spi_imx->base + MXC_CSPIINT);
187} 187}
188 188
189static void mx31_trigger(struct mxc_spi_data *mxc_spi) 189static void mx31_trigger(struct spi_imx_data *spi_imx)
190{ 190{
191 unsigned int reg; 191 unsigned int reg;
192 192
193 reg = readl(mxc_spi->base + MXC_CSPICTRL); 193 reg = readl(spi_imx->base + MXC_CSPICTRL);
194 reg |= MX31_CSPICTRL_XCH; 194 reg |= MX31_CSPICTRL_XCH;
195 writel(reg, mxc_spi->base + MXC_CSPICTRL); 195 writel(reg, spi_imx->base + MXC_CSPICTRL);
196} 196}
197 197
198static int mx31_config(struct mxc_spi_data *mxc_spi, 198static int mx31_config(struct spi_imx_data *spi_imx,
199 struct mxc_spi_config *config) 199 struct spi_imx_config *config)
200{ 200{
201 unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER; 201 unsigned int reg = MX31_CSPICTRL_ENABLE | MX31_CSPICTRL_MASTER;
202 202
203 reg |= mxc_spi_clkdiv_2(mxc_spi->spi_clk, config->speed_hz) << 203 reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
204 MX31_CSPICTRL_DR_SHIFT; 204 MX31_CSPICTRL_DR_SHIFT;
205 205
206 if (cpu_is_mx31()) 206 if (cpu_is_mx31())
@@ -223,14 +223,14 @@ static int mx31_config(struct mxc_spi_data *mxc_spi,
223 reg |= (config->cs + 32) << MX35_CSPICTRL_CS_SHIFT; 223 reg |= (config->cs + 32) << MX35_CSPICTRL_CS_SHIFT;
224 } 224 }
225 225
226 writel(reg, mxc_spi->base + MXC_CSPICTRL); 226 writel(reg, spi_imx->base + MXC_CSPICTRL);
227 227
228 return 0; 228 return 0;
229} 229}
230 230
231static int mx31_rx_available(struct mxc_spi_data *mxc_spi) 231static int mx31_rx_available(struct spi_imx_data *spi_imx)
232{ 232{
233 return readl(mxc_spi->base + MX31_CSPISTATUS) & MX31_STATUS_RR; 233 return readl(spi_imx->base + MX31_CSPISTATUS) & MX31_STATUS_RR;
234} 234}
235 235
236#define MX27_INTREG_RR (1 << 4) 236#define MX27_INTREG_RR (1 << 4)
@@ -246,7 +246,7 @@ static int mx31_rx_available(struct mxc_spi_data *mxc_spi)
246#define MX27_CSPICTRL_DR_SHIFT 14 246#define MX27_CSPICTRL_DR_SHIFT 14
247#define MX27_CSPICTRL_CS_SHIFT 19 247#define MX27_CSPICTRL_CS_SHIFT 19
248 248
249static void mx27_intctrl(struct mxc_spi_data *mxc_spi, int enable) 249static void mx27_intctrl(struct spi_imx_data *spi_imx, int enable)
250{ 250{
251 unsigned int val = 0; 251 unsigned int val = 0;
252 252
@@ -255,24 +255,24 @@ static void mx27_intctrl(struct mxc_spi_data *mxc_spi, int enable)
255 if (enable & MXC_INT_RR) 255 if (enable & MXC_INT_RR)
256 val |= MX27_INTREG_RREN; 256 val |= MX27_INTREG_RREN;
257 257
258 writel(val, mxc_spi->base + MXC_CSPIINT); 258 writel(val, spi_imx->base + MXC_CSPIINT);
259} 259}
260 260
261static void mx27_trigger(struct mxc_spi_data *mxc_spi) 261static void mx27_trigger(struct spi_imx_data *spi_imx)
262{ 262{
263 unsigned int reg; 263 unsigned int reg;
264 264
265 reg = readl(mxc_spi->base + MXC_CSPICTRL); 265 reg = readl(spi_imx->base + MXC_CSPICTRL);
266 reg |= MX27_CSPICTRL_XCH; 266 reg |= MX27_CSPICTRL_XCH;
267 writel(reg, mxc_spi->base + MXC_CSPICTRL); 267 writel(reg, spi_imx->base + MXC_CSPICTRL);
268} 268}
269 269
270static int mx27_config(struct mxc_spi_data *mxc_spi, 270static int mx27_config(struct spi_imx_data *spi_imx,
271 struct mxc_spi_config *config) 271 struct spi_imx_config *config)
272{ 272{
273 unsigned int reg = MX27_CSPICTRL_ENABLE | MX27_CSPICTRL_MASTER; 273 unsigned int reg = MX27_CSPICTRL_ENABLE | MX27_CSPICTRL_MASTER;
274 274
275 reg |= mxc_spi_clkdiv_1(mxc_spi->spi_clk, config->speed_hz) << 275 reg |= spi_imx_clkdiv_1(spi_imx->spi_clk, config->speed_hz) <<
276 MX27_CSPICTRL_DR_SHIFT; 276 MX27_CSPICTRL_DR_SHIFT;
277 reg |= config->bpw - 1; 277 reg |= config->bpw - 1;
278 278
@@ -285,14 +285,14 @@ static int mx27_config(struct mxc_spi_data *mxc_spi,
285 if (config->cs < 0) 285 if (config->cs < 0)
286 reg |= (config->cs + 32) << MX27_CSPICTRL_CS_SHIFT; 286 reg |= (config->cs + 32) << MX27_CSPICTRL_CS_SHIFT;
287 287
288 writel(reg, mxc_spi->base + MXC_CSPICTRL); 288 writel(reg, spi_imx->base + MXC_CSPICTRL);
289 289
290 return 0; 290 return 0;
291} 291}
292 292
293static int mx27_rx_available(struct mxc_spi_data *mxc_spi) 293static int mx27_rx_available(struct spi_imx_data *spi_imx)
294{ 294{
295 return readl(mxc_spi->base + MXC_CSPIINT) & MX27_INTREG_RR; 295 return readl(spi_imx->base + MXC_CSPIINT) & MX27_INTREG_RR;
296} 296}
297 297
298#define MX1_INTREG_RR (1 << 3) 298#define MX1_INTREG_RR (1 << 3)
@@ -306,7 +306,7 @@ static int mx27_rx_available(struct mxc_spi_data *mxc_spi)
306#define MX1_CSPICTRL_MASTER (1 << 10) 306#define MX1_CSPICTRL_MASTER (1 << 10)
307#define MX1_CSPICTRL_DR_SHIFT 13 307#define MX1_CSPICTRL_DR_SHIFT 13
308 308
309static void mx1_intctrl(struct mxc_spi_data *mxc_spi, int enable) 309static void mx1_intctrl(struct spi_imx_data *spi_imx, int enable)
310{ 310{
311 unsigned int val = 0; 311 unsigned int val = 0;
312 312
@@ -315,24 +315,24 @@ static void mx1_intctrl(struct mxc_spi_data *mxc_spi, int enable)
315 if (enable & MXC_INT_RR) 315 if (enable & MXC_INT_RR)
316 val |= MX1_INTREG_RREN; 316 val |= MX1_INTREG_RREN;
317 317
318 writel(val, mxc_spi->base + MXC_CSPIINT); 318 writel(val, spi_imx->base + MXC_CSPIINT);
319} 319}
320 320
321static void mx1_trigger(struct mxc_spi_data *mxc_spi) 321static void mx1_trigger(struct spi_imx_data *spi_imx)
322{ 322{
323 unsigned int reg; 323 unsigned int reg;
324 324
325 reg = readl(mxc_spi->base + MXC_CSPICTRL); 325 reg = readl(spi_imx->base + MXC_CSPICTRL);
326 reg |= MX1_CSPICTRL_XCH; 326 reg |= MX1_CSPICTRL_XCH;
327 writel(reg, mxc_spi->base + MXC_CSPICTRL); 327 writel(reg, spi_imx->base + MXC_CSPICTRL);
328} 328}
329 329
330static int mx1_config(struct mxc_spi_data *mxc_spi, 330static int mx1_config(struct spi_imx_data *spi_imx,
331 struct mxc_spi_config *config) 331 struct spi_imx_config *config)
332{ 332{
333 unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER; 333 unsigned int reg = MX1_CSPICTRL_ENABLE | MX1_CSPICTRL_MASTER;
334 334
335 reg |= mxc_spi_clkdiv_2(mxc_spi->spi_clk, config->speed_hz) << 335 reg |= spi_imx_clkdiv_2(spi_imx->spi_clk, config->speed_hz) <<
336 MX1_CSPICTRL_DR_SHIFT; 336 MX1_CSPICTRL_DR_SHIFT;
337 reg |= config->bpw - 1; 337 reg |= config->bpw - 1;
338 338
@@ -341,156 +341,151 @@ static int mx1_config(struct mxc_spi_data *mxc_spi,
341 if (config->mode & SPI_CPOL) 341 if (config->mode & SPI_CPOL)
342 reg |= MX1_CSPICTRL_POL; 342 reg |= MX1_CSPICTRL_POL;
343 343
344 writel(reg, mxc_spi->base + MXC_CSPICTRL); 344 writel(reg, spi_imx->base + MXC_CSPICTRL);
345 345
346 return 0; 346 return 0;
347} 347}
348 348
349static int mx1_rx_available(struct mxc_spi_data *mxc_spi) 349static int mx1_rx_available(struct spi_imx_data *spi_imx)
350{ 350{
351 return readl(mxc_spi->base + MXC_CSPIINT) & MX1_INTREG_RR; 351 return readl(spi_imx->base + MXC_CSPIINT) & MX1_INTREG_RR;
352} 352}
353 353
354static void mxc_spi_chipselect(struct spi_device *spi, int is_active) 354static void spi_imx_chipselect(struct spi_device *spi, int is_active)
355{ 355{
356 struct mxc_spi_data *mxc_spi = spi_master_get_devdata(spi->master); 356 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
357 unsigned int cs = 0; 357 int gpio = spi_imx->chipselect[spi->chip_select];
358 int gpio = mxc_spi->chipselect[spi->chip_select]; 358 int active = is_active != BITBANG_CS_INACTIVE;
359 struct mxc_spi_config config; 359 int dev_is_lowactive = !(spi->mode & SPI_CS_HIGH);
360 360
361 if (spi->mode & SPI_CS_HIGH) 361 if (gpio < 0)
362 cs = 1;
363
364 if (is_active == BITBANG_CS_INACTIVE) {
365 if (gpio >= 0)
366 gpio_set_value(gpio, !cs);
367 return; 362 return;
368 }
369
370 config.bpw = spi->bits_per_word;
371 config.speed_hz = spi->max_speed_hz;
372 config.mode = spi->mode;
373 config.cs = mxc_spi->chipselect[spi->chip_select];
374
375 mxc_spi->config(mxc_spi, &config);
376
377 /* Initialize the functions for transfer */
378 if (config.bpw <= 8) {
379 mxc_spi->rx = mxc_spi_buf_rx_u8;
380 mxc_spi->tx = mxc_spi_buf_tx_u8;
381 } else if (config.bpw <= 16) {
382 mxc_spi->rx = mxc_spi_buf_rx_u16;
383 mxc_spi->tx = mxc_spi_buf_tx_u16;
384 } else if (config.bpw <= 32) {
385 mxc_spi->rx = mxc_spi_buf_rx_u32;
386 mxc_spi->tx = mxc_spi_buf_tx_u32;
387 } else
388 BUG();
389 363
390 if (gpio >= 0) 364 gpio_set_value(gpio, dev_is_lowactive ^ active);
391 gpio_set_value(gpio, cs);
392
393 return;
394} 365}
395 366
396static void mxc_spi_push(struct mxc_spi_data *mxc_spi) 367static void spi_imx_push(struct spi_imx_data *spi_imx)
397{ 368{
398 while (mxc_spi->txfifo < 8) { 369 while (spi_imx->txfifo < 8) {
399 if (!mxc_spi->count) 370 if (!spi_imx->count)
400 break; 371 break;
401 mxc_spi->tx(mxc_spi); 372 spi_imx->tx(spi_imx);
402 mxc_spi->txfifo++; 373 spi_imx->txfifo++;
403 } 374 }
404 375
405 mxc_spi->trigger(mxc_spi); 376 spi_imx->trigger(spi_imx);
406} 377}
407 378
408static irqreturn_t mxc_spi_isr(int irq, void *dev_id) 379static irqreturn_t spi_imx_isr(int irq, void *dev_id)
409{ 380{
410 struct mxc_spi_data *mxc_spi = dev_id; 381 struct spi_imx_data *spi_imx = dev_id;
411 382
412 while (mxc_spi->rx_available(mxc_spi)) { 383 while (spi_imx->rx_available(spi_imx)) {
413 mxc_spi->rx(mxc_spi); 384 spi_imx->rx(spi_imx);
414 mxc_spi->txfifo--; 385 spi_imx->txfifo--;
415 } 386 }
416 387
417 if (mxc_spi->count) { 388 if (spi_imx->count) {
418 mxc_spi_push(mxc_spi); 389 spi_imx_push(spi_imx);
419 return IRQ_HANDLED; 390 return IRQ_HANDLED;
420 } 391 }
421 392
422 if (mxc_spi->txfifo) { 393 if (spi_imx->txfifo) {
423 /* No data left to push, but still waiting for rx data, 394 /* No data left to push, but still waiting for rx data,
424 * enable receive data available interrupt. 395 * enable receive data available interrupt.
425 */ 396 */
426 mxc_spi->intctrl(mxc_spi, MXC_INT_RR); 397 spi_imx->intctrl(spi_imx, MXC_INT_RR);
427 return IRQ_HANDLED; 398 return IRQ_HANDLED;
428 } 399 }
429 400
430 mxc_spi->intctrl(mxc_spi, 0); 401 spi_imx->intctrl(spi_imx, 0);
431 complete(&mxc_spi->xfer_done); 402 complete(&spi_imx->xfer_done);
432 403
433 return IRQ_HANDLED; 404 return IRQ_HANDLED;
434} 405}
435 406
436static int mxc_spi_setupxfer(struct spi_device *spi, 407static int spi_imx_setupxfer(struct spi_device *spi,
437 struct spi_transfer *t) 408 struct spi_transfer *t)
438{ 409{
439 struct mxc_spi_data *mxc_spi = spi_master_get_devdata(spi->master); 410 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
440 struct mxc_spi_config config; 411 struct spi_imx_config config;
441 412
442 config.bpw = t ? t->bits_per_word : spi->bits_per_word; 413 config.bpw = t ? t->bits_per_word : spi->bits_per_word;
443 config.speed_hz = t ? t->speed_hz : spi->max_speed_hz; 414 config.speed_hz = t ? t->speed_hz : spi->max_speed_hz;
444 config.mode = spi->mode; 415 config.mode = spi->mode;
416 config.cs = spi_imx->chipselect[spi->chip_select];
417
418 if (!config.speed_hz)
419 config.speed_hz = spi->max_speed_hz;
420 if (!config.bpw)
421 config.bpw = spi->bits_per_word;
422 if (!config.speed_hz)
423 config.speed_hz = spi->max_speed_hz;
424
425 /* Initialize the functions for transfer */
426 if (config.bpw <= 8) {
427 spi_imx->rx = spi_imx_buf_rx_u8;
428 spi_imx->tx = spi_imx_buf_tx_u8;
429 } else if (config.bpw <= 16) {
430 spi_imx->rx = spi_imx_buf_rx_u16;
431 spi_imx->tx = spi_imx_buf_tx_u16;
432 } else if (config.bpw <= 32) {
433 spi_imx->rx = spi_imx_buf_rx_u32;
434 spi_imx->tx = spi_imx_buf_tx_u32;
435 } else
436 BUG();
445 437
446 mxc_spi->config(mxc_spi, &config); 438 spi_imx->config(spi_imx, &config);
447 439
448 return 0; 440 return 0;
449} 441}
450 442
451static int mxc_spi_transfer(struct spi_device *spi, 443static int spi_imx_transfer(struct spi_device *spi,
452 struct spi_transfer *transfer) 444 struct spi_transfer *transfer)
453{ 445{
454 struct mxc_spi_data *mxc_spi = spi_master_get_devdata(spi->master); 446 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
455 447
456 mxc_spi->tx_buf = transfer->tx_buf; 448 spi_imx->tx_buf = transfer->tx_buf;
457 mxc_spi->rx_buf = transfer->rx_buf; 449 spi_imx->rx_buf = transfer->rx_buf;
458 mxc_spi->count = transfer->len; 450 spi_imx->count = transfer->len;
459 mxc_spi->txfifo = 0; 451 spi_imx->txfifo = 0;
460 452
461 init_completion(&mxc_spi->xfer_done); 453 init_completion(&spi_imx->xfer_done);
462 454
463 mxc_spi_push(mxc_spi); 455 spi_imx_push(spi_imx);
464 456
465 mxc_spi->intctrl(mxc_spi, MXC_INT_TE); 457 spi_imx->intctrl(spi_imx, MXC_INT_TE);
466 458
467 wait_for_completion(&mxc_spi->xfer_done); 459 wait_for_completion(&spi_imx->xfer_done);
468 460
469 return transfer->len; 461 return transfer->len;
470} 462}
471 463
472static int mxc_spi_setup(struct spi_device *spi) 464static int spi_imx_setup(struct spi_device *spi)
473{ 465{
474 if (!spi->bits_per_word) 466 struct spi_imx_data *spi_imx = spi_master_get_devdata(spi->master);
475 spi->bits_per_word = 8; 467 int gpio = spi_imx->chipselect[spi->chip_select];
476 468
477 pr_debug("%s: mode %d, %u bpw, %d hz\n", __func__, 469 pr_debug("%s: mode %d, %u bpw, %d hz\n", __func__,
478 spi->mode, spi->bits_per_word, spi->max_speed_hz); 470 spi->mode, spi->bits_per_word, spi->max_speed_hz);
479 471
480 mxc_spi_chipselect(spi, BITBANG_CS_INACTIVE); 472 if (gpio >= 0)
473 gpio_direction_output(gpio, spi->mode & SPI_CS_HIGH ? 0 : 1);
474
475 spi_imx_chipselect(spi, BITBANG_CS_INACTIVE);
481 476
482 return 0; 477 return 0;
483} 478}
484 479
485static void mxc_spi_cleanup(struct spi_device *spi) 480static void spi_imx_cleanup(struct spi_device *spi)
486{ 481{
487} 482}
488 483
489static int __init mxc_spi_probe(struct platform_device *pdev) 484static int __init spi_imx_probe(struct platform_device *pdev)
490{ 485{
491 struct spi_imx_master *mxc_platform_info; 486 struct spi_imx_master *mxc_platform_info;
492 struct spi_master *master; 487 struct spi_master *master;
493 struct mxc_spi_data *mxc_spi; 488 struct spi_imx_data *spi_imx;
494 struct resource *res; 489 struct resource *res;
495 int i, ret; 490 int i, ret;
496 491
@@ -500,7 +495,7 @@ static int __init mxc_spi_probe(struct platform_device *pdev)
500 return -EINVAL; 495 return -EINVAL;
501 } 496 }
502 497
503 master = spi_alloc_master(&pdev->dev, sizeof(struct mxc_spi_data)); 498 master = spi_alloc_master(&pdev->dev, sizeof(struct spi_imx_data));
504 if (!master) 499 if (!master)
505 return -ENOMEM; 500 return -ENOMEM;
506 501
@@ -509,32 +504,32 @@ static int __init mxc_spi_probe(struct platform_device *pdev)
509 master->bus_num = pdev->id; 504 master->bus_num = pdev->id;
510 master->num_chipselect = mxc_platform_info->num_chipselect; 505 master->num_chipselect = mxc_platform_info->num_chipselect;
511 506
512 mxc_spi = spi_master_get_devdata(master); 507 spi_imx = spi_master_get_devdata(master);
513 mxc_spi->bitbang.master = spi_master_get(master); 508 spi_imx->bitbang.master = spi_master_get(master);
514 mxc_spi->chipselect = mxc_platform_info->chipselect; 509 spi_imx->chipselect = mxc_platform_info->chipselect;
515 510
516 for (i = 0; i < master->num_chipselect; i++) { 511 for (i = 0; i < master->num_chipselect; i++) {
517 if (mxc_spi->chipselect[i] < 0) 512 if (spi_imx->chipselect[i] < 0)
518 continue; 513 continue;
519 ret = gpio_request(mxc_spi->chipselect[i], DRIVER_NAME); 514 ret = gpio_request(spi_imx->chipselect[i], DRIVER_NAME);
520 if (ret) { 515 if (ret) {
521 i--; 516 i--;
522 while (i > 0) 517 while (i > 0)
523 if (mxc_spi->chipselect[i] >= 0) 518 if (spi_imx->chipselect[i] >= 0)
524 gpio_free(mxc_spi->chipselect[i--]); 519 gpio_free(spi_imx->chipselect[i--]);
525 dev_err(&pdev->dev, "can't get cs gpios"); 520 dev_err(&pdev->dev, "can't get cs gpios");
526 goto out_master_put; 521 goto out_master_put;
527 } 522 }
528 gpio_direction_output(mxc_spi->chipselect[i], 1);
529 } 523 }
530 524
531 mxc_spi->bitbang.chipselect = mxc_spi_chipselect; 525 spi_imx->bitbang.chipselect = spi_imx_chipselect;
532 mxc_spi->bitbang.setup_transfer = mxc_spi_setupxfer; 526 spi_imx->bitbang.setup_transfer = spi_imx_setupxfer;
533 mxc_spi->bitbang.txrx_bufs = mxc_spi_transfer; 527 spi_imx->bitbang.txrx_bufs = spi_imx_transfer;
534 mxc_spi->bitbang.master->setup = mxc_spi_setup; 528 spi_imx->bitbang.master->setup = spi_imx_setup;
535 mxc_spi->bitbang.master->cleanup = mxc_spi_cleanup; 529 spi_imx->bitbang.master->cleanup = spi_imx_cleanup;
530 spi_imx->bitbang.master->mode_bits = SPI_CPOL | SPI_CPHA | SPI_CS_HIGH;
536 531
537 init_completion(&mxc_spi->xfer_done); 532 init_completion(&spi_imx->xfer_done);
538 533
539 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 534 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
540 if (!res) { 535 if (!res) {
@@ -549,58 +544,58 @@ static int __init mxc_spi_probe(struct platform_device *pdev)
549 goto out_gpio_free; 544 goto out_gpio_free;
550 } 545 }
551 546
552 mxc_spi->base = ioremap(res->start, resource_size(res)); 547 spi_imx->base = ioremap(res->start, resource_size(res));
553 if (!mxc_spi->base) { 548 if (!spi_imx->base) {
554 ret = -EINVAL; 549 ret = -EINVAL;
555 goto out_release_mem; 550 goto out_release_mem;
556 } 551 }
557 552
558 mxc_spi->irq = platform_get_irq(pdev, 0); 553 spi_imx->irq = platform_get_irq(pdev, 0);
559 if (!mxc_spi->irq) { 554 if (!spi_imx->irq) {
560 ret = -EINVAL; 555 ret = -EINVAL;
561 goto out_iounmap; 556 goto out_iounmap;
562 } 557 }
563 558
564 ret = request_irq(mxc_spi->irq, mxc_spi_isr, 0, DRIVER_NAME, mxc_spi); 559 ret = request_irq(spi_imx->irq, spi_imx_isr, 0, DRIVER_NAME, spi_imx);
565 if (ret) { 560 if (ret) {
566 dev_err(&pdev->dev, "can't get irq%d: %d\n", mxc_spi->irq, ret); 561 dev_err(&pdev->dev, "can't get irq%d: %d\n", spi_imx->irq, ret);
567 goto out_iounmap; 562 goto out_iounmap;
568 } 563 }
569 564
570 if (cpu_is_mx31() || cpu_is_mx35()) { 565 if (cpu_is_mx31() || cpu_is_mx35()) {
571 mxc_spi->intctrl = mx31_intctrl; 566 spi_imx->intctrl = mx31_intctrl;
572 mxc_spi->config = mx31_config; 567 spi_imx->config = mx31_config;
573 mxc_spi->trigger = mx31_trigger; 568 spi_imx->trigger = mx31_trigger;
574 mxc_spi->rx_available = mx31_rx_available; 569 spi_imx->rx_available = mx31_rx_available;
575 } else if (cpu_is_mx27() || cpu_is_mx21()) { 570 } else if (cpu_is_mx27() || cpu_is_mx21()) {
576 mxc_spi->intctrl = mx27_intctrl; 571 spi_imx->intctrl = mx27_intctrl;
577 mxc_spi->config = mx27_config; 572 spi_imx->config = mx27_config;
578 mxc_spi->trigger = mx27_trigger; 573 spi_imx->trigger = mx27_trigger;
579 mxc_spi->rx_available = mx27_rx_available; 574 spi_imx->rx_available = mx27_rx_available;
580 } else if (cpu_is_mx1()) { 575 } else if (cpu_is_mx1()) {
581 mxc_spi->intctrl = mx1_intctrl; 576 spi_imx->intctrl = mx1_intctrl;
582 mxc_spi->config = mx1_config; 577 spi_imx->config = mx1_config;
583 mxc_spi->trigger = mx1_trigger; 578 spi_imx->trigger = mx1_trigger;
584 mxc_spi->rx_available = mx1_rx_available; 579 spi_imx->rx_available = mx1_rx_available;
585 } else 580 } else
586 BUG(); 581 BUG();
587 582
588 mxc_spi->clk = clk_get(&pdev->dev, NULL); 583 spi_imx->clk = clk_get(&pdev->dev, NULL);
589 if (IS_ERR(mxc_spi->clk)) { 584 if (IS_ERR(spi_imx->clk)) {
590 dev_err(&pdev->dev, "unable to get clock\n"); 585 dev_err(&pdev->dev, "unable to get clock\n");
591 ret = PTR_ERR(mxc_spi->clk); 586 ret = PTR_ERR(spi_imx->clk);
592 goto out_free_irq; 587 goto out_free_irq;
593 } 588 }
594 589
595 clk_enable(mxc_spi->clk); 590 clk_enable(spi_imx->clk);
596 mxc_spi->spi_clk = clk_get_rate(mxc_spi->clk); 591 spi_imx->spi_clk = clk_get_rate(spi_imx->clk);
597 592
598 if (!cpu_is_mx31() || !cpu_is_mx35()) 593 if (!cpu_is_mx31() || !cpu_is_mx35())
599 writel(1, mxc_spi->base + MXC_RESET); 594 writel(1, spi_imx->base + MXC_RESET);
600 595
601 mxc_spi->intctrl(mxc_spi, 0); 596 spi_imx->intctrl(spi_imx, 0);
602 597
603 ret = spi_bitbang_start(&mxc_spi->bitbang); 598 ret = spi_bitbang_start(&spi_imx->bitbang);
604 if (ret) { 599 if (ret) {
605 dev_err(&pdev->dev, "bitbang start failed with %d\n", ret); 600 dev_err(&pdev->dev, "bitbang start failed with %d\n", ret);
606 goto out_clk_put; 601 goto out_clk_put;
@@ -611,18 +606,18 @@ static int __init mxc_spi_probe(struct platform_device *pdev)
611 return ret; 606 return ret;
612 607
613out_clk_put: 608out_clk_put:
614 clk_disable(mxc_spi->clk); 609 clk_disable(spi_imx->clk);
615 clk_put(mxc_spi->clk); 610 clk_put(spi_imx->clk);
616out_free_irq: 611out_free_irq:
617 free_irq(mxc_spi->irq, mxc_spi); 612 free_irq(spi_imx->irq, spi_imx);
618out_iounmap: 613out_iounmap:
619 iounmap(mxc_spi->base); 614 iounmap(spi_imx->base);
620out_release_mem: 615out_release_mem:
621 release_mem_region(res->start, resource_size(res)); 616 release_mem_region(res->start, resource_size(res));
622out_gpio_free: 617out_gpio_free:
623 for (i = 0; i < master->num_chipselect; i++) 618 for (i = 0; i < master->num_chipselect; i++)
624 if (mxc_spi->chipselect[i] >= 0) 619 if (spi_imx->chipselect[i] >= 0)
625 gpio_free(mxc_spi->chipselect[i]); 620 gpio_free(spi_imx->chipselect[i]);
626out_master_put: 621out_master_put:
627 spi_master_put(master); 622 spi_master_put(master);
628 kfree(master); 623 kfree(master);
@@ -630,24 +625,24 @@ out_master_put:
630 return ret; 625 return ret;
631} 626}
632 627
633static int __exit mxc_spi_remove(struct platform_device *pdev) 628static int __exit spi_imx_remove(struct platform_device *pdev)
634{ 629{
635 struct spi_master *master = platform_get_drvdata(pdev); 630 struct spi_master *master = platform_get_drvdata(pdev);
636 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 631 struct resource *res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
637 struct mxc_spi_data *mxc_spi = spi_master_get_devdata(master); 632 struct spi_imx_data *spi_imx = spi_master_get_devdata(master);
638 int i; 633 int i;
639 634
640 spi_bitbang_stop(&mxc_spi->bitbang); 635 spi_bitbang_stop(&spi_imx->bitbang);
641 636
642 writel(0, mxc_spi->base + MXC_CSPICTRL); 637 writel(0, spi_imx->base + MXC_CSPICTRL);
643 clk_disable(mxc_spi->clk); 638 clk_disable(spi_imx->clk);
644 clk_put(mxc_spi->clk); 639 clk_put(spi_imx->clk);
645 free_irq(mxc_spi->irq, mxc_spi); 640 free_irq(spi_imx->irq, spi_imx);
646 iounmap(mxc_spi->base); 641 iounmap(spi_imx->base);
647 642
648 for (i = 0; i < master->num_chipselect; i++) 643 for (i = 0; i < master->num_chipselect; i++)
649 if (mxc_spi->chipselect[i] >= 0) 644 if (spi_imx->chipselect[i] >= 0)
650 gpio_free(mxc_spi->chipselect[i]); 645 gpio_free(spi_imx->chipselect[i]);
651 646
652 spi_master_put(master); 647 spi_master_put(master);
653 648
@@ -658,27 +653,27 @@ static int __exit mxc_spi_remove(struct platform_device *pdev)
658 return 0; 653 return 0;
659} 654}
660 655
661static struct platform_driver mxc_spi_driver = { 656static struct platform_driver spi_imx_driver = {
662 .driver = { 657 .driver = {
663 .name = DRIVER_NAME, 658 .name = DRIVER_NAME,
664 .owner = THIS_MODULE, 659 .owner = THIS_MODULE,
665 }, 660 },
666 .probe = mxc_spi_probe, 661 .probe = spi_imx_probe,
667 .remove = __exit_p(mxc_spi_remove), 662 .remove = __exit_p(spi_imx_remove),
668}; 663};
669 664
670static int __init mxc_spi_init(void) 665static int __init spi_imx_init(void)
671{ 666{
672 return platform_driver_register(&mxc_spi_driver); 667 return platform_driver_register(&spi_imx_driver);
673} 668}
674 669
675static void __exit mxc_spi_exit(void) 670static void __exit spi_imx_exit(void)
676{ 671{
677 platform_driver_unregister(&mxc_spi_driver); 672 platform_driver_unregister(&spi_imx_driver);
678} 673}
679 674
680module_init(mxc_spi_init); 675module_init(spi_imx_init);
681module_exit(mxc_spi_exit); 676module_exit(spi_imx_exit);
682 677
683MODULE_DESCRIPTION("SPI Master Controller driver"); 678MODULE_DESCRIPTION("SPI Master Controller driver");
684MODULE_AUTHOR("Sascha Hauer, Pengutronix"); 679MODULE_AUTHOR("Sascha Hauer, Pengutronix");
diff --git a/drivers/spi/spidev.c b/drivers/spi/spidev.c
index f921bd1109e1..5d23983f02fc 100644
--- a/drivers/spi/spidev.c
+++ b/drivers/spi/spidev.c
@@ -537,7 +537,7 @@ static int spidev_release(struct inode *inode, struct file *filp)
537 return status; 537 return status;
538} 538}
539 539
540static struct file_operations spidev_fops = { 540static const struct file_operations spidev_fops = {
541 .owner = THIS_MODULE, 541 .owner = THIS_MODULE,
542 /* REVISIT switch to aio primitives, so that userspace 542 /* REVISIT switch to aio primitives, so that userspace
543 * gets more complete API coverage. It'll simplify things 543 * gets more complete API coverage. It'll simplify things
diff --git a/drivers/ssb/driver_pcicore.c b/drivers/ssb/driver_pcicore.c
index 538c570df337..f1dcd7969a5c 100644
--- a/drivers/ssb/driver_pcicore.c
+++ b/drivers/ssb/driver_pcicore.c
@@ -551,13 +551,13 @@ int ssb_pcicore_dev_irqvecs_enable(struct ssb_pcicore *pc,
551 might_sleep_if(pdev->id.coreid != SSB_DEV_PCI); 551 might_sleep_if(pdev->id.coreid != SSB_DEV_PCI);
552 552
553 /* Enable interrupts for this device. */ 553 /* Enable interrupts for this device. */
554 if (bus->host_pci && 554 if ((pdev->id.revision >= 6) || (pdev->id.coreid == SSB_DEV_PCIE)) {
555 ((pdev->id.revision >= 6) || (pdev->id.coreid == SSB_DEV_PCIE))) {
556 u32 coremask; 555 u32 coremask;
557 556
558 /* Calculate the "coremask" for the device. */ 557 /* Calculate the "coremask" for the device. */
559 coremask = (1 << dev->core_index); 558 coremask = (1 << dev->core_index);
560 559
560 SSB_WARN_ON(bus->bustype != SSB_BUSTYPE_PCI);
561 err = pci_read_config_dword(bus->host_pci, SSB_PCI_IRQMASK, &tmp); 561 err = pci_read_config_dword(bus->host_pci, SSB_PCI_IRQMASK, &tmp);
562 if (err) 562 if (err)
563 goto out; 563 goto out;
diff --git a/drivers/staging/Kconfig b/drivers/staging/Kconfig
index 9a4dd5992f65..4e3873bfd010 100644
--- a/drivers/staging/Kconfig
+++ b/drivers/staging/Kconfig
@@ -131,5 +131,13 @@ source "drivers/staging/iio/Kconfig"
131 131
132source "drivers/staging/cowloop/Kconfig" 132source "drivers/staging/cowloop/Kconfig"
133 133
134source "drivers/staging/strip/Kconfig"
135
136source "drivers/staging/arlan/Kconfig"
137
138source "drivers/staging/wavelan/Kconfig"
139
140source "drivers/staging/netwave/Kconfig"
141
134endif # !STAGING_EXCLUDE_BUILD 142endif # !STAGING_EXCLUDE_BUILD
135endif # STAGING 143endif # STAGING
diff --git a/drivers/staging/Makefile b/drivers/staging/Makefile
index 104f2f8897ec..fb1d7851b563 100644
--- a/drivers/staging/Makefile
+++ b/drivers/staging/Makefile
@@ -47,3 +47,8 @@ obj-$(CONFIG_RAR_REGISTER) += rar/
47obj-$(CONFIG_DX_SEP) += sep/ 47obj-$(CONFIG_DX_SEP) += sep/
48obj-$(CONFIG_IIO) += iio/ 48obj-$(CONFIG_IIO) += iio/
49obj-$(CONFIG_COWLOOP) += cowloop/ 49obj-$(CONFIG_COWLOOP) += cowloop/
50obj-$(CONFIG_STRIP) += strip/
51obj-$(CONFIG_ARLAN) += arlan/
52obj-$(CONFIG_WAVELAN) += wavelan/
53obj-$(CONFIG_PCMCIA_WAVELAN) += wavelan/
54obj-$(CONFIG_PCMCIA_NETWAVE) += netwave/
diff --git a/drivers/staging/arlan/Kconfig b/drivers/staging/arlan/Kconfig
new file mode 100644
index 000000000000..0585ed8b4d3e
--- /dev/null
+++ b/drivers/staging/arlan/Kconfig
@@ -0,0 +1,15 @@
1config ARLAN
2 tristate "Aironet Arlan 655 & IC2200 DS support"
3 depends on ISA && !64BIT
4 select WIRELESS_EXT
5 ---help---
6 Aironet makes Arlan, a class of wireless LAN adapters. These use the
7 www.Telxon.com chip, which is also used on several similar cards.
8 This driver is tested on the 655 and IC2200 series cards. Look at
9 <http://www.ylenurme.ee/~elmer/655/> for the latest information.
10
11 The driver is built as two modules, arlan and arlan-proc. The latter
12 is the /proc interface and is not needed most of time.
13
14 On some computers the card ends up in non-valid state after some
15 time. Use a ping-reset script to clear it.
diff --git a/drivers/staging/arlan/Makefile b/drivers/staging/arlan/Makefile
new file mode 100644
index 000000000000..9e58e5fae7b9
--- /dev/null
+++ b/drivers/staging/arlan/Makefile
@@ -0,0 +1,3 @@
1obj-$(CONFIG_ARLAN) += arlan.o
2
3arlan-objs := arlan-main.o arlan-proc.o
diff --git a/drivers/staging/arlan/TODO b/drivers/staging/arlan/TODO
new file mode 100644
index 000000000000..9bd15a2f6d9e
--- /dev/null
+++ b/drivers/staging/arlan/TODO
@@ -0,0 +1,7 @@
1TODO:
2 - step up and maintain this driver to ensure that it continues
3 to work. Having the hardware for this is pretty much a
4 requirement. If this does not happen, the will be removed in
5 the 2.6.35 kernel release.
6
7Please send patches to Greg Kroah-Hartman <greg@kroah.com>.
diff --git a/drivers/net/wireless/arlan-main.c b/drivers/staging/arlan/arlan-main.c
index 921a082487a1..921a082487a1 100644
--- a/drivers/net/wireless/arlan-main.c
+++ b/drivers/staging/arlan/arlan-main.c
diff --git a/drivers/net/wireless/arlan-proc.c b/drivers/staging/arlan/arlan-proc.c
index a8b689635a3b..a8b689635a3b 100644
--- a/drivers/net/wireless/arlan-proc.c
+++ b/drivers/staging/arlan/arlan-proc.c
diff --git a/drivers/net/wireless/arlan.h b/drivers/staging/arlan/arlan.h
index fb3ad51a1caf..fb3ad51a1caf 100644
--- a/drivers/net/wireless/arlan.h
+++ b/drivers/staging/arlan/arlan.h
diff --git a/drivers/staging/dst/dcore.c b/drivers/staging/dst/dcore.c
index ee1601026fb0..c24e4e0367a2 100644
--- a/drivers/staging/dst/dcore.c
+++ b/drivers/staging/dst/dcore.c
@@ -102,7 +102,7 @@ static int dst_request(struct request_queue *q, struct bio *bio)
102 struct dst_node *n = q->queuedata; 102 struct dst_node *n = q->queuedata;
103 int err = -EIO; 103 int err = -EIO;
104 104
105 if (bio_empty_barrier(bio) && !q->prepare_discard_fn) { 105 if (bio_empty_barrier(bio) && !blk_queue_discard(q)) {
106 /* 106 /*
107 * This is a dirty^Wnice hack, but if we complete this 107 * This is a dirty^Wnice hack, but if we complete this
108 * operation with -EOPNOTSUPP like intended, XFS 108 * operation with -EOPNOTSUPP like intended, XFS
diff --git a/drivers/staging/iio/light/tsl2561.c b/drivers/staging/iio/light/tsl2561.c
index ea8a5efc19bc..fc2107f4c049 100644
--- a/drivers/staging/iio/light/tsl2561.c
+++ b/drivers/staging/iio/light/tsl2561.c
@@ -239,10 +239,6 @@ static int __devexit tsl2561_remove(struct i2c_client *client)
239 return tsl2561_powerdown(client); 239 return tsl2561_powerdown(client);
240} 240}
241 241
242static unsigned short normal_i2c[] = { 0x29, 0x39, 0x49, I2C_CLIENT_END };
243
244I2C_CLIENT_INSMOD;
245
246static const struct i2c_device_id tsl2561_id[] = { 242static const struct i2c_device_id tsl2561_id[] = {
247 { "tsl2561", 0 }, 243 { "tsl2561", 0 },
248 { } 244 { }
diff --git a/drivers/staging/netwave/Kconfig b/drivers/staging/netwave/Kconfig
new file mode 100644
index 000000000000..c0c996c0550a
--- /dev/null
+++ b/drivers/staging/netwave/Kconfig
@@ -0,0 +1,11 @@
1config PCMCIA_NETWAVE
2 tristate "Xircom Netwave AirSurfer Pcmcia wireless support"
3 depends on PCMCIA
4 select WIRELESS_EXT
5 select WEXT_PRIV
6 help
7 Say Y here if you intend to attach this type of PCMCIA (PC-card)
8 wireless Ethernet networking card to your computer.
9
10 To compile this driver as a module, choose M here: the module will be
11 called netwave_cs. If unsure, say N.
diff --git a/drivers/staging/netwave/Makefile b/drivers/staging/netwave/Makefile
new file mode 100644
index 000000000000..2ab89de59b9b
--- /dev/null
+++ b/drivers/staging/netwave/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_PCMCIA_NETWAVE) += netwave_cs.o
diff --git a/drivers/staging/netwave/TODO b/drivers/staging/netwave/TODO
new file mode 100644
index 000000000000..9bd15a2f6d9e
--- /dev/null
+++ b/drivers/staging/netwave/TODO
@@ -0,0 +1,7 @@
1TODO:
2 - step up and maintain this driver to ensure that it continues
3 to work. Having the hardware for this is pretty much a
4 requirement. If this does not happen, the will be removed in
5 the 2.6.35 kernel release.
6
7Please send patches to Greg Kroah-Hartman <greg@kroah.com>.
diff --git a/drivers/net/wireless/netwave_cs.c b/drivers/staging/netwave/netwave_cs.c
index 9498b46c99a4..9498b46c99a4 100644
--- a/drivers/net/wireless/netwave_cs.c
+++ b/drivers/staging/netwave/netwave_cs.c
diff --git a/drivers/staging/rtl8187se/Kconfig b/drivers/staging/rtl8187se/Kconfig
index 236e42725447..faf6c6087414 100644
--- a/drivers/staging/rtl8187se/Kconfig
+++ b/drivers/staging/rtl8187se/Kconfig
@@ -1,6 +1,7 @@
1config RTL8187SE 1config RTL8187SE
2 tristate "RealTek RTL8187SE Wireless LAN NIC driver" 2 tristate "RealTek RTL8187SE Wireless LAN NIC driver"
3 depends on PCI 3 depends on PCI
4 depends on WIRELESS_EXT 4 select WIRELESS_EXT
5 select WEXT_PRIV
5 default N 6 default N
6 ---help--- 7 ---help---
diff --git a/drivers/staging/rtl8192e/Kconfig b/drivers/staging/rtl8192e/Kconfig
index 3100aa58c940..5c077b9fdc77 100644
--- a/drivers/staging/rtl8192e/Kconfig
+++ b/drivers/staging/rtl8192e/Kconfig
@@ -1,6 +1,7 @@
1config RTL8192E 1config RTL8192E
2 tristate "RealTek RTL8192E Wireless LAN NIC driver" 2 tristate "RealTek RTL8192E Wireless LAN NIC driver"
3 depends on PCI 3 depends on PCI
4 depends on WIRELESS_EXT 4 select WIRELESS_EXT
5 select WEXT_PRIV
5 default N 6 default N
6 ---help--- 7 ---help---
diff --git a/drivers/staging/strip/Kconfig b/drivers/staging/strip/Kconfig
new file mode 100644
index 000000000000..36257b5cd6e1
--- /dev/null
+++ b/drivers/staging/strip/Kconfig
@@ -0,0 +1,22 @@
1config STRIP
2 tristate "STRIP (Metricom starmode radio IP)"
3 depends on INET
4 select WIRELESS_EXT
5 ---help---
6 Say Y if you have a Metricom radio and intend to use Starmode Radio
7 IP. STRIP is a radio protocol developed for the MosquitoNet project
8 to send Internet traffic using Metricom radios. Metricom radios are
9 small, battery powered, 100kbit/sec packet radio transceivers, about
10 the size and weight of a cellular telephone. (You may also have heard
11 them called "Metricom modems" but we avoid the term "modem" because
12 it misleads many people into thinking that you can plug a Metricom
13 modem into a phone line and use it as a modem.)
14
15 You can use STRIP on any Linux machine with a serial port, although
16 it is obviously most useful for people with laptop computers. If you
17 think you might get a Metricom radio in the future, there is no harm
18 in saying Y to STRIP now, except that it makes the kernel a bit
19 bigger.
20
21 To compile this as a module, choose M here: the module will be
22 called strip.
diff --git a/drivers/staging/strip/Makefile b/drivers/staging/strip/Makefile
new file mode 100644
index 000000000000..6417bdcac2fb
--- /dev/null
+++ b/drivers/staging/strip/Makefile
@@ -0,0 +1 @@
obj-$(CONFIG_STRIP) += strip.o
diff --git a/drivers/staging/strip/TODO b/drivers/staging/strip/TODO
new file mode 100644
index 000000000000..9bd15a2f6d9e
--- /dev/null
+++ b/drivers/staging/strip/TODO
@@ -0,0 +1,7 @@
1TODO:
2 - step up and maintain this driver to ensure that it continues
3 to work. Having the hardware for this is pretty much a
4 requirement. If this does not happen, the will be removed in
5 the 2.6.35 kernel release.
6
7Please send patches to Greg Kroah-Hartman <greg@kroah.com>.
diff --git a/drivers/net/wireless/strip.c b/drivers/staging/strip/strip.c
index ea6a87c19319..698aade79d40 100644
--- a/drivers/net/wireless/strip.c
+++ b/drivers/staging/strip/strip.c
@@ -106,6 +106,7 @@ static const char StripVersion[] = "1.3A-STUART.CHESHIRE";
106#include <linux/serial.h> 106#include <linux/serial.h>
107#include <linux/serialP.h> 107#include <linux/serialP.h>
108#include <linux/rcupdate.h> 108#include <linux/rcupdate.h>
109#include <linux/compat.h>
109#include <net/arp.h> 110#include <net/arp.h>
110#include <net/net_namespace.h> 111#include <net/net_namespace.h>
111 112
@@ -2725,6 +2726,19 @@ static int strip_ioctl(struct tty_struct *tty, struct file *file,
2725 return 0; 2726 return 0;
2726} 2727}
2727 2728
2729#ifdef CONFIG_COMPAT
2730static long strip_compat_ioctl(struct tty_struct *tty, struct file *file,
2731 unsigned int cmd, unsigned long arg)
2732{
2733 switch (cmd) {
2734 case SIOCGIFNAME:
2735 case SIOCSIFHWADDR:
2736 return strip_ioctl(tty, file, cmd,
2737 (unsigned long)compat_ptr(arg));
2738 }
2739 return -ENOIOCTLCMD;
2740}
2741#endif
2728 2742
2729/************************************************************************/ 2743/************************************************************************/
2730/* Initialization */ 2744/* Initialization */
@@ -2736,6 +2750,9 @@ static struct tty_ldisc_ops strip_ldisc = {
2736 .open = strip_open, 2750 .open = strip_open,
2737 .close = strip_close, 2751 .close = strip_close,
2738 .ioctl = strip_ioctl, 2752 .ioctl = strip_ioctl,
2753#ifdef CONFIG_COMPAT
2754 .compat_ioctl = strip_compat_ioctl,
2755#endif
2739 .receive_buf = strip_receive_buf, 2756 .receive_buf = strip_receive_buf,
2740 .write_wakeup = strip_write_some_more, 2757 .write_wakeup = strip_write_some_more,
2741}; 2758};
diff --git a/drivers/staging/vt6655/Kconfig b/drivers/staging/vt6655/Kconfig
index 9bec95adcce2..825bbc4fc3fa 100644
--- a/drivers/staging/vt6655/Kconfig
+++ b/drivers/staging/vt6655/Kconfig
@@ -1,6 +1,8 @@
1config VT6655 1config VT6655
2 tristate "VIA Technologies VT6655 support" 2 tristate "VIA Technologies VT6655 support"
3 depends on WIRELESS_EXT && PCI 3 depends on PCI
4 select WIRELESS_EXT
5 select WEXT_PRIV
4 ---help--- 6 ---help---
5 This is a vendor-written driver for VIA VT6655. 7 This is a vendor-written driver for VIA VT6655.
6 8
diff --git a/drivers/staging/vt6656/Kconfig b/drivers/staging/vt6656/Kconfig
index 3165f2c42079..87bcd269310c 100644
--- a/drivers/staging/vt6656/Kconfig
+++ b/drivers/staging/vt6656/Kconfig
@@ -1,6 +1,8 @@
1config VT6656 1config VT6656
2 tristate "VIA Technologies VT6656 support" 2 tristate "VIA Technologies VT6656 support"
3 depends on WIRELESS_EXT && USB 3 depends on USB
4 select WIRELESS_EXT
5 select WEXT_PRIV
4 ---help--- 6 ---help---
5 This is a vendor-written driver for VIA VT6656. 7 This is a vendor-written driver for VIA VT6656.
6 8
diff --git a/drivers/staging/wavelan/Kconfig b/drivers/staging/wavelan/Kconfig
new file mode 100644
index 000000000000..786060e025c0
--- /dev/null
+++ b/drivers/staging/wavelan/Kconfig
@@ -0,0 +1,38 @@
1config WAVELAN
2 tristate "AT&T/Lucent old WaveLAN & DEC RoamAbout DS ISA support"
3 depends on ISA
4 select WIRELESS_EXT
5 select WEXT_SPY
6 select WEXT_PRIV
7 ---help---
8 The Lucent WaveLAN (formerly NCR and AT&T; or DEC RoamAbout DS) is
9 a Radio LAN (wireless Ethernet-like Local Area Network) using the
10 radio frequencies 900 MHz and 2.4 GHz.
11
12 If you want to use an ISA WaveLAN card under Linux, say Y and read
13 the Ethernet-HOWTO, available from
14 <http://www.tldp.org/docs.html#howto>. Some more specific
15 information is contained in
16 <file:Documentation/networking/wavelan.txt> and in the source code
17 <file:drivers/net/wireless/wavelan.p.h>.
18
19 You will also need the wireless tools package available from
20 <http://www.hpl.hp.com/personal/Jean_Tourrilhes/Linux/Tools.html>.
21 Please read the man pages contained therein.
22
23 To compile this driver as a module, choose M here: the module will be
24 called wavelan.
25
26config PCMCIA_WAVELAN
27 tristate "AT&T/Lucent old WaveLAN Pcmcia wireless support"
28 depends on PCMCIA
29 select WIRELESS_EXT
30 select WEXT_SPY
31 select WEXT_PRIV
32 help
33 Say Y here if you intend to attach an AT&T/Lucent Wavelan PCMCIA
34 (PC-card) wireless Ethernet networking card to your computer. This
35 driver is for the non-IEEE-802.11 Wavelan cards.
36
37 To compile this driver as a module, choose M here: the module will be
38 called wavelan_cs. If unsure, say N.
diff --git a/drivers/staging/wavelan/Makefile b/drivers/staging/wavelan/Makefile
new file mode 100644
index 000000000000..1cde17c69a43
--- /dev/null
+++ b/drivers/staging/wavelan/Makefile
@@ -0,0 +1,2 @@
1obj-$(CONFIG_WAVELAN) += wavelan.o
2obj-$(CONFIG_PCMCIA_WAVELAN) += wavelan_cs.o
diff --git a/drivers/staging/wavelan/TODO b/drivers/staging/wavelan/TODO
new file mode 100644
index 000000000000..9bd15a2f6d9e
--- /dev/null
+++ b/drivers/staging/wavelan/TODO
@@ -0,0 +1,7 @@
1TODO:
2 - step up and maintain this driver to ensure that it continues
3 to work. Having the hardware for this is pretty much a
4 requirement. If this does not happen, the will be removed in
5 the 2.6.35 kernel release.
6
7Please send patches to Greg Kroah-Hartman <greg@kroah.com>.
diff --git a/drivers/net/wireless/i82586.h b/drivers/staging/wavelan/i82586.h
index 5f65b250646f..5f65b250646f 100644
--- a/drivers/net/wireless/i82586.h
+++ b/drivers/staging/wavelan/i82586.h
diff --git a/drivers/net/wireless/i82593.h b/drivers/staging/wavelan/i82593.h
index afac5c7a323d..afac5c7a323d 100644
--- a/drivers/net/wireless/i82593.h
+++ b/drivers/staging/wavelan/i82593.h
diff --git a/drivers/net/wireless/wavelan.c b/drivers/staging/wavelan/wavelan.c
index d634b2da3b84..d634b2da3b84 100644
--- a/drivers/net/wireless/wavelan.c
+++ b/drivers/staging/wavelan/wavelan.c
diff --git a/drivers/net/wireless/wavelan.h b/drivers/staging/wavelan/wavelan.h
index 9ab360558ffd..9ab360558ffd 100644
--- a/drivers/net/wireless/wavelan.h
+++ b/drivers/staging/wavelan/wavelan.h
diff --git a/drivers/net/wireless/wavelan.p.h b/drivers/staging/wavelan/wavelan.p.h
index dbe8de6e5f52..dbe8de6e5f52 100644
--- a/drivers/net/wireless/wavelan.p.h
+++ b/drivers/staging/wavelan/wavelan.p.h
diff --git a/drivers/net/wireless/wavelan_cs.c b/drivers/staging/wavelan/wavelan_cs.c
index 431a20ec6db6..431a20ec6db6 100644
--- a/drivers/net/wireless/wavelan_cs.c
+++ b/drivers/staging/wavelan/wavelan_cs.c
diff --git a/drivers/net/wireless/wavelan_cs.h b/drivers/staging/wavelan/wavelan_cs.h
index 2e4bfe4147c6..2e4bfe4147c6 100644
--- a/drivers/net/wireless/wavelan_cs.h
+++ b/drivers/staging/wavelan/wavelan_cs.h
diff --git a/drivers/net/wireless/wavelan_cs.p.h b/drivers/staging/wavelan/wavelan_cs.p.h
index 81d91531c4f9..81d91531c4f9 100644
--- a/drivers/net/wireless/wavelan_cs.p.h
+++ b/drivers/staging/wavelan/wavelan_cs.p.h
diff --git a/drivers/uio/uio.c b/drivers/uio/uio.c
index 03efb065455f..a9d707047202 100644
--- a/drivers/uio/uio.c
+++ b/drivers/uio/uio.c
@@ -658,7 +658,7 @@ static int uio_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
658 return 0; 658 return 0;
659} 659}
660 660
661static struct vm_operations_struct uio_vm_ops = { 661static const struct vm_operations_struct uio_vm_ops = {
662 .open = uio_vma_open, 662 .open = uio_vma_open,
663 .close = uio_vma_close, 663 .close = uio_vma_close,
664 .fault = uio_vma_fault, 664 .fault = uio_vma_fault,
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index 333ee02e7b2b..864f0ba6a344 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -993,7 +993,7 @@ skip_io_on_zombie:
993 return retval; 993 return retval;
994} 994}
995 995
996static struct file_operations fops = { 996static const struct file_operations fops = {
997 .owner = THIS_MODULE, 997 .owner = THIS_MODULE,
998 .read = usbtmc_read, 998 .read = usbtmc_read,
999 .write = usbtmc_write, 999 .write = usbtmc_write,
diff --git a/drivers/usb/gadget/inode.c b/drivers/usb/gadget/inode.c
index c44367fea185..bf0f6520c6df 100644
--- a/drivers/usb/gadget/inode.c
+++ b/drivers/usb/gadget/inode.c
@@ -30,6 +30,7 @@
30#include <linux/wait.h> 30#include <linux/wait.h>
31#include <linux/compiler.h> 31#include <linux/compiler.h>
32#include <asm/uaccess.h> 32#include <asm/uaccess.h>
33#include <linux/sched.h>
33#include <linux/slab.h> 34#include <linux/slab.h>
34#include <linux/poll.h> 35#include <linux/poll.h>
35#include <linux/smp_lock.h> 36#include <linux/smp_lock.h>
diff --git a/drivers/usb/gadget/printer.c b/drivers/usb/gadget/printer.c
index 29500154d00c..2d867fd22413 100644
--- a/drivers/usb/gadget/printer.c
+++ b/drivers/usb/gadget/printer.c
@@ -875,7 +875,7 @@ printer_ioctl(struct file *fd, unsigned int code, unsigned long arg)
875} 875}
876 876
877/* used after endpoint configuration */ 877/* used after endpoint configuration */
878static struct file_operations printer_io_operations = { 878static const struct file_operations printer_io_operations = {
879 .owner = THIS_MODULE, 879 .owner = THIS_MODULE,
880 .open = printer_open, 880 .open = printer_open,
881 .read = printer_read, 881 .read = printer_read,
diff --git a/drivers/usb/host/ohci-pxa27x.c b/drivers/usb/host/ohci-pxa27x.c
index b5294a9344de..f1c06202fdf2 100644
--- a/drivers/usb/host/ohci-pxa27x.c
+++ b/drivers/usb/host/ohci-pxa27x.c
@@ -481,38 +481,47 @@ static int ohci_hcd_pxa27x_drv_remove(struct platform_device *pdev)
481 return 0; 481 return 0;
482} 482}
483 483
484#ifdef CONFIG_PM 484#ifdef CONFIG_PM
485static int ohci_hcd_pxa27x_drv_suspend(struct platform_device *pdev, pm_message_t state) 485static int ohci_hcd_pxa27x_drv_suspend(struct device *dev)
486{ 486{
487 struct usb_hcd *hcd = platform_get_drvdata(pdev); 487 struct usb_hcd *hcd = dev_get_drvdata(dev);
488 struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd); 488 struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd);
489 489
490 if (time_before(jiffies, ohci->ohci.next_statechange)) 490 if (time_before(jiffies, ohci->ohci.next_statechange))
491 msleep(5); 491 msleep(5);
492 ohci->ohci.next_statechange = jiffies; 492 ohci->ohci.next_statechange = jiffies;
493 493
494 pxa27x_stop_hc(ohci, &pdev->dev); 494 pxa27x_stop_hc(ohci, dev);
495 hcd->state = HC_STATE_SUSPENDED; 495 hcd->state = HC_STATE_SUSPENDED;
496 496
497 return 0; 497 return 0;
498} 498}
499 499
500static int ohci_hcd_pxa27x_drv_resume(struct platform_device *pdev) 500static int ohci_hcd_pxa27x_drv_resume(struct device *dev)
501{ 501{
502 struct usb_hcd *hcd = platform_get_drvdata(pdev); 502 struct usb_hcd *hcd = dev_get_drvdata(dev);
503 struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd); 503 struct pxa27x_ohci *ohci = to_pxa27x_ohci(hcd);
504 struct pxaohci_platform_data *inf = dev->platform_data;
504 int status; 505 int status;
505 506
506 if (time_before(jiffies, ohci->ohci.next_statechange)) 507 if (time_before(jiffies, ohci->ohci.next_statechange))
507 msleep(5); 508 msleep(5);
508 ohci->ohci.next_statechange = jiffies; 509 ohci->ohci.next_statechange = jiffies;
509 510
510 if ((status = pxa27x_start_hc(ohci, &pdev->dev)) < 0) 511 if ((status = pxa27x_start_hc(ohci, dev)) < 0)
511 return status; 512 return status;
512 513
514 /* Select Power Management Mode */
515 pxa27x_ohci_select_pmm(ohci, inf->port_mode);
516
513 ohci_finish_controller_resume(hcd); 517 ohci_finish_controller_resume(hcd);
514 return 0; 518 return 0;
515} 519}
520
521static struct dev_pm_ops ohci_hcd_pxa27x_pm_ops = {
522 .suspend = ohci_hcd_pxa27x_drv_suspend,
523 .resume = ohci_hcd_pxa27x_drv_resume,
524};
516#endif 525#endif
517 526
518/* work with hotplug and coldplug */ 527/* work with hotplug and coldplug */
@@ -522,13 +531,12 @@ static struct platform_driver ohci_hcd_pxa27x_driver = {
522 .probe = ohci_hcd_pxa27x_drv_probe, 531 .probe = ohci_hcd_pxa27x_drv_probe,
523 .remove = ohci_hcd_pxa27x_drv_remove, 532 .remove = ohci_hcd_pxa27x_drv_remove,
524 .shutdown = usb_hcd_platform_shutdown, 533 .shutdown = usb_hcd_platform_shutdown,
525#ifdef CONFIG_PM
526 .suspend = ohci_hcd_pxa27x_drv_suspend,
527 .resume = ohci_hcd_pxa27x_drv_resume,
528#endif
529 .driver = { 534 .driver = {
530 .name = "pxa27x-ohci", 535 .name = "pxa27x-ohci",
531 .owner = THIS_MODULE, 536 .owner = THIS_MODULE,
537#ifdef CONFIG_PM
538 .pm = &ohci_hcd_pxa27x_pm_ops,
539#endif
532 }, 540 },
533}; 541};
534 542
diff --git a/drivers/usb/host/whci/debug.c b/drivers/usb/host/whci/debug.c
index cf2d45946c57..2273c815941f 100644
--- a/drivers/usb/host/whci/debug.c
+++ b/drivers/usb/host/whci/debug.c
@@ -134,7 +134,7 @@ static int pzl_open(struct inode *inode, struct file *file)
134 return single_open(file, pzl_print, inode->i_private); 134 return single_open(file, pzl_print, inode->i_private);
135} 135}
136 136
137static struct file_operations di_fops = { 137static const struct file_operations di_fops = {
138 .open = di_open, 138 .open = di_open,
139 .read = seq_read, 139 .read = seq_read,
140 .llseek = seq_lseek, 140 .llseek = seq_lseek,
@@ -142,7 +142,7 @@ static struct file_operations di_fops = {
142 .owner = THIS_MODULE, 142 .owner = THIS_MODULE,
143}; 143};
144 144
145static struct file_operations asl_fops = { 145static const struct file_operations asl_fops = {
146 .open = asl_open, 146 .open = asl_open,
147 .read = seq_read, 147 .read = seq_read,
148 .llseek = seq_lseek, 148 .llseek = seq_lseek,
@@ -150,7 +150,7 @@ static struct file_operations asl_fops = {
150 .owner = THIS_MODULE, 150 .owner = THIS_MODULE,
151}; 151};
152 152
153static struct file_operations pzl_fops = { 153static const struct file_operations pzl_fops = {
154 .open = pzl_open, 154 .open = pzl_open,
155 .read = seq_read, 155 .read = seq_read,
156 .llseek = seq_lseek, 156 .llseek = seq_lseek,
diff --git a/drivers/usb/misc/rio500.c b/drivers/usb/misc/rio500.c
index d645f3899fe1..32d0199d0c32 100644
--- a/drivers/usb/misc/rio500.c
+++ b/drivers/usb/misc/rio500.c
@@ -429,8 +429,7 @@ read_rio(struct file *file, char __user *buffer, size_t count, loff_t * ppos)
429 return read_count; 429 return read_count;
430} 430}
431 431
432static struct 432static const struct file_operations usb_rio_fops = {
433file_operations usb_rio_fops = {
434 .owner = THIS_MODULE, 433 .owner = THIS_MODULE,
435 .read = read_rio, 434 .read = read_rio,
436 .write = write_rio, 435 .write = write_rio,
diff --git a/drivers/usb/misc/sisusbvga/sisusb_init.c b/drivers/usb/misc/sisusbvga/sisusb_init.c
index 273de5d0934e..0ab990744830 100644
--- a/drivers/usb/misc/sisusbvga/sisusb_init.c
+++ b/drivers/usb/misc/sisusbvga/sisusb_init.c
@@ -43,7 +43,6 @@
43#include <linux/init.h> 43#include <linux/init.h>
44#include <linux/slab.h> 44#include <linux/slab.h>
45#include <linux/spinlock.h> 45#include <linux/spinlock.h>
46#include <linux/kref.h>
47 46
48#include "sisusb.h" 47#include "sisusb.h"
49 48
diff --git a/drivers/usb/mon/mon_bin.c b/drivers/usb/mon/mon_bin.c
index dfdc43e2e00d..9ed3e741bee1 100644
--- a/drivers/usb/mon/mon_bin.c
+++ b/drivers/usb/mon/mon_bin.c
@@ -1174,7 +1174,7 @@ static int mon_bin_vma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
1174 return 0; 1174 return 0;
1175} 1175}
1176 1176
1177static struct vm_operations_struct mon_bin_vm_ops = { 1177static const struct vm_operations_struct mon_bin_vm_ops = {
1178 .open = mon_bin_vma_open, 1178 .open = mon_bin_vma_open,
1179 .close = mon_bin_vma_close, 1179 .close = mon_bin_vma_close,
1180 .fault = mon_bin_vma_fault, 1180 .fault = mon_bin_vma_fault,
diff --git a/drivers/usb/serial/usb-serial.c b/drivers/usb/serial/usb-serial.c
index ff75a3589e7e..aa6b2ae951ae 100644
--- a/drivers/usb/serial/usb-serial.c
+++ b/drivers/usb/serial/usb-serial.c
@@ -192,7 +192,7 @@ void usb_serial_put(struct usb_serial *serial)
192 * This is the first place a new tty gets used. Hence this is where we 192 * This is the first place a new tty gets used. Hence this is where we
193 * acquire references to the usb_serial structure and the driver module, 193 * acquire references to the usb_serial structure and the driver module,
194 * where we store a pointer to the port, and where we do an autoresume. 194 * where we store a pointer to the port, and where we do an autoresume.
195 * All these actions are reversed in serial_release(). 195 * All these actions are reversed in serial_cleanup().
196 */ 196 */
197static int serial_install(struct tty_driver *driver, struct tty_struct *tty) 197static int serial_install(struct tty_driver *driver, struct tty_struct *tty)
198{ 198{
@@ -339,15 +339,16 @@ static void serial_close(struct tty_struct *tty, struct file *filp)
339} 339}
340 340
341/** 341/**
342 * serial_release - free resources post close/hangup 342 * serial_cleanup - free resources post close/hangup
343 * @port: port to free up 343 * @port: port to free up
344 * 344 *
345 * Do the resource freeing and refcount dropping for the port. 345 * Do the resource freeing and refcount dropping for the port.
346 * Avoid freeing the console. 346 * Avoid freeing the console.
347 * 347 *
348 * Called when the last tty kref is dropped. 348 * Called asynchronously after the last tty kref is dropped,
349 * and the tty layer has already done the tty_shutdown(tty);
349 */ 350 */
350static void serial_release(struct tty_struct *tty) 351static void serial_cleanup(struct tty_struct *tty)
351{ 352{
352 struct usb_serial_port *port = tty->driver_data; 353 struct usb_serial_port *port = tty->driver_data;
353 struct usb_serial *serial; 354 struct usb_serial *serial;
@@ -361,9 +362,6 @@ static void serial_release(struct tty_struct *tty)
361 362
362 dbg("%s - port %d", __func__, port->number); 363 dbg("%s - port %d", __func__, port->number);
363 364
364 /* Standard shutdown processing */
365 tty_shutdown(tty);
366
367 tty->driver_data = NULL; 365 tty->driver_data = NULL;
368 366
369 serial = port->serial; 367 serial = port->serial;
@@ -1210,7 +1208,7 @@ static const struct tty_operations serial_ops = {
1210 .chars_in_buffer = serial_chars_in_buffer, 1208 .chars_in_buffer = serial_chars_in_buffer,
1211 .tiocmget = serial_tiocmget, 1209 .tiocmget = serial_tiocmget,
1212 .tiocmset = serial_tiocmset, 1210 .tiocmset = serial_tiocmset,
1213 .shutdown = serial_release, 1211 .cleanup = serial_cleanup,
1214 .install = serial_install, 1212 .install = serial_install,
1215 .proc_fops = &serial_proc_fops, 1213 .proc_fops = &serial_proc_fops,
1216}; 1214};
diff --git a/drivers/uwb/uwb-debug.c b/drivers/uwb/uwb-debug.c
index 4a42993700c1..2eecec0c13c9 100644
--- a/drivers/uwb/uwb-debug.c
+++ b/drivers/uwb/uwb-debug.c
@@ -205,7 +205,7 @@ static ssize_t command_write(struct file *file, const char __user *buf,
205 return ret < 0 ? ret : len; 205 return ret < 0 ? ret : len;
206} 206}
207 207
208static struct file_operations command_fops = { 208static const struct file_operations command_fops = {
209 .open = command_open, 209 .open = command_open,
210 .write = command_write, 210 .write = command_write,
211 .read = NULL, 211 .read = NULL,
@@ -255,7 +255,7 @@ static int reservations_open(struct inode *inode, struct file *file)
255 return single_open(file, reservations_print, inode->i_private); 255 return single_open(file, reservations_print, inode->i_private);
256} 256}
257 257
258static struct file_operations reservations_fops = { 258static const struct file_operations reservations_fops = {
259 .open = reservations_open, 259 .open = reservations_open,
260 .read = seq_read, 260 .read = seq_read,
261 .llseek = seq_lseek, 261 .llseek = seq_lseek,
@@ -283,7 +283,7 @@ static int drp_avail_open(struct inode *inode, struct file *file)
283 return single_open(file, drp_avail_print, inode->i_private); 283 return single_open(file, drp_avail_print, inode->i_private);
284} 284}
285 285
286static struct file_operations drp_avail_fops = { 286static const struct file_operations drp_avail_fops = {
287 .open = drp_avail_open, 287 .open = drp_avail_open,
288 .read = seq_read, 288 .read = seq_read,
289 .llseek = seq_lseek, 289 .llseek = seq_lseek,
diff --git a/drivers/video/backlight/Kconfig b/drivers/video/backlight/Kconfig
index 90861cd93165..09bfa9662e4d 100644
--- a/drivers/video/backlight/Kconfig
+++ b/drivers/video/backlight/Kconfig
@@ -31,6 +31,13 @@ config LCD_CORGI
31 Say y here to support the LCD panels usually found on SHARP 31 Say y here to support the LCD panels usually found on SHARP
32 corgi (C7x0) and spitz (Cxx00) models. 32 corgi (C7x0) and spitz (Cxx00) models.
33 33
34config LCD_LMS283GF05
35 tristate "Samsung LMS283GF05 LCD"
36 depends on LCD_CLASS_DEVICE && SPI_MASTER && GENERIC_GPIO
37 help
38 SPI driver for Samsung LMS283GF05. This provides basic support
39 for powering the LCD up/down through a sysfs interface.
40
34config LCD_LTV350QV 41config LCD_LTV350QV
35 tristate "Samsung LTV350QV LCD Panel" 42 tristate "Samsung LTV350QV LCD Panel"
36 depends on LCD_CLASS_DEVICE && SPI_MASTER 43 depends on LCD_CLASS_DEVICE && SPI_MASTER
@@ -229,3 +236,29 @@ config BACKLIGHT_SAHARA
229 help 236 help
230 If you have a Tabletkiosk Sahara Touch-iT, say y to enable the 237 If you have a Tabletkiosk Sahara Touch-iT, say y to enable the
231 backlight driver. 238 backlight driver.
239
240config BACKLIGHT_WM831X
241 tristate "WM831x PMIC Backlight Driver"
242 depends on BACKLIGHT_CLASS_DEVICE && MFD_WM831X
243 help
244 If you have a backlight driven by the ISINK and DCDC of a
245 WM831x PMIC say y to enable the backlight driver for it.
246
247config BACKLIGHT_ADX
248 tristate "Avionic Design Xanthos Backlight Driver"
249 depends on BACKLIGHT_CLASS_DEVICE && ARCH_PXA_ADX
250 default y
251 help
252 Say Y to enable the backlight driver on Avionic Design Xanthos-based
253 boards.
254
255config BACKLIGHT_ADP5520
256 tristate "Backlight Driver for ADP5520/ADP5501 using WLED"
257 depends on BACKLIGHT_CLASS_DEVICE && PMIC_ADP5520
258 help
259 If you have a LCD backlight connected to the BST/BL_SNK output of
260 ADP5520 or ADP5501, say Y here to enable this driver.
261
262 To compile this driver as a module, choose M here: the module will
263 be called adp5520_bl.
264
diff --git a/drivers/video/backlight/Makefile b/drivers/video/backlight/Makefile
index 4eb178c1d684..9a405548874c 100644
--- a/drivers/video/backlight/Makefile
+++ b/drivers/video/backlight/Makefile
@@ -3,6 +3,7 @@
3obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o 3obj-$(CONFIG_LCD_CLASS_DEVICE) += lcd.o
4obj-$(CONFIG_LCD_CORGI) += corgi_lcd.o 4obj-$(CONFIG_LCD_CORGI) += corgi_lcd.o
5obj-$(CONFIG_LCD_HP700) += jornada720_lcd.o 5obj-$(CONFIG_LCD_HP700) += jornada720_lcd.o
6obj-$(CONFIG_LCD_LMS283GF05) += lms283gf05.o
6obj-$(CONFIG_LCD_LTV350QV) += ltv350qv.o 7obj-$(CONFIG_LCD_LTV350QV) += ltv350qv.o
7obj-$(CONFIG_LCD_ILI9320) += ili9320.o 8obj-$(CONFIG_LCD_ILI9320) += ili9320.o
8obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o 9obj-$(CONFIG_LCD_PLATFORM) += platform_lcd.o
@@ -24,4 +25,7 @@ obj-$(CONFIG_BACKLIGHT_DA903X) += da903x_bl.o
24obj-$(CONFIG_BACKLIGHT_MBP_NVIDIA) += mbp_nvidia_bl.o 25obj-$(CONFIG_BACKLIGHT_MBP_NVIDIA) += mbp_nvidia_bl.o
25obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o 26obj-$(CONFIG_BACKLIGHT_TOSA) += tosa_bl.o
26obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o 27obj-$(CONFIG_BACKLIGHT_SAHARA) += kb3886_bl.o
28obj-$(CONFIG_BACKLIGHT_WM831X) += wm831x_bl.o
29obj-$(CONFIG_BACKLIGHT_ADX) += adx_bl.o
30obj-$(CONFIG_BACKLIGHT_ADP5520) += adp5520_bl.o
27 31
diff --git a/drivers/video/backlight/adp5520_bl.c b/drivers/video/backlight/adp5520_bl.c
new file mode 100644
index 000000000000..ad05da5ba3c7
--- /dev/null
+++ b/drivers/video/backlight/adp5520_bl.c
@@ -0,0 +1,377 @@
1/*
2 * Backlight driver for Analog Devices ADP5520/ADP5501 MFD PMICs
3 *
4 * Copyright 2009 Analog Devices Inc.
5 *
6 * Licensed under the GPL-2 or later.
7 */
8
9#include <linux/kernel.h>
10#include <linux/init.h>
11#include <linux/platform_device.h>
12#include <linux/fb.h>
13#include <linux/backlight.h>
14#include <linux/mfd/adp5520.h>
15
16struct adp5520_bl {
17 struct device *master;
18 struct adp5520_backlight_platfrom_data *pdata;
19 struct mutex lock;
20 unsigned long cached_daylight_max;
21 int id;
22 int current_brightness;
23};
24
25static int adp5520_bl_set(struct backlight_device *bl, int brightness)
26{
27 struct adp5520_bl *data = bl_get_data(bl);
28 struct device *master = data->master;
29 int ret = 0;
30
31 if (data->pdata->en_ambl_sens) {
32 if ((brightness > 0) && (brightness < ADP5020_MAX_BRIGHTNESS)) {
33 /* Disable Ambient Light auto adjust */
34 ret |= adp5520_clr_bits(master, BL_CONTROL,
35 BL_AUTO_ADJ);
36 ret |= adp5520_write(master, DAYLIGHT_MAX, brightness);
37 } else {
38 /*
39 * MAX_BRIGHTNESS -> Enable Ambient Light auto adjust
40 * restore daylight l3 sysfs brightness
41 */
42 ret |= adp5520_write(master, DAYLIGHT_MAX,
43 data->cached_daylight_max);
44 ret |= adp5520_set_bits(master, BL_CONTROL,
45 BL_AUTO_ADJ);
46 }
47 } else {
48 ret |= adp5520_write(master, DAYLIGHT_MAX, brightness);
49 }
50
51 if (data->current_brightness && brightness == 0)
52 ret |= adp5520_set_bits(master,
53 MODE_STATUS, DIM_EN);
54 else if (data->current_brightness == 0 && brightness)
55 ret |= adp5520_clr_bits(master,
56 MODE_STATUS, DIM_EN);
57
58 if (!ret)
59 data->current_brightness = brightness;
60
61 return ret;
62}
63
64static int adp5520_bl_update_status(struct backlight_device *bl)
65{
66 int brightness = bl->props.brightness;
67 if (bl->props.power != FB_BLANK_UNBLANK)
68 brightness = 0;
69
70 if (bl->props.fb_blank != FB_BLANK_UNBLANK)
71 brightness = 0;
72
73 return adp5520_bl_set(bl, brightness);
74}
75
76static int adp5520_bl_get_brightness(struct backlight_device *bl)
77{
78 struct adp5520_bl *data = bl_get_data(bl);
79 int error;
80 uint8_t reg_val;
81
82 error = adp5520_read(data->master, BL_VALUE, &reg_val);
83
84 return error ? data->current_brightness : reg_val;
85}
86
87static struct backlight_ops adp5520_bl_ops = {
88 .update_status = adp5520_bl_update_status,
89 .get_brightness = adp5520_bl_get_brightness,
90};
91
92static int adp5520_bl_setup(struct backlight_device *bl)
93{
94 struct adp5520_bl *data = bl_get_data(bl);
95 struct device *master = data->master;
96 struct adp5520_backlight_platfrom_data *pdata = data->pdata;
97 int ret = 0;
98
99 ret |= adp5520_write(master, DAYLIGHT_MAX, pdata->l1_daylight_max);
100 ret |= adp5520_write(master, DAYLIGHT_DIM, pdata->l1_daylight_dim);
101
102 if (pdata->en_ambl_sens) {
103 data->cached_daylight_max = pdata->l1_daylight_max;
104 ret |= adp5520_write(master, OFFICE_MAX, pdata->l2_office_max);
105 ret |= adp5520_write(master, OFFICE_DIM, pdata->l2_office_dim);
106 ret |= adp5520_write(master, DARK_MAX, pdata->l3_dark_max);
107 ret |= adp5520_write(master, DARK_DIM, pdata->l3_dark_dim);
108 ret |= adp5520_write(master, L2_TRIP, pdata->l2_trip);
109 ret |= adp5520_write(master, L2_HYS, pdata->l2_hyst);
110 ret |= adp5520_write(master, L3_TRIP, pdata->l3_trip);
111 ret |= adp5520_write(master, L3_HYS, pdata->l3_hyst);
112 ret |= adp5520_write(master, ALS_CMPR_CFG,
113 ALS_CMPR_CFG_VAL(pdata->abml_filt, L3_EN));
114 }
115
116 ret |= adp5520_write(master, BL_CONTROL,
117 BL_CTRL_VAL(pdata->fade_led_law, pdata->en_ambl_sens));
118
119 ret |= adp5520_write(master, BL_FADE, FADE_VAL(pdata->fade_in,
120 pdata->fade_out));
121
122 ret |= adp5520_set_bits(master, MODE_STATUS, BL_EN | DIM_EN);
123
124 return ret;
125}
126
127static ssize_t adp5520_show(struct device *dev, char *buf, int reg)
128{
129 struct adp5520_bl *data = dev_get_drvdata(dev);
130 int error;
131 uint8_t reg_val;
132
133 mutex_lock(&data->lock);
134 error = adp5520_read(data->master, reg, &reg_val);
135 mutex_unlock(&data->lock);
136
137 return sprintf(buf, "%u\n", reg_val);
138}
139
140static ssize_t adp5520_store(struct device *dev, const char *buf,
141 size_t count, int reg)
142{
143 struct adp5520_bl *data = dev_get_drvdata(dev);
144 unsigned long val;
145 int ret;
146
147 ret = strict_strtoul(buf, 10, &val);
148 if (ret)
149 return ret;
150
151 mutex_lock(&data->lock);
152 adp5520_write(data->master, reg, val);
153 mutex_unlock(&data->lock);
154
155 return count;
156}
157
158static ssize_t adp5520_bl_dark_max_show(struct device *dev,
159 struct device_attribute *attr, char *buf)
160{
161 return adp5520_show(dev, buf, DARK_MAX);
162}
163
164static ssize_t adp5520_bl_dark_max_store(struct device *dev,
165 struct device_attribute *attr, const char *buf, size_t count)
166{
167 return adp5520_store(dev, buf, count, DARK_MAX);
168}
169static DEVICE_ATTR(dark_max, 0664, adp5520_bl_dark_max_show,
170 adp5520_bl_dark_max_store);
171
172static ssize_t adp5520_bl_office_max_show(struct device *dev,
173 struct device_attribute *attr, char *buf)
174{
175 return adp5520_show(dev, buf, OFFICE_MAX);
176}
177
178static ssize_t adp5520_bl_office_max_store(struct device *dev,
179 struct device_attribute *attr, const char *buf, size_t count)
180{
181 return adp5520_store(dev, buf, count, OFFICE_MAX);
182}
183static DEVICE_ATTR(office_max, 0664, adp5520_bl_office_max_show,
184 adp5520_bl_office_max_store);
185
186static ssize_t adp5520_bl_daylight_max_show(struct device *dev,
187 struct device_attribute *attr, char *buf)
188{
189 return adp5520_show(dev, buf, DAYLIGHT_MAX);
190}
191
192static ssize_t adp5520_bl_daylight_max_store(struct device *dev,
193 struct device_attribute *attr, const char *buf, size_t count)
194{
195 struct adp5520_bl *data = dev_get_drvdata(dev);
196
197 strict_strtoul(buf, 10, &data->cached_daylight_max);
198 return adp5520_store(dev, buf, count, DAYLIGHT_MAX);
199}
200static DEVICE_ATTR(daylight_max, 0664, adp5520_bl_daylight_max_show,
201 adp5520_bl_daylight_max_store);
202
/* sysfs "dark_dim": dimmed brightness used in the dark ambient zone. */
static ssize_t adp5520_bl_dark_dim_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return adp5520_show(dev, buf, DARK_DIM);
}

static ssize_t adp5520_bl_dark_dim_store(struct device *dev,
				     struct device_attribute *attr,
				     const char *buf, size_t count)
{
	return adp5520_store(dev, buf, count, DARK_DIM);
}
static DEVICE_ATTR(dark_dim, 0664, adp5520_bl_dark_dim_show,
			adp5520_bl_dark_dim_store);
217
/* sysfs "office_dim": dimmed brightness used in the office ambient zone. */
static ssize_t adp5520_bl_office_dim_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return adp5520_show(dev, buf, OFFICE_DIM);
}

static ssize_t adp5520_bl_office_dim_store(struct device *dev,
				       struct device_attribute *attr,
				       const char *buf, size_t count)
{
	return adp5520_store(dev, buf, count, OFFICE_DIM);
}
static DEVICE_ATTR(office_dim, 0664, adp5520_bl_office_dim_show,
			adp5520_bl_office_dim_store);
232
/* sysfs "daylight_dim": dimmed brightness used in the daylight zone. */
static ssize_t adp5520_bl_daylight_dim_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return adp5520_show(dev, buf, DAYLIGHT_DIM);
}

static ssize_t adp5520_bl_daylight_dim_store(struct device *dev,
					 struct device_attribute *attr,
					 const char *buf, size_t count)
{
	return adp5520_store(dev, buf, count, DAYLIGHT_DIM);
}
static DEVICE_ATTR(daylight_dim, 0664, adp5520_bl_daylight_dim_show,
			adp5520_bl_daylight_dim_store);
247
/*
 * Ambient-light-zone tuning attributes; registered in probe() only when
 * platform data enables the ambient light sensor (en_ambl_sens).
 */
static struct attribute *adp5520_bl_attributes[] = {
	&dev_attr_dark_max.attr,
	&dev_attr_dark_dim.attr,
	&dev_attr_office_max.attr,
	&dev_attr_office_dim.attr,
	&dev_attr_daylight_max.attr,
	&dev_attr_daylight_dim.attr,
	NULL
};

static const struct attribute_group adp5520_bl_attr_group = {
	.attrs = adp5520_bl_attributes,
};
261
262static int __devinit adp5520_bl_probe(struct platform_device *pdev)
263{
264 struct backlight_device *bl;
265 struct adp5520_bl *data;
266 int ret = 0;
267
268 data = kzalloc(sizeof(*data), GFP_KERNEL);
269 if (data == NULL)
270 return -ENOMEM;
271
272 data->master = pdev->dev.parent;
273 data->pdata = pdev->dev.platform_data;
274
275 if (data->pdata == NULL) {
276 dev_err(&pdev->dev, "missing platform data\n");
277 kfree(data);
278 return -ENODEV;
279 }
280
281 data->id = pdev->id;
282 data->current_brightness = 0;
283
284 mutex_init(&data->lock);
285
286 bl = backlight_device_register(pdev->name, data->master,
287 data, &adp5520_bl_ops);
288 if (IS_ERR(bl)) {
289 dev_err(&pdev->dev, "failed to register backlight\n");
290 kfree(data);
291 return PTR_ERR(bl);
292 }
293
294 bl->props.max_brightness =
295 bl->props.brightness = ADP5020_MAX_BRIGHTNESS;
296
297 if (data->pdata->en_ambl_sens)
298 ret = sysfs_create_group(&bl->dev.kobj,
299 &adp5520_bl_attr_group);
300
301 if (ret) {
302 dev_err(&pdev->dev, "failed to register sysfs\n");
303 backlight_device_unregister(bl);
304 kfree(data);
305 }
306
307 platform_set_drvdata(pdev, bl);
308 ret |= adp5520_bl_setup(bl);
309 backlight_update_status(bl);
310
311 return ret;
312}
313
/*
 * Unbind: disable the backlight output on the chip, tear down the
 * optional ambient-light sysfs group, unregister the class device and
 * free the driver state allocated in probe().
 */
static int __devexit adp5520_bl_remove(struct platform_device *pdev)
{
	struct backlight_device *bl = platform_get_drvdata(pdev);
	struct adp5520_bl *data = bl_get_data(bl);

	adp5520_clr_bits(data->master, MODE_STATUS, BL_EN);

	/* Only created in probe() when en_ambl_sens was set. */
	if (data->pdata->en_ambl_sens)
		sysfs_remove_group(&bl->dev.kobj,
				&adp5520_bl_attr_group);

	backlight_device_unregister(bl);
	kfree(data);

	return 0;
}
330
331#ifdef CONFIG_PM
/* Suspend: force brightness to 0 (turns the backlight off). */
static int adp5520_bl_suspend(struct platform_device *pdev,
				 pm_message_t state)
{
	struct backlight_device *bl = platform_get_drvdata(pdev);
	return adp5520_bl_set(bl, 0);
}
338
/* Resume: reapply the brightness cached in the backlight core. */
static int adp5520_bl_resume(struct platform_device *pdev)
{
	struct backlight_device *bl = platform_get_drvdata(pdev);

	backlight_update_status(bl);
	return 0;
}
346#else
347#define adp5520_bl_suspend NULL
348#define adp5520_bl_resume NULL
349#endif
350
/* Platform glue; suspend/resume are NULL when CONFIG_PM is disabled. */
static struct platform_driver adp5520_bl_driver = {
	.driver		= {
		.name	= "adp5520-backlight",
		.owner	= THIS_MODULE,
	},
	.probe		= adp5520_bl_probe,
	.remove		= __devexit_p(adp5520_bl_remove),
	.suspend	= adp5520_bl_suspend,
	.resume		= adp5520_bl_resume,
};
361
/* Module entry/exit: plain platform-driver register/unregister. */
static int __init adp5520_bl_init(void)
{
	return platform_driver_register(&adp5520_bl_driver);
}
module_init(adp5520_bl_init);

static void __exit adp5520_bl_exit(void)
{
	platform_driver_unregister(&adp5520_bl_driver);
}
module_exit(adp5520_bl_exit);
373
374MODULE_AUTHOR("Michael Hennerich <hennerich@blackfin.uclinux.org>");
375MODULE_DESCRIPTION("ADP5520(01) Backlight Driver");
376MODULE_LICENSE("GPL");
377MODULE_ALIAS("platform:adp5520-backlight");
diff --git a/drivers/video/backlight/adx_bl.c b/drivers/video/backlight/adx_bl.c
new file mode 100644
index 000000000000..2c3bdfc620b7
--- /dev/null
+++ b/drivers/video/backlight/adx_bl.c
@@ -0,0 +1,178 @@
1/*
2 * linux/drivers/video/backlight/adx.c
3 *
4 * Copyright (C) 2009 Avionic Design GmbH
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 *
10 * Written by Thierry Reding <thierry.reding@avionic-design.de>
11 */
12
13#include <linux/backlight.h>
14#include <linux/fb.h>
15#include <linux/io.h>
16#include <linux/module.h>
17#include <linux/platform_device.h>
18
19/* register definitions */
20#define ADX_BACKLIGHT_CONTROL 0x00
21#define ADX_BACKLIGHT_CONTROL_ENABLE (1 << 0)
22#define ADX_BACKLIGHT_BRIGHTNESS 0x08
23#define ADX_BACKLIGHT_STATUS 0x10
24#define ADX_BACKLIGHT_ERROR 0x18
25
26struct adxbl {
27 void __iomem *base;
28};
29
30static int adx_backlight_update_status(struct backlight_device *bldev)
31{
32 struct adxbl *bl = bl_get_data(bldev);
33 u32 value;
34
35 value = bldev->props.brightness;
36 writel(value, bl->base + ADX_BACKLIGHT_BRIGHTNESS);
37
38 value = readl(bl->base + ADX_BACKLIGHT_CONTROL);
39
40 if (bldev->props.state & BL_CORE_FBBLANK)
41 value &= ~ADX_BACKLIGHT_CONTROL_ENABLE;
42 else
43 value |= ADX_BACKLIGHT_CONTROL_ENABLE;
44
45 writel(value, bl->base + ADX_BACKLIGHT_CONTROL);
46
47 return 0;
48}
49
50static int adx_backlight_get_brightness(struct backlight_device *bldev)
51{
52 struct adxbl *bl = bl_get_data(bldev);
53 u32 brightness;
54
55 brightness = readl(bl->base + ADX_BACKLIGHT_BRIGHTNESS);
56 return brightness & 0xff;
57}
58
/* This backlight serves every framebuffer, so always claim a match. */
static int adx_backlight_check_fb(struct fb_info *fb)
{
	return 1;
}
63
/* Backlight-core callbacks for the Xanthos memory-mapped controller. */
static struct backlight_ops adx_backlight_ops = {
	.options = 0,
	.update_status = adx_backlight_update_status,
	.get_brightness = adx_backlight_get_brightness,
	.check_fb = adx_backlight_check_fb,
};
70
71static int __devinit adx_backlight_probe(struct platform_device *pdev)
72{
73 struct backlight_device *bldev;
74 struct resource *res;
75 struct adxbl *bl;
76 int ret = 0;
77
78 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
79 if (!res) {
80 ret = -ENXIO;
81 goto out;
82 }
83
84 res = devm_request_mem_region(&pdev->dev, res->start,
85 resource_size(res), res->name);
86 if (!res) {
87 ret = -ENXIO;
88 goto out;
89 }
90
91 bl = devm_kzalloc(&pdev->dev, sizeof(*bl), GFP_KERNEL);
92 if (!bl) {
93 ret = -ENOMEM;
94 goto out;
95 }
96
97 bl->base = devm_ioremap_nocache(&pdev->dev, res->start,
98 resource_size(res));
99 if (!bl->base) {
100 ret = -ENXIO;
101 goto out;
102 }
103
104 bldev = backlight_device_register(dev_name(&pdev->dev), &pdev->dev, bl,
105 &adx_backlight_ops);
106 if (!bldev) {
107 ret = -ENOMEM;
108 goto out;
109 }
110
111 bldev->props.max_brightness = 0xff;
112 bldev->props.brightness = 0xff;
113 bldev->props.power = FB_BLANK_UNBLANK;
114
115 platform_set_drvdata(pdev, bldev);
116
117out:
118 return ret;
119}
120
/*
 * Unbind: restore full brightness/unblanked state before unregistering
 * so the panel is left lit for whatever driver takes over next.
 * MMIO and memory were devm-allocated and are released automatically.
 */
static int __devexit adx_backlight_remove(struct platform_device *pdev)
{
	struct backlight_device *bldev;
	int ret = 0;

	bldev = platform_get_drvdata(pdev);
	bldev->props.power = FB_BLANK_UNBLANK;
	bldev->props.brightness = 0xff;
	backlight_update_status(bldev);
	backlight_device_unregister(bldev);
	platform_set_drvdata(pdev, NULL);

	return ret;
}
135
136#ifdef CONFIG_PM
/* PM stubs: the hardware state survives suspend, nothing to save/restore. */
static int adx_backlight_suspend(struct platform_device *pdev,
		pm_message_t state)
{
	return 0;
}

static int adx_backlight_resume(struct platform_device *pdev)
{
	return 0;
}
147#else
148#define adx_backlight_suspend NULL
149#define adx_backlight_resume NULL
150#endif
151
/* Platform glue; suspend/resume are NULL when CONFIG_PM is disabled. */
static struct platform_driver adx_backlight_driver = {
	.probe = adx_backlight_probe,
	.remove = __devexit_p(adx_backlight_remove),
	.suspend = adx_backlight_suspend,
	.resume = adx_backlight_resume,
	.driver = {
		.name = "adx-backlight",
		.owner = THIS_MODULE,
	},
};
162
/* Module entry/exit: plain platform-driver register/unregister. */
static int __init adx_backlight_init(void)
{
	return platform_driver_register(&adx_backlight_driver);
}

static void __exit adx_backlight_exit(void)
{
	platform_driver_unregister(&adx_backlight_driver);
}
172
173module_init(adx_backlight_init);
174module_exit(adx_backlight_exit);
175
176MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
177MODULE_DESCRIPTION("Avionic Design Xanthos Backlight Driver");
178MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/backlight.c b/drivers/video/backlight/backlight.c
index 157057c79ca3..6615ac7fa60a 100644
--- a/drivers/video/backlight/backlight.c
+++ b/drivers/video/backlight/backlight.c
@@ -73,6 +73,27 @@ static inline void backlight_unregister_fb(struct backlight_device *bd)
73} 73}
74#endif /* CONFIG_FB */ 74#endif /* CONFIG_FB */
75 75
/*
 * Emit a KOBJ_CHANGE uevent tagged with the origin of a brightness
 * change (sysfs write, hotkey, or unknown) and poke pollers of the
 * actual_brightness attribute, so userspace can react to updates it
 * did not initiate itself.
 */
static void backlight_generate_event(struct backlight_device *bd,
				     enum backlight_update_reason reason)
{
	char *envp[2];

	switch (reason) {
	case BACKLIGHT_UPDATE_SYSFS:
		envp[0] = "SOURCE=sysfs";
		break;
	case BACKLIGHT_UPDATE_HOTKEY:
		envp[0] = "SOURCE=hotkey";
		break;
	default:
		envp[0] = "SOURCE=unknown";
		break;
	}
	envp[1] = NULL;
	kobject_uevent_env(&bd->dev.kobj, KOBJ_CHANGE, envp);
	sysfs_notify(&bd->dev.kobj, NULL, "actual_brightness");
}
96
76static ssize_t backlight_show_power(struct device *dev, 97static ssize_t backlight_show_power(struct device *dev,
77 struct device_attribute *attr,char *buf) 98 struct device_attribute *attr,char *buf)
78{ 99{
@@ -142,6 +163,8 @@ static ssize_t backlight_store_brightness(struct device *dev,
142 } 163 }
143 mutex_unlock(&bd->ops_lock); 164 mutex_unlock(&bd->ops_lock);
144 165
166 backlight_generate_event(bd, BACKLIGHT_UPDATE_SYSFS);
167
145 return rc; 168 return rc;
146} 169}
147 170
@@ -214,6 +237,25 @@ static struct device_attribute bl_device_attributes[] = {
214}; 237};
215 238
216/** 239/**
240 * backlight_force_update - tell the backlight subsystem that hardware state
241 * has changed
242 * @bd: the backlight device to update
243 *
244 * Updates the internal state of the backlight in response to a hardware event,
245 * and generate a uevent to notify userspace
246 */
247void backlight_force_update(struct backlight_device *bd,
248 enum backlight_update_reason reason)
249{
250 mutex_lock(&bd->ops_lock);
251 if (bd->ops && bd->ops->get_brightness)
252 bd->props.brightness = bd->ops->get_brightness(bd);
253 mutex_unlock(&bd->ops_lock);
254 backlight_generate_event(bd, reason);
255}
256EXPORT_SYMBOL(backlight_force_update);
257
258/**
217 * backlight_device_register - create and register a new object of 259 * backlight_device_register - create and register a new object of
218 * backlight_device class. 260 * backlight_device class.
219 * @name: the name of the new object(must be the same as the name of the 261 * @name: the name of the new object(must be the same as the name of the
diff --git a/drivers/video/backlight/da903x_bl.c b/drivers/video/backlight/da903x_bl.c
index 93bb4340cc64..701a1081e199 100644
--- a/drivers/video/backlight/da903x_bl.c
+++ b/drivers/video/backlight/da903x_bl.c
@@ -154,34 +154,38 @@ static int da903x_backlight_remove(struct platform_device *pdev)
154} 154}
155 155
156#ifdef CONFIG_PM 156#ifdef CONFIG_PM
157static int da903x_backlight_suspend(struct platform_device *pdev, 157static int da903x_backlight_suspend(struct device *dev)
158 pm_message_t state)
159{ 158{
159 struct platform_device *pdev = to_platform_device(dev);
160 struct backlight_device *bl = platform_get_drvdata(pdev); 160 struct backlight_device *bl = platform_get_drvdata(pdev);
161 return da903x_backlight_set(bl, 0); 161 return da903x_backlight_set(bl, 0);
162} 162}
163 163
164static int da903x_backlight_resume(struct platform_device *pdev) 164static int da903x_backlight_resume(struct device *dev)
165{ 165{
166 struct platform_device *pdev = to_platform_device(dev);
166 struct backlight_device *bl = platform_get_drvdata(pdev); 167 struct backlight_device *bl = platform_get_drvdata(pdev);
167 168
168 backlight_update_status(bl); 169 backlight_update_status(bl);
169 return 0; 170 return 0;
170} 171}
171#else 172
172#define da903x_backlight_suspend NULL 173static struct dev_pm_ops da903x_backlight_pm_ops = {
173#define da903x_backlight_resume NULL 174 .suspend = da903x_backlight_suspend,
175 .resume = da903x_backlight_resume,
176};
174#endif 177#endif
175 178
176static struct platform_driver da903x_backlight_driver = { 179static struct platform_driver da903x_backlight_driver = {
177 .driver = { 180 .driver = {
178 .name = "da903x-backlight", 181 .name = "da903x-backlight",
179 .owner = THIS_MODULE, 182 .owner = THIS_MODULE,
183#ifdef CONFIG_PM
184 .pm = &da903x_backlight_pm_ops,
185#endif
180 }, 186 },
181 .probe = da903x_backlight_probe, 187 .probe = da903x_backlight_probe,
182 .remove = da903x_backlight_remove, 188 .remove = da903x_backlight_remove,
183 .suspend = da903x_backlight_suspend,
184 .resume = da903x_backlight_resume,
185}; 189};
186 190
187static int __init da903x_backlight_init(void) 191static int __init da903x_backlight_init(void)
diff --git a/drivers/video/backlight/hp680_bl.c b/drivers/video/backlight/hp680_bl.c
index 5be55a20d8c7..7fb4eefff80d 100644
--- a/drivers/video/backlight/hp680_bl.c
+++ b/drivers/video/backlight/hp680_bl.c
@@ -103,7 +103,7 @@ static struct backlight_ops hp680bl_ops = {
103 .update_status = hp680bl_set_intensity, 103 .update_status = hp680bl_set_intensity,
104}; 104};
105 105
106static int __init hp680bl_probe(struct platform_device *pdev) 106static int __devinit hp680bl_probe(struct platform_device *pdev)
107{ 107{
108 struct backlight_device *bd; 108 struct backlight_device *bd;
109 109
diff --git a/drivers/video/backlight/lms283gf05.c b/drivers/video/backlight/lms283gf05.c
new file mode 100644
index 000000000000..447b542a20ca
--- /dev/null
+++ b/drivers/video/backlight/lms283gf05.c
@@ -0,0 +1,242 @@
1/*
2 * lms283gf05.c -- support for Samsung LMS283GF05 LCD
3 *
4 * Copyright (c) 2009 Marek Vasut <marek.vasut@gmail.com>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/device.h>
12#include <linux/kernel.h>
13#include <linux/delay.h>
14#include <linux/gpio.h>
15#include <linux/lcd.h>
16
17#include <linux/spi/spi.h>
18#include <linux/spi/lms283gf05.h>
19
/* Per-device state: the SPI link and the registered LCD class device. */
struct lms283gf05_state {
	struct spi_device	*spi;
	struct lcd_device	*ld;
};

/* One step of a register-programming sequence: write @value to @reg,
 * then wait @delay milliseconds before the next step. */
struct lms283gf05_seq {
	unsigned char		reg;
	unsigned short		value;
	unsigned char		delay;
};
30
31/* Magic sequences supplied by manufacturer, for details refer to datasheet */
32static struct lms283gf05_seq disp_initseq[] = {
33 /* REG, VALUE, DELAY */
34 { 0x07, 0x0000, 0 },
35 { 0x13, 0x0000, 10 },
36
37 { 0x11, 0x3004, 0 },
38 { 0x14, 0x200F, 0 },
39 { 0x10, 0x1a20, 0 },
40 { 0x13, 0x0040, 50 },
41
42 { 0x13, 0x0060, 0 },
43 { 0x13, 0x0070, 200 },
44
45 { 0x01, 0x0127, 0 },
46 { 0x02, 0x0700, 0 },
47 { 0x03, 0x1030, 0 },
48 { 0x08, 0x0208, 0 },
49 { 0x0B, 0x0620, 0 },
50 { 0x0C, 0x0110, 0 },
51 { 0x30, 0x0120, 0 },
52 { 0x31, 0x0127, 0 },
53 { 0x32, 0x0000, 0 },
54 { 0x33, 0x0503, 0 },
55 { 0x34, 0x0727, 0 },
56 { 0x35, 0x0124, 0 },
57 { 0x36, 0x0706, 0 },
58 { 0x37, 0x0701, 0 },
59 { 0x38, 0x0F00, 0 },
60 { 0x39, 0x0F00, 0 },
61 { 0x40, 0x0000, 0 },
62 { 0x41, 0x0000, 0 },
63 { 0x42, 0x013f, 0 },
64 { 0x43, 0x0000, 0 },
65 { 0x44, 0x013f, 0 },
66 { 0x45, 0x0000, 0 },
67 { 0x46, 0xef00, 0 },
68 { 0x47, 0x013f, 0 },
69 { 0x48, 0x0000, 0 },
70 { 0x07, 0x0015, 30 },
71
72 { 0x07, 0x0017, 0 },
73
74 { 0x20, 0x0000, 0 },
75 { 0x21, 0x0000, 0 },
76 { 0x22, 0x0000, 0 }
77};
78
/* Manufacturer power-down sequence; delays are required settling times. */
static struct lms283gf05_seq disp_pdwnseq[] = {
	{ 0x07, 0x0016, 30 },

	{ 0x07, 0x0004, 0 },
	{ 0x10, 0x0220, 20 },

	{ 0x13, 0x0060, 50 },

	{ 0x13, 0x0040, 50 },

	{ 0x13, 0x0000, 0 },
	{ 0x10, 0x0000, 0 }
};
92
93
/*
 * Pulse the panel's reset GPIO: deassert, assert for 20ms, deassert and
 * hold 20ms.  @inverted flips the electrical polarity of "asserted".
 * mdelay() busy-waits are intentional here (timing-sensitive sequence);
 * NOTE(review): msleep() would presumably suffice since callers can
 * sleep — confirm before changing.
 */
static void lms283gf05_reset(unsigned long gpio, bool inverted)
{
	gpio_set_value(gpio, !inverted);
	mdelay(100);
	gpio_set_value(gpio, inverted);
	mdelay(20);
	gpio_set_value(gpio, !inverted);
	mdelay(20);
}
103
104static void lms283gf05_toggle(struct spi_device *spi,
105 struct lms283gf05_seq *seq, int sz)
106{
107 char buf[3];
108 int i;
109
110 for (i = 0; i < sz; i++) {
111 buf[0] = 0x74;
112 buf[1] = 0x00;
113 buf[2] = seq[i].reg;
114 spi_write(spi, buf, 3);
115
116 buf[0] = 0x76;
117 buf[1] = seq[i].value >> 8;
118 buf[2] = seq[i].value & 0xff;
119 spi_write(spi, buf, 3);
120
121 mdelay(seq[i].delay);
122 }
123}
124
/*
 * LCD-class power hook.  Non-zero @power: pulse reset (if a reset GPIO
 * was provided via platform data) and replay the init sequence.  Zero:
 * replay the power-down sequence, then hold the panel in reset.
 */
static int lms283gf05_power_set(struct lcd_device *ld, int power)
{
	struct lms283gf05_state *st = lcd_get_data(ld);
	struct spi_device *spi = st->spi;
	struct lms283gf05_pdata *pdata = spi->dev.platform_data;

	if (power) {
		if (pdata)
			lms283gf05_reset(pdata->reset_gpio,
					pdata->reset_inverted);
		lms283gf05_toggle(spi, disp_initseq, ARRAY_SIZE(disp_initseq));
	} else {
		lms283gf05_toggle(spi, disp_pdwnseq, ARRAY_SIZE(disp_pdwnseq));
		if (pdata)
			gpio_set_value(pdata->reset_gpio,
					pdata->reset_inverted);
	}

	return 0;
}
145
/* LCD-class callbacks; power state is write-only (no get_power readback). */
static struct lcd_ops lms_ops = {
	.set_power	= lms283gf05_power_set,
	.get_power	= NULL,
};
150
/*
 * Bind the panel: claim the optional reset GPIO, allocate state,
 * register the LCD class device, then run the reset + init sequence to
 * bring the panel up.  Platform data (and hence the reset GPIO) is
 * optional throughout; error unwinding uses the standard goto-cleanup
 * ladder (err2 frees state, err releases the GPIO).
 */
static int __devinit lms283gf05_probe(struct spi_device *spi)
{
	struct lms283gf05_state *st;
	struct lms283gf05_pdata *pdata = spi->dev.platform_data;
	struct lcd_device *ld;
	int ret = 0;

	if (pdata != NULL) {
		ret = gpio_request(pdata->reset_gpio, "LMS285GF05 RESET");
		if (ret)
			return ret;

		ret = gpio_direction_output(pdata->reset_gpio,
						!pdata->reset_inverted);
		if (ret)
			goto err;
	}

	st = kzalloc(sizeof(struct lms283gf05_state), GFP_KERNEL);
	if (st == NULL) {
		dev_err(&spi->dev, "No memory for device state\n");
		ret = -ENOMEM;
		goto err;
	}

	ld = lcd_device_register("lms283gf05", &spi->dev, st, &lms_ops);
	if (IS_ERR(ld)) {
		ret = PTR_ERR(ld);
		goto err2;
	}

	st->spi = spi;
	st->ld = ld;

	dev_set_drvdata(&spi->dev, st);

	/* kick in the LCD */
	if (pdata)
		lms283gf05_reset(pdata->reset_gpio, pdata->reset_inverted);
	lms283gf05_toggle(spi, disp_initseq, ARRAY_SIZE(disp_initseq));

	return 0;

err2:
	kfree(st);
err:
	if (pdata != NULL)
		gpio_free(pdata->reset_gpio);

	return ret;
}
202
/* Unbind: unregister the LCD device, release the reset GPIO, free state. */
static int __devexit lms283gf05_remove(struct spi_device *spi)
{
	struct lms283gf05_state *st = dev_get_drvdata(&spi->dev);
	struct lms283gf05_pdata *pdata = st->spi->dev.platform_data;

	lcd_device_unregister(st->ld);

	if (pdata != NULL)
		gpio_free(pdata->reset_gpio);

	kfree(st);

	return 0;
}
217
/* SPI driver glue; no PM callbacks — the panel is reprogrammed via
 * set_power() by whoever owns the display. */
static struct spi_driver lms283gf05_driver = {
	.driver = {
		.name	= "lms283gf05",
		.owner	= THIS_MODULE,
	},
	.probe		= lms283gf05_probe,
	.remove		= __devexit_p(lms283gf05_remove),
};
226
/* Module entry/exit: plain SPI-driver register/unregister. */
static __init int lms283gf05_init(void)
{
	return spi_register_driver(&lms283gf05_driver);
}

static __exit void lms283gf05_exit(void)
{
	spi_unregister_driver(&lms283gf05_driver);
}
236
237module_init(lms283gf05_init);
238module_exit(lms283gf05_exit);
239
240MODULE_AUTHOR("Marek Vasut <marek.vasut@gmail.com>");
241MODULE_DESCRIPTION("LCD283GF05 LCD");
242MODULE_LICENSE("GPL v2");
diff --git a/drivers/video/backlight/mbp_nvidia_bl.c b/drivers/video/backlight/mbp_nvidia_bl.c
index 3bb4c0a50c62..9edb8d7c295f 100644
--- a/drivers/video/backlight/mbp_nvidia_bl.c
+++ b/drivers/video/backlight/mbp_nvidia_bl.c
@@ -166,6 +166,15 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
166 }, 166 },
167 { 167 {
168 .callback = mbp_dmi_match, 168 .callback = mbp_dmi_match,
169 .ident = "MacBookAir 1,1",
170 .matches = {
171 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
172 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookAir1,1"),
173 },
174 .driver_data = (void *)&intel_chipset_data,
175 },
176 {
177 .callback = mbp_dmi_match,
169 .ident = "MacBook 5,1", 178 .ident = "MacBook 5,1",
170 .matches = { 179 .matches = {
171 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), 180 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
@@ -175,6 +184,15 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
175 }, 184 },
176 { 185 {
177 .callback = mbp_dmi_match, 186 .callback = mbp_dmi_match,
187 .ident = "MacBook 5,2",
188 .matches = {
189 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
190 DMI_MATCH(DMI_PRODUCT_NAME, "MacBook5,2"),
191 },
192 .driver_data = (void *)&nvidia_chipset_data,
193 },
194 {
195 .callback = mbp_dmi_match,
178 .ident = "MacBookAir 2,1", 196 .ident = "MacBookAir 2,1",
179 .matches = { 197 .matches = {
180 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."), 198 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
@@ -191,6 +209,24 @@ static const struct dmi_system_id __initdata mbp_device_table[] = {
191 }, 209 },
192 .driver_data = (void *)&nvidia_chipset_data, 210 .driver_data = (void *)&nvidia_chipset_data,
193 }, 211 },
212 {
213 .callback = mbp_dmi_match,
214 .ident = "MacBookPro 5,2",
215 .matches = {
216 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
217 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,2"),
218 },
219 .driver_data = (void *)&nvidia_chipset_data,
220 },
221 {
222 .callback = mbp_dmi_match,
223 .ident = "MacBookPro 5,5",
224 .matches = {
225 DMI_MATCH(DMI_SYS_VENDOR, "Apple Inc."),
226 DMI_MATCH(DMI_PRODUCT_NAME, "MacBookPro5,5"),
227 },
228 .driver_data = (void *)&nvidia_chipset_data,
229 },
194 { } 230 { }
195}; 231};
196 232
diff --git a/drivers/video/backlight/wm831x_bl.c b/drivers/video/backlight/wm831x_bl.c
new file mode 100644
index 000000000000..467bdb7efb23
--- /dev/null
+++ b/drivers/video/backlight/wm831x_bl.c
@@ -0,0 +1,250 @@
1/*
2 * Backlight driver for Wolfson Microelectronics WM831x PMICs
3 *
4 * Copyright 2009 Wolfson Microelectonics plc
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License version 2 as
8 * published by the Free Software Foundation.
9 */
10
11#include <linux/kernel.h>
12#include <linux/init.h>
13#include <linux/platform_device.h>
14#include <linux/fb.h>
15#include <linux/backlight.h>
16
17#include <linux/mfd/wm831x/core.h>
18#include <linux/mfd/wm831x/pdata.h>
19#include <linux/mfd/wm831x/regulator.h>
20
/* Driver state: parent PMIC handle, which ISINK register drives the
 * backlight, and the last brightness actually programmed (used to
 * detect off->on and on->off power transitions). */
struct wm831x_backlight_data {
	struct wm831x *wm831x;
	int isink_reg;
	int current_brightness;
};
26
/*
 * Program @brightness into the ISINK and sequence the supply rails.
 * The ordering is load-bearing: power-up enables ISINK before the DC-DC
 * and only drives current last; power-down disables the DC-DC first.
 * On any failure mid-transition both rails are forced off for safety.
 * Returns 0 on success or a negative errno from the register writes.
 */
static int wm831x_backlight_set(struct backlight_device *bl, int brightness)
{
	struct wm831x_backlight_data *data = bl_get_data(bl);
	struct wm831x *wm831x = data->wm831x;
	/* Transition detection against the last programmed level. */
	int power_up = !data->current_brightness && brightness;
	int power_down = data->current_brightness && !brightness;
	int ret;

	if (power_up) {
		/* Enable the ISINK */
		ret = wm831x_set_bits(wm831x, data->isink_reg,
				      WM831X_CS1_ENA, WM831X_CS1_ENA);
		if (ret < 0)
			goto err;

		/* Enable the DC-DC */
		ret = wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE,
				      WM831X_DC4_ENA, WM831X_DC4_ENA);
		if (ret < 0)
			goto err;
	}

	if (power_down) {
		/* DCDC first */
		ret = wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE,
				      WM831X_DC4_ENA, 0);
		if (ret < 0)
			goto err;

		/* ISINK */
		ret = wm831x_set_bits(wm831x, data->isink_reg,
				      WM831X_CS1_DRIVE | WM831X_CS1_ENA, 0);
		if (ret < 0)
			goto err;
	}

	/* Set the new brightness */
	ret = wm831x_set_bits(wm831x, data->isink_reg,
			      WM831X_CS1_ISEL_MASK, brightness);
	if (ret < 0)
		goto err;

	if (power_up) {
		/* Drive current through the ISINK */
		ret = wm831x_set_bits(wm831x, data->isink_reg,
				      WM831X_CS1_DRIVE, WM831X_CS1_DRIVE);
		if (ret < 0)
			return ret;
	}

	data->current_brightness = brightness;

	return 0;

err:
	/* If we were in the middle of a power transition always shut down
	 * for safety.
	 */
	if (power_up || power_down) {
		wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE, WM831X_DC4_ENA, 0);
		wm831x_set_bits(wm831x, data->isink_reg, WM831X_CS1_ENA, 0);
	}

	return ret;
}
92
93static int wm831x_backlight_update_status(struct backlight_device *bl)
94{
95 int brightness = bl->props.brightness;
96
97 if (bl->props.power != FB_BLANK_UNBLANK)
98 brightness = 0;
99
100 if (bl->props.fb_blank != FB_BLANK_UNBLANK)
101 brightness = 0;
102
103 if (bl->props.state & BL_CORE_SUSPENDED)
104 brightness = 0;
105
106 return wm831x_backlight_set(bl, brightness);
107}
108
/* Report the cached brightness (no hardware readback is performed). */
static int wm831x_backlight_get_brightness(struct backlight_device *bl)
{
	struct wm831x_backlight_data *data = bl_get_data(bl);
	return data->current_brightness;
}
114
/* Let the backlight core drive suspend/resume via BL_CORE_SUSPENDED. */
static struct backlight_ops wm831x_backlight_ops = {
	.options = BL_CORE_SUSPENDRESUME,
	.update_status	= wm831x_backlight_update_status,
	.get_brightness = wm831x_backlight_get_brightness,
};
120
/*
 * Bind the WM831x backlight: validate platform data, translate the
 * requested maximum current into an ISINK selection index, route the
 * chosen ISINK into the DC-DC feedback path (under the register-lock
 * protocol), then register the backlight and bootstrap the hardware to
 * full brightness.  Returns 0 or a negative errno.
 */
static int wm831x_backlight_probe(struct platform_device *pdev)
{
	struct wm831x *wm831x = dev_get_drvdata(pdev->dev.parent);
	struct wm831x_pdata *wm831x_pdata;
	struct wm831x_backlight_pdata *pdata;
	struct wm831x_backlight_data *data;
	struct backlight_device *bl;
	int ret, i, max_isel, isink_reg, dcdc_cfg;

	/* We need platform data */
	if (pdev->dev.parent->platform_data) {
		wm831x_pdata = pdev->dev.parent->platform_data;
		pdata = wm831x_pdata->backlight;
	} else {
		pdata = NULL;
	}

	if (!pdata) {
		dev_err(&pdev->dev, "No platform data supplied\n");
		return -EINVAL;
	}

	/* Figure out the maximum current we can use */
	for (i = 0; i < WM831X_ISINK_MAX_ISEL; i++) {
		if (wm831x_isinkv_values[i] > pdata->max_uA)
			break;
	}

	if (i == 0) {
		dev_err(&pdev->dev, "Invalid max_uA: %duA\n", pdata->max_uA);
		return -EINVAL;
	}
	/* Largest selection that does not exceed the requested current. */
	max_isel = i - 1;

	if (pdata->max_uA != wm831x_isinkv_values[max_isel])
		dev_warn(&pdev->dev,
			 "Maximum current is %duA not %duA as requested\n",
			 wm831x_isinkv_values[max_isel], pdata->max_uA);

	switch (pdata->isink) {
	case 1:
		isink_reg = WM831X_CURRENT_SINK_1;
		dcdc_cfg = 0;
		break;
	case 2:
		isink_reg = WM831X_CURRENT_SINK_2;
		dcdc_cfg = WM831X_DC4_FBSRC;
		break;
	default:
		dev_err(&pdev->dev, "Invalid ISINK %d\n", pdata->isink);
		return -EINVAL;
	}

	/* Configure the ISINK to use for feedback */
	ret = wm831x_reg_unlock(wm831x);
	if (ret < 0)
		return ret;

	ret = wm831x_set_bits(wm831x, WM831X_DC4_CONTROL, WM831X_DC4_FBSRC,
			      dcdc_cfg);

	/* Re-lock before acting on the write's result. */
	wm831x_reg_lock(wm831x);
	if (ret < 0)
		return ret;

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (data == NULL)
		return -ENOMEM;

	data->wm831x = wm831x;
	data->current_brightness = 0;
	data->isink_reg = isink_reg;

	bl = backlight_device_register("wm831x", &pdev->dev,
			data, &wm831x_backlight_ops);
	if (IS_ERR(bl)) {
		dev_err(&pdev->dev, "failed to register backlight\n");
		kfree(data);
		return PTR_ERR(bl);
	}

	bl->props.max_brightness = max_isel;
	bl->props.brightness = max_isel;

	platform_set_drvdata(pdev, bl);

	/* Disable the DCDC if it was started so we can bootstrap */
	wm831x_set_bits(wm831x, WM831X_DCDC_ENABLE, WM831X_DC4_ENA, 0);


	backlight_update_status(bl);

	return 0;
}
215
/* Unbind: unregister the backlight device and free the driver state. */
static int wm831x_backlight_remove(struct platform_device *pdev)
{
	struct backlight_device *bl = platform_get_drvdata(pdev);
	struct wm831x_backlight_data *data = bl_get_data(bl);

	backlight_device_unregister(bl);
	kfree(data);
	return 0;
}
225
/* Platform glue; suspend/resume are handled by the backlight core via
 * BL_CORE_SUSPENDRESUME, so no PM callbacks are needed here. */
static struct platform_driver wm831x_backlight_driver = {
	.driver		= {
		.name	= "wm831x-backlight",
		.owner	= THIS_MODULE,
	},
	.probe		= wm831x_backlight_probe,
	.remove		= wm831x_backlight_remove,
};
234
/* Module entry/exit: plain platform-driver register/unregister. */
static int __init wm831x_backlight_init(void)
{
	return platform_driver_register(&wm831x_backlight_driver);
}
module_init(wm831x_backlight_init);

static void __exit wm831x_backlight_exit(void)
{
	platform_driver_unregister(&wm831x_backlight_driver);
}
module_exit(wm831x_backlight_exit);
246
247MODULE_DESCRIPTION("Backlight Driver for WM831x PMICs");
248MODULE_AUTHOR("Mark Brown <broonie@opensource.wolfsonmicro.com");
249MODULE_LICENSE("GPL");
250MODULE_ALIAS("platform:wm831x-backlight");
diff --git a/drivers/video/fb_defio.c b/drivers/video/fb_defio.c
index 0a7a6679ee6e..c27ab1ed9604 100644
--- a/drivers/video/fb_defio.c
+++ b/drivers/video/fb_defio.c
@@ -125,7 +125,7 @@ page_already_added:
125 return 0; 125 return 0;
126} 126}
127 127
128static struct vm_operations_struct fb_deferred_io_vm_ops = { 128static const struct vm_operations_struct fb_deferred_io_vm_ops = {
129 .fault = fb_deferred_io_fault, 129 .fault = fb_deferred_io_fault,
130 .page_mkwrite = fb_deferred_io_mkwrite, 130 .page_mkwrite = fb_deferred_io_mkwrite,
131}; 131};
diff --git a/drivers/video/fbmem.c b/drivers/video/fbmem.c
index a1f2e7ce730b..99bbd282ce63 100644
--- a/drivers/video/fbmem.c
+++ b/drivers/video/fbmem.c
@@ -1800,7 +1800,7 @@ static int __init video_setup(char *options)
1800 global = 1; 1800 global = 1;
1801 } 1801 }
1802 1802
1803 if (!global && !strstr(options, "fb:")) { 1803 if (!global && !strchr(options, ':')) {
1804 fb_mode_option = options; 1804 fb_mode_option = options;
1805 global = 1; 1805 global = 1;
1806 } 1806 }
diff --git a/drivers/video/omap/dispc.c b/drivers/video/omap/dispc.c
index 80a11d078df4..f16e42154229 100644
--- a/drivers/video/omap/dispc.c
+++ b/drivers/video/omap/dispc.c
@@ -1035,7 +1035,7 @@ static void mmap_user_close(struct vm_area_struct *vma)
1035 atomic_dec(&dispc.map_count[plane]); 1035 atomic_dec(&dispc.map_count[plane]);
1036} 1036}
1037 1037
1038static struct vm_operations_struct mmap_user_ops = { 1038static const struct vm_operations_struct mmap_user_ops = {
1039 .open = mmap_user_open, 1039 .open = mmap_user_open,
1040 .close = mmap_user_close, 1040 .close = mmap_user_close,
1041}; 1041};
diff --git a/drivers/video/pxafb.c b/drivers/video/pxafb.c
index 6506117c134b..1820c4a24434 100644
--- a/drivers/video/pxafb.c
+++ b/drivers/video/pxafb.c
@@ -1638,24 +1638,26 @@ pxafb_freq_policy(struct notifier_block *nb, unsigned long val, void *data)
1638 * Power management hooks. Note that we won't be called from IRQ context, 1638 * Power management hooks. Note that we won't be called from IRQ context,
1639 * unlike the blank functions above, so we may sleep. 1639 * unlike the blank functions above, so we may sleep.
1640 */ 1640 */
1641static int pxafb_suspend(struct platform_device *dev, pm_message_t state) 1641static int pxafb_suspend(struct device *dev)
1642{ 1642{
1643 struct pxafb_info *fbi = platform_get_drvdata(dev); 1643 struct pxafb_info *fbi = dev_get_drvdata(dev);
1644 1644
1645 set_ctrlr_state(fbi, C_DISABLE_PM); 1645 set_ctrlr_state(fbi, C_DISABLE_PM);
1646 return 0; 1646 return 0;
1647} 1647}
1648 1648
1649static int pxafb_resume(struct platform_device *dev) 1649static int pxafb_resume(struct device *dev)
1650{ 1650{
1651 struct pxafb_info *fbi = platform_get_drvdata(dev); 1651 struct pxafb_info *fbi = dev_get_drvdata(dev);
1652 1652
1653 set_ctrlr_state(fbi, C_ENABLE_PM); 1653 set_ctrlr_state(fbi, C_ENABLE_PM);
1654 return 0; 1654 return 0;
1655} 1655}
1656#else 1656
1657#define pxafb_suspend NULL 1657static struct dev_pm_ops pxafb_pm_ops = {
1658#define pxafb_resume NULL 1658 .suspend = pxafb_suspend,
1659 .resume = pxafb_resume,
1660};
1659#endif 1661#endif
1660 1662
1661static int __devinit pxafb_init_video_memory(struct pxafb_info *fbi) 1663static int __devinit pxafb_init_video_memory(struct pxafb_info *fbi)
@@ -2081,6 +2083,9 @@ static int __devinit pxafb_probe(struct platform_device *dev)
2081 goto failed; 2083 goto failed;
2082 } 2084 }
2083 2085
2086 if (cpu_is_pxa3xx() && inf->acceleration_enabled)
2087 fbi->fb.fix.accel = FB_ACCEL_PXA3XX;
2088
2084 fbi->backlight_power = inf->pxafb_backlight_power; 2089 fbi->backlight_power = inf->pxafb_backlight_power;
2085 fbi->lcd_power = inf->pxafb_lcd_power; 2090 fbi->lcd_power = inf->pxafb_lcd_power;
2086 2091
@@ -2091,14 +2096,14 @@ static int __devinit pxafb_probe(struct platform_device *dev)
2091 goto failed_fbi; 2096 goto failed_fbi;
2092 } 2097 }
2093 2098
2094 r = request_mem_region(r->start, r->end - r->start + 1, dev->name); 2099 r = request_mem_region(r->start, resource_size(r), dev->name);
2095 if (r == NULL) { 2100 if (r == NULL) {
2096 dev_err(&dev->dev, "failed to request I/O memory\n"); 2101 dev_err(&dev->dev, "failed to request I/O memory\n");
2097 ret = -EBUSY; 2102 ret = -EBUSY;
2098 goto failed_fbi; 2103 goto failed_fbi;
2099 } 2104 }
2100 2105
2101 fbi->mmio_base = ioremap(r->start, r->end - r->start + 1); 2106 fbi->mmio_base = ioremap(r->start, resource_size(r));
2102 if (fbi->mmio_base == NULL) { 2107 if (fbi->mmio_base == NULL) {
2103 dev_err(&dev->dev, "failed to map I/O memory\n"); 2108 dev_err(&dev->dev, "failed to map I/O memory\n");
2104 ret = -EBUSY; 2109 ret = -EBUSY;
@@ -2197,7 +2202,7 @@ failed_free_dma:
2197failed_free_io: 2202failed_free_io:
2198 iounmap(fbi->mmio_base); 2203 iounmap(fbi->mmio_base);
2199failed_free_res: 2204failed_free_res:
2200 release_mem_region(r->start, r->end - r->start + 1); 2205 release_mem_region(r->start, resource_size(r));
2201failed_fbi: 2206failed_fbi:
2202 clk_put(fbi->clk); 2207 clk_put(fbi->clk);
2203 platform_set_drvdata(dev, NULL); 2208 platform_set_drvdata(dev, NULL);
@@ -2237,7 +2242,7 @@ static int __devexit pxafb_remove(struct platform_device *dev)
2237 iounmap(fbi->mmio_base); 2242 iounmap(fbi->mmio_base);
2238 2243
2239 r = platform_get_resource(dev, IORESOURCE_MEM, 0); 2244 r = platform_get_resource(dev, IORESOURCE_MEM, 0);
2240 release_mem_region(r->start, r->end - r->start + 1); 2245 release_mem_region(r->start, resource_size(r));
2241 2246
2242 clk_put(fbi->clk); 2247 clk_put(fbi->clk);
2243 kfree(fbi); 2248 kfree(fbi);
@@ -2248,11 +2253,12 @@ static int __devexit pxafb_remove(struct platform_device *dev)
2248static struct platform_driver pxafb_driver = { 2253static struct platform_driver pxafb_driver = {
2249 .probe = pxafb_probe, 2254 .probe = pxafb_probe,
2250 .remove = __devexit_p(pxafb_remove), 2255 .remove = __devexit_p(pxafb_remove),
2251 .suspend = pxafb_suspend,
2252 .resume = pxafb_resume,
2253 .driver = { 2256 .driver = {
2254 .owner = THIS_MODULE, 2257 .owner = THIS_MODULE,
2255 .name = "pxa2xx-fb", 2258 .name = "pxa2xx-fb",
2259#ifdef CONFIG_PM
2260 .pm = &pxafb_pm_ops,
2261#endif
2256 }, 2262 },
2257}; 2263};
2258 2264
diff --git a/drivers/w1/masters/ds2482.c b/drivers/w1/masters/ds2482.c
index df52cb355f7d..406caa6a71cb 100644
--- a/drivers/w1/masters/ds2482.c
+++ b/drivers/w1/masters/ds2482.c
@@ -24,19 +24,6 @@
24#include "../w1_int.h" 24#include "../w1_int.h"
25 25
26/** 26/**
27 * Address is selected using 2 pins, resulting in 4 possible addresses.
28 * 0x18, 0x19, 0x1a, 0x1b
29 * However, the chip cannot be detected without doing an i2c write,
30 * so use the force module parameter.
31 */
32static const unsigned short normal_i2c[] = { I2C_CLIENT_END };
33
34/**
35 * Insmod parameters
36 */
37I2C_CLIENT_INSMOD_1(ds2482);
38
39/**
40 * The DS2482 registers - there are 3 registers that are addressed by a read 27 * The DS2482 registers - there are 3 registers that are addressed by a read
41 * pointer. The read pointer is set by the last command executed. 28 * pointer. The read pointer is set by the last command executed.
42 * 29 *
@@ -96,8 +83,6 @@ static const u8 ds2482_chan_rd[8] =
96 83
97static int ds2482_probe(struct i2c_client *client, 84static int ds2482_probe(struct i2c_client *client,
98 const struct i2c_device_id *id); 85 const struct i2c_device_id *id);
99static int ds2482_detect(struct i2c_client *client, int kind,
100 struct i2c_board_info *info);
101static int ds2482_remove(struct i2c_client *client); 86static int ds2482_remove(struct i2c_client *client);
102 87
103 88
@@ -117,8 +102,6 @@ static struct i2c_driver ds2482_driver = {
117 .probe = ds2482_probe, 102 .probe = ds2482_probe,
118 .remove = ds2482_remove, 103 .remove = ds2482_remove,
119 .id_table = ds2482_id, 104 .id_table = ds2482_id,
120 .detect = ds2482_detect,
121 .address_data = &addr_data,
122}; 105};
123 106
124/* 107/*
@@ -425,19 +408,6 @@ static u8 ds2482_w1_reset_bus(void *data)
425} 408}
426 409
427 410
428static int ds2482_detect(struct i2c_client *client, int kind,
429 struct i2c_board_info *info)
430{
431 if (!i2c_check_functionality(client->adapter,
432 I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
433 I2C_FUNC_SMBUS_BYTE))
434 return -ENODEV;
435
436 strlcpy(info->type, "ds2482", I2C_NAME_SIZE);
437
438 return 0;
439}
440
441static int ds2482_probe(struct i2c_client *client, 411static int ds2482_probe(struct i2c_client *client,
442 const struct i2c_device_id *id) 412 const struct i2c_device_id *id)
443{ 413{
@@ -446,6 +416,11 @@ static int ds2482_probe(struct i2c_client *client,
446 int temp1; 416 int temp1;
447 int idx; 417 int idx;
448 418
419 if (!i2c_check_functionality(client->adapter,
420 I2C_FUNC_SMBUS_WRITE_BYTE_DATA |
421 I2C_FUNC_SMBUS_BYTE))
422 return -ENODEV;
423
449 if (!(data = kzalloc(sizeof(struct ds2482_data), GFP_KERNEL))) { 424 if (!(data = kzalloc(sizeof(struct ds2482_data), GFP_KERNEL))) {
450 err = -ENOMEM; 425 err = -ENOMEM;
451 goto exit; 426 goto exit;
diff --git a/drivers/watchdog/Kconfig b/drivers/watchdog/Kconfig
index ff3eb8ff6bd7..3711b888d482 100644
--- a/drivers/watchdog/Kconfig
+++ b/drivers/watchdog/Kconfig
@@ -282,6 +282,13 @@ config NUC900_WATCHDOG
282 To compile this driver as a module, choose M here: the 282 To compile this driver as a module, choose M here: the
283 module will be called nuc900_wdt. 283 module will be called nuc900_wdt.
284 284
285config ADX_WATCHDOG
286 tristate "Avionic Design Xanthos watchdog"
287 depends on ARCH_PXA_ADX
288 help
289 Say Y here if you want support for the watchdog timer on Avionic
290 Design Xanthos boards.
291
285# AVR32 Architecture 292# AVR32 Architecture
286 293
287config AT32AP700X_WDT 294config AT32AP700X_WDT
diff --git a/drivers/watchdog/Makefile b/drivers/watchdog/Makefile
index 348b3b862c99..699199b1baa6 100644
--- a/drivers/watchdog/Makefile
+++ b/drivers/watchdog/Makefile
@@ -45,6 +45,7 @@ obj-$(CONFIG_ORION_WATCHDOG) += orion_wdt.o
45obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o 45obj-$(CONFIG_COH901327_WATCHDOG) += coh901327_wdt.o
46obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o 46obj-$(CONFIG_STMP3XXX_WATCHDOG) += stmp3xxx_wdt.o
47obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o 47obj-$(CONFIG_NUC900_WATCHDOG) += nuc900_wdt.o
48obj-$(CONFIG_ADX_WATCHDOG) += adx_wdt.o
48 49
49# AVR32 Architecture 50# AVR32 Architecture
50obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o 51obj-$(CONFIG_AT32AP700X_WDT) += at32ap700x_wdt.o
diff --git a/drivers/watchdog/adx_wdt.c b/drivers/watchdog/adx_wdt.c
new file mode 100644
index 000000000000..77afb0acc500
--- /dev/null
+++ b/drivers/watchdog/adx_wdt.c
@@ -0,0 +1,354 @@
1/*
2 * Copyright (C) 2008-2009 Avionic Design GmbH
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9#include <linux/fs.h>
10#include <linux/io.h>
11#include <linux/miscdevice.h>
12#include <linux/module.h>
13#include <linux/platform_device.h>
14#include <linux/types.h>
15#include <linux/uaccess.h>
16#include <linux/watchdog.h>
17
18#define WATCHDOG_NAME "adx-wdt"
19
20/* register offsets */
21#define ADX_WDT_CONTROL 0x00
22#define ADX_WDT_CONTROL_ENABLE (1 << 0)
23#define ADX_WDT_CONTROL_nRESET (1 << 1)
24#define ADX_WDT_TIMEOUT 0x08
25
26static struct platform_device *adx_wdt_dev;
27static unsigned long driver_open;
28
29#define WDT_STATE_STOP 0
30#define WDT_STATE_START 1
31
32struct adx_wdt {
33 void __iomem *base;
34 unsigned long timeout;
35 unsigned int state;
36 unsigned int wake;
37 spinlock_t lock;
38};
39
40static struct watchdog_info adx_wdt_info = {
41 .identity = "Avionic Design Xanthos Watchdog",
42 .options = WDIOF_SETTIMEOUT | WDIOF_KEEPALIVEPING,
43};
44
45static void adx_wdt_start_locked(struct adx_wdt *wdt)
46{
47 u32 ctrl;
48
49 ctrl = readl(wdt->base + ADX_WDT_CONTROL);
50 ctrl |= ADX_WDT_CONTROL_ENABLE;
51 writel(ctrl, wdt->base + ADX_WDT_CONTROL);
52 wdt->state = WDT_STATE_START;
53}
54
55static void adx_wdt_start(struct adx_wdt *wdt)
56{
57 unsigned long flags;
58
59 spin_lock_irqsave(&wdt->lock, flags);
60 adx_wdt_start_locked(wdt);
61 spin_unlock_irqrestore(&wdt->lock, flags);
62}
63
64static void adx_wdt_stop_locked(struct adx_wdt *wdt)
65{
66 u32 ctrl;
67
68 ctrl = readl(wdt->base + ADX_WDT_CONTROL);
69 ctrl &= ~ADX_WDT_CONTROL_ENABLE;
70 writel(ctrl, wdt->base + ADX_WDT_CONTROL);
71 wdt->state = WDT_STATE_STOP;
72}
73
74static void adx_wdt_stop(struct adx_wdt *wdt)
75{
76 unsigned long flags;
77
78 spin_lock_irqsave(&wdt->lock, flags);
79 adx_wdt_stop_locked(wdt);
80 spin_unlock_irqrestore(&wdt->lock, flags);
81}
82
83static void adx_wdt_set_timeout(struct adx_wdt *wdt, unsigned long seconds)
84{
85 unsigned long timeout = seconds * 1000;
86 unsigned long flags;
87 unsigned int state;
88
89 spin_lock_irqsave(&wdt->lock, flags);
90 state = wdt->state;
91 adx_wdt_stop_locked(wdt);
92 writel(timeout, wdt->base + ADX_WDT_TIMEOUT);
93
94 if (state == WDT_STATE_START)
95 adx_wdt_start_locked(wdt);
96
97 wdt->timeout = timeout;
98 spin_unlock_irqrestore(&wdt->lock, flags);
99}
100
101static void adx_wdt_get_timeout(struct adx_wdt *wdt, unsigned long *seconds)
102{
103 *seconds = wdt->timeout / 1000;
104}
105
106static void adx_wdt_keepalive(struct adx_wdt *wdt)
107{
108 unsigned long flags;
109
110 spin_lock_irqsave(&wdt->lock, flags);
111 writel(wdt->timeout, wdt->base + ADX_WDT_TIMEOUT);
112 spin_unlock_irqrestore(&wdt->lock, flags);
113}
114
115static int adx_wdt_open(struct inode *inode, struct file *file)
116{
117 struct adx_wdt *wdt = platform_get_drvdata(adx_wdt_dev);
118
119 if (test_and_set_bit(0, &driver_open))
120 return -EBUSY;
121
122 file->private_data = wdt;
123 adx_wdt_set_timeout(wdt, 30);
124 adx_wdt_start(wdt);
125
126 return nonseekable_open(inode, file);
127}
128
129static int adx_wdt_release(struct inode *inode, struct file *file)
130{
131 struct adx_wdt *wdt = file->private_data;
132
133 adx_wdt_stop(wdt);
134 clear_bit(0, &driver_open);
135
136 return 0;
137}
138
139static long adx_wdt_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
140{
141 struct adx_wdt *wdt = file->private_data;
142 void __user *argp = (void __user *)arg;
143 unsigned long __user *p = argp;
144 unsigned long seconds = 0;
145 unsigned int options;
146 long ret = -EINVAL;
147
148 switch (cmd) {
149 case WDIOC_GETSUPPORT:
150 if (copy_to_user(argp, &adx_wdt_info, sizeof(adx_wdt_info)))
151 return -EFAULT;
152 else
153 return 0;
154
155 case WDIOC_GETSTATUS:
156 case WDIOC_GETBOOTSTATUS:
157 return put_user(0, p);
158
159 case WDIOC_KEEPALIVE:
160 adx_wdt_keepalive(wdt);
161 return 0;
162
163 case WDIOC_SETTIMEOUT:
164 if (get_user(seconds, p))
165 return -EFAULT;
166
167 adx_wdt_set_timeout(wdt, seconds);
168
169 /* fallthrough */
170 case WDIOC_GETTIMEOUT:
171 adx_wdt_get_timeout(wdt, &seconds);
172 return put_user(seconds, p);
173
174 case WDIOC_SETOPTIONS:
175 if (copy_from_user(&options, argp, sizeof(options)))
176 return -EFAULT;
177
178 if (options & WDIOS_DISABLECARD) {
179 adx_wdt_stop(wdt);
180 ret = 0;
181 }
182
183 if (options & WDIOS_ENABLECARD) {
184 adx_wdt_start(wdt);
185 ret = 0;
186 }
187
188 return ret;
189
190 default:
191 break;
192 }
193
194 return -ENOTTY;
195}
196
197static ssize_t adx_wdt_write(struct file *file, const char __user *data,
198 size_t len, loff_t *ppos)
199{
200 struct adx_wdt *wdt = file->private_data;
201
202 if (len)
203 adx_wdt_keepalive(wdt);
204
205 return len;
206}
207
208static const struct file_operations adx_wdt_fops = {
209 .owner = THIS_MODULE,
210 .llseek = no_llseek,
211 .open = adx_wdt_open,
212 .release = adx_wdt_release,
213 .unlocked_ioctl = adx_wdt_ioctl,
214 .write = adx_wdt_write,
215};
216
217static struct miscdevice adx_wdt_miscdev = {
218 .minor = WATCHDOG_MINOR,
219 .name = "watchdog",
220 .fops = &adx_wdt_fops,
221};
222
223static int __devinit adx_wdt_probe(struct platform_device *pdev)
224{
225 struct resource *res;
226 struct adx_wdt *wdt;
227 int ret = 0;
228 u32 ctrl;
229
230 wdt = devm_kzalloc(&pdev->dev, sizeof(*wdt), GFP_KERNEL);
231 if (!wdt) {
232 dev_err(&pdev->dev, "cannot allocate WDT structure\n");
233 return -ENOMEM;
234 }
235
236 spin_lock_init(&wdt->lock);
237
238 res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
239 if (!res) {
240 dev_err(&pdev->dev, "cannot obtain I/O memory region\n");
241 return -ENXIO;
242 }
243
244 res = devm_request_mem_region(&pdev->dev, res->start,
245 res->end - res->start + 1, res->name);
246 if (!res) {
247 dev_err(&pdev->dev, "cannot request I/O memory region\n");
248 return -ENXIO;
249 }
250
251 wdt->base = devm_ioremap_nocache(&pdev->dev, res->start,
252 res->end - res->start + 1);
253 if (!wdt->base) {
254 dev_err(&pdev->dev, "cannot remap I/O memory region\n");
255 return -ENXIO;
256 }
257
258 /* disable watchdog and reboot on timeout */
259 ctrl = readl(wdt->base + ADX_WDT_CONTROL);
260 ctrl &= ~ADX_WDT_CONTROL_ENABLE;
261 ctrl &= ~ADX_WDT_CONTROL_nRESET;
262 writel(ctrl, wdt->base + ADX_WDT_CONTROL);
263
264 platform_set_drvdata(pdev, wdt);
265 adx_wdt_dev = pdev;
266
267 ret = misc_register(&adx_wdt_miscdev);
268 if (ret) {
269 dev_err(&pdev->dev, "cannot register miscdev on minor %d "
270 "(err=%d)\n", WATCHDOG_MINOR, ret);
271 return ret;
272 }
273
274 return 0;
275}
276
277static int __devexit adx_wdt_remove(struct platform_device *pdev)
278{
279 struct adx_wdt *wdt = platform_get_drvdata(pdev);
280
281 misc_deregister(&adx_wdt_miscdev);
282 adx_wdt_stop(wdt);
283 platform_set_drvdata(pdev, NULL);
284
285 return 0;
286}
287
288static void adx_wdt_shutdown(struct platform_device *pdev)
289{
290 struct adx_wdt *wdt = platform_get_drvdata(pdev);
291 adx_wdt_stop(wdt);
292}
293
294#ifdef CONFIG_PM
295static int adx_wdt_suspend(struct device *dev)
296{
297 struct platform_device *pdev = to_platform_device(dev);
298 struct adx_wdt *wdt = platform_get_drvdata(pdev);
299
300 wdt->wake = (wdt->state == WDT_STATE_START) ? 1 : 0;
301 adx_wdt_stop(wdt);
302
303 return 0;
304}
305
306static int adx_wdt_resume(struct device *dev)
307{
308 struct platform_device *pdev = to_platform_device(dev);
309 struct adx_wdt *wdt = platform_get_drvdata(pdev);
310
311 if (wdt->wake)
312 adx_wdt_start(wdt);
313
314 return 0;
315}
316
317static struct dev_pm_ops adx_wdt_pm_ops = {
318 .suspend = adx_wdt_suspend,
319 .resume = adx_wdt_resume,
320};
321
322# define ADX_WDT_PM_OPS (&adx_wdt_pm_ops)
323#else
324# define ADX_WDT_PM_OPS NULL
325#endif
326
327static struct platform_driver adx_wdt_driver = {
328 .probe = adx_wdt_probe,
329 .remove = __devexit_p(adx_wdt_remove),
330 .shutdown = adx_wdt_shutdown,
331 .driver = {
332 .name = WATCHDOG_NAME,
333 .owner = THIS_MODULE,
334 .pm = ADX_WDT_PM_OPS,
335 },
336};
337
338static int __init adx_wdt_init(void)
339{
340 return platform_driver_register(&adx_wdt_driver);
341}
342
343static void __exit adx_wdt_exit(void)
344{
345 platform_driver_unregister(&adx_wdt_driver);
346}
347
348module_init(adx_wdt_init);
349module_exit(adx_wdt_exit);
350
351MODULE_DESCRIPTION("Avionic Design Xanthos Watchdog Driver");
352MODULE_LICENSE("GPL v2");
353MODULE_AUTHOR("Thierry Reding <thierry.reding@avionic-design.de>");
354MODULE_ALIAS_MISCDEV(WATCHDOG_MINOR);
diff --git a/drivers/xen/xenfs/xenbus.c b/drivers/xen/xenfs/xenbus.c
index a9592d981b10..6c4269b836b7 100644
--- a/drivers/xen/xenfs/xenbus.c
+++ b/drivers/xen/xenfs/xenbus.c
@@ -43,6 +43,7 @@
43#include <linux/fs.h> 43#include <linux/fs.h>
44#include <linux/poll.h> 44#include <linux/poll.h>
45#include <linux/mutex.h> 45#include <linux/mutex.h>
46#include <linux/sched.h>
46#include <linux/spinlock.h> 47#include <linux/spinlock.h>
47#include <linux/mount.h> 48#include <linux/mount.h>
48#include <linux/pagemap.h> 49#include <linux/pagemap.h>